630 files changed, 10199 insertions, 4934 deletions
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com> Juha Yrjola <juha.yrjola@solidboot.com> Kay Sievers <kay.sievers@vrfy.org> Kenneth W Chen <kenneth.w.chen@intel.com> +Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> Koushik <raghavendra.koushik@neterion.com> Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> Leonid I Ananiev <leonid.i.ananiev@intel.com> diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events index 20979f8b3edb..505f080d20a1 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events @@ -52,12 +52,18 @@ Description: Per-pmu performance monitoring events specific to the running syste event=0x2abc event=0x423,inv,cmask=0x3 domain=0x1,offset=0x8,starting_index=0xffff + domain=0x1,offset=0x8,core=? Each of the assignments indicates a value to be assigned to a particular set of bits (as defined by the format file corresponding to the <term>) in the perf_event structure passed to the perf_open syscall. + In the case of the last example, a value replacing "?" would + need to be provided by the user selecting the particular event. + This is referred to as "event parameterization". Event + parameters have the format 'param=?'. + What: /sys/bus/event_source/devices/<pmu>/events/<event>.unit Date: 2014/02/24 Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt index ed186a902d31..b57c0c1cdac6 100644 --- a/Documentation/RCU/stallwarn.txt +++ b/Documentation/RCU/stallwarn.txt @@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT 21 seconds. This configuration parameter may be changed at runtime via the - /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however + /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however this parameter is checked only at the beginning of a cycle. So if you are 10 seconds into a 40-second stall, setting this sysfs parameter to (say) five will shorten the timeout for the @@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and "D" indicates that dyntick-idle processing is enabled ("." is printed otherwise, for example, if disabled via the "nohz=" kernel boot parameter). +If the relevant grace-period kthread has been unable to run prior to +the stall warning, the following additional line is printed: + + rcu_preempt kthread starved for 2023 jiffies! + +Starving the grace-period kthreads of CPU time can of course result in +RCU CPU stall warnings even when all CPUs and tasks have passed through +the required quiescent states. + Multiple Warnings From One Stall @@ -187,6 +196,11 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the behavior, you might need to replace some of the cond_resched() calls with calls to cond_resched_rcu_qs(). +o Anything that prevents RCU's grace-period kthreads from running. + This can result in the "All QSes seen" console-log message. + This message will include information on when the kthread last + ran and how often it should be expected to run. + o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might happen to preempt a low-priority task in the middle of an RCU read-side critical section. 
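Digest note: a concrete sketch of the cond_resched_rcu_qs() fix described in the !CONFIG_PREEMPT item a few entries up (the stall-cause list resumes below). do_work() is a hypothetical stand-in for the real loop body:

#include <linux/kthread.h>
#include <linux/rcupdate.h>

/*
 * Sketch only: a CPU-bound kthread loop of the kind the stall-warning
 * text describes.  do_work() is a hypothetical placeholder.
 */
static int worker_fn(void *unused)
{
	while (!kthread_should_stop()) {
		do_work();
		/*
		 * Plain cond_resched() may not report a quiescent state
		 * to RCU; this variant does, heading off the stall
		 * warnings discussed above.
		 */
		cond_resched_rcu_qs();
	}
	return 0;
}
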
This is especially damaging if diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt index b63b9bb3bc0c..08651da15448 100644 --- a/Documentation/RCU/trace.txt +++ b/Documentation/RCU/trace.txt @@ -56,14 +56,14 @@ rcuboost: The output of "cat rcu/rcu_preempt/rcudata" looks as follows: - 0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716 - 1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982 - 2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458 - 3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622 - 4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521 - 5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698 - 6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353 - 7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969 + 0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716 + 1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982 + 2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458 + 3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622 + 4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521 + 5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698 + 6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353 + 7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969 This file has one line per CPU, or eight for this 8-CPU system. The fields are as follows: @@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this Kernels compiled with CONFIG_RCU_BOOST=y display the following from /debug/rcu/rcu_preempt/rcudata: - 0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871 - 1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485 - 2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490 - 3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290 - 4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114 - 5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722 - 6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811 - 7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... 
kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042 + 0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871 + 1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485 + 2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490 + 3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290 + 4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114 + 5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722 + 6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811 + 7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042 This is similar to the output discussed above, but contains the following additional fields: diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt index 437e0db3823c..4c26fda3844a 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-st.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt @@ -31,7 +31,7 @@ i2c0: i2c@fed40000 { compatible = "st,comms-ssc4-i2c"; reg = <0xfed40000 0x110>; interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&CLK_S_ICN_REG_0>; + clocks = <&clk_s_a0_ls CLK_ICN_REG>; clock-names = "ssc"; clock-frequency = <400000>; pinctrl-names = "default"; diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt index 9f4e3824e71e..9f41d05be3be 100644 --- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt +++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt @@ -47,6 +47,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O dallas,ds75 Digital Thermometer and Thermostat dlg,da9053 DA9053: flexible system level PMIC with multicore support +dlg,da9063 DA9063: system PMIC for quad-core application processors epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt index 75fdfaf41831..e39f0bc1f55e 100644 --- a/Documentation/devicetree/bindings/mfd/max77686.txt +++ b/Documentation/devicetree/bindings/mfd/max77686.txt @@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow: -BUCKn : 1-4. Use standard regulator bindings for it ('regulator-off-in-suspend'). + LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable + control. 
To turn this feature on this property must be added to the regulator + sub-node: + - maxim,ena-gpios : one GPIO specifier enable control (the gpio + flags are actually ignored and always + ACTIVE_HIGH is used) Example: @@ -65,4 +71,12 @@ Example: regulator-always-on; regulator-boot-on; }; + + buck9_reg { + regulator-compatible = "BUCK9"; + regulator-name = "CAM_ISP_CORE_1.2V"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1200000>; + maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>; + }; } diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt index 240019a82f9a..eb618907c7de 100644 --- a/Documentation/devicetree/bindings/regulator/da9211.txt +++ b/Documentation/devicetree/bindings/regulator/da9211.txt @@ -11,6 +11,7 @@ Required properties: BUCKA and BUCKB. Optional properties: +- enable-gpios: platform gpio for control of BUCKA/BUCKB. - Any optional property defined in regulator.txt Example 1) DA9211 @@ -27,6 +28,7 @@ Example 1) DA9211 regulator-max-microvolt = <1570000>; regulator-min-microamp = <2000000>; regulator-max-microamp = <5000000>; + enable-gpios = <&gpio 27 0>; }; BUCKB { regulator-name = "VBUCKB"; @@ -34,11 +36,12 @@ Example 1) DA9211 regulator-max-microvolt = <1570000>; regulator-min-microamp = <2000000>; regulator-max-microamp = <5000000>; + enable-gpios = <&gpio 17 0>; }; }; }; -Example 2) DA92113 +Example 2) DA9213 pmic: da9213@68 { compatible = "dlg,da9213"; reg = <0x68>; @@ -51,6 +54,7 @@ Example 2) DA92113 regulator-max-microvolt = <1570000>; regulator-min-microamp = <3000000>; regulator-max-microamp = <6000000>; + enable-gpios = <&gpio 27 0>; }; BUCKB { regulator-name = "VBUCKB"; @@ -58,6 +62,7 @@ Example 2) DA92113 regulator-max-microvolt = <1570000>; regulator-min-microamp = <3000000>; regulator-max-microamp = <6000000>; + enable-gpios = <&gpio 17 0>; }; }; }; diff --git a/Documentation/devicetree/bindings/regulator/isl9305.txt b/Documentation/devicetree/bindings/regulator/isl9305.txt index a626fc1bbf0d..d6e7c9ec9413 100644 --- a/Documentation/devicetree/bindings/regulator/isl9305.txt +++ b/Documentation/devicetree/bindings/regulator/isl9305.txt @@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator Required properties: -- compatible: "isl,isl9305" or "isl,isl9305h" +- compatible: "isil,isl9305" or "isil,isl9305h" - reg: I2C slave address, usually 0x68. - regulators: A node that houses a sub-node for each regulator within the device. Each sub-node is identified using the node's name, with valid @@ -19,7 +19,7 @@ Optional properties: Example pmic: isl9305@68 { - compatible = "isl,isl9305"; + compatible = "isil,isl9305"; reg = <0x68>; VINDCD1-supply = <&system_power>; diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt new file mode 100644 index 000000000000..a42b1d6e9863 --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt @@ -0,0 +1,217 @@ +Mediatek MT6397 Regulator Driver + +Required properties: +- compatible: "mediatek,mt6397-regulator" +- mt6397regulator: List of regulators provided by this controller. It is named + according to its regulator type, buck_<name> and ldo_<name>. + The definition for each of these nodes is defined using the standard binding + for regulators at Documentation/devicetree/bindings/regulator/regulator.txt. 
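Digest note: all of the regulator bindings touched above (maxim,ena-gpios on the max77686, enable-gpios on the da9211, the new mt6397 node whose valid regulator names follow below) surface through the same in-kernel consumer API. A minimal consumer sketch under stated assumptions — the "vcc" supply name and the voltage window are invented for illustration, not taken from any of these bindings:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/*
 * Minimal regulator consumer sketch, assuming the DT node for this
 * device carries a hypothetical "vcc-supply" phandle pointing at one
 * of the regulators described above.
 */
static int example_probe(struct platform_device *pdev)
{
	struct regulator *vcc;
	int ret;

	vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	/* Constrain to a window the binding's min/max must allow. */
	ret = regulator_set_voltage(vcc, 1800000, 3300000);
	if (ret)
		return ret;

	return regulator_enable(vcc);
}
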
+ +The valid names for regulators are:: +BUCK: + buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu, + buck_vdrm, buck_vio18 +LDO: + ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch, + ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6, + ldo_vibr + +Example: + pmic { + compatible = "mediatek,mt6397"; + + mt6397regulator: mt6397regulator { + compatible = "mediatek,mt6397-regulator"; + + mt6397_vpca15_reg: buck_vpca15 { + regulator-compatible = "buck_vpca15"; + regulator-name = "vpca15"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <200>; + }; + + mt6397_vpca7_reg: buck_vpca7 { + regulator-compatible = "buck_vpca7"; + regulator-name = "vpca7"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vsramca15_reg: buck_vsramca15 { + regulator-compatible = "buck_vsramca15"; + regulator-name = "vsramca15"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + + }; + + mt6397_vsramca7_reg: buck_vsramca7 { + regulator-compatible = "buck_vsramca7"; + regulator-name = "vsramca7"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + + }; + + mt6397_vcore_reg: buck_vcore { + regulator-compatible = "buck_vcore"; + regulator-name = "vcore"; + regulator-min-microvolt = < 850000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vgpu_reg: buck_vgpu { + regulator-compatible = "buck_vgpu"; + regulator-name = "vgpu"; + regulator-min-microvolt = < 700000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <115>; + }; + + mt6397_vdrm_reg: buck_vdrm { + regulator-compatible = "buck_vdrm"; + regulator-name = "vdrm"; + regulator-min-microvolt = < 800000>; + regulator-max-microvolt = <1400000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <500>; + }; + + mt6397_vio18_reg: buck_vio18 { + regulator-compatible = "buck_vio18"; + regulator-name = "vio18"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <2120000>; + regulator-ramp-delay = <12500>; + regulator-enable-ramp-delay = <500>; + }; + + mt6397_vtcxo_reg: ldo_vtcxo { + regulator-compatible = "ldo_vtcxo"; + regulator-name = "vtcxo"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <90>; + }; + + mt6397_va28_reg: ldo_va28 { + regulator-compatible = "ldo_va28"; + regulator-name = "va28"; + /* fixed output 2.8 V */ + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vcama_reg: ldo_vcama { + regulator-compatible = "ldo_vcama"; + regulator-name = "vcama"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <2800000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vio28_reg: ldo_vio28 { + regulator-compatible = "ldo_vio28"; + regulator-name = "vio28"; + /* fixed output 2.8 V */ + regulator-enable-ramp-delay = <240>; + }; + + mt6397_usb_reg: ldo_vusb { + regulator-compatible = "ldo_vusb"; + regulator-name = "vusb"; + /* fixed output 3.3 V */ + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vmc_reg: ldo_vmc { + regulator-compatible = 
"ldo_vmc"; + regulator-name = "vmc"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vmch_reg: ldo_vmch { + regulator-compatible = "ldo_vmch"; + regulator-name = "vmch"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vemc_3v3_reg: ldo_vemc3v3 { + regulator-compatible = "ldo_vemc3v3"; + regulator-name = "vemc_3v3"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp1_reg: ldo_vgp1 { + regulator-compatible = "ldo_vgp1"; + regulator-name = "vcamd"; + regulator-min-microvolt = <1220000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <240>; + }; + + mt6397_vgp2_reg: ldo_vgp2 { + egulator-compatible = "ldo_vgp2"; + regulator-name = "vcamio"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp3_reg: ldo_vgp3 { + regulator-compatible = "ldo_vgp3"; + regulator-name = "vcamaf"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp4_reg: ldo_vgp4 { + regulator-compatible = "ldo_vgp4"; + regulator-name = "vgp4"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp5_reg: ldo_vgp5 { + regulator-compatible = "ldo_vgp5"; + regulator-name = "vgp5"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3000000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vgp6_reg: ldo_vgp6 { + regulator-compatible = "ldo_vgp6"; + regulator-name = "vgp6"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + + mt6397_vibr_reg: ldo_vibr { + regulator-compatible = "ldo_vibr"; + regulator-name = "vibr"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-enable-ramp-delay = <218>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt index 34ef5d16d0f1..9b40db88f637 100644 --- a/Documentation/devicetree/bindings/regulator/pfuze100.txt +++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt @@ -1,7 +1,7 @@ PFUZE100 family of regulators Required properties: -- compatible: "fsl,pfuze100" or "fsl,pfuze200" +- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000" - reg: I2C slave address Required child node: @@ -14,6 +14,8 @@ Required child node: sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6 --PFUZE200 sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6 + --PFUZE3000 + sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4 Each regulator is defined using the standard binding for regulators. 
@@ -205,3 +207,93 @@ Example 2: PFUZE200 }; }; }; + +Example 3: PFUZE3000 + + pmic: pfuze3000@08 { + compatible = "fsl,pfuze3000"; + reg = <0x08>; + + regulators { + sw1a_reg: sw1a { + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1475000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <6250>; + }; + /* use sw1c_reg to align with pfuze100/pfuze200 */ + sw1c_reg: sw1b { + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1475000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <6250>; + }; + + sw2_reg: sw2 { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + sw3a_reg: sw3 { + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <1650000>; + regulator-boot-on; + regulator-always-on; + }; + + swbst_reg: swbst { + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5150000>; + }; + + snvs_reg: vsnvs { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3000000>; + regulator-boot-on; + regulator-always-on; + }; + + vref_reg: vrefddr { + regulator-boot-on; + regulator-always-on; + }; + + vgen1_reg: vldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen2_reg: vldo2 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1550000>; + }; + + vgen3_reg: vccsd { + regulator-min-microvolt = <2850000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen4_reg: v33 { + regulator-min-microvolt = <2850000>; + regulator-max-microvolt = <3300000>; + }; + + vgen5_reg: vldo3 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen6_reg: vldo4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index d11c3721e7cd..4c388bb2f0a2 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt @@ -30,6 +30,22 @@ Optional properties: specifiers, one for transmission, and one for reception. - dma-names : Must contain a list of two DMA names, "tx" and "rx". +- renesas,dtdl : delay sync signal (setup) in transmit mode. + Must contain one of the following values: + 0 (no bit delay) + 50 (0.5-clock-cycle delay) + 100 (1-clock-cycle delay) + 150 (1.5-clock-cycle delay) + 200 (2-clock-cycle delay) + +- renesas,syncdl : delay sync signal (hold) in transmit mode. 
+ Must contain one of the following values: + 0 (no bit delay) + 50 (0.5-clock-cycle delay) + 100 (1-clock-cycle delay) + 150 (1.5-clock-cycle delay) + 200 (2-clock-cycle delay) + 300 (3-clock-cycle delay) Optional properties, deprecated for soctype-specific bindings: - renesas,tx-fifo-size : Overrides the default tx fifo size given in words diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt new file mode 100644 index 000000000000..4c7adb8f777c --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-sirf.txt @@ -0,0 +1,41 @@ +* CSR SiRFprimaII Serial Peripheral Interface + +Required properties: +- compatible : Should be "sirf,prima2-spi" +- reg : Offset and length of the register set for the device +- interrupts : Should contain SPI interrupt +- resets: phandle to the reset controller asserting this device in + reset + See ../reset/reset.txt for details. +- dmas : Must contain an entry for each entry in clock-names. + See ../dma/dma.txt for details. +- dma-names : Must include the following entries: + - rx + - tx +- clocks : Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. + +- #address-cells: Number of cells required to define a chip select + address on the SPI bus. Should be set to 1. +- #size-cells: Should be zero. + +Optional properties: +- spi-max-frequency: Specifies maximum SPI clock frequency, + Units - Hz. Definition as per + Documentation/devicetree/bindings/spi/spi-bus.txt +- cs-gpios: should specify GPIOs used for chipselects. + +Example: + +spi0: spi@b00d0000 { + compatible = "sirf,prima2-spi"; + reg = <0xb00d0000 0x10000>; + interrupts = <15>; + dmas = <&dmac1 9>, + <&dmac1 4>; + dma-names = "rx", "tx"; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&clks 19>; + resets = <&rstc 26>; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt new file mode 100644 index 000000000000..fe54959ec957 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt @@ -0,0 +1,40 @@ +STMicroelectronics SSC (SPI) Controller +--------------------------------------- + +Required properties: +- compatible : "st,comms-ssc4-spi" +- reg : Offset and length of the device's register set +- interrupts : The interrupt specifier +- clock-names : Must contain "ssc" +- clocks : Must contain an entry for each name in clock-names + See ../clk/* +- pinctrl-names : Uses "default", can use "sleep" if provided + See ../pinctrl/pinctrl-binding.txt + +Optional properties: +- cs-gpios : List of GPIO chip selects + See ../spi/spi-bus.txt + +Child nodes represent devices on the SPI bus + See ../spi/spi-bus.txt + +Example: + spi@9840000 { + compatible = "st,comms-ssc4-spi"; + reg = <0x9840000 0x110>; + interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_s_c0_flexgen CLK_EXT2F_A9>; + clock-names = "ssc"; + pinctrl-0 = <&pinctrl_spi0_default>; + pinctrl-names = "default"; + cs-gpios = <&pio17 5 0>; + #address-cells = <1>; + #size-cells = <0>; + + st95hf@0{ + compatible = "st,st95hf"; + reg = <0>; + spi-max-frequency = <1000000>; + interrupts = <2 IRQ_TYPE_EDGE_FALLING>; + }; + }; diff --git a/Documentation/futex-requeue-pi.txt b/Documentation/futex-requeue-pi.txt index 31b16610c416..77b36f59d16b 100644 --- a/Documentation/futex-requeue-pi.txt +++ b/Documentation/futex-requeue-pi.txt @@ -98,7 +98,7 @@ rt_mutex_start_proxy_lock() and rt_mutex_finish_proxy_lock(), which allow the requeue code to 
acquire an uncontended rt_mutex on behalf of the waiter and to enqueue the waiter on a contended rt_mutex. Two new system calls provide the kernel<->user interface to -requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_REQUEUE_CMP_PI. +requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI. FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait() and pthread_cond_timedwait()) to block on the initial futex and wait @@ -107,7 +107,7 @@ result of a high-speed collision between futex_wait() and futex_lock_pi(), with some extra logic to check for the additional wake-up scenarios. -FUTEX_REQUEUE_CMP_PI is called by the waker +FUTEX_CMP_REQUEUE_PI is called by the waker (pthread_cond_broadcast() and pthread_cond_signal()) to requeue and possibly wake the waiting tasks. Internally, this system call is still handled by futex_requeue (by passing requeue_pi=1). Before @@ -120,12 +120,12 @@ task as a waiter on the underlying rt_mutex. It is possible that the lock can be acquired at this stage as well, if so, the next waiter is woken to finish the acquisition of the lock. -FUTEX_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but +FUTEX_CMP_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but their sum is all that really matters. futex_requeue() will wake or requeue up to nr_wake + nr_requeue tasks. It will wake only as many tasks as it can acquire the lock for, which in the majority of cases should be 0 as good programming practice dictates that the caller of either pthread_cond_broadcast() or pthread_cond_signal() acquire the -mutex prior to making the call. FUTEX_REQUEUE_PI requires that +mutex prior to making the call. FUTEX_CMP_REQUEUE_PI requires that nr_wake=1. nr_requeue should be INT_MAX for broadcast and 0 for signal. diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 4223c2d3b508..cfd31d94c872 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -26,6 +26,12 @@ Supported chips: Datasheet: Publicly available at the Texas Instruments website http://www.ti.com/ + * Texas Instruments INA231 + Prefix: 'ina231' + Addresses: I2C 0x40 - 0x4f + Datasheet: Publicly available at the Texas Instruments website + http://www.ti.com/ + Author: Lothar Felten <l-felten@ti.com> Description @@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage. The INA226 is a current shunt and power monitor with an I2C interface. The INA226 monitors both a shunt voltage drop and bus supply voltage. -The INA230 is a high or low side current shunt and power monitor with an I2C -interface. The INA230 monitors both a shunt voltage drop and bus supply voltage. +INA230 and INA231 are high or low side current shunt and power monitors +with an I2C interface. The chips monitor both a shunt voltage drop and +bus supply voltage. -The shunt value in micro-ohms can be set via platform data or device tree. -Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings +The shunt value in micro-ohms can be set via platform data or device tree at +compile-time or via the shunt_resistor attribute in sysfs at run-time. Please +refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings if the device tree is used. + +Additionally ina226 supports update_interval attribute as described in +Documentation/hwmon/sysfs-interface. Internally the interval is the sum of +bus and shunt voltage conversion times multiplied by the averaging rate. We +don't touch the conversion times and only modify the number of averages. 
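Digest note: the new update_interval attribute is plain hwmon sysfs, so it can be exercised from userspace. A small sketch follows; the hwmon0 path is hypothetical (match the device via its "name" attribute on a real system), and the 2 ms / 2253 ms bounds it must respect are quoted just below:

#include <stdio.h>

/* Illustrative only: request a ~500 ms ina226 update interval. */
int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/update_interval";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* The driver may round this to what the chip can program. */
	fprintf(f, "500\n");
	fclose(f);
	return 0;
}
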
The +lower limit of the update_interval is 2 ms, the upper limit is 2253 ms. +The actual programmed interval may vary from the desired value. diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt index 5dbc99c04f6e..5001280e9d82 100644 --- a/Documentation/locking/lockdep-design.txt +++ b/Documentation/locking/lockdep-design.txt @@ -34,7 +34,7 @@ The validator tracks lock-class usage history into 4n + 1 separate state bits: - 'ever held with STATE enabled' - 'ever held as readlock with STATE enabled' -Where STATE can be either one of (kernel/lockdep_states.h) +Where STATE can be either one of (kernel/locking/lockdep_states.h) - hardirq - softirq - reclaim_fs diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 70a09f8a0383..ca2387ef27ab 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed: STORE *(A + 4) = Y; STORE *A = X; STORE {*A, *(A + 4) } = {X, Y}; +And there are anti-guarantees: + + (*) These guarantees do not apply to bitfields, because compilers often + generate code to modify these using non-atomic read-modify-write + sequences. Do not attempt to use bitfields to synchronize parallel + algorithms. + + (*) Even in cases where bitfields are protected by locks, all fields + in a given bitfield must be protected by one lock. If two fields + in a given bitfield are protected by different locks, the compiler's + non-atomic read-modify-write sequences can cause an update to one + field to corrupt the value of an adjacent field. + + (*) These guarantees apply only to properly aligned and sized scalar + variables. "Properly sized" currently means variables that are + the same size as "char", "short", "int" and "long". "Properly + aligned" means the natural alignment, thus no constraints for + "char", two-byte alignment for "short", four-byte alignment for + "int", and either four-byte or eight-byte alignment for "long", + on 32-bit and 64-bit systems, respectively. Note that these + guarantees were introduced into the C11 standard, so beware when + using older pre-C11 compilers (for example, gcc 4.6). The portion + of the standard containing this guarantee is Section 3.14, which + defines "memory location" as follows: + + memory location + either an object of scalar type, or a maximal sequence + of adjacent bit-fields all having nonzero width + + NOTE 1: Two threads of execution can update and access + separate memory locations without interfering with + each other. + + NOTE 2: A bit-field and an adjacent non-bit-field member + are in separate memory locations. The same applies + to two bit-fields, if one is declared inside a nested + structure declaration and the other is not, or if the two + are separated by a zero-length bit-field declaration, + or if they are separated by a non-bit-field member + declaration. It is not safe to concurrently update two + bit-fields in the same structure if all members declared + between them are also bit-fields, no matter what the + sizes of those intervening bit-fields happen to be. + ========================= WHAT ARE MEMORY BARRIERS? @@ -750,7 +794,7 @@ In summary: However, they do -not- guarantee any other sort of ordering: Not prior loads against later loads, nor prior stores against later anything. 
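Digest note: before the summary list resumes below, the bitfield anti-guarantee added above is worth a concrete sketch. Every name here is invented for illustration: because the two one-bit fields share a memory location, the compiler may implement each store as a word-sized read-modify-write, so taking different locks does not make the stores independent.

#include <linux/spinlock.h>

struct shared_flags {
	int a : 1;	/* nominally protected by lock_a */
	int b : 1;	/* nominally protected by lock_b */
};

static struct shared_flags flags;
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

/*
 * BROKEN: the store to flags.a may compile to load-word / modify /
 * store-word and can overwrite a concurrent update to flags.b made
 * under lock_b.  Both fields must be covered by one lock, or be
 * separated by a non-bitfield member.
 */
static void set_a(void)
{
	spin_lock(&lock_a);
	flags.a = 1;
	spin_unlock(&lock_a);
}

static void set_b(void)
{
	spin_lock(&lock_b);
	flags.b = 1;
	spin_unlock(&lock_b);
}
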
If you need these other forms of ordering, - use smb_rmb(), smp_wmb(), or, in the case of prior stores and + use smp_rmb(), smp_wmb(), or, in the case of prior stores and later loads, smp_mb(). (*) If both legs of the "if" statement begin with identical stores diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt index c6af4bac5aa8..54f10478e8e3 100644 --- a/Documentation/networking/netlink_mmap.txt +++ b/Documentation/networking/netlink_mmap.txt @@ -199,16 +199,9 @@ frame header. TX limitations -------------- -Kernel processing usually involves validation of the message received by -user-space, then processing its contents. The kernel must assure that -userspace is not able to modify the message contents after they have been -validated. In order to do so, the message is copied from the ring frame -to an allocated buffer if either of these conditions is false: - -- only a single mapping of the ring exists -- the file descriptor is not shared between processes - -This means that for threaded programs, the kernel will fall back to copying. +As of Jan 2015 the message is always copied from the ring frame to an +allocated buffer due to unresolved security concerns. +See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX."). Example ------- diff --git a/MAINTAINERS b/MAINTAINERS index 2ebb056cbe0a..d66a97dd3a12 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -708,6 +708,16 @@ X: drivers/iio/*/adjd* F: drivers/staging/iio/*/ad* F: staging/iio/trigger/iio-trig-bfin-timer.c +ANDROID DRIVERS +M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +M: Arve Hjønnevåg <arve@android.com> +M: Riley Andrews <riandrews@android.com> +T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git +L: devel@driverdev.osuosl.org +S: Supported +F: drivers/android/ +F: drivers/staging/android/ + AOA (Apple Onboard Audio) ALSA DRIVER M: Johannes Berg <johannes@sipsolutions.net> L: linuxppc-dev@lists.ozlabs.org @@ -4943,6 +4953,16 @@ F: Documentation/input/multi-touch-protocol.txt F: drivers/input/input-mt.c K: \b(ABS|SYN)_MT_ +INTEL ASoC BDW/HSW DRIVERS +M: Jie Yang <yang.jie@linux.intel.com> +L: alsa-devel@alsa-project.org +S: Supported +F: sound/soc/intel/sst-haswell* +F: sound/soc/intel/sst-dsp* +F: sound/soc/intel/sst-firmware.c +F: sound/soc/intel/broadwell.c +F: sound/soc/intel/haswell.c + INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support <intel-linux-scu@intel.com> M: Artur Paszkiewicz <artur.paszkiewicz@intel.com> @@ -9231,7 +9251,6 @@ F: drivers/net/ethernet/dlink/sundance.c SUPERH L: linux-sh@vger.kernel.org -W: http://www.linux-sh.org Q: http://patchwork.kernel.org/project/linux-sh/list/ S: Orphan F: Documentation/sh/ @@ -10166,6 +10185,7 @@ USERSPACE I/O (UIO) M: "Hans J. 
Koch" <hjk@hansjkoch.de> M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git F: Documentation/DocBook/uio-howto.tmpl F: drivers/uio/ F: include/linux/uio*.h @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = NAME = Diseased Newt # *DOCUMENTATION* diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 98838a05ba6d..9d0ac091a52a 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -156,6 +156,8 @@ retry: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 6f7e3a68803a..563cb27e37f5 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -161,6 +161,8 @@ good_area: if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 68be9017593d..132c70e2d2f1 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -263,16 +263,37 @@ restart: adr r0, LC0 * OK... Let's do some funky business here. * If we do have a DTB appended to zImage, and we do have * an ATAG list around, we want the later to be translated - * and folded into the former here. To be on the safe side, - * let's temporarily move the stack away into the malloc - * area. No GOT fixup has occurred yet, but none of the - * code we're about to call uses any global variable. + * and folded into the former here. No GOT fixup has occurred + * yet, but none of the code we're about to call uses any + * global variable. 
*/ - add sp, sp, #0x10000 + + /* Get the initial DTB size */ + ldr r5, [r6, #4] +#ifndef __ARMEB__ + /* convert to little endian */ + eor r1, r5, r5, ror #16 + bic r1, r1, #0x00ff0000 + mov r5, r5, ror #8 + eor r5, r5, r1, lsr #8 +#endif + /* 50% DTB growth should be good enough */ + add r5, r5, r5, lsr #1 + /* preserve 64-bit alignment */ + add r5, r5, #7 + bic r5, r5, #7 + /* clamp to 32KB min and 1MB max */ + cmp r5, #(1 << 15) + movlo r5, #(1 << 15) + cmp r5, #(1 << 20) + movhi r5, #(1 << 20) + /* temporarily relocate the stack past the DTB work space */ + add sp, sp, r5 + stmfd sp!, {r0-r3, ip, lr} mov r0, r8 mov r1, r6 - sub r2, sp, r6 + mov r2, r5 bl atags_to_fdt /* @@ -285,11 +306,11 @@ restart: adr r0, LC0 bic r0, r0, #1 add r0, r0, #0x100 mov r1, r6 - sub r2, sp, r6 + mov r2, r5 bleq atags_to_fdt ldmfd sp!, {r0-r3, ip, lr} - sub sp, sp, #0x10000 + sub sp, sp, r5 #endif mov r8, r6 @ use the appended device tree @@ -306,7 +327,7 @@ restart: adr r0, LC0 subs r1, r5, r1 addhi r9, r9, r1 - /* Get the dtb's size */ + /* Get the current DTB size */ ldr r5, [r6, #4] #ifndef __ARMEB__ /* convert r5 (dtb size) to little endian */ diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index b8168f1f8139..24ff27049ce0 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -368,7 +368,7 @@ }; i2s1: i2s@13960000 { - compatible = "samsung,s5pv210-i2s"; + compatible = "samsung,s3c6410-i2s"; reg = <0x13960000 0x100>; clocks = <&clock CLK_I2S1>; clock-names = "iis"; @@ -378,7 +378,7 @@ }; i2s2: i2s@13970000 { - compatible = "samsung,s5pv210-i2s"; + compatible = "samsung,s3c6410-i2s"; reg = <0x13970000 0x100>; clocks = <&clock CLK_I2S2>; clock-names = "iis"; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index 8c1febd7e3f2..c108bb451337 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -166,12 +166,12 @@ #address-cells = <1>; #size-cells = <0>; - ethphy1: ethernet-phy@0 { - reg = <0>; + ethphy1: ethernet-phy@1 { + reg = <1>; }; - ethphy2: ethernet-phy@1 { - reg = <1>; + ethphy2: ethernet-phy@2 { + reg = <2>; }; }; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 7b4099fcf817..d5c4669224b1 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -17,14 +17,6 @@ aliases { ethernet0 = &emac; - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; - serial4 = &uart4; - serial5 = &uart5; - serial6 = &uart6; - serial7 = &uart7; }; chosen { @@ -39,6 +31,14 @@ <&ahb_gates 44>; status = "disabled"; }; + + framebuffer@1 { + compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; + allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; + clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>, + <&ahb_gates 44>, <&ahb_gates 46>; + status = "disabled"; + }; }; cpus { @@ -438,8 +438,8 @@ reg-names = "phy_ctrl", "pmu1", "pmu2"; clocks = <&usb_clk 8>; clock-names = "usb_phy"; - resets = <&usb_clk 1>, <&usb_clk 2>; - reset-names = "usb1_reset", "usb2_reset"; + resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>; + reset-names = "usb0_reset", "usb1_reset", "usb2_reset"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts index fe3c559ca6a8..bfa742817690 100644 --- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts +++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts @@ -55,6 +55,12 @@ model = "Olimex A10s-Olinuxino 
Micro"; compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s"; + aliases { + serial0 = &uart0; + serial1 = &uart2; + serial2 = &uart3; + }; + soc@01c00000 { emac: ethernet@01c0b000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 1b76667f3182..2e7d8263799d 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -18,10 +18,6 @@ aliases { ethernet0 = &emac; - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; }; chosen { @@ -390,8 +386,8 @@ reg-names = "phy_ctrl", "pmu1"; clocks = <&usb_clk 8>; clock-names = "usb_phy"; - resets = <&usb_clk 1>; - reset-names = "usb1_reset"; + resets = <&usb_clk 0>, <&usb_clk 1>; + reset-names = "usb0_reset", "usb1_reset"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts index eeed1f236ee8..c7be3abd9fcc 100644 --- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts +++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts @@ -53,6 +53,10 @@ model = "HSG H702"; compatible = "hsg,h702", "allwinner,sun5i-a13"; + aliases { + serial0 = &uart1; + }; + soc@01c00000 { mmc0: mmc@01c0f000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts index 916ee8bb826f..3decefb3c37a 100644 --- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts +++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts @@ -54,6 +54,10 @@ model = "Olimex A13-Olinuxino Micro"; compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13"; + aliases { + serial0 = &uart1; + }; + soc@01c00000 { mmc0: mmc@01c0f000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts index e31d291d14cb..b421f7fa197b 100644 --- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts +++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts @@ -55,6 +55,10 @@ model = "Olimex A13-Olinuxino"; compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13"; + aliases { + serial0 = &uart1; + }; + soc@01c00000 { mmc0: mmc@01c0f000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index c35217ea1f64..c556688f8b8b 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -16,11 +16,6 @@ / { interrupt-parent = <&intc>; - aliases { - serial0 = &uart1; - serial1 = &uart3; - }; - cpus { #address-cells = <1>; #size-cells = <0>; @@ -349,8 +344,8 @@ reg-names = "phy_ctrl", "pmu1"; clocks = <&usb_clk 8>; clock-names = "usb_phy"; - resets = <&usb_clk 1>; - reset-names = "usb1_reset"; + resets = <&usb_clk 0>, <&usb_clk 1>; + reset-names = "usb0_reset", "usb1_reset"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index f47156b6572b..1e7e7bcf8307 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -53,12 +53,6 @@ interrupt-parent = <&gic>; aliases { - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; - serial4 = &uart4; - serial5 = &uart5; ethernet0 = &gmac; }; diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts index 1cf1214cc068..bd7b15add697 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts @@ -55,6 +55,12 @@ model = "LeMaker Banana Pi"; compatible = "lemaker,bananapi", "allwinner,sun7i-a20"; + aliases { + serial0 = 
&uart0; + serial1 = &uart3; + serial2 = &uart7; + }; + soc@01c00000 { spi0: spi@01c05000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts index 0e4bfa3b2b85..0bcefcbbb756 100644 --- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts +++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts @@ -19,6 +19,14 @@ model = "Merrii A20 Hummingbird"; compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20"; + aliases { + serial0 = &uart0; + serial1 = &uart2; + serial2 = &uart3; + serial3 = &uart4; + serial4 = &uart5; + }; + soc@01c00000 { mmc0: mmc@01c0f000 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts index 9d669cdf031d..66cc77707198 100644 --- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts +++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts @@ -20,6 +20,9 @@ compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20"; aliases { + serial0 = &uart0; + serial1 = &uart6; + serial2 = &uart7; spi0 = &spi1; spi1 = &spi2; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index e21ce5992d56..89749ce34a84 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -54,14 +54,6 @@ aliases { ethernet0 = &gmac; - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; - serial4 = &uart4; - serial5 = &uart5; - serial6 = &uart6; - serial7 = &uart7; }; chosen { diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts index 7f2117ce6985..32ad80804dbb 100644 --- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts +++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts @@ -55,6 +55,10 @@ model = "Ippo Q8H Dual Core Tablet (v5)"; compatible = "ippo,q8h-v5", "allwinner,sun8i-a23"; + aliases { + serial0 = &r_uart; + }; + chosen { bootargs = "earlyprintk console=ttyS0,115200"; }; diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index 0746cd1024d7..86584fcf5e32 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -52,15 +52,6 @@ / { interrupt-parent = <&gic>; - aliases { - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; - serial4 = &uart4; - serial5 = &r_uart; - }; - cpus { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts index 506948f582ee..11ec71072e81 100644 --- a/arch/arm/boot/dts/sun9i-a80-optimus.dts +++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts @@ -54,6 +54,11 @@ model = "Merrii A80 Optimus Board"; compatible = "merrii,a80-optimus", "allwinner,sun9i-a80"; + aliases { + serial0 = &uart0; + serial1 = &uart4; + }; + chosen { bootargs = "earlyprintk console=ttyS0,115200"; }; diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index 494714f67b57..9ef4438206a9 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi @@ -52,16 +52,6 @@ / { interrupt-parent = <&gic>; - aliases { - serial0 = &uart0; - serial1 = &uart1; - serial2 = &uart2; - serial3 = &uart3; - serial4 = &uart4; - serial5 = &uart5; - serial6 = &r_uart; - }; - cpus { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 66ce17655bb9..7b0152321b20 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -38,6 +38,16 @@ 
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) vcpu->arch.hcr = HCR_GUEST_MASK; } +static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.hcr; +} + +static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) +{ + vcpu->arch.hcr = hcr; +} + static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) { return 1; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 254e0650e48b..04b4ea0b550a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -125,9 +125,6 @@ struct kvm_vcpu_arch { * Anything that is not used directly from assembly code goes * here. */ - /* dcache set/way operation pending */ - int last_pcpu; - cpumask_t require_dcache_flush; /* Don't run the guest on this vcpu */ bool pause; diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 63e0ecc04901..1bca8f8af442 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -44,6 +44,7 @@ #ifndef __ASSEMBLY__ +#include <linux/highmem.h> #include <asm/cacheflush.h> #include <asm/pgalloc.h> @@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; } -static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, - unsigned long size, - bool ipa_uncached) +static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, + unsigned long size, + bool ipa_uncached) { - if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) - kvm_flush_dcache_to_poc((void *)hva, size); - /* * If we are going to insert an instruction page and the icache is * either VIPT or PIPT, there is a potential problem where the host @@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, * * VIVT caches are tagged using both the ASID and the VMID and doesn't * need any kind of flushing (DDI 0406C.b - Page B3-1392). + * + * We need to do this through a kernel mapping (using the + * user-space mapping has proved to be the wrong + * solution). For that, we need to kmap one page at a time, + * and iterate over the range. 
*/ - if (icache_is_pipt()) { - __cpuc_coherent_user_range(hva, hva + size); - } else if (!icache_is_vivt_asid_tagged()) { + + bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; + + VM_BUG_ON(size & PAGE_MASK); + + if (!need_flush && !icache_is_pipt()) + goto vipt_cache; + + while (size) { + void *va = kmap_atomic_pfn(pfn); + + if (need_flush) + kvm_flush_dcache_to_poc(va, PAGE_SIZE); + + if (icache_is_pipt()) + __cpuc_coherent_user_range((unsigned long)va, + (unsigned long)va + PAGE_SIZE); + + size -= PAGE_SIZE; + pfn++; + + kunmap_atomic(va); + } + +vipt_cache: + if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { /* any kind of VIPT cache */ __flush_icache_all(); } } +static inline void __kvm_flush_dcache_pte(pte_t pte) +{ + void *va = kmap_atomic(pte_page(pte)); + + kvm_flush_dcache_to_poc(va, PAGE_SIZE); + + kunmap_atomic(va); +} + +static inline void __kvm_flush_dcache_pmd(pmd_t pmd) +{ + unsigned long size = PMD_SIZE; + pfn_t pfn = pmd_pfn(pmd); + + while (size) { + void *va = kmap_atomic_pfn(pfn); + + kvm_flush_dcache_to_poc(va, PAGE_SIZE); + + pfn++; + size -= PAGE_SIZE; + + kunmap_atomic(va); + } +} + +static inline void __kvm_flush_dcache_pud(pud_t pud) +{ +} + #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) -void stage2_flush_vm(struct kvm *kvm); +void kvm_set_way_flush(struct kvm_vcpu *vcpu); +void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index 2260f1855820..8944f4991c3c 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -22,10 +22,12 @@ __invalid_entry: v7m_exception_entry +#ifdef CONFIG_PRINTK adr r0, strerr mrs r1, ipsr mov r2, lr bl printk +#endif mov r0, sp bl show_regs 1: b 1b diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index 466bd299b1a8..3afee5f40f4f 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig @@ -23,6 +23,7 @@ config KVM select HAVE_KVM_CPU_RELAX_INTERCEPT select KVM_MMIO select KVM_ARM_HOST + select SRCU depends on ARM_VIRT_EXT && ARM_LPAE ---help--- Support hosting virtualized guest machines. You will also diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 2d6d91001062..0b0d58a905c4 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu->cpu = cpu; vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); - /* - * Check whether this vcpu requires the cache to be flushed on - * this physical CPU. This is a consequence of doing dcache - * operations by set/way on this vcpu. We do it here to be in - * a non-preemptible section. - */ - if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) - flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ - kvm_arm_set_running_vcpu(vcpu); } @@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; - vcpu->arch.last_pcpu = smp_processor_id(); kvm_guest_exit(); trace_kvm_exit(*vcpu_pc(vcpu)); /* diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 7928dbdf2102..f3d88dc388bc 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, return true; } -/* See note at ARM ARM B1.14.4 */ +/* + * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 
+ */ static bool access_dcsw(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { - unsigned long val; - int cpu; - if (!p->is_write) return read_from_write_only(vcpu, p); - cpu = get_cpu(); - - cpumask_setall(&vcpu->arch.require_dcache_flush); - cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); - - /* If we were already preempted, take the long way around */ - if (cpu != vcpu->arch.last_pcpu) { - flush_cache_all(); - goto done; - } - - val = *vcpu_reg(vcpu, p->Rt1); - - switch (p->CRm) { - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ - case 14: /* DCCISW */ - asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); - break; - - case 10: /* DCCSW */ - asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); - break; - } - -done: - put_cpu(); - + kvm_set_way_flush(vcpu); return true; } /* * Generic accessor for VM registers. Only called as long as HCR_TVM - * is set. + * is set. If the guest enables the MMU, we stop trapping the VM + * sys_regs and leave it in complete control of the caches. + * + * Used by the cpu-specific code. */ -static bool access_vm_reg(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) +bool access_vm_reg(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) { + bool was_enabled = vcpu_has_cache_enabled(vcpu); + BUG_ON(!p->is_write); vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); if (p->is_64bit) vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); - return true; -} - -/* - * SCTLR accessor. Only called as long as HCR_TVM is set. If the - * guest enables the MMU, we stop trapping the VM sys_regs and leave - * it in complete control of the caches. - * - * Used by the cpu-specific code. - */ -bool access_sctlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - access_vm_reg(vcpu, p, r); - - if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ - vcpu->arch.hcr &= ~HCR_TVM; - stage2_flush_vm(vcpu->kvm); - } - + kvm_toggle_cache(vcpu, was_enabled); return true; } diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 1a44bbe39643..88d24a3a9778 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, #define is64 .is_64 = true #define is32 .is_64 = false -bool access_sctlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r); +bool access_vm_reg(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r); #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index e6f4ae48bda9..a7136757d373 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c @@ -34,7 +34,7 @@ static const struct coproc_reg a15_regs[] = { /* SCTLR: swapped by interrupt.S. */ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - access_sctlr, reset_val, c1_SCTLR, 0x00C50078 }, + access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 }, }; static struct kvm_coproc_target_table a15_target_table = { diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c index 17fc7cd479d3..b19e46d1b2c0 100644 --- a/arch/arm/kvm/coproc_a7.c +++ b/arch/arm/kvm/coproc_a7.c @@ -37,7 +37,7 @@ static const struct coproc_reg a7_regs[] = { /* SCTLR: swapped by interrupt.S. 
*/ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - access_sctlr, reset_val, c1_SCTLR, 0x00C50878 }, + access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 }, }; static struct kvm_coproc_target_table a7_target_table = { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 1dc9778a00af..136662547ca6 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); } +/* + * D-Cache management functions. They take the page table entries by + * value, as they are flushing the cache using the kernel mapping (or + * kmap on 32bit). + */ +static void kvm_flush_dcache_pte(pte_t pte) +{ + __kvm_flush_dcache_pte(pte); +} + +static void kvm_flush_dcache_pmd(pmd_t pmd) +{ + __kvm_flush_dcache_pmd(pmd); +} + +static void kvm_flush_dcache_pud(pud_t pud) +{ + __kvm_flush_dcache_pud(pud); +} + static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min, int max) { @@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) put_page(virt_to_page(pmd)); } +/* + * Unmapping vs dcache management: + * + * If a guest maps certain memory pages as uncached, all writes will + * bypass the data cache and go directly to RAM. However, the CPUs + * can still speculate reads (not writes) and fill cache lines with + * data. + * + * Those cache lines will be *clean* cache lines though, so a + * clean+invalidate operation is equivalent to an invalidate + * operation, because no cache lines are marked dirty. + * + * Those clean cache lines could be filled prior to an uncached write + * by the guest, and the cache coherent IO subsystem would therefore + * end up writing old data to disk. + * + * This is why right after unmapping a page/section and invalidating + * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure + * the IO subsystem will never hit in the cache. 
+ */ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr, phys_addr_t end) { @@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, start_pte = pte = pte_offset_kernel(pmd, addr); do { if (!pte_none(*pte)) { + pte_t old_pte = *pte; + kvm_set_pte(pte, __pte(0)); - put_page(virt_to_page(pte)); kvm_tlb_flush_vmid_ipa(kvm, addr); + + /* No need to invalidate the cache for device mappings */ + if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) + kvm_flush_dcache_pte(old_pte); + + put_page(virt_to_page(pte)); } } while (pte++, addr += PAGE_SIZE, addr != end); @@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud, next = kvm_pmd_addr_end(addr, end); if (!pmd_none(*pmd)) { if (kvm_pmd_huge(*pmd)) { + pmd_t old_pmd = *pmd; + pmd_clear(pmd); kvm_tlb_flush_vmid_ipa(kvm, addr); + + kvm_flush_dcache_pmd(old_pmd); + put_page(virt_to_page(pmd)); } else { unmap_ptes(kvm, pmd, addr, next); @@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd, next = kvm_pud_addr_end(addr, end); if (!pud_none(*pud)) { if (pud_huge(*pud)) { + pud_t old_pud = *pud; + pud_clear(pud); kvm_tlb_flush_vmid_ipa(kvm, addr); + + kvm_flush_dcache_pud(old_pud); + put_page(virt_to_page(pud)); } else { unmap_pmds(kvm, pud, addr, next); @@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, pte = pte_offset_kernel(pmd, addr); do { - if (!pte_none(*pte)) { - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); - kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); - } + if (!pte_none(*pte) && + (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) + kvm_flush_dcache_pte(*pte); } while (pte++, addr += PAGE_SIZE, addr != end); } @@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, do { next = kvm_pmd_addr_end(addr, end); if (!pmd_none(*pmd)) { - if (kvm_pmd_huge(*pmd)) { - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); - kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); - } else { + if (kvm_pmd_huge(*pmd)) + kvm_flush_dcache_pmd(*pmd); + else stage2_flush_ptes(kvm, pmd, addr, next); - } } } while (pmd++, addr = next, addr != end); } @@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, do { next = kvm_pud_addr_end(addr, end); if (!pud_none(*pud)) { - if (pud_huge(*pud)) { - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); - kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); - } else { + if (pud_huge(*pud)) + kvm_flush_dcache_pud(*pud); + else stage2_flush_pmds(kvm, pud, addr, next); - } } } while (pud++, addr = next, addr != end); } @@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm, * Go through the stage 2 page tables and invalidate any cache lines * backing memory already mapped to the VM. 
*/ -void stage2_flush_vm(struct kvm *kvm) +static void stage2_flush_vm(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; @@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn) return !pfn_valid(pfn); } +static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, + unsigned long size, bool uncached) +{ + __coherent_cache_guest_page(vcpu, pfn, size, uncached); +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, unsigned long fault_status) @@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_set_s2pmd_writable(&new_pmd); kvm_set_pfn_dirty(pfn); } - coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE, - fault_ipa_uncached); + coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); } else { pte_t new_pte = pfn_pte(pfn, mem_type); @@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_set_s2pte_writable(&new_pte); kvm_set_pfn_dirty(pfn); } - coherent_cache_guest_page(vcpu, hva, PAGE_SIZE, - fault_ipa_uncached); + coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); } @@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, unmap_stage2_range(kvm, gpa, size); spin_unlock(&kvm->mmu_lock); } + +/* + * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). + * + * Main problems: + * - S/W ops are local to a CPU (not broadcast) + * - We have line migration behind our back (speculation) + * - System caches don't support S/W at all (damn!) + * + * In the face of the above, the best we can do is to try and convert + * S/W ops to VA ops. Because the guest is not allowed to infer the + * S/W to PA mapping, it can only use S/W to nuke the whole cache, + * which is a rather good thing for us. + * + * Also, it is only used when turning caches on/off ("The expected + * usage of the cache maintenance instructions that operate by set/way + * is associated with the cache maintenance instructions associated + * with the powerdown and powerup of caches, if this is required by + * the implementation."). + * + * We use the following policy: + * + * - If we trap a S/W operation, we enable VM trapping to detect + * caches being turned on/off, and do a full clean. + * + * - We flush the caches on both caches being turned on and off. + * + * - Once the caches are enabled, we stop trapping VM ops. + */ +void kvm_set_way_flush(struct kvm_vcpu *vcpu) +{ + unsigned long hcr = vcpu_get_hcr(vcpu); + + /* + * If this is the first time we do a S/W operation + * (i.e. HCR_TVM not set) flush the whole memory, and set the + * VM trapping. + * + * Otherwise, rely on the VM trapping to wait for the MMU + + * Caches to be turned off. At that point, we'll be able to + * clean the caches again. + */ + if (!(hcr & HCR_TVM)) { + trace_kvm_set_way_flush(*vcpu_pc(vcpu), + vcpu_has_cache_enabled(vcpu)); + stage2_flush_vm(vcpu->kvm); + vcpu_set_hcr(vcpu, hcr | HCR_TVM); + } +} + +void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) +{ + bool now_enabled = vcpu_has_cache_enabled(vcpu); + + /* + * If switching the MMU+caches on, need to invalidate the caches. + * If switching it off, need to clean the caches. + * Clean + invalidate does the trick always. 
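+ *
+ * Editor's illustration, distilled from the code below (not part of
+ * the original comment):
+ *
+ *	caches off -> on : stage2_flush_vm(), then stop trapping VM ops
+ *	caches on -> off : stage2_flush_vm(), keep trapping
+ *	no change        : nothing to flush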
+ */ + if (now_enabled != was_enabled) + stage2_flush_vm(vcpu->kvm); + + /* Caches are now on, stop trapping VM ops (until a S/W op) */ + if (now_enabled) + vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM); + + trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); +} diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h index b1d640f78623..b6a6e7102201 100644 --- a/arch/arm/kvm/trace.h +++ b/arch/arm/kvm/trace.h @@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc, __entry->vcpu_pc, __entry->r0, __entry->imm) ); +TRACE_EVENT(kvm_set_way_flush, + TP_PROTO(unsigned long vcpu_pc, bool cache), + TP_ARGS(vcpu_pc, cache), + + TP_STRUCT__entry( + __field( unsigned long, vcpu_pc ) + __field( bool, cache ) + ), + + TP_fast_assign( + __entry->vcpu_pc = vcpu_pc; + __entry->cache = cache; + ), + + TP_printk("S/W flush at 0x%016lx (cache %s)", + __entry->vcpu_pc, __entry->cache ? "on" : "off") +); + +TRACE_EVENT(kvm_toggle_cache, + TP_PROTO(unsigned long vcpu_pc, bool was, bool now), + TP_ARGS(vcpu_pc, was, now), + + TP_STRUCT__entry( + __field( unsigned long, vcpu_pc ) + __field( bool, was ) + __field( bool, now ) + ), + + TP_fast_assign( + __entry->vcpu_pc = vcpu_pc; + __entry->was = was; + __entry->now = now; + ), + + TP_printk("VM op at 0x%016lx (cache was %s, now %s)", + __entry->vcpu_pc, __entry->was ? "on" : "off", + __entry->now ? "on" : "off") +); + #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index caa21e9b8cd9..ccef8806bb58 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -190,6 +190,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np) arch_ioremap_caller = armada_pcie_wa_ioremap_caller; /* + * We should switch the PL310 to I/O coherency mode only if + * I/O coherency is actually enabled. + */ + if (!coherency_available()) + return; + + /* * Add the PL310 property "arm,io-coherent". 
This makes sure the * outer sync operation is not used, which allows to * workaround the system erratum that causes deadlocks when diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c index 66f67816a844..444f22d370f0 100644 --- a/arch/arm/mach-shmobile/board-ape6evm.c +++ b/arch/arm/mach-shmobile/board-ape6evm.c @@ -18,6 +18,8 @@ #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/interrupt.h> +#include <linux/irqchip.h> +#include <linux/irqchip/arm-gic.h> #include <linux/kernel.h> #include <linux/mfd/tmio.h> #include <linux/mmc/host.h> @@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void) sizeof(ape6evm_leds_pdata)); } +static void __init ape6evm_legacy_init_time(void) +{ + /* Do not invoke DT-based timers via clocksource_of_init() */ +} + +static void __init ape6evm_legacy_init_irq(void) +{ + void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000); + void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000); + + gic_init(0, 29, gic_dist_base, gic_cpu_base); + + /* Do not invoke DT-based interrupt code via irqchip_init() */ +} + + static const char *ape6evm_boards_compat_dt[] __initdata = { "renesas,ape6evm", NULL, @@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = { DT_MACHINE_START(APE6EVM_DT, "ape6evm") .init_early = shmobile_init_delay, + .init_irq = ape6evm_legacy_init_irq, .init_machine = ape6evm_add_standard_devices, .init_late = shmobile_init_late, .dt_compat = ape6evm_boards_compat_dt, + .init_time = ape6evm_legacy_init_time, MACHINE_END diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index f8197eb6e566..65b128dd4072 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c @@ -21,6 +21,8 @@ #include <linux/input.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/arm-gic.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/mfd/tmio.h> @@ -811,6 +813,16 @@ static void __init lager_init(void) lager_ksz8041_fixup); } +static void __init lager_legacy_init_irq(void) +{ + void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000); + void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000); + + gic_init(0, 29, gic_dist_base, gic_cpu_base); + + /* Do not invoke DT-based interrupt code via irqchip_init() */ +} + static const char * const lager_boards_compat_dt[] __initconst = { "renesas,lager", NULL, @@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = { DT_MACHINE_START(LAGER_DT, "lager") .smp = smp_ops(r8a7790_smp_ops), .init_early = shmobile_init_delay, + .init_irq = lager_legacy_init_irq, .init_time = rcar_gen2_timer_init, .init_machine = lager_init, .init_late = shmobile_init_late, diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c index 3dd6edd9bd1d..cc9470dfb1ce 100644 --- a/arch/arm/mach-shmobile/setup-rcar-gen2.c +++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c @@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void) #ifdef CONFIG_COMMON_CLK rcar_gen2_clocks_init(mode); #endif +#ifdef CONFIG_ARCH_SHMOBILE_MULTI clocksource_of_init(); +#endif } struct memory_reserve_config { diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index f1d027aa7a81..0edf2a6d2bbe 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c @@ -70,6 +70,18 @@ void __init shmobile_init_delay(void) if 
(!max_freq) return; +#ifdef CONFIG_ARCH_SHMOBILE_LEGACY + /* Non-multiplatform r8a73a4 SoC cannot use arch timer due + * to GIC being initialized from C and arch timer via DT */ + if (of_machine_is_compatible("renesas,r8a73a4")) + has_arch_timer = false; + + /* Non-multiplatform r8a7790 SoC cannot use arch timer due + * to GIC being initialized from C and arch timer via DT */ + if (of_machine_is_compatible("renesas,r8a7790")) + has_arch_timer = false; +#endif + if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { if (is_a7_a8_a9) shmobile_setup_delay_hz(max_freq, 1, 3); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 03823e784f63..c43c71455566 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN config ARM_KERNMEM_PERMS bool "Restrict kernel memory permissions" + depends on MMU help If this is set, kernel memory other than kernel text (and rodata) will be made non-executable. The tradeoff is that each region is diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 91892569710f..845769e41332 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu) /* Update the list of reserved ASIDs and the ASID bitmap. */ bitmap_clear(asid_map, 0, NUM_USER_ASIDS); for_each_possible_cpu(i) { - if (i == cpu) { - asid = 0; - } else { - asid = atomic64_xchg(&per_cpu(active_asids, i), 0); - /* - * If this CPU has already been through a - * rollover, but hasn't run another task in - * the meantime, we must preserve its reserved - * ASID, as this is the only trace we have of - * the process it is still running. - */ - if (asid == 0) - asid = per_cpu(reserved_asids, i); - __set_bit(asid & ~ASID_MASK, asid_map); - } + asid = atomic64_xchg(&per_cpu(active_asids, i), 0); + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. + */ + if (asid == 0) + asid = per_cpu(reserved_asids, i); + __set_bit(asid & ~ASID_MASK, asid_map); per_cpu(reserved_asids, i) = asid; } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7864797609b3..903dba064a03 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) } EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); +static int __arm_iommu_attach_device(struct device *dev, + struct dma_iommu_mapping *mapping) +{ + int err; + + err = iommu_attach_device(mapping->domain, dev); + if (err) + return err; + + kref_get(&mapping->kref); + dev->archdata.mapping = mapping; + + pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); + return 0; +} + /** * arm_iommu_attach_device * @dev: valid struct device pointer * @mapping: io address space mapping structure (returned from * arm_iommu_create_mapping) * - * Attaches specified io address space mapping to the provided device, + * Attaches specified io address space mapping to the provided device. + * This replaces the dma operations (dma_map_ops pointer) with the + * IOMMU aware version. + * * More than one client might be attached to the same io address space * mapping. 
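 *
 * A minimal usage sketch (editor's illustration; the bus, base and
 * size values here are assumptions, not part of this patch):
 *
 *	struct dma_iommu_mapping *mapping;
 *	int err;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err)
 *		arm_iommu_release_mapping(mapping);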
*/ @@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev, { int err; - err = iommu_attach_device(mapping->domain, dev); + err = __arm_iommu_attach_device(dev, mapping); if (err) return err; - kref_get(&mapping->kref); - dev->archdata.mapping = mapping; - - pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); + set_dma_ops(dev, &iommu_ops); return 0; } EXPORT_SYMBOL_GPL(arm_iommu_attach_device); -/** - * arm_iommu_detach_device - * @dev: valid struct device pointer - * - * Detaches the provided device from a previously attached map. - */ -void arm_iommu_detach_device(struct device *dev) +static void __arm_iommu_detach_device(struct device *dev) { struct dma_iommu_mapping *mapping; @@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev) pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); } + +/** + * arm_iommu_detach_device + * @dev: valid struct device pointer + * + * Detaches the provided device from a previously attached map. + * This voids the dma operations (dma_map_ops pointer) + */ +void arm_iommu_detach_device(struct device *dev) +{ + __arm_iommu_detach_device(dev); + set_dma_ops(dev, NULL); +} EXPORT_SYMBOL_GPL(arm_iommu_detach_device); static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) @@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, return false; } - if (arm_iommu_attach_device(dev, mapping)) { + if (__arm_iommu_attach_device(dev, mapping)) { pr_warn("Failed to attached device %s to IOMMU_mapping\n", dev_name(dev)); arm_iommu_release_mapping(mapping); @@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; - arm_iommu_detach_device(dev); + if (!mapping) + return; + + __arm_iommu_detach_device(dev); arm_iommu_release_mapping(mapping); } diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 865a7e28ea2d..3cb4c856b10d 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~HCR_RW; } +static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.hcr_el2; +} + +static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) +{ + vcpu->arch.hcr_el2 = hcr; +} + static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 0b7dfdb931df..acd101a9014d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -116,9 +116,6 @@ struct kvm_vcpu_arch { * Anything that is not used directly from assembly code goes * here. 
*/ - /* dcache set/way operation pending */ - int last_pcpu; - cpumask_t require_dcache_flush; /* Don't run the guest */ bool pause; diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 14a74f136272..adcf49547301 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; } -static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, - unsigned long size, - bool ipa_uncached) +static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, + unsigned long size, + bool ipa_uncached) { + void *va = page_address(pfn_to_page(pfn)); + if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) - kvm_flush_dcache_to_poc((void *)hva, size); + kvm_flush_dcache_to_poc(va, size); if (!icache_is_aliasing()) { /* PIPT */ - flush_icache_range(hva, hva + size); + flush_icache_range((unsigned long)va, + (unsigned long)va + size); } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ /* any kind of VIPT cache */ __flush_icache_all(); } } +static inline void __kvm_flush_dcache_pte(pte_t pte) +{ + struct page *page = pte_page(pte); + kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); +} + +static inline void __kvm_flush_dcache_pmd(pmd_t pmd) +{ + struct page *page = pmd_page(pmd); + kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); +} + +static inline void __kvm_flush_dcache_pud(pud_t pud) +{ + struct page *page = pud_page(pud); + kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); +} + #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) -void stage2_flush_vm(struct kvm *kvm); +void kvm_set_way_flush(struct kvm_vcpu *vcpu); +void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 8ba85e9ea388..b334084d3675 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select KVM_ARM_HOST select KVM_ARM_VGIC select KVM_ARM_TIMER + select SRCU ---help--- Support hosting virtualized guest machines. diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3d7c2df89946..f31e8bb2bc5b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr) return ccsidr; } -static void do_dc_cisw(u32 val) -{ - asm volatile("dc cisw, %x0" : : "r" (val)); - dsb(ish); -} - -static void do_dc_csw(u32 val) -{ - asm volatile("dc csw, %x0" : : "r" (val)); - dsb(ish); -} - -/* See note at ARM ARM B1.14.4 */ +/* + * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 
+ */ static bool access_dcsw(struct kvm_vcpu *vcpu, const struct sys_reg_params *p, const struct sys_reg_desc *r) { - unsigned long val; - int cpu; - if (!p->is_write) return read_from_write_only(vcpu, p); - cpu = get_cpu(); - - cpumask_setall(&vcpu->arch.require_dcache_flush); - cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); - - /* If we were already preempted, take the long way around */ - if (cpu != vcpu->arch.last_pcpu) { - flush_cache_all(); - goto done; - } - - val = *vcpu_reg(vcpu, p->Rt); - - switch (p->CRm) { - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ - case 14: /* DCCISW */ - do_dc_cisw(val); - break; - - case 10: /* DCCSW */ - do_dc_csw(val); - break; - } - -done: - put_cpu(); - + kvm_set_way_flush(vcpu); return true; } /* * Generic accessor for VM registers. Only called as long as HCR_TVM - * is set. + * is set. If the guest enables the MMU, we stop trapping the VM + * sys_regs and leave it in complete control of the caches. */ static bool access_vm_reg(struct kvm_vcpu *vcpu, const struct sys_reg_params *p, const struct sys_reg_desc *r) { unsigned long val; + bool was_enabled = vcpu_has_cache_enabled(vcpu); BUG_ON(!p->is_write); @@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; } - return true; -} - -/* - * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the - * guest enables the MMU, we stop trapping the VM sys_regs and leave - * it in complete control of the caches. - */ -static bool access_sctlr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, - const struct sys_reg_desc *r) -{ - access_vm_reg(vcpu, p, r); - - if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ - vcpu->arch.hcr_el2 &= ~HCR_TVM; - stage2_flush_vm(vcpu->kvm); - } - + kvm_toggle_cache(vcpu, was_enabled); return true; } @@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { NULL, reset_mpidr, MPIDR_EL1 }, /* SCTLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), - access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, + access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, /* CPACR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), NULL, reset_val, CPACR_EL1, 0 }, @@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = { * register). 
*/ static const struct sys_reg_desc cp15_regs[] = { - { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, + { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 0eca93327195..d223a8b57c1e 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -142,6 +142,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 1790f22e71a2..2686a7aa8ec8 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -176,6 +176,8 @@ retry: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index 9a66372fc7c7..ec4917ddf678 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 7225dad87094..ba5ba7accd0d 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -172,6 +172,8 @@ retry: */ if (fault & VM_FAULT_OOM) { goto out_of_memory; + } else if (fault & VM_FAULT_SIGSEGV) { + goto bad_area; } else if (fault & VM_FAULT_SIGBUS) { signal = SIGBUS; goto bad_area; diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index e9c6a8014bd6..e3d4d4890104 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -200,6 +200,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 2bd7487440c4..b2f04aee46ec 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -145,6 +145,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto map_err; else if (fault & VM_FAULT_SIGBUS) goto bus_err; BUG(); diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index 332680e5ebf2..2de5dc695a87 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c @@ -141,6 +141,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index fa4cf52aa7a6..d46a5ebb7570 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -224,6 +224,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/mips/Kconfig 
b/arch/mips/Kconfig index 3289969ee423..843713c05b79 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2656,27 +2656,21 @@ config TRAD_SIGNALS bool config MIPS32_COMPAT - bool "Kernel support for Linux/MIPS 32-bit binary compatibility" - depends on 64BIT - help - Select this option if you want Linux/MIPS 32-bit binary - compatibility. Since all software available for Linux/MIPS is - currently 32-bit you should say Y here. + bool config COMPAT bool - depends on MIPS32_COMPAT - select ARCH_WANT_OLD_COMPAT_IPC - default y config SYSVIPC_COMPAT bool - depends on COMPAT && SYSVIPC - default y config MIPS32_O32 bool "Kernel support for o32 binaries" - depends on MIPS32_COMPAT + depends on 64BIT + select ARCH_WANT_OLD_COMPAT_IPC + select COMPAT + select MIPS32_COMPAT + select SYSVIPC_COMPAT if SYSVIPC help Select this option if you want to run o32 binaries. These are pure 32-bit binaries as used by the 32-bit Linux/MIPS port. Most of @@ -2686,7 +2680,10 @@ config MIPS32_O32 config MIPS32_N32 bool "Kernel support for n32 binaries" - depends on MIPS32_COMPAT + depends on 64BIT + select COMPAT + select MIPS32_COMPAT + select SYSVIPC_COMPAT if SYSVIPC help Select this option if you want to run n32 binaries. These are 64-bit binaries using 32-bit quantities for addressing and certain diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c index 8585078ae50e..2a4c52e27f41 100644 --- a/arch/mips/boot/elf2ecoff.c +++ b/arch/mips/boot/elf2ecoff.c @@ -49,7 +49,8 @@ /* * Some extra ELF definitions */ -#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ +#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */ +#define PT_MIPS_ABIFLAGS 0x70000003 /* Records ABI related flags */ /* -------------------------------------------------------------------- */ @@ -349,39 +350,46 @@ int main(int argc, char *argv[]) for (i = 0; i < ex.e_phnum; i++) { /* Section types we can ignore... */ - if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE || - ph[i].p_type == PT_PHDR - || ph[i].p_type == PT_MIPS_REGINFO) + switch (ph[i].p_type) { + case PT_NULL: + case PT_NOTE: + case PT_PHDR: + case PT_MIPS_REGINFO: + case PT_MIPS_ABIFLAGS: continue; - /* Section types we can't handle... */ - else if (ph[i].p_type != PT_LOAD) { - fprintf(stderr, - "Program header %d type %d can't be converted.\n", - ex.e_phnum, ph[i].p_type); - exit(1); - } - /* Writable (data) segment? */ - if (ph[i].p_flags & PF_W) { - struct sect ndata, nbss; - ndata.vaddr = ph[i].p_vaddr; - ndata.len = ph[i].p_filesz; - nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; - nbss.len = ph[i].p_memsz - ph[i].p_filesz; + case PT_LOAD: + /* Writable (data) segment? */ + if (ph[i].p_flags & PF_W) { + struct sect ndata, nbss; + + ndata.vaddr = ph[i].p_vaddr; + ndata.len = ph[i].p_filesz; + nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz; + nbss.len = ph[i].p_memsz - ph[i].p_filesz; - combine(&data, &ndata, 0); - combine(&bss, &nbss, 1); - } else { - struct sect ntxt; + combine(&data, &ndata, 0); + combine(&bss, &nbss, 1); + } else { + struct sect ntxt; - ntxt.vaddr = ph[i].p_vaddr; - ntxt.len = ph[i].p_filesz; + ntxt.vaddr = ph[i].p_vaddr; + ntxt.len = ph[i].p_filesz; - combine(&text, &ntxt, 0); + combine(&text, &ntxt, 0); + } + /* Remember the lowest segment start address. */ + if (ph[i].p_vaddr < cur_vma) + cur_vma = ph[i].p_vaddr; + break; + + default: + /* Section types we can't handle... 
*/ + fprintf(stderr, + "Program header %d type %d can't be converted.\n", + ex.e_phnum, ph[i].p_type); + exit(1); } - /* Remember the lowest segment start address. */ - if (ph[i].p_vaddr < cur_vma) - cur_vma = ph[i].p_vaddr; } /* Sections must be in order to be converted... */ diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index ecd903dd1c45..8b1eeffa12ed 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -240,9 +240,7 @@ static int octeon_cpu_disable(void) set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); - local_irq_disable(); octeon_fixup_irqs(); - local_irq_enable(); flush_cache_all(); local_flush_tlb_all(); diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index f57b96dcf7df..61a4460d67d3 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_SCTP=m CONFIG_BRIDGE=m @@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m -CONFIG_MAC80211_RC_PID=y -CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_IDE_GENERIC=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_PIIX=y -CONFIG_BLK_DEV_IT8213=m -CONFIG_BLK_DEV_TC86C001=m CONFIG_RAID_ATTRS=m -CONFIG_SCSI=m -CONFIG_BLK_DEV_SD=m +CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y @@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m CONFIG_SCSI_AIC7XXX=m CONFIG_AIC7XXX_RESET_DELAY_MS=15000 # CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_ATA=y +CONFIG_ATA_PIIX=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m @@ -340,6 +332,7 @@ CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_REISERFS_FS=m CONFIG_REISERFS_PROC_INFO=y CONFIG_REISERFS_FS_XATTR=y @@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 994d21939676..affebb78f5d6 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -64,7 +64,7 @@ static inline int __enable_fpu(enum fpu_mode mode) return SIGFPE; /* set FRE */ - write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE); + set_c0_config5(MIPS_CONF5_FRE); goto fr_common; case FPU_64BIT: @@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode) #endif /* fall through */ case FPU_32BIT: - /* clear FRE */ - write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE); + if (cpu_has_fre) { + /* clear FRE */ + clear_c0_config5(MIPS_CONF5_FRE); + } fr_common: /* set CU1 & change FR appropriately */ fr = (int)mode & FPU_FR_MASK; @@ -182,25 +184,32 @@ static inline int init_fpu(void) int ret = 0; if (cpu_has_fpu) { + unsigned int config5; + ret = __own_fpu(); - if (!ret) { - unsigned int config5 = read_c0_config5(); - - /* - * 
Ensure FRE is clear whilst running _init_fpu, since - * single precision FP instructions are used. If FRE - * was set then we'll just end up initialising all 32 - * 64b registers. - */ - write_c0_config5(config5 & ~MIPS_CONF5_FRE); - enable_fpu_hazard(); + if (ret) + return ret; + if (!cpu_has_fre) { _init_fpu(); - /* Restore FRE */ - write_c0_config5(config5); - enable_fpu_hazard(); + return 0; } + + /* + * Ensure FRE is clear whilst running _init_fpu, since + * single precision FP instructions are used. If FRE + * was set then we'll just end up initialising all 32 + * 64b registers. + */ + config5 = clear_c0_config5(MIPS_CONF5_FRE); + enable_fpu_hazard(); + + _init_fpu(); + + /* Restore FRE */ + write_c0_config5(config5); + enable_fpu_hazard(); } else fpu_emulator_init_fpu(); diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h index f8d37d1df5de..9fac64a26353 100644 --- a/arch/mips/include/asm/fw/arc/hinv.h +++ b/arch/mips/include/asm/fw/arc/hinv.h @@ -119,7 +119,7 @@ union key_u { #define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */ #endif -typedef struct component { +typedef struct { CONFIGCLASS Class; CONFIGTYPE Type; IDENTIFIERFLAG Flags; @@ -140,7 +140,7 @@ struct cfgdata { }; /* System ID */ -typedef struct systemid { +typedef struct { CHAR VendorId[8]; CHAR ProductId[8]; } SYSTEMID; @@ -166,7 +166,7 @@ typedef enum memorytype { #endif /* _NT_PROM */ } MEMORYTYPE; -typedef struct memorydescriptor { +typedef struct { MEMORYTYPE Type; LONG BasePage; LONG PageCount; diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index b95a827d763e..59c0901bdd84 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void) /* Macros to ease the creation of register access functions */ #define BUILD_CM_R_(name, off) \ -static inline u32 *addr_gcr_##name(void) \ +static inline u32 __iomem *addr_gcr_##name(void) \ { \ - return (u32 *)(mips_cm_base + (off)); \ + return (u32 __iomem *)(mips_cm_base + (off)); \ } \ \ static inline u32 read_gcr_##name(void) \ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 5e4aef304b02..5b720d8c2745 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1386,12 +1386,27 @@ do { \ __res; \ }) +#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \ +do { \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set reorder \n" \ + " "STR(gas_hardfloat)" \n" \ + " ctc1 %0,"STR(dest)" \n" \ + " .set pop \n" \ + : : "r" (val)); \ +} while (0) + #ifdef GAS_HAS_SET_HARDFLOAT #define read_32bit_cp1_register(source) \ _read_32bit_cp1_register(source, .set hardfloat) +#define write_32bit_cp1_register(dest, val) \ + _write_32bit_cp1_register(dest, val, .set hardfloat) #else #define read_32bit_cp1_register(source) \ _read_32bit_cp1_register(source, ) +#define write_32bit_cp1_register(dest, val) \ + _write_32bit_cp1_register(dest, val, ) #endif #ifdef HAVE_AS_DSP diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index bb7963753730..6499d93ae68d 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -29,13 +29,7 @@ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ - if ((config_enabled(CONFIG_32BIT) || - test_tsk_thread_flag(task, TIF_32BIT_REGS)) && - (regs->regs[2] == __NR_syscall)) - return regs->regs[4]; - else - 
return regs->regs[2]; + return current_thread_info()->syscall; } static inline unsigned long mips_get_syscall_arg(unsigned long *arg, diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 99eea59604e9..e4440f92b366 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -36,6 +36,7 @@ struct thread_info { */ struct restart_block restart_block; struct pt_regs *regs; + long syscall; /* syscall number */ }; /* diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index d001bb1ad177..c03088f9f514 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -376,16 +376,17 @@ #define __NR_getrandom (__NR_Linux + 353) #define __NR_memfd_create (__NR_Linux + 354) #define __NR_bpf (__NR_Linux + 355) +#define __NR_execveat (__NR_Linux + 356) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 355 +#define __NR_Linux_syscalls 356 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 355 +#define __NR_O32_Linux_syscalls 356 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -709,16 +710,17 @@ #define __NR_getrandom (__NR_Linux + 313) #define __NR_memfd_create (__NR_Linux + 314) #define __NR_bpf (__NR_Linux + 315) +#define __NR_execveat (__NR_Linux + 316) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 315 +#define __NR_Linux_syscalls 316 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 315 +#define __NR_64_Linux_syscalls 316 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1046,15 +1048,16 @@ #define __NR_getrandom (__NR_Linux + 317) #define __NR_memfd_create (__NR_Linux + 318) #define __NR_bpf (__NR_Linux + 319) +#define __NR_execveat (__NR_Linux + 320) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 319 +#define __NR_Linux_syscalls 320 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 319 +#define __NR_N32_Linux_syscalls 320 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c index 2531da1d3add..97206b3deb97 100644 --- a/arch/mips/jz4740/irq.c +++ b/arch/mips/jz4740/irq.c @@ -30,6 +30,9 @@ #include <asm/irq_cpu.h> #include <asm/mach-jz4740/base.h> +#include <asm/mach-jz4740/irq.h> + +#include "irq.h" static void __iomem *jz_intc_base; diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index c92b15df6893..a5b5b56485c1 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -19,8 +19,8 @@ enum { int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, bool is_interp, struct arch_elf_state *state) { - struct elfhdr *ehdr = _ehdr; - struct elf_phdr *phdr = _phdr; + struct elf32_hdr *ehdr = _ehdr; + struct elf32_phdr *phdr = _phdr; struct mips_elf_abiflags_v0 abiflags; int ret; @@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, return 0; } -static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) +static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) { /* If the ABI requirement is provided, simply return that */ if (in_abi != -1) @@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) int arch_check_elf(void *_ehdr, bool has_interpreter, struct arch_elf_state *state) { - struct elfhdr *ehdr = _ehdr; + struct elf32_hdr *ehdr = _ehdr; unsigned fp_abi, interp_fp_abi, abi0, abi1; /* 
Ignore non-O32 binaries */ diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 590c2c980fd3..6eb7a3f515fc 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = { .irq_mask_ack = mask_mips_irq, .irq_unmask = unmask_mips_irq, .irq_eoi = unmask_mips_irq, + .irq_disable = mask_mips_irq, + .irq_enable = unmask_mips_irq, }; /* @@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = { .irq_mask_ack = mips_mt_cpu_irq_ack, .irq_unmask = unmask_mips_irq, .irq_eoi = unmask_mips_irq, + .irq_disable = mask_mips_irq, + .irq_enable = unmask_mips_irq, }; asmlinkage void __weak plat_irq_dispatch(void) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index eb76434828e8..85bff5d513e5 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -82,6 +82,30 @@ void flush_thread(void) { } +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + /* + * Save any process state which is live in hardware registers to the + * parent context prior to duplication. This prevents the new child + * state becoming stale if the parent is preempted before copy_thread() + * gets a chance to save the parent's live hardware registers to the + * child context. + */ + preempt_disable(); + + if (is_msa_enabled()) + save_msa(current); + else if (is_fpu_owner()) + _save_fp(current); + + save_dsp(current); + + preempt_enable(); + + *dst = *src; + return 0; +} + int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { @@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; - preempt_disable(); - - if (is_msa_enabled()) - save_msa(p); - else if (is_fpu_owner()) - save_fp(p); - - if (cpu_has_dsp) - save_dsp(p); - - preempt_enable(); - /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; /* Put the stack after the struct pt_regs. */ diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 9d1487d83293..510452812594 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) long ret = 0; user_exit(); + current_thread_info()->syscall = syscall; + if (secure_computing() == -1) return -1; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 00cad1005a16..6e8de80bb446 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -181,6 +181,7 @@ illegal_syscall: sll t1, t0, 2 beqz v0, einval lw t2, sys_call_table(t1) # syscall routine + sw a0, PT_R2(sp) # call routine directly on restart /* Some syscalls like execve get their arguments from struct pt_regs and claim zero arguments in the syscall table. 
Thus we have to @@ -580,3 +581,4 @@ EXPORT(sys_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 4355 */ + PTR sys_execveat diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 5251565e344b..ad4d44635c76 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -435,4 +435,5 @@ EXPORT(sys_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 5315 */ + PTR sys_execveat .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 77e74398b828..446cc654da56 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -428,4 +428,5 @@ EXPORT(sysn32_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf + PTR compat_sys_execveat /* 6320 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 6f8db9f728e8..d07b210fbeff 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -186,6 +186,7 @@ LEAF(sys32_syscall) dsll t1, t0, 3 beqz v0, einval ld t2, sys32_call_table(t1) # syscall routine + sd a0, PT_R2(sp) # call routine directly on restart move a0, a1 # shift argument registers move a1, a2 @@ -565,4 +566,5 @@ EXPORT(sys32_call_table) PTR sys_getrandom PTR sys_memfd_create PTR sys_bpf /* 4355 */ + PTR compat_sys_execveat .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 1e0a93c5a3e7..e36a859af666 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -44,8 +44,8 @@ static void cmp_init_secondary(void) struct cpuinfo_mips *c __maybe_unused = ¤t_cpu_data; /* Assume GIC is present */ - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | - STATUSF_IP7); + change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | + STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); /* Enable per-cpu interrupts: platform specific */ diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index ad86951b73bd..17ea705f6c40 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -161,7 +161,8 @@ static void vsmp_init_secondary(void) #ifdef CONFIG_MIPS_GIC /* This is Malta specific: IPI,performance and timer interrupts */ if (gic_present) - change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | + change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | + STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); else #endif diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index c94c4e92e17d..1c0d8c50b7e1 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -123,10 +123,10 @@ asmlinkage void start_secondary(void) unsigned int cpu; cpu_probe(); - cpu_report(); per_cpu_trap_init(false); mips_clockevent_init(); mp_ops->init_secondary(); + cpu_report(); /* * XXX parity protection should be folded in here when it's converted diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ad3d2031c327..c3b41e24c05a 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa) /* Restore the scalar FP control & status register */ if (!was_fpu_owner) - asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31)); + write_32bit_cp1_register(CP1_STATUS, + current->thread.fpu.fcr31); } out: diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 30e334e823bd..2ae12825529f 100644 --- a/arch/mips/kvm/Kconfig +++ 
b/arch/mips/kvm/Kconfig @@ -20,6 +20,7 @@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select KVM_MMIO + select SRCU ---help--- Support for hosting Guest kernels. Currently supported on MIPS32 processors. diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index becc42bb1849..70ab5d664332 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -158,6 +158,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index e90b2e899291..30639a6e9b8c 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -489,6 +489,8 @@ static void r4k_tlb_configure(void) #ifdef CONFIG_64BIT pg |= PG_ELPA; #endif + if (cpu_has_rixiex) + pg |= PG_IEC; write_c0_pagegrain(pg); } diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h index faed90240ded..6d6df839948f 100644 --- a/arch/mn10300/include/asm/cacheflush.h +++ b/arch/mn10300/include/asm/cacheflush.h @@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) -/* - * Internal debugging function - */ -#ifdef CONFIG_DEBUG_PAGEALLOC -extern void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - #endif /* __ASSEMBLY__ */ #endif /* _ASM_CACHEFLUSH_H */ diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 3516cbdf1ee9..0c2cc5d39c8e 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -262,6 +262,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 15a0bb5fc06d..d194c0427b26 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -135,6 +135,8 @@ survive: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); @@ -157,9 +159,11 @@ bad_area: bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { - pr_alert("%s: unhandled page fault (%d) at 0x%08lx, " - "cause %ld\n", current->comm, SIGSEGV, address, cause); - show_regs(regs); + if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) { + pr_info("%s: unhandled page fault (%d) at 0x%08lx, " + "cause %ld\n", current->comm, SIGSEGV, address, cause); + show_regs(regs); + } _exception(SIGSEGV, regs, code, address); return; } diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 0703acf7d327..230ac20ae794 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -171,6 +171,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 3ca9c1131cfe..e5120e653240 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -256,6 +256,8 @@ good_area: */ if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto bad_area; BUG(); 
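The fault-handler hunks above (avr32 through parisc, plus mips) and the remaining architectures below all add the same branch. Distilled into a single sketch (generic names, not any one architecture's actual file), the error dispatch after handle_mm_fault() becomes:

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* OOM-killer path */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

(Labels and variables are each handler's own; a few ports map the new bit slightly differently, e.g. um jumps to its generic "out" label and parisc reuses its SIGBUS path's label.)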
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 5b9312220e84..30b35fff2dea 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) - - -#ifdef CONFIG_DEBUG_PAGEALLOC -/* internal debugging function */ -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_CACHEFLUSH_H */ diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index f5769f19ae25..11850f310fb4 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select HAVE_KVM_EVENTFD + select SRCU config KVM_BOOK3S_HANDLER bool diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 5a236f082c78..1b5305d4bdab 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; goto out_unlock; - } else if (*flt & VM_FAULT_SIGBUS) { + } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { ret = -EFAULT; goto out_unlock; } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index eb79907f34fa..6154b0a2b063 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -437,6 +437,8 @@ good_area: */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { + if (fault & VM_FAULT_SIGSEGV) + goto bad_area; rc = mm_fault_error(regs, address, fault); if (rc >= MM_FAULT_RETURN) goto bail; diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index b700a329c31d..d2de7d5d7574 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void) * all cpus at boot. Get these reg values of current cpu and use the * same accross all cpus. */ - uint64_t lpcr_val = mfspr(SPRN_LPCR); + uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; uint64_t hid0_val = mfspr(SPRN_HID0); uint64_t hid1_val = mfspr(SPRN_HID1); uint64_t hid4_val = mfspr(SPRN_HID4); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 5b150f0c5df9..13c6e200b24e 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -337,6 +337,7 @@ static inline void disable_surveillance(void) args.token = rtas_token("set-indicator"); if (args.token == RTAS_UNKNOWN_SERVICE) return; + args.token = cpu_to_be32(args.token); args.nargs = cpu_to_be32(3); args.nret = cpu_to_be32(1); args.rets = &args.args[3]; diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 3e20383d0921..58fae7d098cf 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h @@ -4,10 +4,6 @@ /* Caches aren't brain-dead on the s390. 
*/ #include <asm-generic/cacheflush.h> -#ifdef CONFIG_DEBUG_PAGEALLOC -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 646db9c467d1..5fce52cf0e57 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -28,6 +28,7 @@ config KVM select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING + select SRCU ---help--- Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. This should work diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 811937bb90be..9065d5aa3932 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) do_no_context(regs); else pagefault_out_of_memory(); + } else if (fault & VM_FAULT_SIGSEGV) { + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + do_no_context(regs); + else + do_sigsegv(regs, SEGV_MAPERR); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 52238983527d..6860beb2a280 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c @@ -114,6 +114,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 541dc6101508..a58fec9b55e0 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, } else { if (fault & VM_FAULT_SIGBUS) do_sigbus(regs, error_code, address); + else if (fault & VM_FAULT_SIGSEGV) + bad_area(regs, error_code, address); else BUG(); } diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h index 38965379e350..68513c41e10d 100644 --- a/arch/sparc/include/asm/cacheflush_64.h +++ b/arch/sparc/include/asm/cacheflush_64.h @@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *, #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) -#ifdef CONFIG_DEBUG_PAGEALLOC -/* internal debugging function */ -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - #endif /* !__ASSEMBLY__ */ #endif /* _SPARC64_CACHEFLUSH_H */ diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 908e8c17c902..70d817154fe8 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -249,6 +249,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 18fcd7167095..479823249429 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -446,6 +446,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig index 
2298cb1daff7..1e968f7550dc 100644 --- a/arch/tile/kvm/Kconfig +++ b/arch/tile/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM depends on HAVE_KVM && MODULES select PREEMPT_NOTIFIERS select ANON_INODES + select SRCU ---help--- Support hosting paravirtualized guest machines. diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 565e25a98334..0f61a73534e6 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -442,6 +442,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 5678c3571e7c..209617302df8 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -80,6 +80,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { goto out_of_memory; + } else if (fault & VM_FAULT_SIGSEGV) { + goto out; } else if (fault & VM_FAULT_SIGBUS) { err = -EACCES; goto out; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0dc9d0144a27..85588a891c06 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -138,6 +138,7 @@ config X86 select HAVE_ACPI_APEI_NMI if ACPI select ACPI_LEGACY_TABLES_LOOKUP if ACPI select X86_FEATURE_NAMES if PROC_FS + select SRCU config INSTRUCTION_DECODER def_bool y diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index aede2c347bde..90a54851aedc 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -174,6 +174,7 @@ #define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ /* @@ -388,6 +389,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) +#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT) #if __GNUC__ >= 4 extern void warn_pre_alternatives(void); diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 61fd18b83b6c..12cb66f6d3a5 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -114,5 +114,10 @@ static inline void debug_stack_usage_inc(void) { } static inline void debug_stack_usage_dec(void) { } #endif /* X86_64 */ +#ifdef CONFIG_CPU_SUP_AMD +extern void set_dr_addr_mask(unsigned long mask, int dr); +#else +static inline void set_dr_addr_mask(unsigned long mask, int dr) { } +#endif #endif /* _ASM_X86_DEBUGREG_H */ diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index ef1c4d2d41ec..6c98be864a75 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -12,6 +12,7 @@ */ struct arch_hw_breakpoint { unsigned long address; + unsigned long mask; u8 len; u8 type; }; diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index c8aa65d56027..d979e5abae55 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -251,6 +251,10 @@ /* Fam 16h MSRs */ #define MSR_F16H_L2I_PERF_CTL 0xc0010230 #define MSR_F16H_L2I_PERF_CTR 
0xc0010231 +#define MSR_F16H_DR1_ADDR_MASK 0xc0011019 +#define MSR_F16H_DR2_ADDR_MASK 0xc001101a +#define MSR_F16H_DR3_ADDR_MASK 0xc001101b +#define MSR_F16H_DR0_ADDR_MASK 0xc0011027 /* Fam 15h MSRs */ #define MSR_F15H_PERF_CTL 0xc0010200 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 15c5df92f74e..a220239cea65 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -869,3 +869,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) return false; } + +void set_dr_addr_mask(unsigned long mask, int dr) +{ + if (!cpu_has_bpext) + return; + + switch (dr) { + case 0: + wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0); + break; + case 1: + case 2: + case 3: + wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0); + break; + default: + break; + } +} diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 15c29096136b..36a83617eb21 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -552,7 +552,7 @@ static int __init microcode_init(void) int error; if (paravirt_enabled() || dis_ucode_ldr) - return 0; + return -EINVAL; if (c->x86_vendor == X86_VENDOR_INTEL) microcode_ops = init_intel_microcode(); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 944bf019b74f..498b6d967138 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void) break; case 55: /* 22nm Atom "Silvermont" */ + case 76: /* 14nm Atom "Airmont" */ case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 6e434f8e5fc8..c4bb8b8e5017 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v) * or use ldexp(count, -32). * Watts = Joules/Time delta */ - return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); + return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit); } static u64 rapl_event_update(struct perf_event *event) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 10b8d3eaaf15..c635b8b49e93 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id box->phys_id = phys_id; box->pci_dev = pdev; box->pmu = pmu; - uncore_box_init(box); pci_set_drvdata(pdev, box); raw_spin_lock(&uncore_box_lock); @@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu) pmu = &type->pmus[j]; box = *per_cpu_ptr(pmu->box, cpu); /* called by uncore_cpu_init? 
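
With the X86_FEATURE_BPEXT path added above, a hardware breakpoint is no longer limited to 1/2/4/8-byte lengths on capable AMD CPUs: arch_build_bp_info() accepts any power-of-2 bp_len, stores mask = bp_len - 1, and arch_install_hw_breakpoint() programs that into the per-register MSR_F16H_DRn_ADDR_MASK via set_dr_addr_mask(). A hedged userspace sketch of requesting such a breakpoint through perf_event_open() (the attr fields are standard perf ABI; the 32-byte length is only accepted on BPEXT hardware):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static char watched[32]; /* region the breakpoint should cover */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (uintptr_t)watched;
	/* Any power of 2: the kernel encodes mask = bp_len - 1 into the
	 * DR address-mask MSR; without cpu_has_bpext this is rejected
	 * with EOPNOTSUPP per the hunk above. */
	attr.bp_len = sizeof(watched);

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		printf("watchpoint armed over %zu bytes\n", sizeof(watched));
	return 0;
}
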
*/ - if (box && box->phys_id >= 0) { - uncore_box_init(box); + if (box && box->phys_id >= 0) continue; - } for_each_online_cpu(k) { exist = *per_cpu_ptr(pmu->box, k); @@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu) } } - if (box) { + if (box) box->phys_id = phys_id; - uncore_box_init(box); - } } } return 0; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 863d9b02563e..6c8c1e7e69d8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box) return box->pmu->type->num_counters; } +static inline void uncore_box_init(struct intel_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + static inline void uncore_disable_box(struct intel_uncore_box *box) { if (box->pmu->type->ops->disable_box) @@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box) static inline void uncore_enable_box(struct intel_uncore_box *box) { + uncore_box_init(box); + if (box->pmu->type->ops->enable_box) box->pmu->type->ops->enable_box(box); } @@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box, return box->pmu->type->ops->read_counter(box, event); } -static inline void uncore_box_init(struct intel_uncore_box *box) -{ - if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { - if (box->pmu->type->ops->init_box) - box->pmu->type->ops->init_box(box); - } -} - static inline bool uncore_box_is_fake(struct intel_uncore_box *box) { return (box->phys_id < 0); diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 3d5fb509bdeb..7114ba220fd4 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp) *dr7 |= encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); + if (info->mask) + set_dr_addr_mask(info->mask, i); return 0; } @@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) *dr7 &= ~__encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); -} - -static int get_hbp_len(u8 hbp_len) -{ - unsigned int len_in_bytes = 0; - - switch (hbp_len) { - case X86_BREAKPOINT_LEN_1: - len_in_bytes = 1; - break; - case X86_BREAKPOINT_LEN_2: - len_in_bytes = 2; - break; - case X86_BREAKPOINT_LEN_4: - len_in_bytes = 4; - break; -#ifdef CONFIG_X86_64 - case X86_BREAKPOINT_LEN_8: - len_in_bytes = 8; - break; -#endif - } - return len_in_bytes; + if (info->mask) + set_dr_addr_mask(0, i); } /* @@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) struct arch_hw_breakpoint *info = counter_arch_bp(bp); va = info->address; - len = get_hbp_len(info->len); + len = bp->attr.bp_len; return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp) } /* Len */ + info->mask = 0; + switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: info->len = X86_BREAKPOINT_LEN_1; @@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp) break; #endif default: - return -EINVAL; + if (!is_power_of_2(bp->attr.bp_len)) + return -EINVAL; + if (!cpu_has_bpext) + return -EOPNOTSUPP; + info->mask = bp->attr.bp_len - 1; + info->len = X86_BREAKPOINT_LEN_1; } return 0; } + /* * Validate the arch-specific HW 
Breakpoint register settings */ @@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) if (ret) return ret; - ret = -EINVAL; - switch (info->len) { case X86_BREAKPOINT_LEN_1: align = 0; + if (info->mask) + align = info->mask; break; case X86_BREAKPOINT_LEN_2: align = 1; @@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) break; #endif default: - return ret; + WARN_ON_ONCE(1); } /* diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index f9d16ff56c6b..7dc7ba577ecd 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -40,6 +40,7 @@ config KVM select HAVE_KVM_MSI select HAVE_KVM_CPU_RELAX_INTERCEPT select KVM_VFIO + select SRCU ---help--- Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4f0c0b954686..d52dcf0776ea 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm) u16 cid, lid; u32 ldr, aid; + if (!kvm_apic_present(vcpu)) + continue; + aid = kvm_apic_id(apic); ldr = kvm_apic_get_reg(apic, APIC_LDR); cid = apic_cluster_id(new, ldr); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 38dcec403b46..e3ff27a5b634 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) do_sigbus(regs, error_code, address, fault); + else if (fault & VM_FAULT_SIGSEGV) + bad_area_nosemaphore(regs, error_code, address); else BUG(); } diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 7b20bccf3648..2fb384724ebb 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), }, }, + { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"), + }, + }, + { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"), + }, + }, {} }; diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 44b9271580b5..852aa4c92da0 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev); /* diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index b57c4f91f487..9e3571a6535c 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -117,6 +117,8 @@ good_area: if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 6774a0e69867..1630a20d5dcf 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -15,26 +15,6 
@@ static void blk_mq_sysfs_release(struct kobject *kobj) { - struct request_queue *q; - - q = container_of(kobj, struct request_queue, mq_kobj); - free_percpu(q->queue_ctx); -} - -static void blk_mq_ctx_release(struct kobject *kobj) -{ - struct blk_mq_ctx *ctx; - - ctx = container_of(kobj, struct blk_mq_ctx, kobj); - kobject_put(&ctx->queue->mq_kobj); -} - -static void blk_mq_hctx_release(struct kobject *kobj) -{ - struct blk_mq_hw_ctx *hctx; - - hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); - kfree(hctx); } struct blk_mq_ctx_sysfs_entry { @@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = { static struct kobj_type blk_mq_ctx_ktype = { .sysfs_ops = &blk_mq_sysfs_ops, .default_attrs = default_ctx_attrs, - .release = blk_mq_ctx_release, + .release = blk_mq_sysfs_release, }; static struct kobj_type blk_mq_hw_ktype = { .sysfs_ops = &blk_mq_hw_sysfs_ops, .default_attrs = default_hw_ctx_attrs, - .release = blk_mq_hctx_release, + .release = blk_mq_sysfs_release, }; static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) @@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) return ret; hctx_for_each_ctx(hctx, ctx, i) { - kobject_get(&q->mq_kobj); ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) break; diff --git a/block/blk-mq.c b/block/blk-mq.c index 9ee3b87c4498..2390c5541e71 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, mutex_unlock(&set->tag_list_lock); } +/* + * It is the actual release handler for mq, but we do it from + * request queue's release handler for avoiding use-after-free + * and headache because q->mq_kobj shouldn't have been introduced, + * but we can't group ctx/kctx kobj without it. + */ +void blk_mq_release(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + /* hctx kobj stays in hctx */ + queue_for_each_hw_ctx(q, hctx, i) + kfree(hctx); + + kfree(q->queue_hw_ctx); + + /* ctx kobj stays in queue_ctx */ + free_percpu(q->queue_ctx); +} + struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; @@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q) percpu_ref_exit(&q->mq_usage_counter); - kfree(q->queue_hw_ctx); kfree(q->mq_map); - q->queue_hw_ctx = NULL; q->mq_map = NULL; mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index 4f4f943c22c3..6a48c4c0d8a2 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_rq_timed_out(struct request *req, bool reserved); +void blk_mq_release(struct request_queue *q); + /* * Basic implementation of sparser bitmap, allowing the user to spread * the bits over more cachelines. 
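
The blk-mq hunks above are a lifetime fix: the ctx and hctx kobject release handlers used to free their backing memory themselves, which could run while the request queue still walked those structures; now both kobject types share an empty release and all freeing is concentrated in blk_mq_release(), invoked from the queue's own release path. A minimal userspace model of the resulting single-release-point shape (plain malloc/free, not the kobject API; error handling elided):

#include <stdio.h>
#include <stdlib.h>

struct hctx { int id; };

struct queue {
	struct hctx **hctxs;
	int nr;
};

/* One release point, run last, frees everything the children used to
 * free piecemeal - the analogue of blk_mq_release(). */
static void queue_release(struct queue *q)
{
	for (int i = 0; i < q->nr; i++)
		free(q->hctxs[i]);	/* was done in the hctx release */
	free(q->hctxs);
	free(q);
}

int main(void)
{
	struct queue *q = malloc(sizeof(*q));

	q->nr = 2;
	q->hctxs = calloc(q->nr, sizeof(*q->hctxs));
	for (int i = 0; i < q->nr; i++)
		q->hctxs[i] = malloc(sizeof(struct hctx));
	queue_release(q);
	puts("freed in one place, after all users are gone");
	return 0;
}
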
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 935ea2aa0730..faaf36ade7eb 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj) if (!q->mq_ops) blk_free_flush_queue(q->fq); + else + blk_mq_release(q); blk_trace_shutdown(q); diff --git a/drivers/Kconfig b/drivers/Kconfig index 694d5a70d6ce..c70d6e45dc10 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -134,8 +134,6 @@ source "drivers/staging/Kconfig" source "drivers/platform/Kconfig" -source "drivers/soc/Kconfig" - source "drivers/clk/Kconfig" source "drivers/hwspinlock/Kconfig" diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 4f3febf8a589..e75737fd7eef 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -1,7 +1,7 @@ /* * ACPI support for Intel Lynxpoint LPSS. * - * Copyright (C) 2013, 2014, Intel Corporation + * Copyright (C) 2013, Intel Corporation * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> * @@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_CLK_DIVIDER BIT(2) #define LPSS_LTR BIT(3) #define LPSS_SAVE_CTX BIT(4) -#define LPSS_DEV_PROXY BIT(5) -#define LPSS_PROXY_REQ BIT(6) struct lpss_private_data; @@ -72,10 +70,8 @@ struct lpss_device_desc { void (*setup)(struct lpss_private_data *pdata); }; -static struct device *proxy_device; - static struct lpss_device_desc lpss_dma_desc = { - .flags = LPSS_CLK | LPSS_PROXY_REQ, + .flags = LPSS_CLK, }; struct lpss_private_data { @@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = { }; static struct lpss_device_desc byt_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | - LPSS_DEV_PROXY, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = lpss_uart_setup, }; static struct lpss_device_desc byt_spi_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | - LPSS_DEV_PROXY, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, }; static struct lpss_device_desc byt_sdio_dev_desc = { - .flags = LPSS_CLK | LPSS_DEV_PROXY, + .flags = LPSS_CLK, }; static struct lpss_device_desc byt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY, + .flags = LPSS_CLK | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = byt_i2c_setup, }; @@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev, adev->driver_data = pdata; pdev = acpi_create_platform_device(adev); if (!IS_ERR_OR_NULL(pdev)) { - if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY) - proxy_device = &pdev->dev; return 1; } @@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) if (pdata->dev_desc->flags & LPSS_SAVE_CTX) acpi_lpss_save_ctx(dev, pdata); - ret = acpi_dev_runtime_suspend(dev); - if (ret) - return ret; - - if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) - return pm_runtime_put_sync_suspend(proxy_device); - - return 0; + return acpi_dev_runtime_suspend(dev); } static int acpi_lpss_runtime_resume(struct device *dev) @@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) { - ret = pm_runtime_get_sync(proxy_device); - if (ret) - return ret; - } - ret = acpi_dev_runtime_resume(dev); if (ret) return ret; diff --git 
a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 0da5865df5b1..beb8b27d4621 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -51,9 +51,11 @@ struct regmap_async { struct regmap { union { struct mutex mutex; - spinlock_t spinlock; + struct { + spinlock_t spinlock; + unsigned long spinlock_flags; + }; }; - unsigned long spinlock_flags; regmap_lock lock; regmap_unlock unlock; void *lock_arg; /* This is passed to lock/unlock functions */ @@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, void regmap_async_complete_cb(struct regmap_async *async, int ret); +enum regmap_endian regmap_get_val_endian(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config); + extern struct regcache_ops regcache_rbtree_ops; extern struct regcache_ops regcache_lzo_ops; extern struct regcache_ops regcache_flat_ops; diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c index e4c45d2299c1..8d304e2a943d 100644 --- a/drivers/base/regmap/regmap-ac97.c +++ b/drivers/base/regmap/regmap-ac97.c @@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg, } static const struct regmap_bus ac97_regmap_bus = { - .reg_write = regmap_ac97_reg_write, - .reg_read = regmap_ac97_reg_read, + .reg_write = regmap_ac97_reg_write, + .reg_read = regmap_ac97_reg_read, }; /** diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index 053150a7f9f2..4b76e33110a2 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -14,6 +14,7 @@ #include <linux/i2c.h> #include <linux/module.h> +#include "internal.h" static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, unsigned int *val) @@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = { .reg_read = regmap_smbus_word_reg_read, }; +static int regmap_smbus_word_read_swapped(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg > 0xff) + return -EINVAL; + + ret = i2c_smbus_read_word_swapped(i2c, reg); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (val > 0xffff || reg > 0xff) + return -EINVAL; + + return i2c_smbus_write_word_swapped(i2c, reg, val); +} + +static struct regmap_bus regmap_smbus_word_swapped = { + .reg_write = regmap_smbus_word_write_swapped, + .reg_read = regmap_smbus_word_read_swapped, +}; + static int regmap_i2c_write(void *context, const void *data, size_t count) { struct device *dev = context; @@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, else if (config->val_bits == 16 && config->reg_bits == 8 && i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WORD_DATA)) - return &regmap_smbus_word; + switch (regmap_get_val_endian(&i2c->dev, NULL, config)) { + case REGMAP_ENDIAN_LITTLE: + return &regmap_smbus_word; + case REGMAP_ENDIAN_BIG: + return &regmap_smbus_word_swapped; + default: /* everything else is not supported */ + break; + } else if (config->val_bits == 8 && config->reg_bits == 8 && i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index d2f8a818d200..f99b098ddabf 100644 --- 
a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, return REGMAP_ENDIAN_BIG; } -static enum regmap_endian regmap_get_val_endian(struct device *dev, - const struct regmap_bus *bus, - const struct regmap_config *config) +enum regmap_endian regmap_get_val_endian(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config) { struct device_node *np; enum regmap_endian endian; @@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev, /* Use this if no other value was found */ return REGMAP_ENDIAN_BIG; } +EXPORT_SYMBOL_GPL(regmap_get_val_endian); /** * regmap_init(): Initialise register map diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3ec85dfce124..8a86b62466f7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev) * If an image has a non-zero parent overlap, get a reference to its * parent. * - * We must get the reference before checking for the overlap to - * coordinate properly with zeroing the parent overlap in - * rbd_dev_v2_parent_info() when an image gets flattened. We - * drop it again if there is no overlap. - * * Returns true if the rbd device has a parent with a non-zero * overlap and a reference for it was successfully taken, or * false otherwise. */ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) { - int counter; + int counter = 0; if (!rbd_dev->parent_spec) return false; - counter = atomic_inc_return_safe(&rbd_dev->parent_ref); - if (counter > 0 && rbd_dev->parent_overlap) - return true; - - /* Image was flattened, but parent is not yet torn down */ + down_read(&rbd_dev->header_rwsem); + if (rbd_dev->parent_overlap) + counter = atomic_inc_return_safe(&rbd_dev->parent_ref); + up_read(&rbd_dev->header_rwsem); if (counter < 0) rbd_warn(rbd_dev, "parent reference overflow"); - return false; + return counter > 0; } /* @@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) */ if (rbd_dev->parent_overlap) { rbd_dev->parent_overlap = 0; - smp_mb(); rbd_dev_parent_put(rbd_dev); pr_info("%s: clone image has been flattened\n", rbd_dev->disk->disk_name); @@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) * treat it specially. 
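
The rbd_dev_parent_get() rework above replaces increment-then-check plus memory barriers with check-then-increment under header_rwsem, which is also why the smp_mb() calls next to the parent_overlap updates can be dropped. A hedged pthreads sketch of the corrected ordering (generic code, not the rbd implementation; the flattening path would take the write lock before zeroing the overlap):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long parent_overlap = 4096; /* zeroed when the image is flattened */
static int parent_ref;

static bool parent_get(void)
{
	int counter = 0;

	/* Check the overlap and take the reference as one step that is
	 * atomic with respect to flattening - no barrier games needed. */
	pthread_rwlock_rdlock(&header_rwsem);
	if (parent_overlap)
		counter = ++parent_ref;
	pthread_rwlock_unlock(&header_rwsem);

	return counter > 0;
}

int main(void)
{
	printf("parent ref taken: %s\n", parent_get() ? "yes" : "no");
	return 0;
}
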
*/ rbd_dev->parent_overlap = overlap; - smp_mb(); if (!overlap) { /* A null parent_spec indicates it's the initial probe */ @@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) { struct rbd_image_header *header; - /* Drop parent reference unless it's already been done (or none) */ - - if (rbd_dev->parent_overlap) - rbd_dev_parent_put(rbd_dev); + rbd_dev_parent_put(rbd_dev); /* Free dynamic fields from the header, then zero it out */ diff --git a/drivers/char/random.c b/drivers/char/random.c index 04645c09fe5e..9cd6968e2f92 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f) __u32 c = f->pool[2], d = f->pool[3]; a += b; c += d; - b = rol32(a, 6); d = rol32(c, 27); + b = rol32(b, 6); d = rol32(d, 27); d ^= a; b ^= c; a += b; c += d; - b = rol32(a, 16); d = rol32(c, 14); + b = rol32(b, 16); d = rol32(d, 14); d ^= a; b ^= c; a += b; c += d; - b = rol32(a, 6); d = rol32(c, 27); + b = rol32(b, 6); d = rol32(d, 27); d ^= a; b ^= c; a += b; c += d; - b = rol32(a, 16); d = rol32(c, 14); + b = rol32(b, 16); d = rol32(d, 14); d ^= a; b ^= c; f->pool[0] = a; f->pool[1] = b; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 3f44f292d066..91f86131bb7a 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -13,6 +13,7 @@ config COMMON_CLK bool select HAVE_CLK_PREPARE select CLKDEV_LOOKUP + select SRCU ---help--- The common clock framework is a single definition of struct clk, useful across many platforms, as well as an diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 29b2ef5a68b9..a171fef2c2b6 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -2,6 +2,7 @@ menu "CPU Frequency scaling" config CPU_FREQ bool "CPU Frequency scaling" + select SRCU help CPU Frequency scaling allows you to change the clock speed of CPUs on the fly. This is a nice method to save power, because diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index faf4e70c42e0..3891f6781298 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -1,5 +1,6 @@ menuconfig PM_DEVFREQ bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" + select SRCU help A device may have a list of frequencies and voltages available. 
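
The fast_mix() hunk in drivers/char/random.c above deserves a closer look: the old code assigned b = rol32(a, 6), overwriting b with a rotated copy of the fresh sum a and discarding b's own state (likewise for d), which weakened the mixing; the fix rotates b and d themselves. A standalone sketch of one corrected round pair, using the same rotation constants as the patch (plain C, outside the kernel):

#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

/* One add-rotate-xor round of the fixed mix: every word keeps
 * contributing its own history instead of being clobbered. */
static void mix_round(uint32_t p[4], unsigned int r1, unsigned int r2)
{
	p[0] += p[1]; p[2] += p[3];
	p[1] = rol32(p[1], r1); p[3] = rol32(p[3], r2);
	p[3] ^= p[0]; p[1] ^= p[2];
}

int main(void)
{
	uint32_t pool[4] = { 1, 2, 3, 4 };

	mix_round(pool, 6, 27);
	mix_round(pool, 16, 14);
	mix_round(pool, 6, 27);
	mix_round(pool, 16, 14);
	printf("%08x %08x %08x %08x\n", pool[0], pool[1], pool[2], pool[3]);
	return 0;
}
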
devfreq, a generic DVFS framework can be registered for a device diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index da9c316059bc..eea5d7e578c9 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client, client->irq = irq_of_parse_and_map(client->dev.of_node, 0); } else { pdata = dev_get_platdata(&client->dev); - if (!pdata || !gpio_is_valid(pdata->base)) { - dev_dbg(&client->dev, "invalid platform data\n"); - return -EINVAL; + if (!pdata) { + pdata = devm_kzalloc(&client->dev, + sizeof(struct mcp23s08_platform_data), + GFP_KERNEL); + pdata->base = -1; } } @@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi) } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); - if (!pdata || !gpio_is_valid(pdata->base)) { - dev_dbg(&spi->dev, - "invalid or missing platform data\n"); - return -EINVAL; + if (!pdata) { + pdata = devm_kzalloc(&spi->dev, + sizeof(struct mcp23s08_platform_data), + GFP_KERNEL); + pdata->base = -1; } for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 30646cfe0efa..f476ae2eb0b3 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -88,6 +88,8 @@ struct gpio_bank { #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) #define LINE_USED(line, offset) (line & (BIT(offset))) +static void omap_gpio_unmask_irq(struct irq_data *d); + static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) { return bank->chip.base + gpio_irq; @@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask) return readl_relaxed(reg) & mask; } +static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio, + unsigned offset) +{ + if (!LINE_USED(bank->mod_usage, offset)) { + omap_enable_gpio_module(bank, offset); + omap_set_gpio_direction(bank, offset, 1); + } + bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); +} + static int omap_gpio_irq_type(struct irq_data *d, unsigned type) { struct gpio_bank *bank = omap_irq_data_get_bank(d); @@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) spin_lock_irqsave(&bank->lock, flags); offset = GPIO_INDEX(bank, gpio); retval = omap_set_gpio_triggering(bank, offset, type); - if (!LINE_USED(bank->mod_usage, offset)) { - omap_enable_gpio_module(bank, offset); - omap_set_gpio_direction(bank, offset, 1); - } else if (!omap_gpio_is_input(bank, BIT(offset))) { + omap_gpio_init_irq(bank, gpio, offset); + if (!omap_gpio_is_input(bank, BIT(offset))) { spin_unlock_irqrestore(&bank->lock, flags); return -EINVAL; } - - bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); spin_unlock_irqrestore(&bank->lock, flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) @@ -792,6 +800,24 @@ exit: pm_runtime_put(bank->dev); } +static unsigned int omap_gpio_irq_startup(struct irq_data *d) +{ + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); + unsigned long flags; + unsigned offset = GPIO_INDEX(bank, gpio); + + if (!BANK_USED(bank)) + pm_runtime_get_sync(bank->dev); + + spin_lock_irqsave(&bank->lock, flags); + omap_gpio_init_irq(bank, gpio, offset); + spin_unlock_irqrestore(&bank->lock, flags); + omap_gpio_unmask_irq(d); + + return 0; +} + static void omap_gpio_irq_shutdown(struct irq_data *d) { struct gpio_bank *bank = omap_irq_data_get_bank(d); @@ -1181,6 +1207,7 @@ static int 
omap_gpio_probe(struct platform_device *pdev) if (!irqc) return -ENOMEM; + irqc->irq_startup = omap_gpio_irq_startup, irqc->irq_shutdown = omap_gpio_irq_shutdown, irqc->irq_ack = omap_gpio_ack_irq, irqc->irq_mask = omap_gpio_mask_irq, diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index f62aa115d79a..7722ed53bd65 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name, if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); + put_device(tdev); } else { status = -ENODEV; } @@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) } status = sysfs_set_active_low(desc, dev, value); - + put_device(dev); unlock: mutex_unlock(&sysfs_lock); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 633532a2e7ec..25bc47f3c1cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include "kfd_priv.h" #include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers.h" #define MQD_SIZE_ALIGNED 768 @@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->shared_resources = *gpu_resources; /* calculate max size of mqds needed for queues */ - size = max_num_of_processes * - max_num_of_queues_per_process * - kfd->device_info->mqd_size_aligned; + size = max_num_of_queues_per_device * + kfd->device_info->mqd_size_aligned; /* add another 512KB for all other allocations on gart */ size += 512 * 1024; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 30c8fda9622e..0fd592799d58 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + if (list_empty(&qpd->queues_list)) { retval = allocate_vmid(dqm, qpd, q); if (retval != 0) { @@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, list_add(&q->list, &qpd->queues_list); dqm->queue_count++; + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. 
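
The gpiolib-sysfs hunks above plug device reference leaks: a device lookup hands back its result with the refcount raised, and both gpiod_export_link() and gpiod_sysfs_set_active_low() must balance that with put_device() once they are done. A minimal userspace model of the get/put contract being enforced (toy refcount, not the driver-core API):

#include <stdio.h>

/* Hedged model: the lookup takes a reference, every exit path of the
 * caller drops it. Not the kernel's struct device. */
struct device { int refs; };

static struct device global_dev = { .refs = 1 };

static struct device *find_device(void)
{
	global_dev.refs++;	/* lookup returns with a reference held... */
	return &global_dev;
}

static void put_device(struct device *dev)
{
	dev->refs--;		/* ...which the caller must drop */
}

static int export_link(void)
{
	struct device *tdev = find_device();
	int status;

	if (tdev != NULL) {
		status = 0;	/* the sysfs link would be created here */
		put_device(tdev); /* the fix: drop the lookup reference */
	} else {
		status = -1;
	}
	return status;
}

int main(void)
{
	export_link();
	printf("refs after export_link: %d\n", global_dev.refs); /* back to 1 */
	return 0;
}
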
+ */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); return 0; } @@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, if (list_empty(&qpd->queues_list)) deallocate_vmid(dqm, qpd, q); dqm->queue_count--; + + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + out: mutex_unlock(&dqm->lock); return retval; @@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm, for (i = 0; i < pipes_num; i++) { inx = i + first_pipe; + /* + * HPD buffer on GTT is allocated by amdkfd, no need to waste + * space in GTT for pipelines we don't initialize + */ pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); /* = log2(bytes/4)-1 */ - kfd2kgd->init_pipeline(dqm->dev->kgd, i, + kfd2kgd->init_pipeline(dqm->dev->kgd, inx, CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); } @@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm) pr_debug("kfd: In %s\n", __func__); - retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); + retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); if (retval != 0) return retval; @@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, pr_debug("kfd: In func %s\n", __func__); mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. + */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + list_add(&kq->list, &qpd->priv_queue_list); dqm->queue_count++; qpd->is_debug = true; @@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, dqm->queue_count--; qpd->is_debug = false; execute_queues_cpsch(dqm, false); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type. + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); mutex_unlock(&dqm->lock); } @@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + retval = -EPERM; + goto out; + } + mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); if (mqd == NULL) { mutex_unlock(&dqm->lock); @@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, retval = execute_queues_cpsch(dqm, false); } + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. 
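
Taken together, the amdkfd hunks enforce one invariant: every queue-creation path checks total_queue_count against max_num_of_queues_per_device while holding dqm->lock and fails with -EPERM at the limit, and every create/destroy adjusts the counter unconditionally, whatever the queue type. A small pthreads sketch of that check-and-count shape (illustrative, not the driver code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUES 4096 /* mirrors the new per-device default */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int total_queue_count;

/* check-and-increment under one lock, as the amdkfd paths now do */
static int create_queue(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (total_queue_count >= MAX_QUEUES)
		ret = -EPERM;		/* the error the driver returns */
	else
		total_queue_count++;	/* unconditional, for every queue type */
	pthread_mutex_unlock(&lock);
	return ret;
}

static void destroy_queue(void)
{
	pthread_mutex_lock(&lock);
	total_queue_count--;		/* unconditional on destroy as well */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	printf("create: %d\n", create_queue());
	destroy_queue();
	return 0;
}
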
+ */ + dqm->total_queue_count++; + + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + out: mutex_unlock(&dqm->lock); return retval; @@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c3f189e8ae35..52035bf0c1cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -130,6 +130,7 @@ struct device_queue_manager { struct list_head queues; unsigned int processes_count; unsigned int queue_count; + unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; unsigned int vmid_bitmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 95d5af138e6e..1c385c23dd0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); MODULE_PARM_DESC(sched_policy, "Kernel cmdline parameter that defines the amdkfd scheduling policy"); -int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; -module_param(max_num_of_processes, int, 0444); -MODULE_PARM_DESC(max_num_of_processes, - "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); - -int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; -module_param(max_num_of_queues_per_process, int, 0444); -MODULE_PARM_DESC(max_num_of_queues_per_process, - "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); +int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; +module_param(max_num_of_queues_per_device, int, 0444); +MODULE_PARM_DESC(max_num_of_queues_per_device, + "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); bool kgd2kfd_init(unsigned interface_version, const struct kfd2kgd_calls *f2g, @@ -100,16 +95,10 @@ static int __init kfd_module_init(void) } /* Verify module parameters */ - if ((max_num_of_processes < 0) || - (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { - pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); - return -1; - } - - if ((max_num_of_queues_per_process < 0) || - (max_num_of_queues_per_process > - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { - pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); + if ((max_num_of_queues_per_device < 1) || + (max_num_of_queues_per_device > + KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { + pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); return -1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); int kfd_pasid_init(void) { - pasid_limit = max_num_of_processes; + pasid_limit = KFD_MAX_NUM_OF_PROCESSES; pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); 
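
kfd_pasid_init() above now sizes its bitmap from the compile-time KFD_MAX_NUM_OF_PROCESSES rather than a module parameter; the same BITS_TO_LONGS allocation plus first-zero-bit scan also backs the per-process queue-slot bitmap below. A self-contained sketch of that bitmap ID allocator (a simplified stand-in for the kernel's find_first_zero_bit helpers):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define PASID_LIMIT 512 /* KFD_MAX_NUM_OF_PROCESSES in the patch */
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *pasid_bitmap;

/* scan for the first clear bit and claim it, like the kernel's
 * find_first_zero_bit()/set_bit() pairing */
static long alloc_pasid(void)
{
	for (long i = 0; i < PASID_LIMIT; i++) {
		unsigned long *word = &pasid_bitmap[i / BITS_PER_LONG];
		unsigned long bit = 1UL << (i % BITS_PER_LONG);

		if (!(*word & bit)) {
			*word |= bit;
			return i;
		}
	}
	return -1;	/* all IDs in use */
}

int main(void)
{
	pasid_bitmap = calloc(BITS_TO_LONGS(PASID_LIMIT), sizeof(long));
	if (!pasid_bitmap)
		return 1;
	printf("first pasid: %ld, next: %ld\n", alloc_pasid(), alloc_pasid());
	free(pasid_bitmap);
	return 0;
}
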
if (!pasid_bitmap) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b3dc13c83169..96dc10e8904a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -52,20 +52,19 @@ #define kfd_alloc_struct(ptr_to_struct) \ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) -/* Kernel module parameter to specify maximum number of supported processes */ -extern int max_num_of_processes; - -#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 #define KFD_MAX_NUM_OF_PROCESSES 512 +#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 /* - * Kernel module parameter to specify maximum number of supported queues - * per process + * Kernel module parameter to specify maximum number of supported queues per + * device */ -extern int max_num_of_queues_per_process; +extern int max_num_of_queues_per_device; -#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 -#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ + (KFD_MAX_NUM_OF_PROCESSES * \ + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) #define KFD_KERNEL_QUEUE_SIZE 2048 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 47526780d736..2fda1927bff7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, pr_debug("kfd: in %s\n", __func__); found = find_first_zero_bit(pqm->queue_slot_bitmap, - max_num_of_queues_per_process); + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); pr_debug("kfd: the new slot id %lu\n", found); - if (found >= max_num_of_queues_per_process) { + if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { pr_info("amdkfd: Can not open more queues for process with pasid %d\n", pqm->process->pasid); return -ENOMEM; @@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) INIT_LIST_HEAD(&pqm->queues); pqm->queue_slot_bitmap = - kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, + kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_BYTE), GFP_KERNEL); if (pqm->queue_slot_bitmap == NULL) return -ENOMEM; @@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, pqn->kq = NULL; retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); print_queue(q); break; case KFD_QUEUE_TYPE_DIQ: @@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("kfd: error dqm create queue\n"); + pr_debug("Error dqm create queue\n"); goto err_create_queue; } @@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, err_create_queue: kfree(pqn); err_allocate_pqn: + /* check if queues list is empty unregister process from device */ clear_bit(*qid, pqm->queue_slot_bitmap); + if (list_empty(&pqm->queues)) + dev->dqm->unregister_process(dev->dqm, &pdd->qpd); return retval; } @@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, BUG_ON(!pqm); pqn = get_queue_by_qid(pqm, qid); - BUG_ON(!pqn); + if (!pqn) { + pr_debug("amdkfd: No queue %d exists for update operation\n", + qid); + return -EFAULT; + } pqn->q->properties.queue_address = p->queue_address; pqn->q->properties.queue_size = p->queue_size; diff --git 
a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index c2a1cba1e984..b9140032962d 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -16,9 +16,12 @@ #include "cirrus_drv.h" int cirrus_modeset = -1; +int cirrus_bpp = 24; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, cirrus_modeset, int, 0400); +MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)"); +module_param_named(bpp, cirrus_bpp, int, 0400); /* * This is the generic driver code. This binds the driver to the drm core, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 693a4565c4ff..705061537a27 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) int cirrus_bo_push_sysram(struct cirrus_bo *bo); int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); + +extern int cirrus_bpp; + #endif /* __CIRRUS_DRV_H__ */ diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 4c2d68e9102d..e4b976658087 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ const int max_size = cdev->mc.vram_size; + if (bpp > cirrus_bpp) + return false; if (bpp > 32) return false; diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 99d4a74ffeaf..61385f2298bf 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector) int count; /* Just add a static list of modes */ - count = drm_add_modes_noedid(connector, 1280, 1024); - drm_set_preferred_mode(connector, 1024, 768); + if (cirrus_bpp <= 24) { + count = drm_add_modes_noedid(connector, 1280, 1024); + drm_set_preferred_mode(connector, 1024, 768); + } else { + count = drm_add_modes_noedid(connector, 800, 600); + drm_set_preferred_mode(connector, 800, 600); + } return count; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index cf775a4449c1..dc386ebe5193 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ } EXPORT_SYMBOL(drm_fb_helper_add_one_connector); +static void remove_from_modeset(struct drm_mode_set *set, + struct drm_connector *connector) +{ + int i, j; + + for (i = 0; i < set->num_connectors; i++) { + if (set->connectors[i] == connector) + break; + } + + if (i == set->num_connectors) + return; + + for (j = i + 1; j < set->num_connectors; j++) { + set->connectors[j - 1] = set->connectors[j]; + } + set->num_connectors--; + + /* because i915 is pissy about this.. + * TODO maybe need to makes sure we set it back to !=NULL somewhere? 
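
remove_from_modeset() above is a plain order-preserving array removal: find the connector, shift the tail left one slot, shrink num_connectors, and clear set->fb when the set empties so a later modeset does not apply a framebuffer with zero connectors. The same shape in a standalone sketch (ints standing in for connector pointers):

#include <stdio.h>

/* order-preserving removal by shifting the tail left, as in
 * remove_from_modeset() */
static int remove_item(int *arr, int n, int victim)
{
	int i, j;

	for (i = 0; i < n; i++)
		if (arr[i] == victim)
			break;
	if (i == n)
		return n;	/* not present, nothing to do */

	for (j = i + 1; j < n; j++)
		arr[j - 1] = arr[j];
	return n - 1;
}

int main(void)
{
	int conns[] = { 10, 11, 12 };
	int n = remove_item(conns, 3, 11);

	printf("%d left: %d %d\n", n, conns[0], conns[1]); /* 2 left: 10 12 */
	return 0;
}
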
+ */ + if (set->num_connectors == 0) + set->fb = NULL; +} + int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector) { @@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, } fb_helper->connector_count--; kfree(fb_helper_connector); + + /* also cleanup dangling references to the connector: */ + for (i = 0; i < fb_helper->crtc_count; i++) + remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); + return 0; } EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d4762799351d..a9041d1a8ff0 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -32,6 +32,8 @@ struct tda998x_priv { struct i2c_client *cec; struct i2c_client *hdmi; + struct mutex mutex; + struct delayed_work dwork; uint16_t rev; uint8_t current_page; int dpms; @@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) uint8_t addr = REG2ADDR(reg); int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return ret; + goto out; ret = i2c_master_send(client, &addr, sizeof(addr)); if (ret < 0) @@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) if (ret < 0) goto fail; - return ret; + goto out; fail: dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); return ret; } @@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) buf[0] = REG2ADDR(reg); memcpy(&buf[1], p, cnt); + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, cnt + 1); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static int @@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) uint8_t buf[] = {REG2ADDR(reg), val}; int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static void @@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static void @@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv) reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); } +/* handle HDMI connect/disconnect */ +static void tda998x_hpd(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct tda998x_priv *priv = + container_of(dwork, struct tda998x_priv, dwork); + + if (priv->encoder && priv->encoder->dev) + drm_kms_helper_hotplug_event(priv->encoder->dev); +} + /* * only 2 interrupts may occur: screen plug/unplug and EDID read */ @@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) priv->wq_edid_wait = 0; wake_up(&priv->wq_edid); } else if (cec != 0) { /* HPD change */ - if (priv->encoder && priv->encoder->dev) - drm_helper_hpd_irq_event(priv->encoder->dev); + schedule_delayed_work(&priv->dwork, HZ/10); } return 
IRQ_HANDLED; } @@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv) /* disable all IRQs and free the IRQ handler */ cec_write(priv, REG_CEC_RXSHPDINTENA, 0); reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); - if (priv->hdmi->irq) + if (priv->hdmi->irq) { free_irq(priv->hdmi->irq, priv); + cancel_delayed_work_sync(&priv->dwork); + } i2c_unregister_device(priv->cec); } @@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) struct device_node *np = client->dev.of_node; u32 video; int rev_lo, rev_hi, ret; + unsigned short cec_addr; priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); @@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) priv->current_page = 0xff; priv->hdmi = client; - priv->cec = i2c_new_dummy(client->adapter, 0x34); + /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ + cec_addr = 0x34 + (client->addr & 0x03); + priv->cec = i2c_new_dummy(client->adapter, cec_addr); if (!priv->cec) return -ENODEV; priv->dpms = DRM_MODE_DPMS_OFF; + mutex_init(&priv->mutex); /* protect the page access */ + /* wake up the device: */ cec_write(priv, REG_CEC_ENAMODS, CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); @@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) if (client->irq) { int irqf_trigger; - /* init read EDID waitqueue */ + /* init read EDID waitqueue and HDP work */ init_waitqueue_head(&priv->wq_edid); + INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); /* clear pending interrupts */ reg_read(priv, REG_INT_FLAGS_0); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 574057cd1d09..7643300828c3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev) } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - WARN_ON(IS_HSW_ULT(dev)); - } else if (IS_BROADWELL(dev)) { - dev_priv->pch_type = PCH_LPT; - dev_priv->pch_id = - INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; - DRM_DEBUG_KMS("This is Broadwell, assuming " - "LynxPoint LP PCH\n"); + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); + WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - WARN_ON(!IS_HSW_ULT(dev)); + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); + WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_SPT; DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e9f891c432f8..9d7a7155bf02 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table { #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ - ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ - (INTEL_DEVID(dev) & 0xf) == 0x6 || \ + ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ (INTEL_DEVID(dev) & 0xf) == 0xe)) #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ (INTEL_DEVID(dev) & 0x00F0) == 0x0020) diff --git a/drivers/gpu/drm/i915/i915_gem.c 
b/drivers/gpu/drm/i915/i915_gem.c index 76354d3ba925..5f614828d365 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, u32 size = i915_gem_obj_ggtt_size(obj); uint64_t val; + /* Adjust fence size to match tiled area */ + if (obj->tiling_mode != I915_TILING_NONE) { + uint32_t row_size = obj->stride * + (obj->tiling_mode == I915_TILING_Y ? 32 : 8); + size = (size / row_size) * row_size; + } + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32; val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; @@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev) for (i = 0; i < NUM_L3_SLICES(dev); i++) i915_gem_l3_remap(&dev_priv->ring[RCS], i); - /* - * XXX: Contexts should only be initialized once. Doing a switch to the - * default context switch however is something we'd like to do after - * reset or thaw (the latter may not actually be necessary for HW, but - * goes with our code better). Context switching requires rings (for - * the do_switch), but before enabling PPGTT. So don't move this. - */ - ret = i915_gem_context_enable(dev_priv); + ret = i915_ppgtt_init_hw(dev); if (ret && ret != -EIO) { - DRM_ERROR("Context enable failed %d\n", ret); + DRM_ERROR("PPGTT enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - - return ret; } - ret = i915_ppgtt_init_hw(dev); + ret = i915_gem_context_enable(dev_priv); if (ret && ret != -EIO) { - DRM_ERROR("PPGTT enable failed %d\n", ret); + DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); + + return ret; } return ret; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 4d63839bd9b4..dfb783a8f2c3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) WARN_ON(panel->backlight.max == 0); - if (panel->backlight.level == 0) { + if (panel->backlight.level <= panel->backlight.min) { panel->backlight.level = panel->backlight.max; if (panel->backlight.device) panel->backlight.device->props.brightness = diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index a0133c74f4cf..42cd0cffe210 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 4be2bb7cbef3..ce787a9f12c0 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 74f06d540591..279801ca5110 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) return r; rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = 
&r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; return radeon_gart_table_ram_alloc(rdev); } @@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) WREG32(RADEON_AIC_HI_ADDR, 0); } +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) +{ + return addr; +} + void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) + uint64_t entry) { u32 *gtt = rdev->gart.ptr; - gtt[i] = cpu_to_le32(lower_32_bits(addr)); + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } void r100_pci_gart_fini(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 064ad5569cca..08d68f3e13e9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) -void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = rdev->gart.ptr; - addr = (lower_32_bits(addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24); if (flags & RADEON_GART_PAGE_READ) @@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R300_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) addr |= R300_PTE_UNSNOOPED; + return addr; +} + +void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = rdev->gart.ptr; + /* on x86 we want this to be CPU endian, on powerpc * on powerpc without HW swappers, it'll get swapped on way * into VRAM - so no need for cpu_to_le32 on VRAM tables */ - writel(addr, ((void __iomem *)ptr) + (i * 4)); + writel(entry, ((void __iomem *)ptr) + (i * 4)); } int rv370_pcie_gart_init(struct radeon_device *rdev) @@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; return radeon_gart_table_vram_alloc(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 54529b837afa..3f2a8d3febca 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev); * Dummy page */ struct radeon_dummy_page { + uint64_t entry; struct page *page; dma_addr_t addr; }; @@ -645,7 +646,7 @@ struct radeon_gart { unsigned num_cpu_pages; unsigned table_size; struct page **pages; - dma_addr_t *pages_addr; + uint64_t *pages_entry; bool ready; }; @@ -1847,8 +1848,9 @@ struct radeon_asic { /* gart */ struct { void (*tlb_flush)(struct radeon_device *rdev); + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); void (*set_page)(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); } gart; struct { int (*init)(struct radeon_device *rdev); @@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 
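The radeon hunks above and below split the GART callback in two: get_page_entry() is a pure translation from (addr, flags) to a page-table entry, and set_page() only performs the write. Entries can therefore be computed and cached in pages_entry[] even while the VRAM-backed table is unmapped, then replayed when radeon_gart_table_vram_pin() maps it again (see the radeon_gart.c hunk further down). A minimal userspace sketch of that compute/cache/replay pattern, assuming simplified stand-in flag bits rather than the real RADEON_GART_PAGE_* encoding:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in flag bits, not the real RADEON_GART_PAGE_* values. */
#define PAGE_VALID (1u << 0)
#define PAGE_WRITE (1u << 1)

/* get_page_entry() analogue: pure (addr, flags) -> PTE translation,
 * touching no device state, so it is safe while the table is unmapped. */
static uint64_t get_page_entry(uint64_t addr, uint32_t flags)
{
	uint64_t entry = addr & ~0xfffULL;

	if (flags & PAGE_VALID)
		entry |= 1;
	if (flags & PAGE_WRITE)
		entry |= 2;
	return entry;
}

/* set_page() analogue: the only step that needs the mapped table. */
static void set_page(uint64_t *table, unsigned int i, uint64_t entry)
{
	table[i] = entry;
}

int main(void)
{
	enum { NPAGES = 4 };
	uint64_t cache[NPAGES];	/* pages_entry[] analogue */
	uint64_t *table;
	unsigned int i;

	/* Bind while the table is unmapped: entries are only cached. */
	for (i = 0; i < NPAGES; i++)
		cache[i] = get_page_entry(0x100000ULL + i * 0x1000,
					  PAGE_VALID | PAGE_WRITE);

	/* Once the table is pinned and mapped, replay the cached entries. */
	table = calloc(NPAGES, sizeof(*table));
	if (!table)
		return 1;
	for (i = 0; i < NPAGES; i++)
		set_page(table, i, cache[i]);

	for (i = 0; i < NPAGES; i++)
		printf("pte[%u] = 0x%llx\n", i, (unsigned long long)table[i]);
	free(table);
	return 0;
}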
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) -#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) +#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) +#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 121aff6a3b41..ed0e10eee2dc 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) DRM_INFO("Forcing AGP to PCIE mode\n"); rdev->flags |= RADEON_IS_PCIE; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; } else { DRM_INFO("Forcing AGP to PCI mode\n"); rdev->flags |= RADEON_IS_PCI; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; } rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; @@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = { .mc_wait_for_idle = &rs400_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = { .mc_wait_for_idle = &rs600_mc_wait_for_idle, .gart = { .tlb_flush = &rs600_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = { .mc_wait_for_idle = &rs690_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = { .mc_wait_for_idle = 
&rv515_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = { .mc_wait_for_idle = &r520_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = { .get_gpu_clock_counter = &si_get_gpu_clock_counter, .gart = { .tlb_flush = &si_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = 
&rs600_gart_set_page, }, .vm = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 2a45d548d5ec..8d787d115653 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); @@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); +extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int rs400_gart_init(struct radeon_device *rdev); @@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); void rs600_irq_disable(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 9e7f23dd14bd..87d5fb21cb61 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -34,7 +34,8 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, uint64_t saddr, uint64_t daddr, - int flag, int n) + int flag, int n, + struct reservation_object *resv) { unsigned long start_jiffies; unsigned long end_jiffies; @@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, case RADEON_BENCHMARK_COPY_DMA: fence = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; case RADEON_BENCHMARK_COPY_BLIT: fence = radeon_copy_blit(rdev, saddr, daddr, 
size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; default: DRM_ERROR("Unknown copy method\n"); @@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_DMA, n); + RADEON_BENCHMARK_COPY_DMA, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) @@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.blit) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); + RADEON_BENCHMARK_COPY_BLIT, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ec65168f331..bd7519fdd3f4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) rdev->dummy_page.page = NULL; return -ENOMEM; } + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, + RADEON_GART_PAGE_DUMMY); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 102116902a07..913fafa597ad 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && pll->flags & RADEON_PLL_USE_REF_DIV) ref_div_max = pll->reference_div; + else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) + /* fix for problems on RS880 */ + ref_div_max = min(pll->max_ref_div, 7u); else ref_div_max = pll->max_ref_div; diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..5450fa95a47e 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) radeon_bo_unpin(rdev->gart.robj); radeon_bo_unreserve(rdev->gart.robj); rdev->gart.table_addr = gpu_addr; + + if (!r) { + int i; + + /* We might have dropped some GART table updates while it wasn't + * mapped, restore all entries + */ + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); + mb(); + radeon_gart_tlb_flush(rdev); + } + return r; } @@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, unsigned t; unsigned p; int i, j; - u64 page_base; if (!rdev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); @@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, for (i = 0; i < pages; i++, p++) { if (rdev->gart.pages[p]) { rdev->gart.pages[p] = NULL; - rdev->gart.pages_addr[p] = rdev->dummy_page.addr; - page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + rdev->gart.pages_entry[t] = rdev->dummy_page.entry; if (rdev->gart.ptr) { - radeon_gart_set_page(rdev, t, page_base, - RADEON_GART_PAGE_DUMMY); + radeon_gart_set_page(rdev, t, + rdev->dummy_page.entry); } - page_base += RADEON_GPU_PAGE_SIZE; } } } @@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, { unsigned t; unsigned p; - uint64_t page_base; + uint64_t page_base, page_entry; int i, j; if (!rdev->gart.ready) { @@ -285,14 +295,15 @@ int radeon_gart_bind(struct 
radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - rdev->gart.pages_addr[p] = dma_addr[i]; rdev->gart.pages[p] = pagelist[i]; - if (rdev->gart.ptr) { - page_base = rdev->gart.pages_addr[p]; - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base, flags); - page_base += RADEON_GPU_PAGE_SIZE; + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + page_entry = radeon_gart_get_page_entry(page_base, flags); + rdev->gart.pages_entry[t] = page_entry; + if (rdev->gart.ptr) { + radeon_gart_set_page(rdev, t, page_entry); } + page_base += RADEON_GPU_PAGE_SIZE; } } mb(); @@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev) radeon_gart_fini(rdev); return -ENOMEM; } - rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * - rdev->gart.num_cpu_pages); - if (rdev->gart.pages_addr == NULL) { + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * + rdev->gart.num_gpu_pages); + if (rdev->gart.pages_entry == NULL) { radeon_gart_fini(rdev); return -ENOMEM; } /* set GART entry to point to the dummy page by default */ - for (i = 0; i < rdev->gart.num_cpu_pages; i++) { - rdev->gart.pages_addr[i] = rdev->dummy_page.addr; - } + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + rdev->gart.pages_entry[i] = rdev->dummy_page.entry; return 0; } @@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev) */ void radeon_gart_fini(struct radeon_device *rdev) { - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { + if (rdev->gart.ready) { /* unbind pages */ radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); } rdev->gart.ready = false; vfree(rdev->gart.pages); - vfree(rdev->gart.pages_addr); + vfree(rdev->gart.pages_entry); rdev->gart.pages = NULL; - rdev->gart.pages_addr = NULL; + rdev->gart.pages_entry = NULL; radeon_dummy_page_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d0b4f7d1140d..ac3c1310b953 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return 0; } @@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return; } diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 8bf87f1203cc..bef9a0953284 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd) static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr) { - uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; + uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); lock_srbm(kgd, mec, pipe, 0, 0); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3cf9c1fa6475..686411e4e4f6 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - vm = &fpriv->vm; - 
r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - return r; - } - if (rdev->accel_working) { + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); + if (r) { + kfree(fpriv); + return r; + } + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { radeon_vm_fini(rdev, vm); @@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev, radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } + radeon_vm_fini(rdev, vm); } - radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 07b506b41008..791818165c76 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); r = PTR_ERR(fence); @@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); r = PTR_ERR(fence); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..2a5a4a9e772d 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) uint64_t result; /* page table offset */ - result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; + result &= ~RADEON_GPU_PAGE_MASK; return result; } @@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, */ /* NI is optimized for 256KB fragments, SI and newer for 64KB */ - uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? + uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; - uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; + uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? 
0x200 : 0x80; uint64_t frag_start = ALIGN(pe_start, frag_align); uint64_t frag_end = pe_end & ~(frag_align - 1); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) #define RS400_PTE_WRITEABLE (1 << 2) #define RS400_PTE_READABLE (1 << 3) -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) { uint32_t entry; - u32 *gtt = rdev->gart.ptr; entry = (lower_32_bits(addr) & PAGE_MASK) | ((upper_32_bits(addr) & 0xff) << 4); @@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, entry |= RS400_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) entry |= RS400_PTE_UNSNOOPED; - entry = cpu_to_le32(entry); - gtt[i] = entry; + return entry; +} + +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + u32 *gtt = rdev->gart.ptr; + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } int rs400_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9acb1c3c005b..74bce91aecc1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) radeon_gart_table_vram_free(rdev); } -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = (void *)rdev->gart.ptr; - addr = addr & 0xFFFFFFFFFFFFF000ULL; addr |= R600_PTE_SYSTEM; if (flags & RADEON_GART_PAGE_VALID) @@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R600_PTE_WRITEABLE; if (flags & RADEON_GART_PAGE_SNOOP) addr |= R600_PTE_SNOOPED; - writeq(addr, ptr + (i * 8)); + return addr; +} + +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = (void *)rdev->gart.ptr; + writeq(entry, ptr + (i * 8)); } int rs600_irq_set(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index aa7b872b2c43..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, if (unlikely(ret != 0)) --dev_priv->num_3d_resources; } else if (unhide_svga) { - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); } mutex_unlock(&dev_priv->release_mutex); @@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, mutex_lock(&dev_priv->release_mutex); if (unlikely(--dev_priv->num_3d_resources == 0)) vmw_release_device(dev_priv); - else if (hide_svga) { - 
mutex_lock(&dev_priv->hw_mutex); + else if (hide_svga) vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) | SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); - } n3d = (int32_t) dev_priv->num_3d_resources; mutex_unlock(&dev_priv->release_mutex); @@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->dev = dev; dev_priv->vmw_chipset = chipset; dev_priv->last_read_seqno = (uint32_t) -100; - mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); rwlock_init(&dev_priv->resource_lock); ttm_lock_init(&dev_priv->reservation_sem); + spin_lock_init(&dev_priv->hw_lock); + spin_lock_init(&dev_priv->waiter_lock); + spin_lock_init(&dev_priv->cap_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); @@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->enable_fb = enable_fbdev; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); svga_id = vmw_read(dev_priv, SVGA_REG_ID); if (svga_id != SVGA_ID_2) { ret = -ENOSYS; DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); - mutex_unlock(&dev_priv->hw_mutex); goto out_err0; } @@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->prim_bb_mem = dev_priv->vram_size; ret = vmw_dma_masks(dev_priv); - if (unlikely(ret != 0)) { - mutex_unlock(&dev_priv->hw_mutex); + if (unlikely(ret != 0)) goto out_err0; - } /* * Limit back buffer size to VRAM size. Remove this once @@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (dev_priv->prim_bb_mem > dev_priv->vram_size) dev_priv->prim_bb_mem = dev_priv->vram_size; - mutex_unlock(&dev_priv->hw_mutex); - vmw_print_capabilities(dev_priv->capabilities); if (dev_priv->capabilities & SVGA_CAP_GMR2) { @@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, if (unlikely(ret != 0)) return ret; vmw_kms_save_vga(dev_priv); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 0); - mutex_unlock(&dev_priv->hw_mutex); } if (active) { @@ -1196,9 +1184,7 @@ out_no_active_lock: if (!dev_priv->enable_fb) { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } return ret; } @@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, DRM_ERROR("Unable to clean VRAM on master drop.\n"); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } dev_priv->active_master = &dev_priv->fbdev_master; @@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); (void) vmw_read(dev_priv, SVGA_REG_ID); - mutex_unlock(&dev_priv->hw_mutex); /** * Reclaim 3d reference held by fbdev and potentially diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -399,7 +399,8 @@ struct vmw_private { uint32_t memory_size; bool has_gmr; bool has_mob; - struct 
mutex hw_mutex; + spinlock_t hw_lock; + spinlock_t cap_lock; /* * VGA registers. @@ -449,8 +450,9 @@ struct vmw_private { atomic_t marker_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; - int fence_queue_waiters; /* Protected by hw_mutex */ - int goal_queue_waiters; /* Protected by hw_mutex */ + spinlock_t waiter_lock; + int fence_queue_waiters; /* Protected by waiter_lock */ + int goal_queue_waiters; /* Protected by waiter_lock */ atomic_t fifo_queue_waiters; uint32_t last_read_seqno; spinlock_t irq_lock; @@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) return (struct vmw_master *) master->driver_priv; } +/* + * The locking here is fine-grained, so that it is performed once + * for every read- and write operation. This is of course costly, but we + * don't perform much register access in the timing critical paths anyway. + * Instead we have the extra benefit of being sure that we don't forget + * the hw lock around register accesses. + */ static inline void vmw_write(struct vmw_private *dev_priv, unsigned int offset, uint32_t value) { + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); } static inline uint32_t vmw_read(struct vmw_private *dev_priv, unsigned int offset) { - uint32_t val; + unsigned long irq_flags; + u32 val; + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); + return val; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -35,7 +35,7 @@ struct vmw_fence_manager { struct vmw_private *dev_priv; spinlock_t lock; struct list_head fence_list; - struct work_struct work, ping_work; + struct work_struct work; u32 user_fence_size; u32 fence_size; u32 event_fence_action_size; @@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) return "svga"; } -static void vmw_fence_ping_func(struct work_struct *work) -{ - struct vmw_fence_manager *fman = - container_of(work, struct vmw_fence_manager, ping_work); - - vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); -} - static bool vmw_fence_enable_signaling(struct fence *f) { struct vmw_fence_obj *fence = @@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; - if (mutex_trylock(&dev_priv->hw_mutex)) { - vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); - mutex_unlock(&dev_priv->hw_mutex); - } else - schedule_work(&fman->ping_work); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); return true; } @@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->fence_list); INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); - INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); fman->fifo_down = true; fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); @@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) bool lists_empty; (void) 
cancel_work_sync(&fman->work); - (void) cancel_work_sync(&fman->ping_work); spin_lock_irqsave(&fman->lock, irq_flags); lists_empty = list_empty(&fman->fence_list) && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) if (!dev_priv->has_mob) return false; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return (result != 0); } @@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); - mutex_lock(&dev_priv->hw_mutex); dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); @@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mb(); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); - mutex_unlock(&dev_priv->hw_mutex); max = ioread32(fifo_mem + SVGA_FIFO_MAX); min = ioread32(fifo_mem + SVGA_FIFO_MIN); @@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) return vmw_fifo_send_fence(dev_priv, &dummy); } -void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + static DEFINE_SPINLOCK(ping_lock); + unsigned long irq_flags; + /* + * The ping_lock is needed because we don't have an atomic + * test-and-set of the SVGA_FIFO_BUSY register. 
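The vmwgfx changes in this stretch drop the coarse hw_mutex in favour of fine-grained spinlocks: hw_lock spans each index/value port pair in vmw_read()/vmw_write(), waiter_lock protects the waiter counters, and cap_lock serializes SVGA_REG_DEV_CAP sequences. Register access then becomes safe from atomic context, which is why the ping_work fallback in vmwgfx_fence.c can go away. Below is a userspace analogue (pthreads, link with -lpthread) of why the lock must cover both steps of an indexed register access; the register pair is simulated, not the real VMWGFX_INDEX_PORT/VMWGFX_VALUE_PORT I/O:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated index/value register pair: selecting the register and
 * transferring its value are two separate steps, so a thread
 * interleaving between them would clobber the selection. */
static uint32_t index_reg;
static uint32_t value_regs[8];
static pthread_spinlock_t hw_lock;

static void reg_write(uint32_t offset, uint32_t value)
{
	pthread_spin_lock(&hw_lock);
	index_reg = offset;		/* outl(offset, INDEX_PORT) */
	value_regs[index_reg] = value;	/* outl(value, VALUE_PORT) */
	pthread_spin_unlock(&hw_lock);
}

static uint32_t reg_read(uint32_t offset)
{
	uint32_t val;

	pthread_spin_lock(&hw_lock);
	index_reg = offset;		/* outl(offset, INDEX_PORT) */
	val = value_regs[index_reg];	/* inl(VALUE_PORT) */
	pthread_spin_unlock(&hw_lock);
	return val;
}

int main(void)
{
	pthread_spin_init(&hw_lock, PTHREAD_PROCESS_PRIVATE);
	reg_write(3, 0xdeadbeef);
	printf("reg[3] = 0x%x\n", reg_read(3));
	pthread_spin_destroy(&hw_lock);
	return 0;
}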
+ */ + spin_lock_irqsave(&ping_lock, irq_flags); if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); vmw_write(dev_priv, SVGA_REG_SYNC, reason); } -} - -void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) -{ - mutex_lock(&dev_priv->hw_mutex); - - vmw_fifo_ping_host_locked(dev_priv, reason); - - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock_irqrestore(&ping_lock, irq_flags); } void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) ; @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) vmw_write(dev_priv, SVGA_REG_TRACES, dev_priv->traces_state); - mutex_unlock(&dev_priv->hw_mutex); vmw_marker_queue_takedown(&fifo->marker_queue); if (likely(fifo->static_buffer != NULL)) { @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, return vmw_fifo_wait_noirq(dev_priv, bytes, interruptible, timeout); - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); outl(SVGA_IRQFLAG_FIFO_PROGRESS, @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); if (interruptible) ret = wait_event_interruptible_timeout @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, else if (likely(ret > 0)) ret = 0; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < max_size; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); compat_cap->pairs[i][0] = i; compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return 0; } @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, if (num > SVGA3D_DEVCAP_MAX) num = SVGA3D_DEVCAP_MAX; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < num; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); } else if (gb_objects) { ret = vmw_fill_compat_cap(dev_priv, bounce, size); if (unlikely(ret != 0)) diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) { - uint32_t busy; - mutex_lock(&dev_priv->hw_mutex); - busy = vmw_read(dev_priv, SVGA_REG_BUSY); - mutex_unlock(&dev_priv->hw_mutex); - - return (busy == 0); + return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); } void vmw_update_seqno(struct vmw_private *dev_priv, @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, void vmw_seqno_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->fence_queue_waiters++ == 0) { unsigned long irq_flags; @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->fence_queue_waiters == 0) { unsigned long irq_flags; @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->goal_queue_waiters++ == 0) { unsigned long irq_flags; @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->goal_queue_waiters == 0) { unsigned long irq_flags; @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } int vmw_wait_seqno(struct vmw_private *dev_priv, @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) return; - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); - mutex_unlock(&dev_priv->hw_mutex); status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_display_unit *du = vmw_connector_to_du(connector); - mutex_lock(&dev_priv->hw_mutex); num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); - mutex_unlock(&dev_priv->hw_mutex); return 
((vmw_connector_to_du(connector)->unit < num_displays && du->pref_active) ? diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index a7de26d1ac80..d931cbbed240 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1389,6 +1389,7 @@ config SENSORS_ADS1015 config SENSORS_ADS7828 tristate "Texas Instruments ADS7828 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for Texas Instruments ADS7828 and ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while @@ -1430,8 +1431,8 @@ config SENSORS_INA2XX tristate "Texas Instruments INA219 and compatibles" depends on I2C help - If you say yes here you get support for INA219, INA220, INA226, and - INA230 power monitor chips. + If you say yes here you get support for INA219, INA220, INA226, + INA230, and INA231 power monitor chips. The INA2xx driver is configured for the default configuration of the part as described in the datasheet. diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 13875968c844..6cb89c0ebab6 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c @@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev, struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - return sprintf(buf, "%ld\n", data->min[attr->index]); + return sprintf(buf, "%lu\n", data->min[attr->index]); } static ssize_t show_max(struct device *dev, @@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev, struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - return sprintf(buf, "%ld\n", data->max[attr->index]); + return sprintf(buf, "%lu\n", data->max[attr->index]); } static ssize_t show_max_hyst(struct device *dev, @@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev, struct abx500_temp *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - return sprintf(buf, "%ld\n", data->max_hyst[attr->index]); + return sprintf(buf, "%lu\n", data->max_hyst[attr->index]); } static ssize_t show_min_alarm(struct device *dev, diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index f4f9b219bf16..11955467fc0f 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> +#include <linux/bitops.h> /* * AD7314 temperature masks @@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, switch (spi_get_device_id(chip->spi_dev)->driver_data) { case ad7314: data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; - data = (data << 6) >> 6; + data = sign_extend32(data, 9); return sprintf(buf, "%d\n", 250 * data); case adt7301: @@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, * register. 1lsb - 31.25 milli degrees centigrade */ data = ret & ADT7301_TEMP_MASK; - data = (data << 2) >> 2; + data = sign_extend32(data, 13); return sprintf(buf, "%d\n", DIV_ROUND_CLOSEST(data * 3125, 100)); diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index 0625e50d7a6e..ad2b47e40345 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c @@ -27,6 +27,7 @@ #include <linux/err.h> #include <linux/regulator/consumer.h> #include <linux/mutex.h> +#include <linux/bitops.h> /* Addresses to scan * The chip also supports addresses 0x35..0x37. 
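The ad7314 and adc128d818 hunks in this stretch replace open-coded (data << n) >> n sign extension with the kernel's sign_extend32() helper, which makes the intended sign-bit position explicit instead of depending on the operand width. A standalone re-implementation, assuming the usual bitops.h semantics where index names the sign bit:

#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of sign_extend32(): 'index' is the bit position
 * of the sign bit, which is propagated through bit 31. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* A 10-bit two's-complement sample, as in the ad7314 case:
	 * bit 9 is the sign bit. */
	printf("%d\n", sign_extend32(0x3ff, 9));	/* -1 */
	printf("%d\n", sign_extend32(0x1ff, 9));	/* 511 */

	/* adc128d818: 9-bit temperature, 0.5 degC per LSB. */
	printf("%d\n", sign_extend32(0x1fe, 8) * 500);	/* -1000 mdegC */
	return 0;
}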
Don't scan those addresses @@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev, if (IS_ERR(data)) return PTR_ERR(data); - temp = (data->temp[index] << 7) >> 7; /* sign extend */ + temp = sign_extend32(data->temp[index], 8); return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */ } diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index a622d40eec17..bce4e9ff21bf 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -30,14 +30,12 @@ #include <linux/hwmon-sysfs.h> #include <linux/i2c.h> #include <linux/init.h> -#include <linux/jiffies.h> #include <linux/module.h> -#include <linux/mutex.h> #include <linux/platform_data/ads7828.h> +#include <linux/regmap.h> #include <linux/slab.h> /* The ADS7828 registers */ -#define ADS7828_NCH 8 /* 8 channels supported */ #define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ #define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */ #define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */ @@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 }; /* Client specific data */ struct ads7828_data { - struct i2c_client *client; - struct mutex update_lock; /* Mutex protecting updates */ - unsigned long last_updated; /* Last updated time (in jiffies) */ - u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH samples */ - bool valid; /* Validity flag */ - bool diff_input; /* Differential input */ - bool ext_vref; /* External voltage reference */ - unsigned int vref_mv; /* voltage reference value */ + struct regmap *regmap; u8 cmd_byte; /* Command byte without channel bits */ unsigned int lsb_resol; /* Resolution of the ADC sample LSB */ - s32 (*read_channel)(const struct i2c_client *client, u8 command); }; /* Command byte C2,C1,C0 - see datasheet */ @@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch) return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4); } -/* Update data for the device (all 8 channels) */ -static struct ads7828_data *ads7828_update_device(struct device *dev) -{ - struct ads7828_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + HZ + HZ / 2) - || !data->valid) { - unsigned int ch; - dev_dbg(&client->dev, "Starting ads7828 update\n"); - - for (ch = 0; ch < ADS7828_NCH; ch++) { - u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch); - data->adc_input[ch] = data->read_channel(client, cmd); - } - data->last_updated = jiffies; - data->valid = true; - } - - mutex_unlock(&data->update_lock); - - return data; -} - /* sysfs callback function */ static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); - struct ads7828_data *data = ads7828_update_device(dev); - unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] * - data->lsb_resol, 1000); + struct ads7828_data *data = dev_get_drvdata(dev); + u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index); + unsigned int regval; + int err; - return sprintf(buf, "%d\n", value); + err = regmap_read(data->regmap, cmd, ®val); + if (err < 0) + return err; + + return sprintf(buf, "%d\n", + DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000)); } static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0); @@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = { ATTRIBUTE_GROUPS(ads7828); +static const struct regmap_config ads2828_regmap_config = { + .reg_bits = 8, + .val_bits = 16, +}; + +static const struct 
regmap_config ads2830_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + static int ads7828_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client, struct ads7828_platform_data *pdata = dev_get_platdata(dev); struct ads7828_data *data; struct device *hwmon_dev; + unsigned int vref_mv = ADS7828_INT_VREF_MV; + bool diff_input = false; + bool ext_vref = false; data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); if (!data) return -ENOMEM; if (pdata) { - data->diff_input = pdata->diff_input; - data->ext_vref = pdata->ext_vref; - if (data->ext_vref) - data->vref_mv = pdata->vref_mv; + diff_input = pdata->diff_input; + ext_vref = pdata->ext_vref; + if (ext_vref && pdata->vref_mv) + vref_mv = pdata->vref_mv; } - /* Bound Vref with min/max values if it was provided */ - if (data->vref_mv) - data->vref_mv = clamp_val(data->vref_mv, - ADS7828_EXT_VREF_MV_MIN, - ADS7828_EXT_VREF_MV_MAX); - else - data->vref_mv = ADS7828_INT_VREF_MV; + /* Bound Vref with min/max values */ + vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN, + ADS7828_EXT_VREF_MV_MAX); /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */ if (id->driver_data == ads7828) { - data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096); - data->read_channel = i2c_smbus_read_word_swapped; + data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096); + data->regmap = devm_regmap_init_i2c(client, + &ads2828_regmap_config); } else { - data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256); - data->read_channel = i2c_smbus_read_byte_data; + data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256); + data->regmap = devm_regmap_init_i2c(client, + &ads2830_regmap_config); } - data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; - if (!data->diff_input) + data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; + if (!diff_input) data->cmd_byte |= ADS7828_CMD_SD_SE; - data->client = client; - mutex_init(&data->update_lock); - hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, ads7828_groups); diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e01feba909c3..d1542b7d4bc3 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -35,6 +35,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/of.h> +#include <linux/delay.h> #include <linux/platform_data/ina2xx.h> @@ -51,7 +52,6 @@ #define INA226_ALERT_LIMIT 0x07 #define INA226_DIE_ID 0xFF - /* register count */ #define INA219_REGISTERS 6 #define INA226_REGISTERS 8 @@ -64,6 +64,24 @@ /* worst case is 68.10 ms (~14.6Hz, ina219) */ #define INA2XX_CONVERSION_RATE 15 +#define INA2XX_MAX_DELAY 69 /* worst case delay in ms */ + +#define INA2XX_RSHUNT_DEFAULT 10000 + +/* bit mask for reading the averaging setting in the configuration register */ +#define INA226_AVG_RD_MASK 0x0E00 + +#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9) +#define INA226_SHIFT_AVG(val) ((val) << 9) + +/* common attrs, ina226 attrs and NULL */ +#define INA2XX_MAX_ATTRIBUTE_GROUPS 3 + +/* + * Both bus voltage and shunt voltage conversion times for ina226 are set + * to 0b0100 on POR, which translates to 2200 microseconds in total. 
+ */ +#define INA226_TOTAL_CONV_TIME_DEFAULT 2200 enum ina2xx_ids { ina219, ina226 }; @@ -81,11 +99,16 @@ struct ina2xx_data { struct i2c_client *client; const struct ina2xx_config *config; + long rshunt; + u16 curr_config; + struct mutex update_lock; bool valid; unsigned long last_updated; + int update_interval; /* in jiffies */ int kind; + const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS]; u16 regs[INA2XX_MAX_REGISTERS]; }; @@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = { }, }; -static struct ina2xx_data *ina2xx_update_device(struct device *dev) +/* + * Available averaging rates for ina226. The indices correspond with + * the bit values expected by the chip (according to the ina226 datasheet, + * table 3 AVG bit settings, found at + * http://www.ti.com/lit/ds/symlink/ina226.pdf. + */ +static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 }; + +static int ina226_avg_bits(int avg) +{ + int i; + + /* Get the closest average from the tab. */ + for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) { + if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2) + break; + } + + return i; /* Return 0b0111 for values greater than 1024. */ +} + +static int ina226_reg_to_interval(u16 config) +{ + int avg = ina226_avg_tab[INA226_READ_AVG(config)]; + + /* + * Multiply the total conversion time by the number of averages. + * Return the result in milliseconds. + */ + return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000); +} + +static u16 ina226_interval_to_reg(int interval, u16 config) +{ + int avg, avg_bits; + + avg = DIV_ROUND_CLOSEST(interval * 1000, + INA226_TOTAL_CONV_TIME_DEFAULT); + avg_bits = ina226_avg_bits(avg); + + return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits); +} + +static void ina226_set_update_interval(struct ina2xx_data *data) +{ + int ms; + + ms = ina226_reg_to_interval(data->curr_config); + data->update_interval = msecs_to_jiffies(ms); +} + +static int ina2xx_calibrate(struct ina2xx_data *data) +{ + u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, + data->rshunt); + + return i2c_smbus_write_word_swapped(data->client, + INA2XX_CALIBRATION, val); +} + +/* + * Initialize the configuration and calibration registers. + */ +static int ina2xx_init(struct ina2xx_data *data) { - struct ina2xx_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; - struct ina2xx_data *ret = data; + int ret; - mutex_lock(&data->update_lock); + /* device configuration */ + ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, + data->curr_config); + if (ret < 0) + return ret; - if (time_after(jiffies, data->last_updated + - HZ / INA2XX_CONVERSION_RATE) || !data->valid) { + /* + * Set current LSB to 1mA, shunt is in uOhms + * (equation 13 in datasheet). 
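The ina226 helpers above map between the AVG field of the configuration register and an update interval in milliseconds by scaling the fixed 2.2 ms total conversion time. A compilable sketch of both directions, assuming 0x4127 as the power-on configuration value and a DIV_ROUND_CLOSEST simplified for non-negative operands:

#include <stdio.h>

#define INA226_AVG_RD_MASK		0x0E00
#define INA226_READ_AVG(reg)		(((reg) & INA226_AVG_RD_MASK) >> 9)
#define INA226_SHIFT_AVG(val)		((val) << 9)
#define INA226_TOTAL_CONV_TIME_DEFAULT	2200	/* us, both conversions */

/* Simplified for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d)		(((x) + (d) / 2) / (d))
#define ARRAY_SIZE(a)			(sizeof(a) / sizeof((a)[0]))

static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

static int ina226_avg_bits(int avg)
{
	int i;

	/* Pick the table entry whose value is closest to the request. */
	for (i = 0; i < (int)ARRAY_SIZE(ina226_avg_tab) - 1; i++)
		if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
			break;
	return i;
}

static int ina226_reg_to_interval(unsigned short config)
{
	int avg = ina226_avg_tab[INA226_READ_AVG(config)];

	return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
}

static unsigned short ina226_interval_to_reg(int interval,
					     unsigned short config)
{
	int avg = DIV_ROUND_CLOSEST(interval * 1000,
				    INA226_TOTAL_CONV_TIME_DEFAULT);

	return (config & ~INA226_AVG_RD_MASK) |
	       INA226_SHIFT_AVG(ina226_avg_bits(avg));
}

int main(void)
{
	unsigned short config = 0x4127;	/* assumed POR value: AVG = 1 */

	printf("POR interval: %d ms\n", ina226_reg_to_interval(config));

	/* Ask for ~100 ms: ~45 averages, rounded up to 64 (~141 ms). */
	config = ina226_interval_to_reg(100, config);
	printf("new interval: %d ms\n", ina226_reg_to_interval(config));
	return 0;
}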
+ */ + return ina2xx_calibrate(data); +} - int i; +static int ina2xx_do_update(struct device *dev) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + int i, rv, retry; - dev_dbg(&client->dev, "Starting ina2xx update\n"); + dev_dbg(&client->dev, "Starting ina2xx update\n"); + for (retry = 5; retry; retry--) { /* Read all registers */ for (i = 0; i < data->config->registers; i++) { - int rv = i2c_smbus_read_word_swapped(client, i); - if (rv < 0) { - ret = ERR_PTR(rv); - goto abort; - } + rv = i2c_smbus_read_word_swapped(client, i); + if (rv < 0) + return rv; data->regs[i] = rv; } + + /* + * If the current value in the calibration register is 0, the + * power and current registers will also remain at 0. In case + * the chip has been reset let's check the calibration + * register and reinitialize if needed. + */ + if (data->regs[INA2XX_CALIBRATION] == 0) { + dev_warn(dev, "chip not calibrated, reinitializing\n"); + + rv = ina2xx_init(data); + if (rv < 0) + return rv; + + /* + * Let's make sure the power and current registers + * have been updated before trying again. + */ + msleep(INA2XX_MAX_DELAY); + continue; + } + data->last_updated = jiffies; data->valid = 1; + + return 0; } -abort: + + /* + * If we're here then although all write operations succeeded, the + * chip still returns 0 in the calibration register. Nothing more we + * can do here. + */ + dev_err(dev, "unable to reinitialize the chip\n"); + return -ENODEV; +} + +static struct ina2xx_data *ina2xx_update_device(struct device *dev) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + struct ina2xx_data *ret = data; + unsigned long after; + int rv; + + mutex_lock(&data->update_lock); + + after = data->last_updated + data->update_interval; + if (time_after(jiffies, after) || !data->valid) { + rv = ina2xx_do_update(dev); + if (rv < 0) + ret = ERR_PTR(rv); + } + mutex_unlock(&data->update_lock); return ret; } @@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg) /* signed register, LSB=1mA (selected), in mA */ val = (s16)data->regs[reg]; break; + case INA2XX_CALIBRATION: + val = DIV_ROUND_CLOSEST(data->config->calibration_factor, + data->regs[reg]); + break; default: /* programmer goofed */ WARN_ON_ONCE(1); @@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev, ina2xx_get_value(data, attr->index)); } +static ssize_t ina2xx_set_shunt(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct ina2xx_data *data = ina2xx_update_device(dev); + unsigned long val; + int status; + + if (IS_ERR(data)) + return PTR_ERR(data); + + status = kstrtoul(buf, 10, &val); + if (status < 0) + return status; + + if (val == 0 || + /* Values greater than the calibration factor make no sense. 
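ina2xx_calibrate() programs the calibration register as calibration_factor / rshunt (rshunt in micro-ohms), and the new INA2XX_CALIBRATION case in ina2xx_get_value() performs the inverse division, so reading the shunt_resistor attribute reports the currently programmed resistance. A sketch of that round trip, assuming a hypothetical factor; the real per-chip values live in ina2xx_config[], outside the hunks quoted here:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Hypothetical calibration factor, for illustration only. */
#define CAL_FACTOR	40960000UL
#define RSHUNT_UOHM	10000UL		/* INA2XX_RSHUNT_DEFAULT: 10 mOhm */

int main(void)
{
	/* ina2xx_calibrate(): value written to INA2XX_CALIBRATION. */
	unsigned long cal = DIV_ROUND_CLOSEST(CAL_FACTOR, RSHUNT_UOHM);

	printf("calibration register: %lu\n", cal);

	/* ina2xx_get_value(INA2XX_CALIBRATION): the inverse division,
	 * so the shunt_resistor attribute reads back 10000 uOhm. */
	printf("recovered rshunt: %lu uOhm\n",
	       DIV_ROUND_CLOSEST(CAL_FACTOR, cal));
	return 0;
}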
*/ + val > data->config->calibration_factor) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->rshunt = val; + status = ina2xx_calibrate(data); + mutex_unlock(&data->update_lock); + if (status < 0) + return status; + + return count; +} + +static ssize_t ina226_set_interval(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + unsigned long val; + int status; + + status = kstrtoul(buf, 10, &val); + if (status < 0) + return status; + + if (val > INT_MAX || val == 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->curr_config = ina226_interval_to_reg(val, + data->regs[INA2XX_CONFIG]); + status = i2c_smbus_write_word_swapped(data->client, + INA2XX_CONFIG, + data->curr_config); + + ina226_set_update_interval(data); + /* Make sure the next access re-reads all registers. */ + data->valid = 0; + mutex_unlock(&data->update_lock); + if (status < 0) + return status; + + return count; +} + +static ssize_t ina226_show_interval(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct ina2xx_data *data = ina2xx_update_device(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + + /* + * We don't use data->update_interval here as we want to display + * the actual interval used by the chip and jiffies_to_msecs() + * doesn't seem to be accurate enough. + */ + return snprintf(buf, PAGE_SIZE, "%d\n", + ina226_reg_to_interval(data->regs[INA2XX_CONFIG])); +} + /* shunt voltage */ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL, INA2XX_SHUNT_VOLTAGE); @@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL, static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, INA2XX_POWER); +/* shunt resistance */ +static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, + ina2xx_show_value, ina2xx_set_shunt, + INA2XX_CALIBRATION); + +/* update interval (ina226 only) */ +static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, + ina226_show_interval, ina226_set_interval, 0); + /* pointers to created device attributes */ static struct attribute *ina2xx_attrs[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, + &sensor_dev_attr_shunt_resistor.dev_attr.attr, NULL, }; -ATTRIBUTE_GROUPS(ina2xx); + +static const struct attribute_group ina2xx_group = { + .attrs = ina2xx_attrs, +}; + +static struct attribute *ina226_attrs[] = { + &sensor_dev_attr_update_interval.dev_attr.attr, + NULL, +}; + +static const struct attribute_group ina226_group = { + .attrs = ina226_attrs, +}; static int ina2xx_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client, struct device *dev = &client->dev; struct ina2xx_data *data; struct device *hwmon_dev; - long shunt = 10000; /* default shunt value 10mOhms */ u32 val; - int ret; + int ret, group = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; @@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client, if (dev_get_platdata(dev)) { pdata = dev_get_platdata(dev); - shunt = pdata->shunt_uohms; + data->rshunt = pdata->shunt_uohms; } else if (!of_property_read_u32(dev->of_node, "shunt-resistor", &val)) { - shunt = val; + data->rshunt = val; + } else { + data->rshunt = INA2XX_RSHUNT_DEFAULT; } - if (shunt <= 0) - return -ENODEV; - /* set the device 
type */ data->kind = id->driver_data; data->config = &ina2xx_config[data->kind]; - - /* device configuration */ - ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, - data->config->config_default); - if (ret < 0) { - dev_err(dev, - "error writing to the config register: %d", ret); - return -ENODEV; - } + data->curr_config = data->config->config_default; + data->client = client; /* - * Set current LSB to 1mA, shunt is in uOhms - * (equation 13 in datasheet). + * Ina226 has a variable update_interval. For ina219 we + * use a constant value. */ - ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, - data->config->calibration_factor / shunt); + if (data->kind == ina226) + ina226_set_update_interval(data); + else + data->update_interval = HZ / INA2XX_CONVERSION_RATE; + + if (data->rshunt <= 0 || + data->rshunt > data->config->calibration_factor) + return -ENODEV; + + ret = ina2xx_init(data); if (ret < 0) { - dev_err(dev, - "error writing to the calibration register: %d", ret); + dev_err(dev, "error configuring the device: %d\n", ret); return -ENODEV; } - data->client = client; mutex_init(&data->update_lock); + data->groups[group++] = &ina2xx_group; + if (data->kind == ina226) + data->groups[group++] = &ina226_group; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, - data, ina2xx_groups); + data, data->groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", - id->name, shunt); + id->name, data->rshunt); return 0; } @@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = { { "ina220", ina219 }, { "ina226", ina226 }, { "ina230", ina226 }, + { "ina231", ina226 }, { } }; MODULE_DEVICE_TABLE(i2c, ina2xx_id); diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 388f8bcd898e..996bdfd5cf25 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -201,7 +201,7 @@ struct jc42_data { #define JC42_TEMP_MIN 0 #define JC42_TEMP_MAX 125000 -static u16 jc42_temp_to_reg(int temp, bool extended) +static u16 jc42_temp_to_reg(long temp, bool extended) { int ntemp = clamp_val(temp, extended ? JC42_TEMP_MIN_EXTENDED : @@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended) static int jc42_temp_from_reg(s16 reg) { - reg &= 0x1fff; - - /* sign extend register */ - if (reg & 0x1000) - reg |= 0xf000; + reg = sign_extend32(reg, 12); /* convert from 0.0625 to 0.001 resolution */ return reg * 125 / 2; @@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev, const char *buf, size_t count) { struct jc42_data *data = dev_get_drvdata(dev); - unsigned long val; + long val; int diff, hyst; int err; int ret = count; - if (kstrtoul(buf, 10, &val) < 0) + if (kstrtol(buf, 10, &val) < 0) return -EINVAL; + val = clamp_val(val, (data->extended ? 
JC42_TEMP_MIN_EXTENDED : + JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX); diff = jc42_temp_from_reg(data->temp[t_crit]) - val; + hyst = 0; if (diff > 0) { if (diff < 2250) diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index ec5678289e4a..55765790907b 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg) return reg != REG_BANK && reg <= 0x20; } -static struct regmap_config nct7802_regmap_config = { +static const struct regmap_config nct7802_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index ba9f478f64ee..9da2735f1424 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c @@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int tmp102_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev) config &= ~TMP102_CONF_SD; return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); } - -static const struct dev_pm_ops tmp102_dev_pm_ops = { - .suspend = tmp102_suspend, - .resume = tmp102_resume, -}; - -#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops) -#else -#define TMP102_DEV_PM_OPS NULL #endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume); + static const struct i2c_device_id tmp102_id[] = { { "tmp102", 0 }, { } @@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id); static struct i2c_driver tmp102_driver = { .driver.name = DRIVER_NAME, - .driver.pm = TMP102_DEV_PM_OPS, + .driver.pm = &tmp102_dev_pm_ops, .probe = tmp102_probe, .remove = tmp102_remove, .id_table = tmp102_id, diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 31e8308ba899..ab838d9e28b6 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -881,6 +881,7 @@ config I2C_XLR config I2C_RCAR tristate "Renesas R-Car I2C Controller" depends on ARCH_SHMOBILE || COMPILE_TEST + select I2C_SLAVE help If you say yes to this option, support will be included for the R-Car I2C controller. 
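The jc42 hunk above swaps an open-coded "mask to 13 bits, then OR in the sign bits" sequence for the generic sign_extend32() helper. Below is a minimal userspace sketch of why the two are equivalent; it reimplements the helper locally (the kernel's version lives in include/linux/bitops.h), uses register values chosen purely for illustration, and assumes two's complement representation with arithmetic right shift on signed integers, which is what the kernel helper relies on as well.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's sign_extend32(); 'index' is the
 * 0-based position of the sign bit, so bits above it are discarded. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* JC42 temperature registers carry 0.0625 degC per LSB in a 13-bit
 * two's complement field; convert to hwmon's millidegrees. */
static int jc42_temp_from_reg(int16_t reg)
{
	return sign_extend32(reg, 12) * 125 / 2;
}

int main(void)
{
	printf("%d\n", jc42_temp_from_reg(0x00C8));	/*  200 LSB -> 12500 */
	printf("%d\n", jc42_temp_from_reg(0x1FF0));	/*  -16 LSB -> -1000 */
	return 0;
}

Passing the sign-bit index as an argument is what lets one generic helper replace the driver's hand-rolled 0x1fff/0xf000 masks.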
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index bff20a589621..958c8db4ec30 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, int ret; pm_runtime_get_sync(&adap->dev); - clk_prepare_enable(i2c->clk); + ret = clk_enable(i2c->clk); + if (ret) + return ret; for (retry = 0; retry < adap->retries; retry++) { ret = s3c24xx_i2c_doxfer(i2c, msgs, num); if (ret != -EAGAIN) { - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); pm_runtime_put(&adap->dev); return ret; } @@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, udelay(100); } - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); pm_runtime_put(&adap->dev); return -EREMOTEIO; } @@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) clk_prepare_enable(i2c->clk); ret = s3c24xx_i2c_init(i2c); - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); if (ret != 0) { dev_err(&pdev->dev, "I2C controller init failed\n"); return ret; @@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) i2c->irq = ret = platform_get_irq(pdev, 0); if (ret <= 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); + clk_unprepare(i2c->clk); return ret; } @@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) if (ret != 0) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); + clk_unprepare(i2c->clk); return ret; } } @@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) ret = s3c24xx_i2c_register_cpufreq(i2c); if (ret < 0) { dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); + clk_unprepare(i2c->clk); return ret; } @@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "failed to add bus to i2c core\n"); s3c24xx_i2c_deregister_cpufreq(i2c); + clk_unprepare(i2c->clk); return ret; } @@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) { struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + clk_unprepare(i2c->clk); + pm_runtime_disable(&i2c->adap.dev); pm_runtime_disable(&pdev->dev); @@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + int ret; if (!IS_ERR(i2c->sysreg)) regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); - clk_prepare_enable(i2c->clk); + ret = clk_enable(i2c->clk); + if (ret) + return ret; s3c24xx_i2c_init(i2c); - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); i2c->suspended = 0; return 0; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 440d5dbc8b5f..007818b3e174 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -139,6 +139,7 @@ struct sh_mobile_i2c_data { int pos; int sr; bool send_stop; + bool stop_after_dma; struct resource *res; struct dma_chan *dma_tx; @@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) if (pd->pos == pd->msg->len) { /* Send stop if we haven't yet (DMA case) */ - if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY)) + if (pd->send_stop && pd->stop_after_dma) i2c_op(pd, OP_TX_STOP, 0); return 1; } @@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) real_pos = pd->pos - 2; if (pd->pos == pd->msg->len) { + if (pd->stop_after_dma) { + /* Simulate 
PIO end condition after DMA transfer */ + i2c_op(pd, OP_RX_STOP, 0); + pd->pos++; + break; + } + if (real_pos < 0) { i2c_op(pd, OP_RX_STOP, 0); break; @@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data) sh_mobile_i2c_dma_unmap(pd); pd->pos = pd->msg->len; + pd->stop_after_dma = true; iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); } @@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, bool do_start = pd->send_stop || !i; msg = &msgs[i]; pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; + pd->stop_after_dma = false; err = start_ch(pd, msg, do_start); if (err) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 39d25a8cb1ad..e9eae57a2b50 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -2972,6 +2972,7 @@ trace: } EXPORT_SYMBOL(i2c_smbus_xfer); +#if IS_ENABLED(CONFIG_I2C_SLAVE) int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; @@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client) return ret; } EXPORT_SYMBOL_GPL(i2c_slave_unregister); +#endif MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C-Bus main module"); diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 6631400b5f02..cf9b09db092f 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj struct eeprom_data *eeprom; unsigned long flags; - if (off + count >= attr->size) + if (off + count > attr->size) return -EFBIG; eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); @@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob struct eeprom_data *eeprom; unsigned long flags; - if (off + count >= attr->size) + if (off + count > attr->size) return -EFBIG; eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index b716b0815644..643c08a025a5 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd); IB_UVERBS_DECLARE_EX_CMD(create_flow); IB_UVERBS_DECLARE_EX_CMD(destroy_flow); -IB_UVERBS_DECLARE_EX_CMD(query_device); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 532d8eba8b02..b7943ff16ed3 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -400,52 +400,6 @@ err: return ret; } -static void copy_query_dev_fields(struct ib_uverbs_file *file, - struct ib_uverbs_query_device_resp *resp, - struct ib_device_attr *attr) -{ - resp->fw_ver = attr->fw_ver; - resp->node_guid = file->device->ib_dev->node_guid; - resp->sys_image_guid = attr->sys_image_guid; - resp->max_mr_size = attr->max_mr_size; - resp->page_size_cap = attr->page_size_cap; - resp->vendor_id = attr->vendor_id; - resp->vendor_part_id = attr->vendor_part_id; - resp->hw_ver = attr->hw_ver; - resp->max_qp = attr->max_qp; - resp->max_qp_wr = attr->max_qp_wr; - resp->device_cap_flags = attr->device_cap_flags; - resp->max_sge = attr->max_sge; - resp->max_sge_rd = attr->max_sge_rd; - resp->max_cq = attr->max_cq; - resp->max_cqe = attr->max_cqe; - resp->max_mr = attr->max_mr; - resp->max_pd = attr->max_pd; - resp->max_qp_rd_atom = attr->max_qp_rd_atom; - resp->max_ee_rd_atom = attr->max_ee_rd_atom; - resp->max_res_rd_atom = 
attr->max_res_rd_atom; - resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; - resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; - resp->atomic_cap = attr->atomic_cap; - resp->max_ee = attr->max_ee; - resp->max_rdd = attr->max_rdd; - resp->max_mw = attr->max_mw; - resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; - resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; - resp->max_mcast_grp = attr->max_mcast_grp; - resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; - resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; - resp->max_ah = attr->max_ah; - resp->max_fmr = attr->max_fmr; - resp->max_map_per_fmr = attr->max_map_per_fmr; - resp->max_srq = attr->max_srq; - resp->max_srq_wr = attr->max_srq_wr; - resp->max_srq_sge = attr->max_srq_sge; - resp->max_pkeys = attr->max_pkeys; - resp->local_ca_ack_delay = attr->local_ca_ack_delay; - resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt; -} - ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) @@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, return ret; memset(&resp, 0, sizeof resp); - copy_query_dev_fields(file, &resp, &attr); + + resp.fw_ver = attr.fw_ver; + resp.node_guid = file->device->ib_dev->node_guid; + resp.sys_image_guid = attr.sys_image_guid; + resp.max_mr_size = attr.max_mr_size; + resp.page_size_cap = attr.page_size_cap; + resp.vendor_id = attr.vendor_id; + resp.vendor_part_id = attr.vendor_part_id; + resp.hw_ver = attr.hw_ver; + resp.max_qp = attr.max_qp; + resp.max_qp_wr = attr.max_qp_wr; + resp.device_cap_flags = attr.device_cap_flags; + resp.max_sge = attr.max_sge; + resp.max_sge_rd = attr.max_sge_rd; + resp.max_cq = attr.max_cq; + resp.max_cqe = attr.max_cqe; + resp.max_mr = attr.max_mr; + resp.max_pd = attr.max_pd; + resp.max_qp_rd_atom = attr.max_qp_rd_atom; + resp.max_ee_rd_atom = attr.max_ee_rd_atom; + resp.max_res_rd_atom = attr.max_res_rd_atom; + resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; + resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; + resp.atomic_cap = attr.atomic_cap; + resp.max_ee = attr.max_ee; + resp.max_rdd = attr.max_rdd; + resp.max_mw = attr.max_mw; + resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; + resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; + resp.max_mcast_grp = attr.max_mcast_grp; + resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; + resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; + resp.max_ah = attr.max_ah; + resp.max_fmr = attr.max_fmr; + resp.max_map_per_fmr = attr.max_map_per_fmr; + resp.max_srq = attr.max_srq; + resp.max_srq_wr = attr.max_srq_wr; + resp.max_srq_sge = attr.max_srq_sge; + resp.max_pkeys = attr.max_pkeys; + resp.local_ca_ack_delay = attr.local_ca_ack_delay; + resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) @@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, return ret ? 
ret : in_len; } - -int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) -{ - struct ib_uverbs_ex_query_device_resp resp; - struct ib_uverbs_ex_query_device cmd; - struct ib_device_attr attr; - struct ib_device *device; - int err; - - device = file->device->ib_dev; - if (ucore->inlen < sizeof(cmd)) - return -EINVAL; - - err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); - if (err) - return err; - - if (cmd.reserved) - return -EINVAL; - - err = device->query_device(device, &attr); - if (err) - return err; - - memset(&resp, 0, sizeof(resp)); - copy_query_dev_fields(file, &resp.base, &attr); - resp.comp_mask = 0; - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) { - resp.odp_caps.general_caps = attr.odp_caps.general_caps; - resp.odp_caps.per_transport_caps.rc_odp_caps = - attr.odp_caps.per_transport_caps.rc_odp_caps; - resp.odp_caps.per_transport_caps.uc_odp_caps = - attr.odp_caps.per_transport_caps.uc_odp_caps; - resp.odp_caps.per_transport_caps.ud_odp_caps = - attr.odp_caps.per_transport_caps.ud_odp_caps; - resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP; - } -#endif - - err = ib_copy_to_udata(ucore, &resp, sizeof(resp)); - if (err) - return err; - - return 0; -} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index e6c23b9eab33..5db1a8cc388d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, struct ib_udata *uhw) = { [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, - [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device }; static void ib_uverbs_add_one(struct ib_device *device); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8a87404e9c76..03bf81211a54 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | (1ull << IB_USER_VERBS_CMD_OPEN_QP); - dev->ib_dev.uverbs_ex_cmd_mask = - (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 8ba80a6d3a46..d7562beb5423 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -98,15 +98,9 @@ enum { IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ IPOIB_MCAST_FLAG_SENDONLY = 1, - /* - * For IPOIB_MCAST_FLAG_BUSY - * When set, in flight join and mcast->mc is unreliable - * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or - * haven't started yet - * When clear and mcast->mc is valid pointer, join was successful - */ - IPOIB_MCAST_FLAG_BUSY = 2, + IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ IPOIB_MCAST_FLAG_ATTACHED = 3, + IPOIB_MCAST_JOIN_STARTED = 4, MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, @@ -323,7 +317,6 @@ struct ipoib_dev_priv { struct list_head multicast_list; struct rb_root multicast_tree; - struct workqueue_struct *wq; struct delayed_work mcast_task; struct work_struct carrier_on_task; struct work_struct flush_light; @@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); 
void ipoib_pkey_event(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); -int ipoib_ib_dev_open(struct net_device *dev); +int ipoib_ib_dev_open(struct net_device *dev, int flush); int ipoib_ib_dev_up(struct net_device *dev); -int ipoib_ib_dev_down(struct net_device *dev); -int ipoib_ib_dev_stop(struct net_device *dev); +int ipoib_ib_dev_down(struct net_device *dev, int flush); +int ipoib_ib_dev_stop(struct net_device *dev, int flush); void ipoib_pkey_dev_check_presence(struct net_device *dev); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); @@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); -int ipoib_mcast_stop_thread(struct net_device *dev); +int ipoib_mcast_stop_thread(struct net_device *dev, int flush); void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 56959adb6c7d..933efcea0d03 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even } spin_lock_irq(&priv->lock); - queue_delayed_work(priv->wq, + queue_delayed_work(ipoib_workqueue, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); /* Add this entry to passive ids list head, but do not re-add it * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ @@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) spin_lock_irqsave(&priv->lock, flags); list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); ipoib_cm_start_rx_drain(priv); - queue_work(priv->wq, &priv->cm.rx_reap_task); + queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); spin_unlock_irqrestore(&priv->lock, flags); } else ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", @@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) spin_lock_irqsave(&priv->lock, flags); list_move(&p->list, &priv->cm.rx_reap_list); spin_unlock_irqrestore(&priv->lock, flags); - queue_work(priv->wq, &priv->cm.rx_reap_task); + queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); } return; } @@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); - queue_work(priv->wq, &priv->cm.reap_task); + queue_work(ipoib_workqueue, &priv->cm.reap_task); } clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); @@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); - queue_work(priv->wq, &priv->cm.reap_task); + queue_work(ipoib_workqueue, &priv->cm.reap_task); } spin_unlock_irqrestore(&priv->lock, flags); @@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); - queue_work(priv->wq, &priv->cm.start_task); + queue_work(ipoib_workqueue, &priv->cm.start_task); return tx; } @@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { spin_lock_irqsave(&priv->lock, 
flags); list_move(&tx->list, &priv->cm.reap_list); - queue_work(priv->wq, &priv->cm.reap_task); + queue_work(ipoib_workqueue, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->daddr + 4); tx->neigh = NULL; @@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, skb_queue_tail(&priv->cm.skb_queue, skb); if (e) - queue_work(priv->wq, &priv->cm.skb_task); + queue_work(ipoib_workqueue, &priv->cm.skb_task); } static void ipoib_cm_rx_reap(struct work_struct *work) @@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work) } if (!list_empty(&priv->cm.passive_ids)) - queue_delayed_work(priv->wq, + queue_delayed_work(ipoib_workqueue, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); spin_unlock_irq(&priv->lock); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index fe65abb5150c..72626c348174 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work) __ipoib_reap_ah(dev); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) - queue_delayed_work(priv->wq, &priv->ah_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); } @@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) drain_tx_cq((struct net_device *)ctx); } -int ipoib_ib_dev_open(struct net_device *dev) +int ipoib_ib_dev_open(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; @@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev) } clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(priv->wq, &priv->ah_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) @@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev) dev_stop: if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); - ipoib_ib_dev_stop(dev); + ipoib_ib_dev_stop(dev, flush); return -1; } @@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev) return ipoib_mcast_start_thread(dev); } -int ipoib_ib_dev_down(struct net_device *dev) +int ipoib_ib_dev_down(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev) clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); - ipoib_mcast_stop_thread(dev); + ipoib_mcast_stop_thread(dev, flush); ipoib_mcast_dev_flush(dev); ipoib_flush_paths(dev); @@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev) local_bh_enable(); } -int ipoib_ib_dev_stop(struct net_device *dev) +int ipoib_ib_dev_stop(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; @@ -880,7 +880,8 @@ timeout: /* Wait for all AHs to be reaped */ set_bit(IPOIB_STOP_REAPER, &priv->flags); cancel_delayed_work(&priv->ah_reap_task); - flush_workqueue(priv->wq); + if (flush) + flush_workqueue(ipoib_workqueue); begin = jiffies; @@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) (unsigned long) dev); if (dev->flags & IFF_UP) { - if (ipoib_ib_dev_open(dev)) { + if (ipoib_ib_dev_open(dev, 1)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } @@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, } if (level >= IPOIB_FLUSH_NORMAL) - 
ipoib_ib_dev_down(dev); + ipoib_ib_dev_down(dev, 0); if (level == IPOIB_FLUSH_HEAVY) { if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) - ipoib_ib_dev_stop(dev); - if (ipoib_ib_dev_open(dev) != 0) + ipoib_ib_dev_stop(dev, 0); + if (ipoib_ib_dev_open(dev, 0) != 0) return; if (netif_queue_stopped(dev)) netif_start_queue(dev); @@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) */ ipoib_flush_paths(dev); - ipoib_mcast_stop_thread(dev); + ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); ipoib_transport_dev_cleanup(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6bad17d4d588..58b5aa3b6f2d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev) set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); - if (ipoib_ib_dev_open(dev)) { + if (ipoib_ib_dev_open(dev, 1)) { if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) return 0; goto err_disable; @@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev) return 0; err_stop: - ipoib_ib_dev_stop(dev); + ipoib_ib_dev_stop(dev, 1); err_disable: clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); @@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev) netif_stop_queue(dev); - ipoib_ib_dev_down(dev); - ipoib_ib_dev_stop(dev); + ipoib_ib_dev_down(dev, 1); + ipoib_ib_dev_stop(dev, 0); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { struct ipoib_dev_priv *cpriv; @@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev) return; } - queue_work(priv->wq, &priv->restart_task); + queue_work(ipoib_workqueue, &priv->restart_task); } static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) @@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work) __ipoib_reap_neigh(priv); if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) - queue_delayed_work(priv->wq, &priv->neigh_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, arp_tbl.gc_interval); } @@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) /* start garbage collection */ clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); - queue_delayed_work(priv->wq, &priv->neigh_reap_task, + queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, arp_tbl.gc_interval); return 0; @@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) { struct ipoib_dev_priv *priv = netdev_priv(dev); + if (ipoib_neigh_hash_init(priv) < 0) + goto out; /* Allocate RX/TX "rings" to hold queued skbs */ priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, GFP_KERNEL); if (!priv->rx_ring) { printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", ca->name, ipoib_recvq_size); - goto out; + goto out_neigh_hash_cleanup; } priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); @@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) if (ipoib_ib_dev_init(dev, ca, port)) goto out_tx_ring_cleanup; - /* - * Must be after ipoib_ib_dev_init so we can allocate a per - * device wq there and use it here - */ - if (ipoib_neigh_hash_init(priv) < 0) - goto out_dev_uninit; - return 0; -out_dev_uninit: - ipoib_ib_dev_cleanup(dev); - out_tx_ring_cleanup: vfree(priv->tx_ring); out_rx_ring_cleanup: kfree(priv->rx_ring); +out_neigh_hash_cleanup: + ipoib_neigh_hash_uninit(dev); out: return -ENOMEM; } @@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct 
net_device *dev) } unregister_netdevice_many(&head); - /* - * Must be before ipoib_ib_dev_cleanup or we delete an in use - * work queue - */ - ipoib_neigh_hash_uninit(dev); - ipoib_ib_dev_cleanup(dev); kfree(priv->rx_ring); @@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev) priv->rx_ring = NULL; priv->tx_ring = NULL; + + ipoib_neigh_hash_uninit(dev); } static const struct header_ops ipoib_header_ops = { @@ -1646,7 +1636,7 @@ register_failed: /* Stop GC if started before flush */ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); cancel_delayed_work(&priv->neigh_reap_task); - flush_workqueue(priv->wq); + flush_workqueue(ipoib_workqueue); event_failed: ipoib_dev_cleanup(priv->dev); @@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device) /* Stop GC */ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); cancel_delayed_work(&priv->neigh_reap_task); - flush_workqueue(priv->wq); + flush_workqueue(ipoib_workqueue); unregister_netdev(priv->dev); free_netdev(priv->dev); @@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void) * unregister_netdev() and linkwatch_event take the rtnl lock, * so flush_scheduled_work() can deadlock during device * removal. - * - * In addition, bringing one device up and another down at the - * same time can deadlock a single workqueue, so we have this - * global fallback workqueue, but we also attempt to open a - * per device workqueue each time we bring an interface up */ - ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); + ipoib_workqueue = create_singlethread_workqueue("ipoib"); if (!ipoib_workqueue) { ret = -ENOMEM; goto err_fs; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index bc50dd0d0e4d..ffb83b5f7e80 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; set_qkey = 1; + + if (!ipoib_cm_admin_enabled(dev)) { + rtnl_lock(); + dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); + rtnl_unlock(); + } } if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { @@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status, struct ipoib_mcast *mcast = multicast->context; struct net_device *dev = mcast->dev; - /* - * We have to take the mutex to force mcast_sendonly_join to - * return from ib_sa_multicast_join and set mcast->mc to a - * valid value. Otherwise we were racing with ourselves in - * that we might fail here, but get a valid return from - * ib_sa_multicast_join after we had cleared mcast->mc here, - * resulting in mis-matched joins and leaves and a deadlock - */ - mutex_lock(&mcast_mutex); - /* We trap for port events ourselves. 
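A status of -ENETRESET only reports that the SA layer tore down the join after such an event, so it is not treated as an error here. 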
*/ if (status == -ENETRESET) - goto out; + return 0; if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (status) { if (mcast->logcount++ < 20) - ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " - "join failed for %pI6, status %d\n", + ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); /* Flush out any queued packets */ @@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status, dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } netif_tx_unlock_bh(dev); + + /* Clear the busy flag so we try again */ + status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, + &mcast->flags); } -out: - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - if (status) - mcast->mc = NULL; - complete(&mcast->done); - if (status == -ENETRESET) - status = 0; - mutex_unlock(&mcast_mutex); return status; } @@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) int ret = 0; if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { - ipoib_dbg_mcast(priv, "device shutting down, no sendonly " - "multicast joins\n"); + ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); return -ENODEV; } - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { - ipoib_dbg_mcast(priv, "multicast entry busy, skipping " - "sendonly join\n"); + if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { + ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); return -EBUSY; } @@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) rec.port_gid = priv->local_gid; rec.pkey = cpu_to_be16(priv->pkey); - mutex_lock(&mcast_mutex); - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | @@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) if (IS_ERR(mcast->mc)) { ret = PTR_ERR(mcast->mc); clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - complete(&mcast->done); - ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " - "failed (ret = %d)\n", ret); + ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", + ret); } else { - ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " - "sendonly join\n", mcast->mcmember.mgid.raw); + ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", + mcast->mcmember.mgid.raw); } - mutex_unlock(&mcast_mutex); return ret; } @@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) carrier_on_task); struct ib_port_attr attr; + /* + * Take rtnl_lock to avoid racing with ipoib_stop() and + * turning the carrier back on while a device is being + * removed. + */ if (ib_query_port(priv->ca, priv->port, &attr) || attr.state != IB_PORT_ACTIVE) { ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); return; } - /* - * Take rtnl_lock to avoid racing with ipoib_stop() and - * turning the carrier back on while a device is being - * removed. However, ipoib_stop() will attempt to flush - * the workqueue while holding the rtnl lock, so loop - * on trylock until either we get the lock or we see - * FLAG_ADMIN_UP go away as that signals that we are bailing - * and can safely ignore the carrier on work. 
- */ - while (!rtnl_trylock()) { - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) - return; - else - msleep(20); - } - if (!ipoib_cm_admin_enabled(priv->dev)) - dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); + rtnl_lock(); netif_carrier_on(priv->dev); rtnl_unlock(); } @@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status, ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", mcast->mcmember.mgid.raw, status); - /* - * We have to take the mutex to force mcast_join to - * return from ib_sa_multicast_join and set mcast->mc to a - * valid value. Otherwise we were racing with ourselves in - * that we might fail here, but get a valid return from - * ib_sa_multicast_join after we had cleared mcast->mc here, - * resulting in mis-matched joins and leaves and a deadlock - */ - mutex_lock(&mcast_mutex); - /* We trap for port events ourselves. */ - if (status == -ENETRESET) + if (status == -ENETRESET) { + status = 0; goto out; + } if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (!status) { mcast->backoff = 1; + mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, 0); + mutex_unlock(&mcast_mutex); /* - * Defer carrier on work to priv->wq to avoid a + * Defer carrier on work to ipoib_workqueue to avoid a * deadlock on rtnl_lock here. */ if (mcast == priv->broadcast) - queue_work(priv->wq, &priv->carrier_on_task); - } else { - if (mcast->logcount++ < 20) { - if (status == -ETIMEDOUT || status == -EAGAIN) { - ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); - } else { - ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); - } - } + queue_work(ipoib_workqueue, &priv->carrier_on_task); - mcast->backoff *= 2; - if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) - mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + status = 0; + goto out; } -out: + + if (mcast->logcount++ < 20) { + if (status == -ETIMEDOUT || status == -EAGAIN) { + ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", + mcast->mcmember.mgid.raw, status); + } else { + ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", + mcast->mcmember.mgid.raw, status); + } + } + + mcast->backoff *= 2; + if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) + mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + + /* Clear the busy flag so we try again */ + status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + + mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - if (status) - mcast->mc = NULL; - complete(&mcast->done); - if (status == -ENETRESET) - status = 0; - if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, + if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); - +out: + complete(&mcast->done); return status; } @@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.hop_limit = priv->broadcast->mcmember.hop_limit; } - mutex_lock(&mcast_mutex); - init_completion(&mcast->done); set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); + mcast->mc = 
ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); @@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, mcast->backoff * HZ); + mutex_unlock(&mcast_mutex); } - mutex_unlock(&mcast_mutex); } void ipoib_mcast_join_task(struct work_struct *work) @@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work) ipoib_warn(priv, "failed to allocate broadcast group\n"); mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, - HZ); + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, HZ); mutex_unlock(&mcast_mutex); return; } @@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work) } if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { - if (IS_ERR_OR_NULL(priv->broadcast->mc) && - !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) + if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) ipoib_mcast_join(dev, priv->broadcast, 0); return; } @@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work) while (1) { struct ipoib_mcast *mcast = NULL; - /* - * Need the mutex so our flags are consistent, need the - * priv->lock so we don't race with list removals in either - * mcast_dev_flush or mcast_restart_task - */ - mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); list_for_each_entry(mcast, &priv->multicast_list, list) { - if (IS_ERR_OR_NULL(mcast->mc) && - !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && - !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) + && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) + && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { /* Found the next unjoined group */ break; } } spin_unlock_irq(&priv->lock); - mutex_unlock(&mcast_mutex); if (&mcast->list == &priv->multicast_list) { /* All done */ break; } - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - ipoib_mcast_sendonly_join(mcast); - else - ipoib_mcast_join(dev, mcast, 1); + ipoib_mcast_join(dev, mcast, 1); return; } @@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev) mutex_lock(&mcast_mutex); if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); return 0; } -int ipoib_mcast_stop_thread(struct net_device *dev) +int ipoib_mcast_stop_thread(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev) cancel_delayed_work(&priv->mcast_task); mutex_unlock(&mcast_mutex); - flush_workqueue(priv->wq); + if (flush) + flush_workqueue(ipoib_workqueue); return 0; } @@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) int ret = 0; if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n"); - - if (!IS_ERR_OR_NULL(mcast->mc)) ib_sa_free_multicast(mcast->mc); if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { @@ -725,8 +685,6 @@ void 
ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); __ipoib_mcast_add(dev, mcast); list_add_tail(&mcast->list, &priv->multicast_list); - if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); } if (!mcast->ah) { @@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) ipoib_dbg_mcast(priv, "no address vector, " "but multicast join already started\n"); + else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + ipoib_mcast_sendonly_join(mcast); /* * If lookup completes between here and out:, don't @@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ + /* separate the wait from the leave */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) wait_for_completion(&mcast->done); list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { @@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_dbg_mcast(priv, "restarting multicast task\n"); + ipoib_mcast_stop_thread(dev, 0); + local_irq_save(flags); netif_addr_lock(dev); spin_lock(&priv->lock); @@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work) netif_addr_unlock(dev); local_irq_restore(flags); - /* - * make sure the in-flight joins have finished before we attempt - * to leave - */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - wait_for_completion(&mcast->done); - - /* - * We have to cancel outside of the spinlock, but we have to - * take the rtnl lock or else we race with the removal of - * entries from the remove list in mcast_dev_flush as part - * of ipoib_stop(). We detect the drop of the ADMIN_UP flag - * to signal that we have hit this particular race, and we - * return since we know we don't need to do anything else - * anyway. 
- */ - while (!rtnl_trylock()) { - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) - return; - else - msleep(20); - } + /* We have to cancel outside of the spinlock */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(mcast->dev, mcast); ipoib_mcast_free(mcast); } - /* - * Restart our join task if needed - */ - ipoib_mcast_start_thread(dev); - rtnl_unlock(); + + if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + ipoib_mcast_start_thread(dev); } #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index b72a753eb41d..c56d5d44c53b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) int ret, size; int i; - /* - * the various IPoIB tasks assume they will never race against - * themselves, so always use a single thread workqueue - */ - priv->wq = create_singlethread_workqueue("ipoib_wq"); - if (!priv->wq) { - printk(KERN_WARNING "ipoib: failed to allocate device WQ\n"); - return -ENODEV; - } - priv->pd = ib_alloc_pd(priv->ca); if (IS_ERR(priv->pd)) { printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name); - goto out_free_wq; + return -ENODEV; } priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); @@ -252,10 +242,6 @@ out_free_mr: out_free_pd: ib_dealloc_pd(priv->pd); - -out_free_wq: - destroy_workqueue(priv->wq); - priv->wq = NULL; return -ENODEV; } @@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev) if (ib_dealloc_pd(priv->pd)) ipoib_warn(priv, "ib_dealloc_pd failed\n"); - - if (priv->wq) { - flush_workqueue(priv->wq); - destroy_workqueue(priv->wq); - priv->wq = NULL; - } } void ipoib_event(struct ib_event_handler *handler, diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 77ecf6d32237..6e22682c8255 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus UX31 0x361f00 20, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? 
clickpad + * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons + * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) @@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), }, }, + { + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), + }, + }, + { + /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), + }, + }, #endif { } }; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f9472920d986..23e26e0768b5 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = { 1232, 5710, 1156, 4696 }, { - (const char * const []){"LEN0034", "LEN0036", "LEN0039", - "LEN2002", "LEN2004", NULL}, + (const char * const []){"LEN0034", "LEN0036", "LEN0037", + "LEN0039", "LEN2002", "LEN2004", + NULL}, 1024, 5112, 2024, 4832 }, { @@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ "LEN0035", /* X240 */ "LEN0036", /* T440 */ - "LEN0037", + "LEN0037", /* X1 Carbon 2nd */ "LEN0038", "LEN0039", /* T440s */ "LEN0041", diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 764857b4e268..c11556563ef0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { }, }, { + /* Medion Akoya E7225 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Medion"), + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), + }, + }, + { /* Blue FB5601 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "blue"), diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index f722a0c466cf..c48da057dbb1 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = { .attach_dev = gart_iommu_attach_dev, .detach_dev = gart_iommu_detach_dev, .map = gart_iommu_map, + .map_sg = default_iommu_map_sg, .unmap = gart_iommu_unmap, .iova_to_phys = gart_iommu_iova_to_phys, .pgsize_bitmap = GART_IOMMU_PGSIZES, @@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev) do_gart_setup(gart, NULL); gart_handle = gart; - bus_set_iommu(&platform_bus_type, &gart_iommu_ops); + return 0; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 2b0468e3df6a..56b96c63dc4b 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain; static int gic_shared_intrs; static int gic_vpes; static unsigned int gic_cpu_pin; +static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; static void __gic_irq_dispatch(void); @@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val); break; case GIC_LOCAL_INT_TIMER: + /* CONFIG_MIPS_CMP workaround (see 
__gic_init) */ + val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin; gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val); break; case GIC_LOCAL_INT_PERFCTR: @@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr, if (cpu_has_veic) { /* Always use vector 1 in EIC mode */ gic_cpu_pin = 0; + timer_cpu_pin = gic_cpu_pin; set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, __gic_irq_dispatch); } else { gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, gic_irq_dispatch); + /* + * With the CMP implementation of SMP (deprecated), other CPUs + * are started by the bootloader and put into a timer-based + * waiting poll loop. We must not re-route those CPUs' local + * timer interrupts as the wait instruction will never finish, + * so just handle whatever CPU interrupt it is routed to by + * default. + * + * This workaround should be removed when CMP support is + * dropped. + */ + if (IS_ENABLED(CONFIG_MIPS_CMP) && + gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { + timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL, + GIC_VPE_TIMER_MAP)) & + GIC_MAP_MSK; + irq_set_chained_handler(MIPS_CPU_IRQ_BASE + + GIC_CPU_PIN_OFFSET + + timer_cpu_pin, + gic_irq_dispatch); + } else { + timer_cpu_pin = gic_cpu_pin; + } } gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 0b380603a578..d7c286656a25 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, add_ai(plci, &parms[5]); sig_req(plci, REJECT, 0); } - else if (Reject == 1 || Reject > 9) + else if (Reject == 1 || Reject >= 9) { add_ai(plci, &parms[5]); sig_req(plci, HANGUP, 0); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 5bdedf6df153..c355a226a024 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -5,6 +5,7 @@ menuconfig MD bool "Multiple devices driver support (RAID and LVM)" depends on BLOCK + select SRCU help Support multiple physical spindles through a single logical device. Required for RAID and logical volume management. diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index da3604e73e8a..1695ee5f3ffc 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -72,6 +72,19 @@ __acquires(bitmap->lock) /* this page has not been allocated yet */ spin_unlock_irq(&bitmap->lock); + /* It is possible that this is being called inside a + * prepare_to_wait/finish_wait loop from raid5.c:make_request(). + * In general it is not permitted to sleep in that context as it + * can cause the loop to spin freely. + * That doesn't apply here as we can only reach this point + * once within any such loop. + * When this function completes, either bp[page].map or + * bp[page].hijacked will be set. In either case, this function will + * abort before getting to this point again. So there is + * no risk of a free-spin, and so it is safe to assert + * that sleeping here is allowed. 
+ */ + sched_annotate_sleep(); mappage = kzalloc(PAGE_SIZE, GFP_NOIO); spin_lock_irq(&bitmap->lock); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 21b156242e42..c1c010498a21 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev, cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { DMERR("could not allocate metadata struct"); - return NULL; + return ERR_PTR(-ENOMEM); } atomic_set(&cmd->ref_count, 1); @@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, return cmd; cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); - if (cmd) { + if (!IS_ERR(cmd)) { mutex_lock(&table_lock); cmd2 = lookup(bdev); if (cmd2) { @@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, { struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device, policy_hint_size); - if (cmd && !same_params(cmd, data_block_size)) { + + if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { dm_cache_metadata_close(cmd); - return NULL; + return ERR_PTR(-EINVAL); } return cmd; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 493478989dbd..07705ee181e3 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) struct pool_c *pt = ti->private; struct pool *pool = pt->pool; + if (get_pool_mode(pool) >= PM_READ_ONLY) { + DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", + dm_device_name(pool->pool_md)); + return -EINVAL; + } + if (!strcasecmp(argv[0], "create_thin")) r = process_create_thin_mesg(argc, argv, pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c1b0d52bfcb0..b98765f6f77f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf, (unsigned long long)sh->sector, rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); } + + if (rcw > disks && rmw > disks && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + set_bit(STRIPE_DELAYED, &sh->state); + /* now if nothing is locked, and if we have enough data, * we can start a write request */ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index d6607ee9c855..84673ebcf428 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC config NETPOLL def_bool NETCONSOLE + select SRCU config NET_POLL_CONTROLLER def_bool NETPOLL diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 5e40a8b68cbe..b3b922adc0e4 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, cfhsi = netdev_priv(dev); cfhsi_netlink_parms(data, cfhsi); - dev_net_set(cfhsi->ndev, src_net); get_ops = symbol_get(cfhsi_get_ops); if (!get_ops) { diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index f94a9fa60488..c672c4dcffac 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev) c_can_irq_control(priv, false); + /* put ctrl to init on stop to end ongoing transmission */ + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); + /* deactivate pins */ pinctrl_pm_select_sleep_state(dev->dev.parent); priv->can.state 
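The dm-cache-metadata change above converts bare NULL returns into encoded error pointers, so callers can distinguish an allocation failure (-ENOMEM) from a parameter mismatch (-EINVAL). A minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention, using a hypothetical object type:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_obj {
        int value;
};

/* Return a valid pointer on success, or an encoded errno on failure. */
static struct example_obj *example_open(int arg)
{
        struct example_obj *obj;

        if (arg < 0)
                return ERR_PTR(-EINVAL);        /* parameter mismatch */

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);        /* allocation failure */

        obj->value = arg;
        return obj;
}

static int example_use(void)
{
        struct example_obj *obj = example_open(1);

        if (IS_ERR(obj))
                return PTR_ERR(obj);    /* recover the errno */

        kfree(obj);
        return 0;
}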
= CAN_STATE_STOPPED; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index c32cd61073bc..7af379ca861b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), buf, msg->len, - kvaser_usb_simple_msg_callback, priv); + kvaser_usb_simple_msg_callback, netdev); usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); @@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv = dev->nets[channel]; stats = &priv->netdev->stats; - if (status & M16C_STATE_BUS_RESET) { - kvaser_usb_unlink_tx_urbs(priv); - return; - } - skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; @@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); - if (status & M16C_STATE_BUS_OFF) { + if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; @@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, } new_state = CAN_STATE_ERROR_PASSIVE; - } - - if (status == M16C_STATE_BUS_ERROR) { + } else if (status & M16C_STATE_BUS_ERROR) { if ((priv->can.state < CAN_STATE_ERROR_WARNING) && ((txerr >= 96) || (rxerr >= 96))) { cf->can_id |= CAN_ERR_CRTL; @@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv->can.can_stats.error_warning++; new_state = CAN_STATE_ERROR_WARNING; - } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { + } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && + ((txerr < 96) && (rxerr < 96))) { cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; @@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, { struct kvaser_usb *dev; int err = -ENOMEM; - int i; + int i, retry = 3; dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); if (!dev) @@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); - err = kvaser_usb_get_software_info(dev); + /* On some x86 laptops, plugging a Kvaser device again after + * an unplug makes the firmware always ignore the very first + * command. For such a case, provide some room for retries + * instead of completely exiting the driver. 
+ */ + do { + err = kvaser_usb_get_software_info(dev); + } while (--retry && err == -ETIMEDOUT); + if (err) { dev_err(&intf->dev, "Cannot get software infos, error %d\n", err); diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 7a5e4aa5415e..77f1f6048ddd 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -45,7 +45,7 @@ config AMD8111_ETH config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" - depends on ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API && !ARM ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from @@ -142,7 +142,7 @@ config PCMCIA_NMCLAN config NI65 tristate "NI6510 support" - depends on ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API && !ARM ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 5b22764ba88d..27245efe9f50 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) do { /* WARNING: MACE_IR is a READ/CLEAR port! */ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); + if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS) + return IRQ_NONE; pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 75b08c63d39f..29a09271b64a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -767,16 +767,17 @@ #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 #define MTL_Q_RQDR 0x4c +#define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQFCR_RFA_INDEX 1 +#define MTL_Q_RQFCR_RFA_WIDTH 6 +#define MTL_Q_RQFCR_RFD_INDEX 17 +#define MTL_Q_RQFCR_RFD_WIDTH 6 #define MTL_Q_RQOMR_EHFC_INDEX 7 #define MTL_Q_RQOMR_EHFC_WIDTH 1 -#define MTL_Q_RQOMR_RFA_INDEX 8 -#define MTL_Q_RQOMR_RFA_WIDTH 3 -#define MTL_Q_RQOMR_RFD_INDEX 13 -#define MTL_Q_RQOMR_RFD_WIDTH 3 #define MTL_Q_RQOMR_RQS_INDEX 16 #define MTL_Q_RQOMR_RQS_WIDTH 9 #define MTL_Q_RQOMR_RSF_INDEX 5 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 53f5f66ec2ee..4c66cd1d1e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->rx_q_count; i++) { /* Activate flow control when less than 4k left in fifo */ - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); /* De-activate flow control when more than 6k left in fifo */ - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); } } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7bb5f07dbeef..e5ffb2ccb67d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); 
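The kvaser_usb probe fix just above tolerates hardware that ignores the very first command after a replug by retrying a bounded number of times, but only on timeout. A generic sketch of that idiom (example_query_fw() is a hypothetical stand-in for the real command):

#include <linux/errno.h>

/* Hypothetical stand-in for the first command sent to the device. */
static int example_query_fw(void)
{
        return 0;
}

static int example_query_fw_retry(void)
{
        int retry = 3;
        int err;

        /* Retry only on timeout; any other error is returned at once. */
        do {
                err = example_query_fw();
        } while (--retry && err == -ETIMEDOUT);

        return err;
}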
hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, HASHTBLSZ); @@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) break; } - /* The Queue and Channel counts are zero based so increment them + /* The Queue, Channel and TC counts are zero based so increment them * to get the actual number */ hw_feat->rx_q_cnt++; hw_feat->tx_q_cnt++; hw_feat->rx_ch_cnt++; hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; DBGPR("<--xgbe_get_all_hw_features\n"); } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 83a50280bb70..793f3b73eeff 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) break; + /* read fpqnum field after dataaddr field */ + dma_rmb(); if (is_rx_desc(raw_desc)) ret = xgene_enet_rx_frame(ring, raw_desc); else diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..e468ed3f210f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) } #endif if (!bnx2x_fp_lock_napi(fp)) - return work_done; + return budget; for_each_cos_in_tx_queue(fp, cos) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 7403dff8f14a..905ac5f5d9a6 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -32,7 +32,8 @@ config CS89x0 will be called cs89x0. config CS89x0_PLATFORM - bool "CS89x0 platform driver support" + bool "CS89x0 platform driver support" if HAS_IOPORT_MAP + default !HAS_IOPORT_MAP depends on CS89x0 help Say Y to compile the cs89x0 driver as a platform driver. 
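The xgene_enet hunk above inserts dma_rmb() between the descriptor-validity check and the reads of its remaining fields, so the CPU cannot load payload fields before the slot is known to be complete. A sketch of the pattern with a hypothetical descriptor layout:

#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical descriptor the device fills in coherent memory:
 * dataaddr is written first, fpqnum last, so a non-zero fpqnum
 * means the whole descriptor is valid. */
struct example_desc {
        u64 dataaddr;
        u64 fpqnum;
};

static bool example_desc_ready(const struct example_desc *desc, u64 *addr)
{
        if (!desc->fpqnum)
                return false;   /* slot not filled yet */

        /* Keep the payload load after the validity check; dma_rmb()
         * is enough here because the memory is coherent DMA. */
        dma_rmb();

        *addr = desc->dataaddr;
        return true;
}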
This diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b29e027c476e..e356afa44e7d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) int err; if (!enic_poll_lock_napi(&enic->rq[rq])) - return work_done; + return budget; /* Service RQ */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3e1a9c1a67a9..fda12fb32ec7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-troughs */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 63c807c9b21c..edea13b0ee85 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work) static int igbvf_tso(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, + __be16 protocol) { struct e1000_adv_tx_context_desc *context_desc; struct igbvf_buffer *buffer_info; @@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, l4len = tcp_hdrlen(skb); *hdr_len += l4len; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; @@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter, static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) + struct sk_buff *skb, u32 tx_flags, + __be16 protocol) { struct e1000_adv_tx_context_desc *context_desc; unsigned int i; @@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); if (skb->ip_summed == CHECKSUM_PARTIAL) { - switch (skb->protocol) { + switch (protocol) { case htons(ETH_P_IP): tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; if (ip_hdr(skb)->protocol == IPPROTO_TCP) @@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, u8 hdr_len = 0; int count = 0; int tso = 0; + __be16 protocol = vlan_get_protocol(skb); if (test_bit(__IGBVF_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); @@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); } - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tx_flags |= IGBVF_TX_FLAGS_IPV4; first = tx_ring->next_to_use; tso = skb_is_gso(skb) ? 
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; if (unlikely(tso < 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; - else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2ed2c7de2304..67b02bde179e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (!vhdr) goto out_drop; - protocol = vhdr->h_vlan_encapsulated_proto; tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } + protocol = vlan_get_protocol(skb); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && adapter->ptp_clock && diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 62a0d8e0f17d..38c7a0be8197 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (skb->protocol == htons(ETH_P_IP)) { + if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; - switch (skb->protocol) { + switch (first->protocol) { case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a62fc38f045e..1c75829eb166 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_dma) && \ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +#define DESC_DMA_MAP_SINGLE 0 +#define DESC_DMA_MAP_PAGE 1 + /* * RX/TX descriptors. 
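The igbvf, ixgbe and ixgbevf hunks above all make the same correction: for VLAN-tagged traffic, skb->protocol holds the 802.1Q EtherType, so the TSO and checksum paths must look at the encapsulated protocol via vlan_get_protocol(). A reduced sketch:

#include <linux/if_vlan.h>

/* For a VLAN-tagged frame skb->protocol is the 802.1Q EtherType;
 * vlan_get_protocol() digs out the encapsulated L3 protocol that
 * the checksum/TSO hardware actually needs. */
static bool example_payload_is_ipv4(struct sk_buff *skb)
{
        return vlan_get_protocol(skb) == htons(ETH_P_IP);
}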
*/ @@ -362,6 +366,7 @@ struct tx_queue { dma_addr_t tso_hdrs_dma; struct tx_desc *tx_desc_area; + char *tx_desc_mapping; /* array to track the type of the dma mapping */ dma_addr_t tx_desc_dma; int tx_desc_area_size; @@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; desc->l4i_chk = 0; desc->byte_cnt = length; @@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) skb_frag_t *this_frag; int tx_index; struct tx_desc *desc; - void *addr; this_frag = &skb_shinfo(skb)->frags[frag]; - addr = page_address(this_frag->page.p) + this_frag->page_offset; tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; /* * The last fragment will generate an interrupt @@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) desc->l4i_chk = 0; desc->byte_cnt = skb_frag_size(this_frag); - desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, - desc->byte_cnt, DMA_TO_DEVICE); + desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, + this_frag, 0, desc->byte_cnt, + DMA_TO_DEVICE); } } @@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; if (nr_frags) { txq_submit_frag_skb(txq, skb); @@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) int tx_index; struct tx_desc *desc; u32 cmd_sts; + char desc_dma_map; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; + desc_dma_map = txq->tx_desc_mapping[tx_index]; + cmd_sts = desc->cmd_sts; if (cmd_sts & BUFFER_OWNED_BY_DMA) { @@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) reclaimed++; txq->tx_desc_count--; - if (!IS_TSO_HEADER(txq, desc->buf_ptr)) - dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, - desc->byte_cnt, DMA_TO_DEVICE); + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { + + if (desc_dma_map == DESC_DMA_MAP_PAGE) + dma_unmap_page(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + else + dma_unmap_single(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + } if (cmd_sts & TX_ENABLE_INTERRUPT) { struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); @@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) struct tx_queue *txq = mp->txq + index; struct tx_desc *tx_desc; int size; + int ret; int i; txq->index = index; @@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) nexti * sizeof(struct tx_desc); } + txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), + GFP_KERNEL); + if (!txq->tx_desc_mapping) { + ret = -ENOMEM; + goto err_free_desc_area; + } + /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, txq->tx_ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); if (txq->tso_hdrs == NULL) { - dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, - txq->tx_desc_area, txq->tx_desc_dma); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_desc_mapping; } skb_queue_head_init(&txq->tx_skb); return 
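The mv643xx_eth changes above add one byte of bookkeeping per TX descriptor because fragments are now mapped with skb_frag_dma_map() while linear data still uses dma_map_single(), and the unmap call must match the map call. A sketch of the reclaim side (names are hypothetical):

#include <linux/dma-mapping.h>

#define EXAMPLE_MAP_SINGLE      0       /* came from dma_map_single() */
#define EXAMPLE_MAP_PAGE        1       /* came from skb_frag_dma_map() */

/* One byte of state per descriptor records which mapping API was
 * used, so reclaim can issue the matching unmap call. */
static void example_unmap_desc(struct device *dev, dma_addr_t addr,
                               size_t len, char map_type)
{
        if (map_type == EXAMPLE_MAP_PAGE)
                dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
        else
                dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}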
0; + +err_free_desc_mapping: + kfree(txq->tx_desc_mapping); +err_free_desc_area: + if (index == 0 && size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); + return ret; } static void txq_deinit(struct tx_queue *txq) @@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq) else dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); + kfree(txq->tx_desc_mapping); + if (txq->tso_hdrs) dma_free_coherent(mp->dev->dev.parent, txq->tx_ring_size * TSO_HEADER_SIZE, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index bdd4eea2247c..210691c89b6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -235,7 +235,8 @@ do { \ extern int mlx4_log_num_mgm_entry_size; extern int log_mtts_per_seg; -#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) +#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ + MLX4_MFUNC_MAX)) #define ALL_SLAVES 0xff struct mlx4_bitmap { diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..c531c8ae1be4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) work_done = netxen_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 18e5de72e9b4..4e1f58cf19ce 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); @@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget) napi_complete(&tx_ring->napi); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_tx_intr(adapter, tx_ring); + } else { + /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/ + work_done = budget; } return work_done; @@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); - if 
((work_done < budget) && tx_complete) { + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); qlcnic_enable_sds_intr(adapter, sds_ring); } @@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) napi_complete(&tx_ring->napi); if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) qlcnic_enable_tx_intr(adapter, tx_ring); + } else { + /* need a repoll */ + work_done = budget; } return work_done; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 6c904a6cad2a..ef5aed3b1225 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, { struct ql_adapter *qdev = netdev_priv(ndev); int status = 0; + bool need_restart = netif_running(ndev); - status = ql_adapter_down(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring down the adapter\n"); - return status; + if (need_restart) { + status = ql_adapter_down(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring down the adapter\n"); + return status; + } } /* update the features with resent change */ ndev->features = features; - status = ql_adapter_up(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring up the adapter\n"); - return status; + if (need_restart) { + status = ql_adapter_up(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring up the adapter\n"); + return status; + } } + return status; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6576243222af..04283fe0e6a7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADRL31] = 0x01fc, }; +static void sh_eth_rcv_snd_disable(struct net_device *ndev); +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev) int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + dma_addr_t dma_addr; mdp->cur_rx = 0; mdp->cur_tx = 0; @@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev) /* skb */ mdp->rx_skbuff[i] = NULL; skb = netdev_alloc_skb(ndev, skbuff_size); - mdp->rx_skbuff[i] = skb; if (skb == NULL) break; sh_eth_set_receive_align(skb); @@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev) rxdesc = &mdp->rx_ring[i]; /* The size of the buffer is a multiple of 16 bytes. 
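The bnx2x, enic, netxen and qlcnic hunks above fix the same NAPI contract violation: a poll routine must not complete NAPI while work remains, and it signals "not done" by returning the full budget. A minimal sketch with hypothetical cleanup helpers:

#include <linux/netdevice.h>

/* Hypothetical helpers standing in for the driver's TX/RX cleanup. */
static bool example_clean_tx(struct napi_struct *napi) { return true; }
static int example_clean_rx(struct napi_struct *napi, int budget) { return 0; }
static void example_irq_enable(struct napi_struct *napi) { }

static int example_poll(struct napi_struct *napi, int budget)
{
        bool tx_complete = example_clean_tx(napi);
        int work_done = example_clean_rx(napi, budget);

        /* Unfinished TX work must force a repoll: report the full
         * budget so the core keeps this NAPI instance scheduled. */
        if (!tx_complete)
                work_done = budget;

        if (work_done < budget) {
                napi_complete(napi);
                example_irq_enable(napi);       /* re-arm device IRQs */
        }
        return work_done;
}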
*/ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); - dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, - DMA_FROM_DEVICE); - rxdesc->addr = virt_to_phys(skb->data); + dma_addr = dma_map_single(&ndev->dev, skb->data, + rxdesc->buffer_length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[i] = skb; + rxdesc->addr = dma_addr; rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); /* Rx descriptor address set */ @@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) RFLR); sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); - if (start) + if (start) { + mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + } /* PAUSE Prohibition */ val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | @@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) return ret; } +static void sh_eth_dev_exit(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Deactivate all TX descriptors, so DMA should stop at next + * packet boundary if it's currently running + */ + for (i = 0; i < mdp->num_tx_ring; i++) + mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); + + /* Disable TX FIFO egress to MAC */ + sh_eth_rcv_snd_disable(ndev); + + /* Stop RX DMA at next packet boundary */ + sh_eth_write(ndev, 0, EDRRR); + + /* Aside from TX DMA, we can't tell when the hardware is + * really stopped, so we need to reset to make sure. + * Before doing that, wait for long enough to *probably* + * finish transmitting the last packet and poll stats. + */ + msleep(2); /* max frame time at 10 Mbps < 1250 us */ + sh_eth_get_stats(ndev); + sh_eth_reset(ndev); +} + /* free Tx skb function */ static int sh_eth_txfree(struct net_device *ndev) { @@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) u16 pkt_len = 0; u32 desc_status; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + dma_addr_t dma_addr; boguscnt = min(boguscnt, *quota); limit = boguscnt; @@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) mdp->rx_skbuff[entry] = NULL; if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); - dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, - ALIGN(mdp->rx_buf_sz, 16), - DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, rxdesc->addr, + ALIGN(mdp->rx_buf_sz, 16), + DMA_FROM_DEVICE); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); @@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (mdp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(ndev, skbuff_size); - mdp->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. 
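The sh_eth receive-ring hunks above stop deriving the device address with virt_to_phys() and instead keep what dma_map_single() returned, checking it with dma_mapping_error() before the buffer is published to hardware. A reduced sketch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Map an RX buffer and refuse to publish it on mapping failure;
 * the skb is consumed here, so the caller must not free it again. */
static int example_map_rx(struct device *dev, struct sk_buff *skb,
                          unsigned int len, dma_addr_t *addr)
{
        *addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *addr)) {
                kfree_skb(skb);
                return -ENOMEM;
        }
        return 0;
}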
*/ sh_eth_set_receive_align(skb); - dma_map_single(&ndev->dev, skb->data, - rxdesc->buffer_length, DMA_FROM_DEVICE); + dma_addr = dma_map_single(&ndev->dev, skb->data, + rxdesc->buffer_length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[entry] = skb; skb_checksum_none_assert(skb); - rxdesc->addr = virt_to_phys(skb->data); + rxdesc->addr = dma_addr; } if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= @@ -1573,7 +1617,6 @@ ignore_link: if (intr_status & EESR_RFRMER) { /* Receive Frame Overflow int */ ndev->stats.rx_frame_errors++; - netif_err(mdp, rx_err, ndev, "Receive Abort\n"); } } @@ -1592,13 +1635,11 @@ ignore_link: if (intr_status & EESR_RDE) { /* Receive Descriptor Empty int */ ndev->stats.rx_over_errors++; - netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); } if (intr_status & EESR_RFE) { /* Receive FIFO Overflow int */ ndev->stats.rx_fifo_errors++; - netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); } if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { @@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) ret = IRQ_HANDLED; else - goto other_irq; + goto out; + + if (!likely(mdp->irq_enabled)) { + sh_eth_write(ndev, 0, EESIPR); + goto out; + } if (intr_status & EESR_RX_CHECK) { if (napi_schedule_prep(&mdp->napi)) { @@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) sh_eth_error(ndev, intr_status); } -other_irq: +out: spin_unlock(&mdp->lock); return ret; @@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* Reenable Rx interrupts */ - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + if (mdp->irq_enabled) + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); out: return budget - quota; } @@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev, return -EINVAL; if (netif_running(ndev)) { + netif_device_detach(ndev); netif_tx_disable(ndev); - /* Disable interrupts by clearing the interrupt mask. */ - sh_eth_write(ndev, 0x0000, EESIPR); - /* Stop the chip's Tx and Rx processes. */ - sh_eth_write(ndev, 0, EDTRR); - sh_eth_write(ndev, 0, EDRRR); + + /* Serialise with the interrupt handler and NAPI, then + * disable interrupts. We have to clear the + * irq_enabled flag first to ensure that interrupts + * won't be re-enabled. + */ + mdp->irq_enabled = false; synchronize_irq(ndev->irq); - } + napi_synchronize(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); - /* Free all the skbuffs in the Rx queue. */ - sh_eth_ring_free(ndev); - /* Free DMA buffer */ - sh_eth_free_dma_buffer(mdp); + sh_eth_dev_exit(ndev); + + /* Free all the skbuffs in the Rx queue. 
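The sh_eth interrupt and NAPI changes above introduce an irq_enabled flag that is cleared before waiting for in-flight handlers, closing the race where the ISR or poll loop re-enables interrupts mid-teardown. A sketch of the quiesce sequence (the private struct is hypothetical):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
        bool irq_enabled;
        int irq;
        struct napi_struct napi;
};

static void example_quiesce(struct example_priv *priv)
{
        /* The ISR and poll routine check this flag before re-arming
         * interrupts, so clearing it first closes the race. */
        priv->irq_enabled = false;
        synchronize_irq(priv->irq);     /* wait out a running handler */
        napi_synchronize(&priv->napi);  /* wait out a running poll */
        /* now it is safe to write the interrupt-mask register */
}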
*/ + sh_eth_ring_free(ndev); + /* Free DMA buffer */ + sh_eth_free_dma_buffer(mdp); + } /* Set new parameters */ mdp->num_rx_ring = ring->rx_pending; mdp->num_tx_ring = ring->tx_pending; - ret = sh_eth_ring_init(ndev); - if (ret < 0) { - netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); - return ret; - } - ret = sh_eth_dev_init(ndev, false); - if (ret < 0) { - netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); - return ret; - } - if (netif_running(ndev)) { + ret = sh_eth_ring_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", + __func__); + return ret; + } + ret = sh_eth_dev_init(ndev, false); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", + __func__); + return ret; + } + + mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); /* Setting the Rx mode will start the Rx process. */ sh_eth_write(ndev, EDRRR_R, EDRRR); - netif_wake_queue(ndev); + netif_device_attach(ndev); } return 0; @@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } spin_unlock_irqrestore(&mdp->lock, flags); + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + entry = mdp->cur_tx % mdp->num_tx_ring; mdp->tx_skbuff[entry] = skb; txdesc = &mdp->tx_ring[entry]; @@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb->len + 2); txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (skb->len < ETH_ZLEN) - txdesc->buffer_length = ETH_ZLEN; - else - txdesc->buffer_length = skb->len; + if (dma_mapping_error(&ndev->dev, txdesc->addr)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + txdesc->buffer_length = skb->len; if (entry >= mdp->num_tx_ring - 1) txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); @@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev) netif_stop_queue(ndev); - /* Disable interrupts by clearing the interrupt mask. */ + /* Serialise with the interrupt handler and NAPI, then disable + * interrupts. We have to clear the irq_enabled flag first to + * ensure that interrupts won't be re-enabled. + */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); sh_eth_write(ndev, 0x0000, EESIPR); - /* Stop the chip's Tx and Rx processes. */ - sh_eth_write(ndev, 0, EDTRR); - sh_eth_write(ndev, 0, EDRRR); + sh_eth_dev_exit(ndev); - sh_eth_get_stats(ndev); /* PHY Disconnect */ if (mdp->phydev) { phy_stop(mdp->phydev); @@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev) free_irq(ndev->irq, ndev); - napi_disable(&mdp->napi); - /* Free all the skbuffs in the Rx queue. */ sh_eth_ring_free(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 71f5de1171bd..332d3c16d483 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -513,6 +513,7 @@ struct sh_eth_private { u32 rx_buf_sz; /* Based on MTU+slack. */ int edmac_endian; struct napi_struct napi; + bool irq_enabled; /* MII transceiver section. 
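The sh_eth_start_xmit() hunk above replaces the old trick of reporting ETH_ZLEN for short frames with skb_padto(), which really extends the buffer with zeroes before it is DMA-mapped. A reduced sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *ndev)
{
        /* Really pad runt frames with zeroes before DMA mapping,
         * instead of telling the hardware the buffer is longer than
         * it is; on failure skb_padto() has already freed the skb. */
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;

        /* ... map skb->data for skb->len bytes and queue it ... */
        return NETDEV_TX_OK;
}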
*/ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c6b7c1651e5..cf62ff4c8c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) * @addr: iobase memory address * Description: this is the main probe function used to * call the alloc_etherdev, allocate the priv structure. + * Return: + * on success the new private structure is returned, otherwise the error + * pointer. */ struct stmmac_priv *stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, @@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, ndev = alloc_etherdev(sizeof(struct stmmac_priv)); if (!ndev) - return NULL; + return ERR_PTR(-ENOMEM); SET_NETDEV_DEV(ndev, device); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index d2835bf7b4fb..3699b98d5b2c 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; } + nskb->queue_mapping = skb->queue_mapping; dev_kfree_skb(skb); skb = nskb; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e068d48b0f21..a39131f494ec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, if (vid == priv->data.default_vlan) return 0; + if (priv->data.dual_emac) { + /* In dual EMAC, reserved VLAN id should not be used for + * creating VLAN interfaces as this can break the dual + * EMAC port separation + */ + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); return cpsw_add_vlan_ale_entry(priv, vid); } @@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == priv->data.default_vlan) return 0; + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); ret = cpsw_ale_del_vlan(priv->ale, vid, 0); if (ret != 0) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9f49c0129a78..7cd4eb38abfa 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device, u64 req_id; unsigned int section_index = NETVSC_INVALID_INDEX; u32 msg_size = 0; - struct sk_buff *skb; + struct sk_buff *skb = NULL; u16 q_idx = packet->q_idx; @@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device, packet); skb = (struct sk_buff *) (unsigned long)packet->send_completion_tid; - if (skb) - dev_kfree_skb_any(skb); packet->page_buf_cnt = 0; } } @@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device, packet, ret); } + if (ret != 0) { + if (section_index != NETVSC_INVALID_INDEX) + netvsc_free_send_slot(net_device, section_index); + } else if (skb) { + dev_kfree_skb_any(skb); + } + return ret; } diff --git a/drivers/net/ipvlan/ipvlan_core.c 
b/drivers/net/ipvlan/ipvlan_core.c index a14d87783245..2e195289ddf4 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) }; dst = ip6_route_output(dev_net(dev), NULL, &fl6); - if (IS_ERR(dst)) + if (dst->error) { + ret = dst->error; + dst_release(dst); goto err; - + } skb_dst_drop(skb); skb_dst_set(skb, dst); err = ip6_local_out(skb); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 7df221788cd4..919f4fccc322 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -17,7 +17,6 @@ #include <linux/fs.h> #include <linux/uio.h> -#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> @@ -81,7 +80,7 @@ static struct cdev macvtap_cdev; static const struct proto_ops macvtap_socket_ops; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6) + NETIF_F_TSO6 | NETIF_F_UFO) #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) @@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q, gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: - pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", - current->comm); gso_type = SKB_GSO_UDP; - if (skb->protocol == htons(ETH_P_IPV6)) - ipv6_proxy_select_ident(skb); break; default: return -EINVAL; @@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (sinfo->gso_type & SKB_GSO_UDP) + vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (sinfo->gso_type & SKB_GSO_TCP_ECN) @@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } + + if (arg & TUN_F_UFO) + feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. */ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN)) + TUN_F_TSO_ECN | TUN_F_UFO)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index 602c625d95d5..b5edc7f96a39 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, /* * See if we managed to reduce the size of the packet. 
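The ipvlan fix above hinges on an easy-to-miss API detail: ip6_route_output() never returns NULL or an ERR_PTR; failures are reported through dst->error on a real, refcounted dst that must still be released. A sketch of the correct handling:

#include <net/dst.h>
#include <net/ip6_route.h>

static int example_v6_route(struct net *net, struct flowi6 *fl6,
                            struct dst_entry **dstp)
{
        struct dst_entry *dst = ip6_route_output(net, NULL, fl6);

        if (dst->error) {               /* not IS_ERR(): see above */
                int err = dst->error;

                dst_release(dst);       /* error dsts hold a ref too */
                return err;
        }
        *dstp = dst;
        return 0;
}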
*/ - if (olen < isize) { + if (olen < isize && olen <= osize) { state->stats.comp_bytes += olen; state->stats.comp_packets++; } else { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8c8dc16839a7..10f9e4021b5a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -65,7 +65,6 @@ #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> -#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> @@ -187,7 +186,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6) + NETIF_F_TSO6|NETIF_F_UFO) int vnet_hdr_sz; int sndbuf; @@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, break; } - skb_reset_network_header(skb); - if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: - { - static bool warned; - - if (!warned) { - warned = true; - netdev_warn(tun->dev, - "%s: using disabled UFO feature; please fix this program\n", - current->comm); - } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - if (skb->protocol == htons(ETH_P_IPV6)) - ipv6_proxy_select_ident(skb); break; - } default: tun->dev->stats.rx_frame_errors++; kfree_skb(skb); @@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } + skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); rxhash = skb_get_hash(skb); @@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (sinfo->gso_type & SKB_GSO_UDP) + gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; else { pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", @@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } + + if (arg & TUN_F_UFO) { + features |= NETIF_F_UFO; + arg &= ~TUN_F_UFO; + } } /* This gives the user a way to test for new features in future by diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 99b69af14274..4a1e9c489f1f 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy) int ret; udelay(1); - ret = sr_read_reg(dev, EPCR, &tmp); + ret = sr_read_reg(dev, SR_EPCR, &tmp); if (ret < 0) return ret; @@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg, mutex_lock(&dev->phy_mutex); - sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); - sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); + sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); + sr_write_reg(dev, SR_EPCR, phy ? 
(EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; - sr_write_reg(dev, EPCR, 0x0); - ret = sr_read(dev, EPDR, 2, value); + sr_write_reg(dev, SR_EPCR, 0x0); + ret = sr_read(dev, SR_EPDR, 2, value); netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", phy, reg, *value, ret); @@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg, mutex_lock(&dev->phy_mutex); - ret = sr_write(dev, EPDR, 2, &value); + ret = sr_write(dev, SR_EPDR, 2, &value); if (ret < 0) goto out_unlock; - sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); - sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : + sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); + sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : (EPCR_WEP | EPCR_ERPRW)); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; - sr_write_reg(dev, EPCR, 0x0); + sr_write_reg(dev, SR_EPCR, 0x0); out_unlock: mutex_unlock(&dev->phy_mutex); @@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) if (loc == MII_BMSR) { u8 value; - sr_read_reg(dev, NSR, &value); + sr_read_reg(dev, SR_NSR, &value); if (value & NSR_LINKST) rc = 1; } @@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev) int rc = 0; /* Get the Link Status directly */ - sr_read_reg(dev, NSR, &value); + sr_read_reg(dev, SR_NSR, &value); if (value & NSR_LINKST) rc = 1; @@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev) } } - sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); - sr_write_reg_async(dev, RCR, rx_ctl); + sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes); + sr_write_reg_async(dev, SR_RCR, rx_ctl); } static int sr9700_set_mac_address(struct net_device *netdev, void *p) @@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p) } memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - sr_write_async(dev, PAR, 6, netdev->dev_addr); + sr_write_async(dev, SR_PAR, 6, netdev->dev_addr); return 0; } @@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) mii->phy_id_mask = 0x1f; mii->reg_num_mask = 0x1f; - sr_write_reg(dev, NCR, NCR_RST); + sr_write_reg(dev, SR_NCR, NCR_RST); udelay(20); /* read MAC @@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) * EEPROM automatically to PAR. In case there is no EEPROM externally, * a default MAC address is stored in PAR for making chip work properly. */ - if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { + if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) { netdev_err(netdev, "Error reading MAC address\n"); ret = -ENODEV; goto out; } /* power up and reset phy */ - sr_write_reg(dev, PRR, PRR_PHY_RST); + sr_write_reg(dev, SR_PRR, PRR_PHY_RST); /* at least 10ms, here 20ms for safe */ mdelay(20); - sr_write_reg(dev, PRR, 0); + sr_write_reg(dev, SR_PRR, 0); /* at least 1ms, here 2ms for reading right register */ udelay(2 * 1000); diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h index fd687c575e74..258b030277e7 100644 --- a/drivers/net/usb/sr9700.h +++ b/drivers/net/usb/sr9700.h @@ -14,13 +14,13 @@ /* sr9700 spec. 
register table on Linux platform */ /* Network Control Reg */ -#define NCR 0x00 +#define SR_NCR 0x00 #define NCR_RST (1 << 0) #define NCR_LBK (3 << 1) #define NCR_FDX (1 << 3) #define NCR_WAKEEN (1 << 6) /* Network Status Reg */ -#define NSR 0x01 +#define SR_NSR 0x01 #define NSR_RXRDY (1 << 0) #define NSR_RXOV (1 << 1) #define NSR_TX1END (1 << 2) @@ -30,7 +30,7 @@ #define NSR_LINKST (1 << 6) #define NSR_SPEED (1 << 7) /* Tx Control Reg */ -#define TCR 0x02 +#define SR_TCR 0x02 #define TCR_CRC_DIS (1 << 1) #define TCR_PAD_DIS (1 << 2) #define TCR_LC_CARE (1 << 3) @@ -38,7 +38,7 @@ #define TCR_EXCECM (1 << 5) #define TCR_LF_EN (1 << 6) /* Tx Status Reg for Packet Index 1 */ -#define TSR1 0x03 +#define SR_TSR1 0x03 #define TSR1_EC (1 << 2) #define TSR1_COL (1 << 3) #define TSR1_LC (1 << 4) @@ -46,7 +46,7 @@ #define TSR1_LOC (1 << 6) #define TSR1_TLF (1 << 7) /* Tx Status Reg for Packet Index 2 */ -#define TSR2 0x04 +#define SR_TSR2 0x04 #define TSR2_EC (1 << 2) #define TSR2_COL (1 << 3) #define TSR2_LC (1 << 4) @@ -54,7 +54,7 @@ #define TSR2_LOC (1 << 6) #define TSR2_TLF (1 << 7) /* Rx Control Reg*/ -#define RCR 0x05 +#define SR_RCR 0x05 #define RCR_RXEN (1 << 0) #define RCR_PRMSC (1 << 1) #define RCR_RUNT (1 << 2) @@ -62,87 +62,87 @@ #define RCR_DIS_CRC (1 << 4) #define RCR_DIS_LONG (1 << 5) /* Rx Status Reg */ -#define RSR 0x06 +#define SR_RSR 0x06 #define RSR_AE (1 << 2) #define RSR_MF (1 << 6) #define RSR_RF (1 << 7) /* Rx Overflow Counter Reg */ -#define ROCR 0x07 +#define SR_ROCR 0x07 #define ROCR_ROC (0x7F << 0) #define ROCR_RXFU (1 << 7) /* Back Pressure Threshold Reg */ -#define BPTR 0x08 +#define SR_BPTR 0x08 #define BPTR_JPT (0x0F << 0) #define BPTR_BPHW (0x0F << 4) /* Flow Control Threshold Reg */ -#define FCTR 0x09 +#define SR_FCTR 0x09 #define FCTR_LWOT (0x0F << 0) #define FCTR_HWOT (0x0F << 4) /* rx/tx Flow Control Reg */ -#define FCR 0x0A +#define SR_FCR 0x0A #define FCR_FLCE (1 << 0) #define FCR_BKPA (1 << 4) #define FCR_TXPEN (1 << 5) #define FCR_TXPF (1 << 6) #define FCR_TXP0 (1 << 7) /* Eeprom & Phy Control Reg */ -#define EPCR 0x0B +#define SR_EPCR 0x0B #define EPCR_ERRE (1 << 0) #define EPCR_ERPRW (1 << 1) #define EPCR_ERPRR (1 << 2) #define EPCR_EPOS (1 << 3) #define EPCR_WEP (1 << 4) /* Eeprom & Phy Address Reg */ -#define EPAR 0x0C +#define SR_EPAR 0x0C #define EPAR_EROA (0x3F << 0) #define EPAR_PHY_ADR_MASK (0x03 << 6) #define EPAR_PHY_ADR (0x01 << 6) /* Eeprom & Phy Data Reg */ -#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ +#define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ /* Wakeup Control Reg */ -#define WCR 0x0F +#define SR_WCR 0x0F #define WCR_MAGICST (1 << 0) #define WCR_LINKST (1 << 2) #define WCR_MAGICEN (1 << 3) #define WCR_LINKEN (1 << 5) /* Physical Address Reg */ -#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ +#define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ /* Multicast Address Reg */ -#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ +#define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ /* 0x1e unused */ /* Phy Reset Reg */ -#define PRR 0x1F +#define SR_PRR 0x1F #define PRR_PHY_RST (1 << 0) /* Tx sdram Write Pointer Address Low */ -#define TWPAL 0x20 +#define SR_TWPAL 0x20 /* Tx sdram Write Pointer Address High */ -#define TWPAH 0x21 +#define SR_TWPAH 0x21 /* Tx sdram Read Pointer Address Low */ -#define TRPAL 0x22 +#define SR_TRPAL 0x22 /* Tx sdram Read Pointer Address High */ -#define TRPAH 0x23 +#define SR_TRPAH 0x23 /* Rx sdram Write Pointer Address Low */ -#define RWPAL 0x24 +#define SR_RWPAL 0x24 /* Rx sdram 
Write Pointer Address High */ -#define RWPAH 0x25 +#define SR_RWPAH 0x25 /* Rx sdram Read Pointer Address Low */ -#define RRPAL 0x26 +#define SR_RRPAL 0x26 /* Rx sdram Read Pointer Address High */ -#define RRPAH 0x27 +#define SR_RRPAH 0x27 /* Vendor ID register */ -#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ +#define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ /* Product ID register */ -#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ +#define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ /* CHIP Revision register */ -#define CHIPR 0x2C +#define SR_CHIPR 0x2C /* 0x2D --> 0xEF unused */ /* USB Device Address */ -#define USBDA 0xF0 +#define SR_USBDA 0xF0 #define USBDA_USBFA (0x7F << 0) /* RX packet Counter Reg */ -#define RXC 0xF1 +#define SR_RXC 0xF1 /* Tx packet Counter & USB Status Reg */ -#define TXC_USBS 0xF2 +#define SR_TXC_USBS 0xF2 #define TXC_USBS_TXC0 (1 << 0) #define TXC_USBS_TXC1 (1 << 1) #define TXC_USBS_TXC2 (1 << 2) @@ -150,7 +150,7 @@ #define TXC_USBS_SUSFLAG (1 << 6) #define TXC_USBS_RXFAULT (1 << 7) /* USB Control register */ -#define USBC 0xF4 +#define SR_USBC 0xF4 #define USBC_EP3NAK (1 << 4) #define USBC_EP3ACK (1 << 5) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5ca97713bfb3..059fdf1bf5ee 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: - { - static bool warned; - - if (!warned) { - warned = true; - netdev_warn(dev, - "host using disabled UFO feature; please fix it\n"); - } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; - } case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; @@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) @@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO + dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) + dev->hw_features |= NETIF_F_UFO; if (gso) - dev->features |= dev->hw_features & NETIF_F_ALL_TSO; + dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) @@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev) /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) @@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7fbd89fbe107..a8c755dcab14 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work) dev_put(vxlan->dev); } -static int vxlan_newlink(struct net *net, struct net_device *dev, +static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - struct vxlan_net *vn = net_generic(net, vxlan_net_id); + struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; __u32 vni; @@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (!data[IFLA_VXLAN_ID]) return -EINVAL; - vxlan->net = dev_net(dev); + vxlan->net = src_net; vni = nla_get_u32(data[IFLA_VXLAN_ID]); dst->remote_vni = vni; @@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (data[IFLA_VXLAN_LINK] && (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { struct net_device *lowerdev - = __dev_get_by_index(net, dst->remote_ifindex); + = __dev_get_by_index(src_net, dst->remote_ifindex); if (!lowerdev) { pr_info("ifindex %d does not exist\n", dst->remote_ifindex); @@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; - if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, + if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, vxlan->dst_port)) { pr_info("duplicate VNI %u\n", vni); return -EEXIST; diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 94e234975c61..a2fdd15f285a 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -25,7 +25,7 @@ if WAN # There is no way to detect a comtrol sv11 - force it modular for now. config HOSTESS_SV11 tristate "Comtrol Hostess SV-11 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC + depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS help Driver for Comtrol Hostess SV-11 network card which operates on low speed synchronous serial links at up to @@ -37,7 +37,7 @@ config HOSTESS_SV11 # The COSA/SRP driver has not been tested as non-modular yet. config COSA tristate "COSA/SRP sync serial boards support" - depends on ISA && m && ISA_DMA_API && HDLC + depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS ---help--- Driver for COSA and SRP synchronous serial boards. 
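The virtio_net hunks above restore UFO by mapping negotiated device feature bits onto NETIF_F_* offload flags. A reduced sketch of that gating, assuming the feature names used above and the 3.19-era NETIF_F_UFO flag (removed from later kernels):

#include <linux/netdevice.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

static void example_setup_offloads(struct virtio_device *vdev,
                                   struct net_device *dev)
{
        /* Advertise an offload only if the host negotiated it. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                dev->hw_features |= NETIF_F_TSO;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                dev->hw_features |= NETIF_F_UFO;
}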
@@ -87,7 +87,7 @@ config LANMEDIA # There is no way to detect a Sealevel board. Force it modular config SEALEVEL_4021 tristate "Sealevel Systems 4021 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC + depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS help This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9a72640237cb..62b0bf4fdf6b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) __ath_cancel_work(sc); + disable_irq(sc->irq); tasklet_disable(&sc->intr_tq); tasklet_disable(&sc->bcon_tasklet); spin_lock_bh(&sc->sc_pcu_lock); @@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) r = -EIO; out: + enable_irq(sc->irq); spin_unlock_bh(&sc->sc_pcu_lock); tasklet_enable(&sc->bcon_tasklet); tasklet_enable(&sc->intr_tq); @@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev) if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) return IRQ_NONE; - if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) - return IRQ_NONE; - /* shared irq, not for us */ if (!ath9k_hw_intrpend(ah)) return IRQ_NONE; @@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev) ath9k_debug_sync_cause(sc, sync_cause); status &= ah->imask; /* discard unasked-for bits */ - if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) + if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) return IRQ_HANDLED; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 1bbe4fc47b97..660ddb1b7d8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, * regardless of the band or the number of the probes. FW will calculate * the actual dwell time. + * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), @@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), + IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 201846de94e7..cfc0e65b34a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -653,8 +653,11 @@ enum iwl_scan_channel_flags { }; /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S - * @flags: enum iwl_scan_channel_flgs - * @non_ebs_ratio: how many regular scan iteration before EBS + * @flags: enum iwl_scan_channel_flags + * @non_ebs_ratio: defines the ratio of scan iterations in which EBS is + * involved. + * 1 - EBS is disabled. + * 2 - every second scan will be a full scan (and so on).
*/ struct iwl_scan_channel_opt { __le16 flags; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e880f9d4717b..20915587c820 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, msk |= mvmsta->tfd_queue_msk; } - if (drop) { - if (iwl_mvm_flush_tx_path(mvm, msk, true)) - IWL_ERR(mvm, "flush request fail\n"); - mutex_unlock(&mvm->mutex); - } else { - mutex_unlock(&mvm->mutex); + msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); - /* this can take a while, and we may need/want other operations - * to succeed while doing this, so do it without the mutex held - */ - iwl_trans_wait_tx_queue_empty(mvm->trans, msk); - } + if (iwl_mvm_flush_tx_path(mvm, msk, true)) + IWL_ERR(mvm, "flush request fail\n"); + mutex_unlock(&mvm->mutex); + + /* this can take a while, and we may need/want other operations + * to succeed while doing this, so do it without the mutex held + */ + iwl_trans_wait_tx_queue_empty(mvm->trans, msk); } const struct ieee80211_ops iwl_mvm_hw_ops = { diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec9a8e7bae1d..844bf7c4c8de 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -72,6 +72,8 @@ #define IWL_PLCP_QUIET_THRESH 1 #define IWL_ACTIVE_QUIET_TIME 10 +#define IWL_DENSE_EBS_SCAN_RATIO 5 +#define IWL_SPARSE_EBS_SCAN_RATIO 1 struct iwl_mvm_scan_params { u32 max_out_time; @@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, notify); + if (mvm->scan_status == IWL_MVM_SCAN_NONE) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) + goto out; + if (mvm->scan_status != IWL_MVM_SCAN_SCHED && (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || mvm->scan_status != IWL_MVM_SCAN_OS)) { @@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_OS) iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); +out: mvm->scan_status = IWL_MVM_SCAN_NONE; if (notify) { @@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); cmd->iter_num = cpu_to_le32(1); - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && - mvm->last_ebs_successful) { - cmd->channel_opt[0].flags = - cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); - cmd->channel_opt[1].flags = - cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); - } - if (iwl_mvm_rrm_scan_needed(mvm)) cmd->scan_flags |= cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); @@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, cmd->schedule[1].iterations = 0; cmd->schedule[1].full_scan_mul = 0; + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS && + mvm->last_ebs_successful) { + cmd->channel_opt[0].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[0].non_ebs_ratio = + cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); + cmd->channel_opt[1].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[1].non_ebs_ratio = + 
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); + } + for (i = 1; i <= req->req.n_ssids; i++) ssid_bitmap |= BIT(i); @@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, cmd->schedule[1].iterations = 0xff; cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && + mvm->last_ebs_successful) { + cmd->channel_opt[0].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[0].non_ebs_ratio = + cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); + cmd->channel_opt[1].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[1].non_ebs_ratio = + cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); + } + iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, ssid_bitmap, cmd); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4333306ccdee..c59d07567d90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, if (ieee80211_is_probe_resp(fc)) tx_flags |= TX_CMD_FLG_TSF; - else if (ieee80211_is_back_req(fc)) - tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG; @@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + } else if (ieee80211_is_back_req(fc)) { + struct ieee80211_bar *bar = (void *)skb->data; + u16 control = le16_to_cpu(bar->control); + + tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; + tx_cmd->tid_tspec = (control & + IEEE80211_BAR_CTRL_TID_INFO_MASK) >> + IEEE80211_BAR_CTRL_TID_INFO_SHIFT; + WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); } else { tx_cmd->tid_tspec = IWL_TID_NON_QOS; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 9259a732e8a4..037f74f0fcf6 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, goto err_rx_unbind; } queue->task = task; + get_task_struct(task); task = kthread_create(xenvif_dealloc_kthread, (void *)queue, "%s-dealloc", queue->name); @@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif) if (queue->task) { kthread_stop(queue->task); + put_task_struct(queue->task); queue->task = NULL; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 908e65e9b821..c8ce701a7efb 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data) */ if (unlikely(vif->disabled && queue->id == 0)) { xenvif_carrier_off(vif); - xenvif_rx_queue_purge(queue); - continue; + break; } if (!skb_queue_empty(&queue->rx_queue)) diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index df781cdf13c1..17ca98657a28 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, struct msi_msg msg; struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); + if (desc->msi_attrib.is_msix) + 
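+ /* the DesignWare MSI controller does not implement MSI-X, so refuse it */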
return -EINVAL; + irq = assign_irq(1, desc, &pos); if (irq < 0) return irq; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e52356aa09b8..903d5078b5ed 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); +static void quirk_io(struct pci_dev *dev, int pos, unsigned size, + const char *name) +{ + u32 region; + struct pci_bus_region bus_region; + struct resource *res = dev->resource + pos; + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region); + + if (!region) + return; + + res->name = pci_name(dev); + res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; + res->flags |= + (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); + region &= ~(size - 1); + + /* Convert from PCI bus to resource space */ + bus_region.start = region; + bus_region.end = region + size - 1; + pcibios_bus_to_resource(dev->bus, res, &bus_region); + + dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", + name, PCI_BASE_ADDRESS_0 + (pos << 2), res); +} + /* * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS * ver. 1.33 20070103) don't set the correct ISA PCI region header info. * BAR0 should be 8 bytes; instead, it may be set to something like 8k * (which conflicts w/ BAR1's memory range). + * + * CS553x's ISA PCI BARs may also be read-only (ref: + * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). */ static void quirk_cs5536_vsa(struct pci_dev *dev) { + static char *name = "CS5536 ISA bridge"; + if (pci_resource_len(dev, 0) != 8) { - struct resource *res = &dev->resource[0]; - res->end = res->start + 8 - 1; - dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n"); + quirk_io(dev, 0, 8, name); /* SMB */ + quirk_io(dev, 1, 256, name); /* GPIO */ + quirk_io(dev, 2, 64, name); /* MFGPT */ + dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", + name); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index dfd021e8268f..f4cd0b9b2438 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -177,7 +177,7 @@ struct at91_pinctrl { struct device *dev; struct pinctrl_dev *pctl; - int nbanks; + int nactive_banks; uint32_t *mux_mask; int nmux; @@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name, int mux; /* check if it's a valid config */ - if (pin->bank >= info->nbanks) { dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", - name, index, pin->bank, info->nbanks); + name, index, pin->bank, gpio_banks); return -EINVAL; } + if (!gpio_chips[pin->bank]) { + dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n", + name, index, pin->bank); + return -ENXIO; + } + if (pin->pin >= MAX_NB_GPIO_PER_BANK) { dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", name, index, pin->pin, MAX_NB_GPIO_PER_BANK); @@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info, for_each_child_of_node(np, child) { if (of_device_is_compatible(child, gpio_compat)) { - info->nbanks++; + if (of_device_is_available(child)) + info->nactive_banks++; } else { info->nfunctions++; info->ngroups +=
of_get_child_count(child); @@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info, } size /= sizeof(*list); - if (!size || size % info->nbanks) { - dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); + if (!size || size % gpio_banks) { + dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks); return -EINVAL; } - info->nmux = size / info->nbanks; + info->nmux = size / gpio_banks; info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); if (!info->mux_mask) { @@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; at91_pinctrl_child_count(info, np); - if (info->nbanks < 1) { + if (gpio_banks < 1) { dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); return -EINVAL; } @@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, dev_dbg(&pdev->dev, "mux-mask\n"); tmp = info->mux_mask; - for (i = 0; i < info->nbanks; i++) { + for (i = 0; i < gpio_banks; i++) { for (j = 0; j < info->nmux; j++, tmp++) { dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); } @@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, if (!info->groups) return -ENOMEM; - dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); + dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks); dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); @@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev) { struct at91_pinctrl *info; struct pinctrl_pin_desc *pdesc; - int ret, i, j, k; + int ret, i, j, k, ngpio_chips_enabled = 0; info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev) * to obtain references to the struct gpio_chip * for them, and we * need this to proceed. 
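* (If some enabled banks have not probed yet, the code below bails out with -EPROBE_DEFER so the driver core retries this probe after more GPIO chips have registered.)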
*/ - for (i = 0; i < info->nbanks; i++) { - if (!gpio_chips[i]) { - dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); - devm_kfree(&pdev->dev, info); - return -EPROBE_DEFER; - } + for (i = 0; i < gpio_banks; i++) + if (gpio_chips[i]) + ngpio_chips_enabled++; + + if (ngpio_chips_enabled < info->nactive_banks) { + dev_warn(&pdev->dev, + "All GPIO chips are not registered yet (%d/%d)\n", + ngpio_chips_enabled, info->nactive_banks); + devm_kfree(&pdev->dev, info); + return -EPROBE_DEFER; } at91_pinctrl_desc.name = dev_name(&pdev->dev); - at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; + at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK; at91_pinctrl_desc.pins = pdesc = devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); if (!at91_pinctrl_desc.pins) return -ENOMEM; - for (i = 0 , k = 0; i < info->nbanks; i++) { + for (i = 0, k = 0; i < gpio_banks; i++) { for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { pdesc->number = k; pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); @@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev) } /* We will handle a range of GPIO pins */ - for (i = 0; i < info->nbanks; i++) - pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); + for (i = 0; i < gpio_banks; i++) + if (gpio_chips[i]) + pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); @@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) static int at91_gpio_of_irq_setup(struct platform_device *pdev, struct at91_gpio_chip *at91_gpio) { + struct gpio_chip *gpiochip_prev = NULL; struct at91_gpio_chip *prev = NULL; struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); - int ret; + int ret, i; at91_gpio->pioc_hwirq = irqd_to_hwirq(d); @@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, return ret; } - /* Setup chained handler */ - if (at91_gpio->pioc_idx) - prev = gpio_chips[at91_gpio->pioc_idx - 1]; - /* The top level handler handles one bank of GPIOs, except * on some SoC it can handle up to three... * We only set up the handler for the first of the list. 
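* (The first bank to register on a shared parent IRQ installs the chained handler; a later bank that finds handler data already present is appended to the first bank's ->next chain instead.)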
*/ - if (prev && prev->next == at91_gpio) + gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq); + if (!gpiochip_prev) { + /* Then register the chain on the parent IRQ */ + gpiochip_set_chained_irqchip(&at91_gpio->chip, + &gpio_irqchip, + at91_gpio->pioc_virq, + gpio_irq_handler); return 0; + } - /* Then register the chain on the parent IRQ */ - gpiochip_set_chained_irqchip(&at91_gpio->chip, - &gpio_irqchip, - at91_gpio->pioc_virq, - gpio_irq_handler); + prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip); - return 0; + /* we can only have 2 banks before this one */ + for (i = 0; i < 2; i++) { + if (prev->next) { + prev = prev->next; + } else { + prev->next = at91_gpio; + return 0; + } + } + + return -EINVAL; } /* This structure is replicated for each GPIO block allocated at probe time */ @@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = { .ngpio = MAX_NB_GPIO_PER_BANK, }; -static void at91_gpio_probe_fixup(void) -{ - unsigned i; - struct at91_gpio_chip *at91_gpio, *last = NULL; - - for (i = 0; i < gpio_banks; i++) { - at91_gpio = gpio_chips[i]; - - /* - * GPIO controller are grouped on some SoC: - * PIOC, PIOD and PIOE can share the same IRQ line - */ - if (last && last->pioc_virq == at91_gpio->pioc_virq) - last->next = at91_gpio; - last = at91_gpio; - } -} - static struct of_device_id at91_gpio_of_match[] = { { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, @@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev) gpio_chips[alias_idx] = at91_chip; gpio_banks = max(gpio_banks, alias_idx + 1); - at91_gpio_probe_fixup(); - ret = at91_gpio_of_irq_setup(pdev, at91_chip); if (ret) goto irq_setup_err; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index c3a60b57a865..a6f116aa5235 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -414,6 +414,14 @@ config REGULATOR_MAX77802 Exynos5420/Exynos5800 SoCs to control various voltages. It includes support for control of voltage and ramp speed. +config REGULATOR_MAX77843 + tristate "Maxim 77843 regulator" + depends on MFD_MAX77843 + help + This driver controls a Maxim 77843 regulator. + The regulator includes two 'SAFEOUT' LDOs for USB (Universal Serial Bus). + It is suitable for Exynos5433 SoC chips. + config REGULATOR_MC13XXX_CORE tristate @@ -433,6 +441,15 @@ config REGULATOR_MC13892 Say y here to support the regulators found on the Freescale MC13892 PMIC. +config REGULATOR_MT6397 + tristate "MediaTek MT6397 PMIC" + depends on MFD_MT6397 + help + Say y here to enable support for the power regulators of the + MediaTek MT6397 PMIC. + This driver supports control of the device's different power rails + through the regulator interface.
+ config REGULATOR_PALMAS tristate "TI Palmas PMIC Regulators" depends on MFD_PALMAS diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 1f28ebfc6f3a..2c4da15e1545 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o +obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o +obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index f23d7e1f2ee7..e4331f5e5d7d 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -32,11 +32,13 @@ #define AXP20X_FREQ_DCDC_MASK 0x0f -#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ - _emask, _enable_val, _disable_val) \ +#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \ + _ereg, _emask, _enable_val, _disable_val) \ [AXP20X_##_id] = { \ .name = #_id, \ .supply_name = (_supply), \ + .of_match = of_match_ptr(_match), \ + .regulators_node = of_match_ptr("regulators"), \ .type = REGULATOR_VOLTAGE, \ .id = AXP20X_##_id, \ .n_voltages = (((_max) - (_min)) / (_step) + 1), \ @@ -52,11 +54,13 @@ .ops = &axp20x_ops, \ } -#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ - _emask) \ +#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \ + _ereg, _emask) \ [AXP20X_##_id] = { \ .name = #_id, \ .supply_name = (_supply), \ + .of_match = of_match_ptr(_match), \ + .regulators_node = of_match_ptr("regulators"), \ .type = REGULATOR_VOLTAGE, \ .id = AXP20X_##_id, \ .n_voltages = (((_max) - (_min)) / (_step) + 1), \ @@ -70,10 +74,12 @@ .ops = &axp20x_ops, \ } -#define AXP20X_DESC_FIXED(_id, _supply, _volt) \ +#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt) \ [AXP20X_##_id] = { \ .name = #_id, \ .supply_name = (_supply), \ + .of_match = of_match_ptr(_match), \ + .regulators_node = of_match_ptr("regulators"), \ .type = REGULATOR_VOLTAGE, \ .id = AXP20X_##_id, \ .n_voltages = 1, \ @@ -82,10 +88,13 @@ .ops = &axp20x_ops_fixed \ } -#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask) \ +#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg, \ + _emask) \ [AXP20X_##_id] = { \ .name = #_id, \ .supply_name = (_supply), \ + .of_match = of_match_ptr(_match), \ + .regulators_node = of_match_ptr("regulators"), \ .type = REGULATOR_VOLTAGE, \ .id = AXP20X_##_id, \ .n_voltages = ARRAY_SIZE(_table), \ @@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = { }; static const struct regulator_desc axp20x_regulators[] = { - AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f, - AXP20X_PWR_OUT_CTRL, 0x10), - AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f, - AXP20X_PWR_OUT_CTRL, 0x02), - AXP20X_DESC_FIXED(LDO1, "acin", 1300), - AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0, - AXP20X_PWR_OUT_CTRL, 0x04), - AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f, - 
AXP20X_PWR_OUT_CTRL, 0x40), - AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f, - AXP20X_PWR_OUT_CTRL, 0x08), - AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0, - AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED, - AXP20X_IO_DISABLED), -}; - -#define AXP_MATCH(_name, _id) \ - [AXP20X_##_id] = { \ - .name = #_name, \ - .driver_data = (void *) &axp20x_regulators[AXP20X_##_id], \ - } - -static struct of_regulator_match axp20x_matches[] = { - AXP_MATCH(dcdc2, DCDC2), - AXP_MATCH(dcdc3, DCDC3), - AXP_MATCH(ldo1, LDO1), - AXP_MATCH(ldo2, LDO2), - AXP_MATCH(ldo3, LDO3), - AXP_MATCH(ldo4, LDO4), - AXP_MATCH(ldo5, LDO5), + AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, + 0x3f, AXP20X_PWR_OUT_CTRL, 0x10), + AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, + 0x7f, AXP20X_PWR_OUT_CTRL, 0x02), + AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300), + AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100, + AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04), + AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, + 0x7f, AXP20X_PWR_OUT_CTRL, 0x40), + AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data, + AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08), + AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100, + AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07, + AXP20X_IO_ENABLED, AXP20X_IO_DISABLED), }; static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) @@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev) if (!regulators) { dev_warn(&pdev->dev, "regulators node not found\n"); } else { - ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches, - ARRAY_SIZE(axp20x_matches)); - if (ret < 0) { - dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); - return ret; - } - dcdcfreq = 1500; of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq); ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); @@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev) { struct regulator_dev *rdev; struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); - struct regulator_config config = { }; - struct regulator_init_data *init_data; + struct regulator_config config = { + .dev = pdev->dev.parent, + .regmap = axp20x->regmap, + }; int ret, i; u32 workmode; - ret = axp20x_regulator_parse_dt(pdev); - if (ret) - return ret; + /* This only sets the dcdc freq. 
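+ * (Per-regulator init data is now parsed by the core via the of_match and regulators_node fields added to the descriptors above, so the x-powers,dcdc-freq property is all that is left here.)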
Ignore any errors */ + axp20x_regulator_parse_dt(pdev); for (i = 0; i < AXP20X_REG_ID_MAX; i++) { - init_data = axp20x_matches[i].init_data; - - config.dev = pdev->dev.parent; - config.init_data = init_data; - config.regmap = axp20x->regmap; - config.of_node = axp20x_matches[i].of_node; - rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i], &config); if (IS_ERR(rdev)) { @@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev) return PTR_ERR(rdev); } - ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode", + ret = of_property_read_u32(rdev->dev.of_node, + "x-powers,dcdc-workmode", &workmode); if (!ret) { if (axp20x_set_dcdc_workmode(rdev, i, workmode)) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 9c48fb32f660..b899947d839d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev, static DEVICE_ATTR(bypass, 0444, regulator_bypass_show, NULL); -/* - * These are the only attributes are present for all regulators. - * Other attributes are a function of regulator functionality. - */ -static struct attribute *regulator_dev_attrs[] = { - &dev_attr_name.attr, - &dev_attr_num_users.attr, - &dev_attr_type.attr, - NULL, -}; -ATTRIBUTE_GROUPS(regulator_dev); - -static void regulator_dev_release(struct device *dev) -{ - struct regulator_dev *rdev = dev_get_drvdata(dev); - kfree(rdev); -} - -static struct class regulator_class = { - .name = "regulator", - .dev_release = regulator_dev_release, - .dev_groups = regulator_dev_groups, -}; - /* Calculate the new optimum regulator operating mode based on the new total * consumer load. All locks held by caller */ -static void drms_uA_update(struct regulator_dev *rdev) +static int drms_uA_update(struct regulator_dev *rdev) { struct regulator *sibling; int current_uA = 0, output_uV, input_uV, err; unsigned int mode; + /* + * first check to see if we can set modes at all, otherwise just + * tell the consumer everything is OK. 
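+ * (A negative return from regulator_check_drms() normally just means the constraints do not permit drms operations, so it is reported back as success rather than as an error.)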
+ */ err = regulator_check_drms(rdev); - if (err < 0 || !rdev->desc->ops->get_optimum_mode || - (!rdev->desc->ops->get_voltage && - !rdev->desc->ops->get_voltage_sel) || - !rdev->desc->ops->set_mode) - return; + if (err < 0) + return 0; + + if (!rdev->desc->ops->get_optimum_mode) + return 0; + + if (!rdev->desc->ops->set_mode) + return -EINVAL; /* get output voltage */ output_uV = _regulator_get_voltage(rdev); - if (output_uV <= 0) - return; + if (output_uV <= 0) { + rdev_err(rdev, "invalid output voltage found\n"); + return -EINVAL; + } /* get input voltage */ input_uV = 0; @@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev) input_uV = regulator_get_voltage(rdev->supply); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; - if (input_uV <= 0) - return; + if (input_uV <= 0) { + rdev_err(rdev, "invalid input voltage found\n"); + return -EINVAL; + } /* calc total requested load */ list_for_each_entry(sibling, &rdev->consumer_list, list) @@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev) /* check the new mode is allowed */ err = regulator_mode_constrain(rdev, &mode); - if (err == 0) - rdev->desc->ops->set_mode(rdev, mode); + if (err < 0) { + rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", + current_uA, input_uV, output_uV); + return err; + } + + err = rdev->desc->ops->set_mode(rdev, mode); + if (err < 0) + rdev_err(rdev, "failed to set optimum mode %x\n", mode); + + return err; } static int suspend_set_state(struct regulator_dev *rdev, @@ -3026,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode); int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) { struct regulator_dev *rdev = regulator->rdev; - struct regulator *consumer; - int ret, output_uV, input_uV = 0, total_uA_load = 0; - unsigned int mode; - - if (rdev->supply) - input_uV = regulator_get_voltage(rdev->supply); + int ret; mutex_lock(&rdev->mutex); - - /* - * first check to see if we can set modes at all, otherwise just - * tell the consumer everything is OK. - */ regulator->uA_load = uA_load; - ret = regulator_check_drms(rdev); - if (ret < 0) { - ret = 0; - goto out; - } - - if (!rdev->desc->ops->get_optimum_mode) - goto out; - - /* - * we can actually do this so any errors are indicators of - * potential real failure. - */ - ret = -EINVAL; - - if (!rdev->desc->ops->set_mode) - goto out; - - /* get output voltage */ - output_uV = _regulator_get_voltage(rdev); - if (output_uV <= 0) { - rdev_err(rdev, "invalid output voltage found\n"); - goto out; - } - - /* No supply? 
Use constraint voltage */ - if (input_uV <= 0) - input_uV = rdev->constraints->input_uV; - if (input_uV <= 0) { - rdev_err(rdev, "invalid input voltage found\n"); - goto out; - } - - /* calc total requested load for this regulator */ - list_for_each_entry(consumer, &rdev->consumer_list, list) - total_uA_load += consumer->uA_load; - - mode = rdev->desc->ops->get_optimum_mode(rdev, - input_uV, output_uV, - total_uA_load); - ret = regulator_mode_constrain(rdev, &mode); - if (ret < 0) { - rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", - total_uA_load, input_uV, output_uV); - goto out; - } - - ret = rdev->desc->ops->set_mode(rdev, mode); - if (ret < 0) { - rdev_err(rdev, "failed to set optimum mode %x\n", mode); - goto out; - } - ret = mode; -out: + ret = drms_uA_update(rdev); mutex_unlock(&rdev->mutex); + return ret; } EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); @@ -3436,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode) } EXPORT_SYMBOL_GPL(regulator_mode_to_status); +static struct attribute *regulator_dev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_num_users.attr, + &dev_attr_type.attr, + &dev_attr_microvolts.attr, + &dev_attr_microamps.attr, + &dev_attr_opmode.attr, + &dev_attr_state.attr, + &dev_attr_status.attr, + &dev_attr_bypass.attr, + &dev_attr_requested_microamps.attr, + &dev_attr_min_microvolts.attr, + &dev_attr_max_microvolts.attr, + &dev_attr_min_microamps.attr, + &dev_attr_max_microamps.attr, + &dev_attr_suspend_standby_state.attr, + &dev_attr_suspend_mem_state.attr, + &dev_attr_suspend_disk_state.attr, + &dev_attr_suspend_standby_microvolts.attr, + &dev_attr_suspend_mem_microvolts.attr, + &dev_attr_suspend_disk_microvolts.attr, + &dev_attr_suspend_standby_mode.attr, + &dev_attr_suspend_mem_mode.attr, + &dev_attr_suspend_disk_mode.attr, + NULL +}; + /* * To avoid cluttering sysfs (and memory) with useless state, only * create attributes that can be meaningfully displayed. 
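* (The conversion below keeps that policy but moves it out of device_create_file() calls: a single static attribute group is registered with the class, and regulator_attr_is_visible() returns each attribute's mode, or 0 to hide it, when the device is added.)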
*/ -static int add_regulator_attributes(struct regulator_dev *rdev) +static umode_t regulator_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) { - struct device *dev = &rdev->dev; + struct device *dev = kobj_to_dev(kobj); + struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev); const struct regulator_ops *ops = rdev->desc->ops; - int status = 0; + umode_t mode = attr->mode; + + /* these three are always present */ + if (attr == &dev_attr_name.attr || + attr == &dev_attr_num_users.attr || + attr == &dev_attr_type.attr) + return mode; /* some attributes need specific methods to be displayed */ - if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || - (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || - (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || - (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) { - status = device_create_file(dev, &dev_attr_microvolts); - if (status < 0) - return status; - } - if (ops->get_current_limit) { - status = device_create_file(dev, &dev_attr_microamps); - if (status < 0) - return status; - } - if (ops->get_mode) { - status = device_create_file(dev, &dev_attr_opmode); - if (status < 0) - return status; - } - if (rdev->ena_pin || ops->is_enabled) { - status = device_create_file(dev, &dev_attr_state); - if (status < 0) - return status; - } - if (ops->get_status) { - status = device_create_file(dev, &dev_attr_status); - if (status < 0) - return status; - } - if (ops->get_bypass) { - status = device_create_file(dev, &dev_attr_bypass); - if (status < 0) - return status; + if (attr == &dev_attr_microvolts.attr) { + if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || + (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || + (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || + (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1)) + return mode; + return 0; } + if (attr == &dev_attr_microamps.attr) + return ops->get_current_limit ? mode : 0; + + if (attr == &dev_attr_opmode.attr) + return ops->get_mode ? mode : 0; + + if (attr == &dev_attr_state.attr) + return (rdev->ena_pin || ops->is_enabled) ? mode : 0; + + if (attr == &dev_attr_status.attr) + return ops->get_status ? mode : 0; + + if (attr == &dev_attr_bypass.attr) + return ops->get_bypass ? mode : 0; + /* some attributes are type-specific */ - if (rdev->desc->type == REGULATOR_CURRENT) { - status = device_create_file(dev, &dev_attr_requested_microamps); - if (status < 0) - return status; - } + if (attr == &dev_attr_requested_microamps.attr) + return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; /* all the other attributes exist to support constraints; * don't show them if there are no constraints, or if the * relevant supporting methods are missing. 
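* (With no constraints at all, every one of the remaining attributes is hidden by the early return 0 below.)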
*/ if (!rdev->constraints) - return status; + return 0; /* constraints need specific supporting methods */ - if (ops->set_voltage || ops->set_voltage_sel) { - status = device_create_file(dev, &dev_attr_min_microvolts); - if (status < 0) - return status; - status = device_create_file(dev, &dev_attr_max_microvolts); - if (status < 0) - return status; - } - if (ops->set_current_limit) { - status = device_create_file(dev, &dev_attr_min_microamps); - if (status < 0) - return status; - status = device_create_file(dev, &dev_attr_max_microamps); - if (status < 0) - return status; - } - - status = device_create_file(dev, &dev_attr_suspend_standby_state); - if (status < 0) - return status; - status = device_create_file(dev, &dev_attr_suspend_mem_state); - if (status < 0) - return status; - status = device_create_file(dev, &dev_attr_suspend_disk_state); - if (status < 0) - return status; + if (attr == &dev_attr_min_microvolts.attr || + attr == &dev_attr_max_microvolts.attr) + return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0; + + if (attr == &dev_attr_min_microamps.attr || + attr == &dev_attr_max_microamps.attr) + return ops->set_current_limit ? mode : 0; - if (ops->set_suspend_voltage) { - status = device_create_file(dev, - &dev_attr_suspend_standby_microvolts); - if (status < 0) - return status; - status = device_create_file(dev, - &dev_attr_suspend_mem_microvolts); - if (status < 0) - return status; - status = device_create_file(dev, - &dev_attr_suspend_disk_microvolts); - if (status < 0) - return status; - } - - if (ops->set_suspend_mode) { - status = device_create_file(dev, - &dev_attr_suspend_standby_mode); - if (status < 0) - return status; - status = device_create_file(dev, - &dev_attr_suspend_mem_mode); - if (status < 0) - return status; - status = device_create_file(dev, - &dev_attr_suspend_disk_mode); - if (status < 0) - return status; - } - - return status; + if (attr == &dev_attr_suspend_standby_state.attr || + attr == &dev_attr_suspend_mem_state.attr || + attr == &dev_attr_suspend_disk_state.attr) + return mode; + + if (attr == &dev_attr_suspend_standby_microvolts.attr || + attr == &dev_attr_suspend_mem_microvolts.attr || + attr == &dev_attr_suspend_disk_microvolts.attr) + return ops->set_suspend_voltage ? mode : 0; + + if (attr == &dev_attr_suspend_standby_mode.attr || + attr == &dev_attr_suspend_mem_mode.attr || + attr == &dev_attr_suspend_disk_mode.attr) + return ops->set_suspend_mode ? mode : 0; + + return mode; } +static const struct attribute_group regulator_dev_group = { + .attrs = regulator_dev_attrs, + .is_visible = regulator_attr_is_visible, +}; + +static const struct attribute_group *regulator_dev_groups[] = { + &regulator_dev_group, + NULL +}; + +static void regulator_dev_release(struct device *dev) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + kfree(rdev); +} + +static struct class regulator_class = { + .name = "regulator", + .dev_release = regulator_dev_release, + .dev_groups = regulator_dev_groups, +}; + static void rdev_init_debugfs(struct regulator_dev *rdev) { rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); @@ -3575,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) /** * regulator_register - register regulator * @regulator_desc: regulator to register - * @config: runtime configuration for regulator + * @cfg: runtime configuration for regulator * * Called by regulator drivers to register a regulator.
* Returns a valid pointer to struct regulator_dev on success @@ -3583,20 +3527,21 @@ */ struct regulator_dev * regulator_register(const struct regulator_desc *regulator_desc, - const struct regulator_config *config) + const struct regulator_config *cfg) { const struct regulation_constraints *constraints = NULL; const struct regulator_init_data *init_data; - static atomic_t regulator_no = ATOMIC_INIT(0); + struct regulator_config *config = NULL; + static atomic_t regulator_no = ATOMIC_INIT(-1); struct regulator_dev *rdev; struct device *dev; int ret, i; const char *supply = NULL; - if (regulator_desc == NULL || config == NULL) + if (regulator_desc == NULL || cfg == NULL) return ERR_PTR(-EINVAL); - dev = config->dev; + dev = cfg->dev; WARN_ON(!dev); if (regulator_desc->name == NULL || regulator_desc->ops == NULL) @@ -3626,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc, if (rdev == NULL) return ERR_PTR(-ENOMEM); - init_data = regulator_of_get_init_data(dev, regulator_desc, + /* + * Duplicate the config so the driver could override it after + * parsing init data. + */ + config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); + if (config == NULL) { + kfree(rdev); + return ERR_PTR(-ENOMEM); + } + + init_data = regulator_of_get_init_data(dev, regulator_desc, config, + &rdev->dev.of_node); if (!init_data) { init_data = config->init_data; @@ -3660,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc, /* register with sysfs */ rdev->dev.class = &regulator_class; rdev->dev.parent = dev; - dev_set_name(&rdev->dev, "regulator.%d", - atomic_inc_return(&regulator_no) - 1); + dev_set_name(&rdev->dev, "regulator.%lu", + (unsigned long) atomic_inc_return(&regulator_no)); ret = device_register(&rdev->dev); if (ret != 0) { put_device(&rdev->dev); @@ -3694,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc, if (ret < 0) goto scrub; - /* add attributes supported by this regulator */ - ret = add_regulator_attributes(rdev); - if (ret < 0) - goto scrub; - if (init_data && init_data->supply_regulator) supply = init_data->supply_regulator; else if (regulator_desc->supply_name) @@ -3754,6 +3704,7 @@ add_dev: rdev_init_debugfs(rdev); out: mutex_unlock(&regulator_list_mutex); + kfree(config); return rdev; unset_supplies: diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index c78d2106d6cb..01343419555e 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -24,6 +24,7 @@ #include <linux/regmap.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/of_gpio.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/da9211.h> #include "da9211-regulator.h" @@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt( continue; pdata->init_data[n] = da9211_matches[i].init_data; - + pdata->reg_node[n] = da9211_matches[i].of_node; + pdata->gpio_ren[n] = + of_get_named_gpio(da9211_matches[i].of_node, + "enable-gpios", 0); n++; } @@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip) config.dev = chip->dev; config.driver_data = chip; config.regmap = chip->regmap; - config.of_node = chip->dev->of_node; + config.of_node = chip->pdata->reg_node[i]; + + if (gpio_is_valid(chip->pdata->gpio_ren[i])) { + config.ena_gpio = chip->pdata->gpio_ren[i]; + config.ena_gpio_initialized = true; + } else { + config.ena_gpio = -EINVAL; + config.ena_gpio_initialized = false; + }
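+ /* ena_gpio_initialized marks ena_gpio as valid even when it is 0; the core then claims the GPIO and drives it on enable/disable */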
chip->rdev[i] = devm_regulator_register(chip->dev, &da9211_regulators[i], &config); diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index 6c43ab2d5121..3c25db89a021 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_NORMAL; } -static int slew_rates[] = { +static const int slew_rates[] = { 64000, 32000, 16000, @@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, return PTR_ERR_OR_ZERO(di->rdev); } -static struct regmap_config fan53555_regmap_config = { +static const struct regmap_config fan53555_regmap_config = { .reg_bits = 8, .val_bits = 8, }; diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h index 80ba2a35a04b..c74ac8734023 100644 --- a/drivers/regulator/internal.h +++ b/drivers/regulator/internal.h @@ -38,11 +38,13 @@ struct regulator { #ifdef CONFIG_OF struct regulator_init_data *regulator_of_get_init_data(struct device *dev, const struct regulator_desc *desc, + struct regulator_config *config, struct device_node **node); #else static inline struct regulator_init_data * regulator_of_get_init_data(struct device *dev, const struct regulator_desc *desc, + struct regulator_config *config, struct device_node **node) { return NULL; diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c index 92fefd98da58..6e3a15fe00f1 100644 --- a/drivers/regulator/isl9305.c +++ b/drivers/regulator/isl9305.c @@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c, #ifdef CONFIG_OF static const struct of_device_id isl9305_dt_ids[] = { - { .compatible = "isl,isl9305" }, - { .compatible = "isl,isl9305h" }, + { .compatible = "isl,isl9305" }, /* for backward compat., don't use */ + { .compatible = "isil,isl9305" }, + { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */ + { .compatible = "isil,isl9305h" }, {}, }; #endif diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c index 021d64d856bb..3de328ab41f3 100644 --- a/drivers/regulator/lp872x.c +++ b/drivers/regulator/lp872x.c @@ -106,7 +106,6 @@ struct lp872x { struct device *dev; enum lp872x_id chipid; struct lp872x_platform_data *pdata; - struct regulator_dev **regulators; int num_regulators; enum lp872x_dvs_state dvs_pin; int dvs_gpio; @@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp) dev_err(lp->dev, "regulator register err"); return PTR_ERR(rdev); } - - *(lp->regulators + i) = rdev; } return 0; @@ -906,7 +903,7 @@ static struct lp872x_platform_data static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) { struct lp872x *lp; - int ret, size, num_regulators; + int ret; const int lp872x_num_regulators[] = { [LP8720] = LP8720_NUM_REGULATORS, [LP8725] = LP8725_NUM_REGULATORS, @@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); if (!lp) - goto err_mem; - - num_regulators = lp872x_num_regulators[id->driver_data]; - size = sizeof(struct regulator_dev *) * num_regulators; + return -ENOMEM; - lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL); - if (!lp->regulators) - goto err_mem; + lp->num_regulators = lp872x_num_regulators[id->driver_data]; lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); if (IS_ERR(lp->regmap)) { ret = PTR_ERR(lp->regmap); dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); - goto 
err_dev; + return ret; } lp->dev = &cl->dev; lp->pdata = dev_get_platdata(&cl->dev); lp->chipid = id->driver_data; - lp->num_regulators = num_regulators; i2c_set_clientdata(cl, lp); ret = lp872x_config(lp); if (ret) - goto err_dev; + return ret; return lp872x_regulator_register(lp); - -err_mem: - return -ENOMEM; -err_dev: - return ret; } static const struct of_device_id lp872x_dt_ids[] = { diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index bf9a44c5fdd2..b3678d289619 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c @@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = { static const struct regulator_desc max14577_supported_regulators[] = { [MAX14577_SAFEOUT] = { .name = "SAFEOUT", + .of_match = of_match_ptr("SAFEOUT"), + .regulators_node = of_match_ptr("regulators"), .id = MAX14577_SAFEOUT, .ops = &max14577_safeout_ops, .type = REGULATOR_VOLTAGE, @@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = { }, [MAX14577_CHARGER] = { .name = "CHARGER", + .of_match = of_match_ptr("CHARGER"), + .regulators_node = of_match_ptr("regulators"), .id = MAX14577_CHARGER, .ops = &max14577_charger_ops, .type = REGULATOR_CURRENT, @@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = { static const struct regulator_desc max77836_supported_regulators[] = { [MAX14577_SAFEOUT] = { .name = "SAFEOUT", + .of_match = of_match_ptr("SAFEOUT"), + .regulators_node = of_match_ptr("regulators"), .id = MAX14577_SAFEOUT, .ops = &max14577_safeout_ops, .type = REGULATOR_VOLTAGE, @@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = { }, [MAX14577_CHARGER] = { .name = "CHARGER", + .of_match = of_match_ptr("CHARGER"), + .regulators_node = of_match_ptr("regulators"), .id = MAX14577_CHARGER, .ops = &max14577_charger_ops, .type = REGULATOR_CURRENT, @@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = { }, [MAX77836_LDO1] = { .name = "LDO1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), .id = MAX77836_LDO1, .ops = &max77836_ldo_ops, .type = REGULATOR_VOLTAGE, @@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = { }, [MAX77836_LDO2] = { .name = "LDO2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), .id = MAX77836_LDO2, .ops = &max77836_ldo_ops, .type = REGULATOR_VOLTAGE, @@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = { { .name = "LDO2", }, }; -static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, - enum maxim_device_type dev_type) -{ - int ret; - struct device_node *np; - struct of_regulator_match *regulator_matches; - unsigned int regulator_matches_size; - - np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); - if (!np) { - dev_err(&pdev->dev, "Failed to get child OF node for regulators\n"); - return -EINVAL; - } - - switch (dev_type) { - case MAXIM_DEVICE_TYPE_MAX77836: - regulator_matches = max77836_regulator_matches; - regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches); - break; - case MAXIM_DEVICE_TYPE_MAX14577: - default: - regulator_matches = max14577_regulator_matches; - regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches); - } - - ret = of_regulator_match(&pdev->dev, np, regulator_matches, - regulator_matches_size); - if (ret < 0) - dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); - else - ret = 0; - - 
of_node_put(np); - - return ret; -} - static inline struct regulator_init_data *match_init_data(int index, enum maxim_device_type dev_type) { @@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index, } } #else /* CONFIG_OF */ -static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, - enum maxim_device_type dev_type) -{ - return 0; -} static inline struct regulator_init_data *match_init_data(int index, enum maxim_device_type dev_type) { @@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev) { struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); - int i, ret; + int i, ret = 0; struct regulator_config config = {}; const struct regulator_desc *supported_regulators; unsigned int supported_regulators_size; enum maxim_device_type dev_type = max14577->dev_type; - ret = max14577_regulator_dt_parse_pdata(pdev, dev_type); - if (ret) - return ret; - switch (dev_type) { case MAXIM_DEVICE_TYPE_MAX77836: supported_regulators = max77836_supported_regulators; @@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev) supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); } - config.dev = &pdev->dev; + config.dev = max14577->dev; config.driver_data = max14577; for (i = 0; i < supported_regulators_size; i++) { diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index 10d206266ac2..15fb1416bfbd 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c @@ -26,6 +26,7 @@ #include <linux/bug.h> #include <linux/err.h> #include <linux/gpio.h> +#include <linux/of_gpio.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> @@ -46,6 +47,11 @@ #define MAX77686_DVS_UVSTEP 12500 /* + * Value for configuring buck[89] and LDO{20,21,22} as GPIO control. + * It is the same as 'off' for other regulators. + */ +#define MAX77686_GPIO_CONTROL 0x0 +/* * Values used for configuring LDOs and bucks. * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26 */ @@ -82,6 +88,8 @@ enum max77686_ramp_rate { }; struct max77686_data { + u64 gpio_enabled:MAX77686_REGULATORS; + /* Array indexed by regulator id */ unsigned int opmode[MAX77686_REGULATORS]; }; @@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id) } } +/* + * When regulator is configured for GPIO control then it + * replaces "normal" mode. Any change from low power mode to normal + * should actually change to GPIO control. + * Map normal mode to proper value for such regulators. + */ +static unsigned int max77686_map_normal_mode(struct max77686_data *max77686, + int id) +{ + switch (id) { + case MAX77686_BUCK8: + case MAX77686_BUCK9: + case MAX77686_LDO20 ... 
MAX77686_LDO22: + if (max77686->gpio_enabled & (1 << id)) + return MAX77686_GPIO_CONTROL; + } + + return MAX77686_NORMAL; +} + /* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */ static int max77686_set_suspend_disable(struct regulator_dev *rdev) { @@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev, val = MAX77686_LDO_LOWPOWER_PWRREQ; break; case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ - val = MAX77686_NORMAL; + val = max77686_map_normal_mode(max77686, id); break; default: pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", @@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev, { unsigned int val; struct max77686_data *max77686 = rdev_get_drvdata(rdev); - int ret; + int ret, id = rdev_get_id(rdev); switch (mode) { case REGULATOR_MODE_STANDBY: /* switch off */ @@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev, val = MAX77686_LDO_LOWPOWER_PWRREQ; break; case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ - val = MAX77686_NORMAL; + val = max77686_map_normal_mode(max77686, id); break; default: pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", @@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev, if (ret) return ret; - max77686->opmode[rdev_get_id(rdev)] = val; + max77686->opmode[id] = val; return 0; } @@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev) shift = max77686_get_opmode_shift(id); if (max77686->opmode[id] == MAX77686_OFF_PWRREQ) - max77686->opmode[id] = MAX77686_NORMAL; + max77686->opmode[id] = max77686_map_normal_mode(max77686, id); return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, @@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) MAX77686_RAMP_RATE_MASK, ramp_value << 6); } +static int max77686_of_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + struct max77686_data *max77686 = config->driver_data; + + switch (desc->id) { + case MAX77686_BUCK8: + case MAX77686_BUCK9: + case MAX77686_LDO20 ... 
MAX77686_LDO22: + config->ena_gpio = of_get_named_gpio(np, + "maxim,ena-gpios", 0); + config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH; + config->ena_gpio_initialized = true; + break; + default: + return 0; + } + + if (gpio_is_valid(config->ena_gpio)) { + max77686->gpio_enabled |= (1 << desc->id); + + return regmap_update_bits(config->regmap, desc->enable_reg, + desc->enable_mask, + MAX77686_GPIO_CONTROL); + } + + return 0; +} + static struct regulator_ops max77686_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, @@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = { .name = "LDO"#num, \ .of_match = of_match_ptr("LDO"#num), \ .regulators_node = of_match_ptr("voltage-regulators"), \ + .of_parse_cb = max77686_of_parse_cb, \ .id = MAX77686_LDO##num, \ .ops = &max77686_ops, \ .type = REGULATOR_VOLTAGE, \ @@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = { .name = "BUCK"#num, \ .of_match = of_match_ptr("BUCK"#num), \ .regulators_node = of_match_ptr("voltage-regulators"), \ + .of_parse_cb = max77686_of_parse_cb, \ .id = MAX77686_BUCK##num, \ .ops = &max77686_ops, \ .type = REGULATOR_VOLTAGE, \ diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c new file mode 100644 index 000000000000..c132ef527cdd --- /dev/null +++ b/drivers/regulator/max77843.c @@ -0,0 +1,227 @@ +/* + * max77843.c - Regulator driver for the Maxim MAX77843 + * + * Copyright (C) 2015 Samsung Electronics + * Author: Jaewon Kim <jaewon02.kim@samsung.com> + * Author: Beomho Seo <beomho.seo@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/mfd/max77843-private.h> +#include <linux/regulator/of_regulator.h> + +enum max77843_regulator_type { + MAX77843_SAFEOUT1 = 0, + MAX77843_SAFEOUT2, + MAX77843_CHARGER, + + MAX77843_NUM, +}; + +static const unsigned int max77843_safeout_voltage_table[] = { + 4850000, + 4900000, + 4950000, + 3300000, +}; + +static int max77843_reg_is_enabled(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev->regmap; + int ret; + unsigned int reg; + + ret = regmap_read(regmap, rdev->desc->enable_reg, &reg); + if (ret) { + dev_err(&rdev->dev, "Failed to read charger register\n"); + return ret; + } + + return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask; +} + +static int max77843_reg_get_current_limit(struct regulator_dev *rdev) +{ + struct regmap *regmap = rdev->regmap; + unsigned int chg_min_uA = rdev->constraints->min_uA; + unsigned int chg_max_uA = rdev->constraints->max_uA; + unsigned int val; + int ret; + unsigned int reg, sel; + + ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg); + if (ret) { + dev_err(&rdev->dev, "Failed to read charger register\n"); + return ret; + } + + sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK; + + if (sel < 0x03) + sel = 0; + else + sel -= 2; + + val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel; + if (val > chg_max_uA) + return -EINVAL; + + return val; +} + +static int max77843_reg_set_current_limit(struct regulator_dev *rdev, + int min_uA, int max_uA) +{ + struct regmap *regmap = rdev->regmap; + unsigned int chg_min_uA = rdev->constraints->min_uA; + int sel = 0; + + while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA) + sel++; + + if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA) + return -EINVAL; + + sel += 2; + + return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel); +} + +static struct regulator_ops max77843_charger_ops = { + .is_enabled = max77843_reg_is_enabled, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .get_current_limit = max77843_reg_get_current_limit, + .set_current_limit = max77843_reg_set_current_limit, +}; + +static struct regulator_ops max77843_regulator_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_table, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_desc max77843_supported_regulators[] = { + [MAX77843_SAFEOUT1] = { + .name = "SAFEOUT1", + .id = MAX77843_SAFEOUT1, + .ops = &max77843_regulator_ops, + .of_match = of_match_ptr("SAFEOUT1"), + .regulators_node = of_match_ptr("regulators"), + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table), + .volt_table = max77843_safeout_voltage_table, + .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, + .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1, + .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, + .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK, + }, + [MAX77843_SAFEOUT2] = { + .name = "SAFEOUT2", + .id = MAX77843_SAFEOUT2, + .ops = &max77843_regulator_ops, + .of_match = of_match_ptr("SAFEOUT2"), + .regulators_node = of_match_ptr("regulators"), + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table), + .volt_table 
= max77843_safeout_voltage_table, + .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, + .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2, + .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, + .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK, + }, + [MAX77843_CHARGER] = { + .name = "CHARGER", + .id = MAX77843_CHARGER, + .ops = &max77843_charger_ops, + .of_match = of_match_ptr("CHARGER"), + .regulators_node = of_match_ptr("regulators"), + .type = REGULATOR_CURRENT, + .owner = THIS_MODULE, + .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00, + .enable_mask = MAX77843_CHG_MASK, + }, +}; + +static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id) +{ + switch (reg_id) { + case MAX77843_SAFEOUT1: + case MAX77843_SAFEOUT2: + return max77843->regmap; + case MAX77843_CHARGER: + return max77843->regmap_chg; + default: + return max77843->regmap; + } +} + +static int max77843_regulator_probe(struct platform_device *pdev) +{ + struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent); + struct regulator_config config = {}; + int i; + + config.dev = max77843->dev; + config.driver_data = max77843; + + for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) { + struct regulator_dev *regulator; + + config.regmap = max77843_get_regmap(max77843, + max77843_supported_regulators[i].id); + + regulator = devm_regulator_register(&pdev->dev, + &max77843_supported_regulators[i], &config); + if (IS_ERR(regulator)) { + dev_err(&pdev->dev, + "Failed to register regulator-%d\n", i); + return PTR_ERR(regulator); + } + } + + return 0; +} + +static const struct platform_device_id max77843_regulator_id[] = { + { "max77843-regulator", }, + { /* sentinel */ }, +}; + +static struct platform_driver max77843_regulator_driver = { + .driver = { + .name = "max77843-regulator", + }, + .probe = max77843_regulator_probe, + .id_table = max77843_regulator_id, +}; + +static int __init max77843_regulator_init(void) +{ + return platform_driver_register(&max77843_regulator_driver); +} +subsys_initcall(max77843_regulator_init); + +static void __exit max77843_regulator_exit(void) +{ + platform_driver_unregister(&max77843_regulator_driver); +} +module_exit(max77843_regulator_exit); + +MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>"); +MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>"); +MODULE_DESCRIPTION("Maxim MAX77843 regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index c8bddcc8f911..81229579ece9 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c @@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_NORMAL; } -static struct regulator_ops max8649_dcdc_ops = { +static const struct regulator_ops max8649_dcdc_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, @@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = { .enable_is_inverted = true, }; -static struct regmap_config max8649_regmap_config = { +static const struct regmap_config max8649_regmap_config = { .reg_bits = 8, .val_bits = 8, }; diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c new file mode 100644 index 000000000000..a5b2f4762677 --- /dev/null +++ b/drivers/regulator/mt6397-regulator.c @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2014 MediaTek Inc. 
+ * Author: Flora Fu <flora.fu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/mfd/mt6397/core.h> +#include <linux/mfd/mt6397/registers.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/mt6397-regulator.h> +#include <linux/regulator/of_regulator.h> + +/* + * MT6397 regulators' information + * + * @desc: standard fields of regulator description. + * @qi: Mask for query enable signal status of regulators + * @vselon_reg: Register sections for hardware control mode of bucks + * @vselctrl_reg: Register for controlling the buck control mode. + * @vselctrl_mask: Mask for query buck's voltage control mode. + */ +struct mt6397_regulator_info { + struct regulator_desc desc; + u32 qi; + u32 vselon_reg; + u32 vselctrl_reg; + u32 vselctrl_mask; +}; + +#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \ + vosel, vosel_mask, voselon, vosel_ctrl) \ +[MT6397_ID_##vreg] = { \ + .desc = { \ + .name = #vreg, \ + .of_match = of_match_ptr(match), \ + .ops = &mt6397_volt_range_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MT6397_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = (max - min)/step + 1, \ + .linear_ranges = volt_ranges, \ + .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ + .vsel_reg = vosel, \ + .vsel_mask = vosel_mask, \ + .enable_reg = enreg, \ + .enable_mask = BIT(0), \ + }, \ + .qi = BIT(13), \ + .vselon_reg = voselon, \ + .vselctrl_reg = vosel_ctrl, \ + .vselctrl_mask = BIT(1), \ +} + +#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \ + vosel_mask) \ +[MT6397_ID_##vreg] = { \ + .desc = { \ + .name = #vreg, \ + .of_match = of_match_ptr(match), \ + .ops = &mt6397_volt_table_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MT6397_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = ARRAY_SIZE(ldo_volt_table), \ + .volt_table = ldo_volt_table, \ + .vsel_reg = vosel, \ + .vsel_mask = vosel_mask, \ + .enable_reg = enreg, \ + .enable_mask = BIT(enbit), \ + }, \ + .qi = BIT(15), \ +} + +#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt) \ +[MT6397_ID_##vreg] = { \ + .desc = { \ + .name = #vreg, \ + .of_match = of_match_ptr(match), \ + .ops = &mt6397_volt_fixed_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MT6397_ID_##vreg, \ + .owner = THIS_MODULE, \ + .n_voltages = 1, \ + .enable_reg = enreg, \ + .enable_mask = BIT(enbit), \ + .min_uV = volt, \ + }, \ + .qi = BIT(15), \ +} + +static const struct regulator_linear_range buck_volt_range1[] = { + REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250), +}; + +static const struct regulator_linear_range buck_volt_range2[] = { + REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250), +}; + +static const struct regulator_linear_range buck_volt_range3[] = { + REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000), +}; + +static const u32 ldo_volt_table1[] = { + 1500000, 1800000, 2500000, 2800000, +}; + +static const u32 ldo_volt_table2[] = { + 1800000, 3300000, +}; + +static const u32 ldo_volt_table3[] = { + 3000000, 3300000, +}; + +static 
const u32 ldo_volt_table4[] = { + 1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000, +}; + +static const u32 ldo_volt_table5[] = { + 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000, +}; + +static const u32 ldo_volt_table5_v2[] = { + 1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000, +}; + +static const u32 ldo_volt_table6[] = { + 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000, +}; + +static const u32 ldo_volt_table7[] = { + 1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000, +}; + +static int mt6397_get_status(struct regulator_dev *rdev) +{ + int ret; + u32 regval; + struct mt6397_regulator_info *info = rdev_get_drvdata(rdev); + + ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval); + if (ret != 0) { + dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret); + return ret; + } + + return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF; +} + +static struct regulator_ops mt6397_volt_range_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = mt6397_get_status, +}; + +static struct regulator_ops mt6397_volt_table_ops = { + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_iterate, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = mt6397_get_status, +}; + +static struct regulator_ops mt6397_volt_fixed_ops = { + .list_voltage = regulator_list_voltage_linear, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = mt6397_get_status, +}; + +/* The array is indexed by id(MT6397_ID_XXX) */ +static struct mt6397_regulator_info mt6397_regulators[] = { + MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250, + buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f, + MT6397_VCA15_CON10, MT6397_VCA15_CON5), + MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250, + buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f, + MT6397_VPCA7_CON10, MT6397_VPCA7_CON5), + MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250, + buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9, + 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5), + MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250, + buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9, + 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5), + MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250, + buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f, + MT6397_VCORE_CON10, MT6397_VCORE_CON5), + MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1, + MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f, + MT6397_VGPU_CON10, MT6397_VGPU_CON5), + MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2, + MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f, + MT6397_VDRM_CON10, MT6397_VDRM_CON5), + MT6397_BUCK("buck_vio18", VIO18, 
1500000, 2120000, 20000, + buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f, + MT6397_VIO18_CON10, MT6397_VIO18_CON5), + MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000), + MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000), + MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1, + MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0), + MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000), + MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000), + MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2, + MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10), + MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3, + MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80), + MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3, + MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10), + MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4, + MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0), + MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5, + MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0), + MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5, + MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0), + MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5, + MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0), + MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6, + MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0), + MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5, + MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0), + MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7, + MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00), +}; + +static int mt6397_set_buck_vosel_reg(struct platform_device *pdev) +{ + struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent); + int i; + u32 regval; + + for (i = 0; i < MT6397_MAX_REGULATOR; i++) { + if (mt6397_regulators[i].vselctrl_reg) { + if (regmap_read(mt6397->regmap, + mt6397_regulators[i].vselctrl_reg, + &regval) < 0) { + dev_err(&pdev->dev, + "Failed to read buck ctrl\n"); + return -EIO; + } + + if (regval & mt6397_regulators[i].vselctrl_mask) { + mt6397_regulators[i].desc.vsel_reg = + mt6397_regulators[i].vselon_reg; + } + } + } + + return 0; +} + +static int mt6397_regulator_probe(struct platform_device *pdev) +{ + struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent); + struct regulator_config config = {}; + struct regulator_dev *rdev; + int i; + u32 reg_value, version; + + /* Query buck controller to select activated voltage register part */ + if (mt6397_set_buck_vosel_reg(pdev)) + return -EIO; + + /* Read PMIC chip revision to update constraints and voltage table */ + if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) { + dev_err(&pdev->dev, "Failed to read Chip ID\n"); + return -EIO; + } + dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value); + + version = (reg_value & 0xFF); + switch (version) { + case MT6397_REGULATOR_ID91: + mt6397_regulators[MT6397_ID_VGP2].desc.volt_table = + ldo_volt_table5_v2; + break; + default: + break; + } + + for (i = 0; i < MT6397_MAX_REGULATOR; i++) { + config.dev = &pdev->dev; + config.driver_data = &mt6397_regulators[i]; + config.regmap = mt6397->regmap; + rdev = devm_regulator_register(&pdev->dev, + &mt6397_regulators[i].desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "failed to register %s\n", + mt6397_regulators[i].desc.name); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static struct platform_driver mt6397_regulator_driver = { + .driver = { + .name = "mt6397-regulator", + }, + .probe = mt6397_regulator_probe, +}; + +module_platform_driver(mt6397_regulator_driver); + 
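Aside: in the MT6397 probe path above, mt6397_set_buck_vosel_reg() steers the regulator core, once at probe time, toward whichever voltage-select register the PMIC is actually driving, using the control-mode bit described in the struct mt6397_regulator_info comment. A minimal sketch of that pattern, assuming the kernel regmap API; the function and parameter names are hypothetical, not the driver's own:

/* Sketch only: point the desc at the live vsel register.
 * All names here are illustrative, not part of the patch. */
static void pick_live_vsel_reg(struct regmap *map, struct regulator_desc *desc,
			       u32 ctrl_reg, u32 ctrl_mask, u32 vsel_on_reg)
{
	u32 val;

	/* In hardware-control mode the "on" register holds the voltage
	 * that is actually applied, so select against that one. */
	if (regmap_read(map, ctrl_reg, &val) == 0 && (val & ctrl_mask))
		desc->vsel_reg = vsel_on_reg;
}

Doing this once before devm_regulator_register() lets the generic regmap voltage helpers be used unchanged afterwards.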
+MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>"); +MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mt6397-regulator"); diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 91eaaf010524..24e812c48d93 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match); struct regulator_init_data *regulator_of_get_init_data(struct device *dev, const struct regulator_desc *desc, + struct regulator_config *config, struct device_node **node) { struct device_node *search, *child; @@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev, break; } + if (desc->of_parse_cb) { + if (desc->of_parse_cb(child, desc, config)) { + dev_err(dev, + "driver callback failed to parse DT for regulator %s\n", + child->name); + init_data = NULL; + break; + } + } + of_node_get(child); *node = child; break; diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index c879dff597ee..8cc8d1877c44 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -56,7 +56,7 @@ #define PFUZE100_VGEN5VOL 0x70 #define PFUZE100_VGEN6VOL 0x71 -enum chips { PFUZE100, PFUZE200 }; +enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 }; struct pfuze_regulator { struct regulator_desc desc; @@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = { 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000, }; +static const int pfuze3000_sw2lo[] = { + 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, +}; + +static const int pfuze3000_sw2hi[] = { + 2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000, +}; + static const struct i2c_device_id pfuze_device_id[] = { {.name = "pfuze100", .driver_data = PFUZE100}, {.name = "pfuze200", .driver_data = PFUZE200}, + {.name = "pfuze3000", .driver_data = PFUZE3000}, { } }; MODULE_DEVICE_TABLE(i2c, pfuze_device_id); @@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id); static const struct of_device_id pfuze_dt_ids[] = { { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, + { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000}, { } }; MODULE_DEVICE_TABLE(of, pfuze_dt_ids); @@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { .stby_mask = 0x20, \ } +#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step) { \ + .desc = { \ + .name = #_name, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ + .ops = &pfuze100_ldo_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + .owner = THIS_MODULE, \ + .min_uV = (min), \ + .uV_step = (step), \ + .vsel_reg = (base), \ + .vsel_mask = 0x3, \ + .enable_reg = (base), \ + .enable_mask = 0x10, \ + }, \ + .stby_reg = (base), \ + .stby_mask = 0x20, \ +} + + +#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \ + .desc = { \ + .name = #_name,\ + .n_voltages = ((max) - (min)) / (step) + 1, \ + .ops = &pfuze100_sw_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + .owner = THIS_MODULE, \ + .min_uV = (min), \ + .uV_step = (step), \ + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ + .vsel_mask = 0x7, \ + }, \ + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ + .stby_mask = 0x7, \ +} + +#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \ + .desc = { \ + .name = #_name,\ 
+ .n_voltages = ((max) - (min)) / (step) + 1, \ + .ops = &pfuze100_sw_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + .owner = THIS_MODULE, \ + .min_uV = (min), \ + .uV_step = (step), \ + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ + .vsel_mask = 0xf, \ + }, \ + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ + .stby_mask = 0xf, \ +} + /* PFUZE100 */ static struct pfuze_regulator pfuze100_regulators[] = { PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), @@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = { PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), }; +static struct pfuze_regulator pfuze3000_regulators[] = { + PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000), + PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), + PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), + PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst), + PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), + PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000), + PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000), + PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000), + PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000), + PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000), + PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000), + PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), +}; + static struct pfuze_regulator *pfuze_regulators; #ifdef CONFIG_OF @@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = { { .name = "vgen6", }, }; +/* PFUZE3000 */ +static struct of_regulator_match pfuze3000_matches[] = { + + { .name = "sw1a", }, + { .name = "sw1b", }, + { .name = "sw2", }, + { .name = "sw3", }, + { .name = "swbst", }, + { .name = "vsnvs", }, + { .name = "vrefddr", }, + { .name = "vldo1", }, + { .name = "vldo2", }, + { .name = "vccsd", }, + { .name = "v33", }, + { .name = "vldo3", }, + { .name = "vldo4", }, +}; + static struct of_regulator_match *pfuze_matches; static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) @@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) } switch (chip->chip_id) { + case PFUZE3000: + pfuze_matches = pfuze3000_matches; + ret = of_regulator_match(dev, parent, pfuze3000_matches, + ARRAY_SIZE(pfuze3000_matches)); + break; case PFUZE200: pfuze_matches = pfuze200_matches; ret = of_regulator_match(dev, parent, pfuze200_matches, @@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip) * as ID=8 in PFUZE100 */ dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); - } else if ((value & 0x0f) != pfuze_chip->chip_id) { + } else if ((value & 0x0f) != pfuze_chip->chip_id && + (value & 0xf0) >> 4 != pfuze_chip->chip_id) { /* device id NOT match with your setting */ dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); return -ENODEV; @@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client, int i, ret; const struct of_device_id *match; u32 regulator_num; - u32 sw_check_start, sw_check_end; + u32 sw_check_start, sw_check_end, sw_hi = 0x40; pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), 
GFP_KERNEL); @@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client, /* use the right regulators after identify the right device */ switch (pfuze_chip->chip_id) { + case PFUZE3000: + pfuze_regulators = pfuze3000_regulators; + regulator_num = ARRAY_SIZE(pfuze3000_regulators); + sw_check_start = PFUZE3000_SW2; + sw_check_end = PFUZE3000_SW2; + sw_hi = 1 << 3; + break; case PFUZE200: pfuze_regulators = pfuze200_regulators; regulator_num = ARRAY_SIZE(pfuze200_regulators); sw_check_start = PFUZE200_SW2; sw_check_end = PFUZE200_SW3B; break; - case PFUZE100: default: pfuze_regulators = pfuze100_regulators; @@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client, break; } dev_info(&client->dev, "pfuze%s found.\n", - (pfuze_chip->chip_id == PFUZE100) ? "100" : "200"); + (pfuze_chip->chip_id == PFUZE100) ? "100" : + ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000")); memcpy(pfuze_chip->regulator_descs, pfuze_regulators, sizeof(pfuze_chip->regulator_descs)); @@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client, /* SW2~SW4 high bit check and modify the voltage value table */ if (i >= sw_check_start && i <= sw_check_end) { regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); - if (val & 0x40) { - desc->min_uV = 800000; - desc->uV_step = 50000; - desc->n_voltages = 51; + if (val & sw_hi) { + if (pfuze_chip->chip_id == PFUZE3000) { + desc->volt_table = pfuze3000_sw2hi; + desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi); + } else { + desc->min_uV = 800000; + desc->uV_step = 50000; + desc->n_voltages = 51; + } } } diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c index 8364ff331a81..e8647f7cf25e 100644 --- a/drivers/regulator/qcom_rpm-regulator.c +++ b/drivers/regulator/qcom_rpm-regulator.c @@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev, return uV; mutex_lock(&vreg->lock); - vreg->uV = uV; if (vreg->is_enabled) - ret = rpm_reg_write(vreg, req, vreg->uV / 1000); + ret = rpm_reg_write(vreg, req, uV / 1000); + + if (!ret) + vreg->uV = uV; mutex_unlock(&vreg->lock); return ret; @@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev, return uV; mutex_lock(&vreg->lock); - vreg->uV = uV; if (vreg->is_enabled) - ret = rpm_reg_write(vreg, req, vreg->uV); + ret = rpm_reg_write(vreg, req, uV); + + if (!ret) + vreg->uV = uV; mutex_unlock(&vreg->lock); return ret; @@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev) vreg->desc.owner = THIS_MODULE; vreg->desc.type = REGULATOR_VOLTAGE; vreg->desc.name = pdev->dev.of_node->name; + vreg->desc.supply_name = "vin"; vreg->rpm = dev_get_drvdata(pdev->dev.parent); if (!vreg->rpm) { @@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev) break; } - if (force_mode < 0) { + if (force_mode == -1) { dev_err(&pdev->dev, "invalid force mode\n"); return -EINVAL; } diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index c94a3e0f3b91..1f93b752a81c 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) RK808_RAMP_RATE_MASK, ramp_value); } -int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) +static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) { unsigned int reg; int sel = regulator_map_voltage_linear_range(rdev, uv, uv); @@ -112,7 +112,7 @@ int 
rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) sel); } -int rk808_set_suspend_enable(struct regulator_dev *rdev) +static int rk808_set_suspend_enable(struct regulator_dev *rdev) { unsigned int reg; @@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev) 0); } -int rk808_set_suspend_disable(struct regulator_dev *rdev) +static int rk808_set_suspend_disable(struct regulator_dev *rdev) { unsigned int reg; diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c index 870cc49438db..96d2c18e051a 100644 --- a/drivers/regulator/rt5033-regulator.c +++ b/drivers/regulator/rt5033-regulator.c @@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = { static const struct regulator_desc rt5033_supported_regulators[] = { [RT5033_BUCK] = { .name = "BUCK", + .of_match = of_match_ptr("BUCK"), + .regulators_node = of_match_ptr("regulators"), .id = RT5033_BUCK, .ops = &rt5033_buck_ops, .type = REGULATOR_VOLTAGE, @@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = { }, [RT5033_LDO] = { .name = "LDO", + .of_match = of_match_ptr("LDO"), + .regulators_node = of_match_ptr("regulators"), .id = RT5033_LDO, .ops = &rt5033_buck_ops, .type = REGULATOR_VOLTAGE, @@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = { }, [RT5033_SAFE_LDO] = { .name = "SAFE_LDO", + .of_match = of_match_ptr("SAFE_LDO"), + .regulators_node = of_match_ptr("regulators"), .id = RT5033_SAFE_LDO, .ops = &rt5033_safe_ldo_ops, .type = REGULATOR_VOLTAGE, @@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev) int ret, i; struct regulator_config config = {}; - config.dev = &pdev->dev; + config.dev = rt5033->dev; config.driver_data = rt5033; for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) { diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index 7380af8bd50d..b941e564b3f3 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c @@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev, } /* Operations permitted on VDCDCx */ -static struct regulator_ops tps65023_dcdc_ops = { +static const struct regulator_ops tps65023_dcdc_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = { }; /* Operations permitted on LDOx */ -static struct regulator_ops tps65023_ldo_ops = { +static const struct regulator_ops tps65023_ldo_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = { .map_voltage = regulator_map_voltage_ascend, }; -static struct regmap_config tps65023_regmap_config = { +static const struct regmap_config tps65023_regmap_config = { .reg_bits = 8, .val_bits = 8, }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f407e3763432..642c77c76b84 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, QETH_DBF_TEXT(SETUP, 2, "idxanswr"); card = CARD_FROM_CDEV(channel->ccwdev); iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; iob->callback = idx_reply_cb; memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); channel->ccw.count = 
QETH_BUFSIZE; @@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_TEXT(SETUP, 2, "idxactch"); iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; iob->callback = idx_reply_cb; memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); channel->ccw.count = IDX_ACTIVATE_SIZE; @@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, } EXPORT_SYMBOL_GPL(qeth_prepare_control_data); +/** + * qeth_send_control_data() - send control command to the card + * @card: qeth_card structure pointer + * @len: size of the command buffer + * @iob: qeth_cmd_buffer pointer + * @reply_cb: callback function pointer + * @cb_card: pointer to the qeth_card structure + * @cb_reply: pointer to the qeth_reply structure + * @cb_cmd: pointer to the original iob for non-IPA + * commands, or to the qeth_ipa_cmd structure + * for the IPA commands. + * @reply_param: private pointer passed to the callback + * + * Returns the value of the `return_code' field of the response + * block returned from the hardware, or other error indication. + * Value of zero indicates successful execution of the command. + * + * Callback function gets called one or more times, with cb_cmd + * pointing to the response returned by the hardware. Callback + * function must return non-zero if more reply blocks are expected, + * and zero if the last or only reply block is received. Callback + * function can get the value of the reply_param pointer from the + * field 'param' of the structure qeth_reply. + */ + int qeth_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob, - int (*reply_cb)(struct qeth_card *, struct qeth_reply *, - unsigned long), + int (*reply_cb)(struct qeth_card *cb_card, + struct qeth_reply *cb_reply, + unsigned long cb_cmd), void *reply_param) { int rc; @@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - iob = qeth_wait_for_buffer(&card->write); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); + iob = qeth_get_buffer(&card->write); + if (iob) { + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); + } else { + dev_warn(&card->gdev->dev, + "The qeth driver ran out of channel command buffers\n"); + QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", + dev_name(&card->gdev->dev)); + } return iob; } @@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); +/** + * qeth_send_ipa_cmd() - send an IPA command + * + * See qeth_send_control_data() for explanation of the arguments. 
+ */ + int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), @@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "strtlan"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); + if (!iob) + return -ENOMEM; rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } @@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setadapterparms.hdr.cmdlength = cmdlen; - cmd->data.setadapterparms.hdr.command_code = command; - cmd->data.setadapterparms.hdr.used_total = 1; - cmd->data.setadapterparms.hdr.seq_no = 1; + if (iob) { + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setadapterparms.hdr.cmdlength = cmdlen; + cmd->data.setadapterparms.hdr.command_code = command; + cmd->data.setadapterparms.hdr.used_total = 1; + cmd->data.setadapterparms.hdr.seq_no = 1; + } return iob; } @@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card) QETH_CARD_TEXT(card, 3, "queryadp"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return -ENOMEM; rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); return rc; } @@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); + if (!iob) + return -ENOMEM; rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } @@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card, return -ENOMEDIUM; iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, sizeof(struct qeth_ipacmd_setadpparms_hdr)); + if (!iob) + return -ENOMEM; return qeth_send_ipa_cmd(card, iob, qeth_query_switch_attributes_cb, sw_info); } @@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "qdiagass"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 16; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; @@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) QETH_DBF_TEXT(SETUP, 2, "diagtrap"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 80; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; @@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return; cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.mode = mode; qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); @@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.change_addr.cmd = 
CHANGE_ADDR_READ_MAC; cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; @@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_set_access_ctrl)); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; access_ctrl_req->subcmd_code = isolation; @@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, QETH_SNMP_SETADP_CMDLENGTH + req_len); + if (!iob) { + rc = -ENOMEM; + goto out; + } cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, @@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT; } - +out: kfree(ureq); kfree(qinfo.udata); return rc; @@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata) iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_query_oat)); + if (!iob) { + rc = -ENOMEM; + goto out_free; + } cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); oat_req = &cmd->data.setadapterparms.data.query_oat; oat_req->subcmd_code = oat_data.command; @@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card, return -EOPNOTSUPP; iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, sizeof(struct qeth_ipacmd_setadpparms_hdr)); + if (!iob) + return -ENOMEM; return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, (void *)carrier_info); } @@ -5060,11 +5133,23 @@ retriable: card->options.adp.supported_funcs = 0; card->options.sbp.supported_funcs = 0; card->info.diagass_support = 0; - qeth_query_ipassists(card, QETH_PROT_IPV4); - if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) - qeth_query_setadapterparms(card); - if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) - qeth_query_setdiagass(card); + rc = qeth_query_ipassists(card, QETH_PROT_IPV4); + if (rc == -ENOMEM) + goto out; + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc < 0) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + goto out; + } + } + if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { + rc = qeth_query_setdiagass(card); + if (rc < 0) { + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + goto out; + } + } return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d02cd1a67943..ce87ae72edbd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, - enum qeth_ipa_cmds, - int (*reply_cb) (struct qeth_card *, - struct qeth_reply*, - unsigned long)); + enum qeth_ipa_cmds); static void qeth_l2_set_multicast_list(struct net_device *); static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); @@ 
-130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) return ndev; } -static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) +static int qeth_setdel_makerc(struct qeth_card *card, int retcode) { - struct qeth_ipa_cmd *cmd; - __u8 *mac; + int rc; - QETH_CARD_TEXT(card, 2, "L2Sgmacb"); - cmd = (struct qeth_ipa_cmd *) data; - mac = &cmd->data.setdelmac.mac[0]; - /* MAC already registered, needed in couple/uncouple case */ - if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { - QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", - mac, QETH_CARD_IFNAME(card)); - cmd->hdr.return_code = 0; + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + switch (retcode) { + case IPA_RC_SUCCESS: + rc = 0; + break; + case IPA_RC_L2_UNSUPPORTED_CMD: + rc = -ENOSYS; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case IPA_RC_L2_DUP_MAC: + case IPA_RC_L2_DUP_LAYER3_MAC: + rc = -EEXIST; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EPERM; + break; + case IPA_RC_L2_MAC_NOT_FOUND: + rc = -ENOENT; + break; + case -ENOMEM: + rc = -ENOMEM; + break; + default: + rc = -EIO; + break; } - if (cmd->hdr.return_code) - QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n", - mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); - return 0; + return rc; } static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { - QETH_CARD_TEXT(card, 2, "L2Sgmac"); - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, - qeth_l2_send_setgroupmac_cb); -} - -static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - __u8 *mac; + int rc; - QETH_CARD_TEXT(card, 2, "L2Dgmacb"); - cmd = (struct qeth_ipa_cmd *) data; - mac = &cmd->data.setdelmac.mac[0]; - if (cmd->hdr.return_code) - QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", - mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); - return 0; + QETH_CARD_TEXT(card, 2, "L2Sgmac"); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_SETGMAC)); + if (rc == -EEXIST) + QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", + mac, QETH_CARD_IFNAME(card)); + else if (rc) + QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n", + mac, QETH_CARD_IFNAME(card), rc); + return rc; } static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) { + int rc; + QETH_CARD_TEXT(card, 2, "L2Dgmac"); - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, - qeth_l2_send_delgroupmac_cb); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_DELGMAC)); + if (rc) + QETH_DBF_MESSAGE(2, + "Could not delete group MAC %pM on %s: %d\n", + mac, QETH_CARD_IFNAME(card), rc); + return rc; } static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) @@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) mc->is_vmac = vmac; if (vmac) { - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, - NULL); + rc = qeth_setdel_makerc(card, + qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC)); } else { - rc = qeth_l2_send_setgroupmac(card, mac); + rc = qeth_setdel_makerc(card, + qeth_l2_send_setgroupmac(card, mac)); } if (!rc) @@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del) if (del) { if (mc->is_vmac) qeth_l2_send_setdelmac(card, mc->mc_addr, - IPA_CMD_DELVMAC, 
NULL); + IPA_CMD_DELVMAC); else qeth_l2_send_delgroupmac(card, mc->mc_addr); } @@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelvlan.vlan_id = i; return qeth_send_ipa_cmd(card, iob, @@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, { struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; + int rc; QETH_CARD_TEXT_(card, 4, "aid:%d", vid); if (!vid) @@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); if (id) { id->vid = vid; - qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); + rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); + if (rc) { + kfree(id); + return rc; + } spin_lock_bh(&card->vlanlock); list_add_tail(&id->list, &card->vid_list); spin_unlock_bh(&card->vlanlock); @@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, { struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; + int rc = 0; QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (card->info.type == QETH_CARD_TYPE_OSM) { @@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, } spin_unlock_bh(&card->vlanlock); if (tmpid) { - qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); + rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); kfree(tmpid); } qeth_l2_set_multicast_list(card->dev); - return 0; + return rc; } static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) @@ -539,91 +560,62 @@ out: } static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, - enum qeth_ipa_cmds ipacmd, - int (*reply_cb) (struct qeth_card *, - struct qeth_reply*, - unsigned long)) + enum qeth_ipa_cmds ipacmd) { struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 2, "L2sdmac"); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); + return qeth_send_ipa_cmd(card, iob, NULL, NULL); } -static int qeth_l2_send_setmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) { - struct qeth_ipa_cmd *cmd; + int rc; - QETH_CARD_TEXT(card, 2, "L2Smaccb"); - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code) { - QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_SETVMAC)); + if (rc == 0) { + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered on device %s\n", + card->dev->dev_addr, card->dev->name); + } else { card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - switch (cmd->hdr.return_code) { - case IPA_RC_L2_DUP_MAC: - case IPA_RC_L2_DUP_LAYER3_MAC: + switch (rc) { + case -EEXIST: dev_warn(&card->gdev->dev, - "MAC address %pM already exists\n", - cmd->data.setdelmac.mac); + "MAC address %pM already exists\n", mac); break; - case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: - case 
IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + case -EPERM: dev_warn(&card->gdev->dev, - "MAC address %pM is not authorized\n", - cmd->data.setdelmac.mac); - break; - default: + "MAC address %pM is not authorized\n", mac); break; } - } else { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, - OSA_ADDR_LEN); - dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); - } - return 0; -} - -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) -{ - QETH_CARD_TEXT(card, 2, "L2Setmac"); - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, - qeth_l2_send_setmac_cb); -} - -static int qeth_l2_send_delmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 2, "L2Dmaccb"); - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code) { - QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); - return 0; } - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - - return 0; + return rc; } static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) { + int rc; + QETH_CARD_TEXT(card, 2, "L2Delmac"); if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) return 0; - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, - qeth_l2_send_delmac_cb); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_DELVMAC)); + if (rc == 0) + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + return rc; } static int qeth_l2_request_initial_mac(struct qeth_card *card) @@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) if (rc) { QETH_DBF_MESSAGE(2, "couldn't get MAC address on " "device %s: x%x\n", CARD_BUS_ID(card), rc); - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); return rc; } QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); @@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) + if (!rc || (rc == -ENOENT)) rc = qeth_l2_send_setmac(card, addr->sa_data); return rc ? 
-EINVAL : 0; } @@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) recover_flag = card->state; rc = qeth_core_hardsetup_card(card); if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; goto out_remove; } @@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "brqsuppo"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + if (!iob) + return; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + @@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) return -EOPNOTSUPP; iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr); @@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, if (rc) return rc; rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); - if (rc) - return rc; - return 0; + return rc; } EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); @@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) if (!(card->options.sbp.supported_funcs & setcmd)) return -EOPNOTSUPP; iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.sbp.hdr.cmdlength = cmdlength; cmd->data.sbp.hdr.command_code = setcmd; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..e2a0ee845399 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setdelmc"); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); if (addr->proto == QETH_PROT_IPV6) @@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "flags%02X", flags); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); if (addr->proto == QETH_PROT_IPV6) { memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, @@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setroutg"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setrtg.type = (type); rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); @@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( QETH_CARD_TEXT(card, 4, "getasscm"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setassparms.hdr.assist_no = ipa_func; - cmd->data.setassparms.hdr.length = 8 + len; - cmd->data.setassparms.hdr.command_code = cmd_code; - cmd->data.setassparms.hdr.return_code = 0; - cmd->data.setassparms.hdr.seq_no = 0; + if (iob) { + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + 
cmd->data.setassparms.hdr.assist_no = ipa_func; + cmd->data.setassparms.hdr.length = 8 + len; + cmd->data.setassparms.hdr.command_code = cmd_code; + cmd->data.setassparms.hdr.return_code = 0; + cmd->data.setassparms.hdr.seq_no = 0; + } return iob; } @@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "simassp6"); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 0, QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, 0, 0, qeth_l3_default_setassparms_cb, NULL); return rc; @@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, length = sizeof(__u32); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, length, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, length, data, qeth_l3_default_setassparms_cb, NULL); return rc; @@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = card->info.unique_id; @@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card) iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = card->info.unique_id; @@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) QETH_DBF_TEXT(SETUP, 2, "diagtrac"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 16; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; @@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, IPA_CMD_ASS_ARP_QUERY_INFO, sizeof(struct qeth_arp_query_data) - sizeof(char), prot); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setassparms.data.query_arp.request_bits = 0x000F; cmd->data.setassparms.data.query_arp.reply_bits = 0; @@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, IPA_CMD_ASS_ARP_ADD_ENTRY, sizeof(struct qeth_arp_cache_entry), QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, sizeof(struct qeth_arp_cache_entry), (unsigned long) entry, @@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, IPA_CMD_ASS_ARP_REMOVE_ENTRY, 12, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, 12, (unsigned long)buf, qeth_l3_default_setassparms_cb, NULL); @@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { static int qeth_l3_setup_netdev(struct qeth_card *card) { + int rc; + if (card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || @@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return -ENODEV; card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; - qeth_l3_iqd_read_initial_mac(card); + rc = qeth_l3_iqd_read_initial_mac(card); + if (rc) + return rc; if (card->options.hsuid[0]) memcpy(card->dev->perm_addr, card->options.hsuid, 9); } else @@ 
-3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) recover_flag = card->state; rc = qeth_core_hardsetup_card(card); if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; goto out_remove; } @@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) contin: rc = qeth_l3_setadapter_parms(card); if (rc) - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); if (!card->options.sniffer) { rc = qeth_l3_start_ipassists(card); if (rc) { @@ -3410,10 +3438,10 @@ contin: } rc = qeth_l3_setrouting_v4(card); if (rc) - QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); rc = qeth_l3_setrouting_v6(card); if (rc) - QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); } netif_tx_disable(card->dev); diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 1dba62c5cf6a..1efebc9eedfb 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref) struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; struct scsi_device *sdev = scsi_dh_data->sdev; + scsi_dh->detach(sdev); + spin_lock_irq(sdev->request_queue->queue_lock); sdev->scsi_dh_data = NULL; spin_unlock_irq(sdev->request_queue->queue_lock); - scsi_dh->detach(sdev); sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); module_put(scsi_dh->module); } diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e02885451425..9b3829931f40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev) return -ENXIO; if (!get_device(&sdev->sdev_gendev)) return -ENXIO; - /* We can fail this if we're doing SCSI operations + /* We can fail try_module_get if we're doing SCSI operations * from module exit (like cache flush) */ - try_module_get(sdev->host->hostt->module); + __module_get(sdev->host->hostt->module); return 0; } @@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { -#ifdef CONFIG_MODULE_UNLOAD - struct module *module = sdev->host->hostt->module; - - /* The module refcount will be zero if scsi_device_get() - * was called from a module removal routine */ - if (module && module_refcount(module) != 0) - module_put(module); -#endif + module_put(sdev->host->hostt->module); put_device(&sdev->sdev_gendev); } EXPORT_SYMBOL(scsi_device_put); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 399516925d80..05ea0d49a3a3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk) */ sd_set_flush_flag(sdkp); - max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), - sdkp->max_xfer_blocks); + max_xfer = sdkp->max_xfer_blocks; max_xfer <<= ilog2(sdp->sector_size) - 9; + + max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), + max_xfer); blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); set_capacity(disk, sdkp->capacity); sd_config_write_same(sdkp); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 99829985c1a1..95ccedabba4f 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -185,6 +185,16 @@ config SPI_DAVINCI help SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 
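Stepping back to the qeth hunks above: they all add the same guard. qeth_get_ipacmd_buffer() can fail under memory pressure, and each caller now returns -ENOMEM (or simply bails out, in the void helpers) instead of dereferencing a NULL command buffer; the related hunks also widen the SETUP debug format from "%d" to "%04x" so error codes are logged at a fixed width. A minimal sketch of the control flow, with placeholder types standing in for the real qeth structures:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	/* Stand-ins for struct qeth_cmd_buffer and qeth_get_ipacmd_buffer();
	 * only the added NULL check is the point here. */
	struct cmd_buf { u8 data[64]; };

	static struct cmd_buf *alloc_cmd_buf(void)
	{
		return kzalloc(sizeof(struct cmd_buf), GFP_KERNEL); /* may fail */
	}

	static int send_cmd(void)
	{
		struct cmd_buf *iob = alloc_cmd_buf();

		if (!iob)		/* the guard these hunks add */
			return -ENOMEM;

		/* ... fill in the IPA header and hand the buffer to the card ... */
		kfree(iob);
		return 0;
	}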
+config SPI_DLN2 + tristate "Diolan DLN-2 USB SPI adapter" + depends on MFD_DLN2 + help + If you say yes to this option, support will be included for Diolan + DLN2, a USB to SPI interface. + + This driver can also be built as a module. If so, the module + will be called spi-dln2. + config SPI_EFM32 tristate "EFM32 SPI controller" depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) @@ -279,7 +289,7 @@ config SPI_FSL_CPM depends on FSL_SOC config SPI_FSL_SPI - bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" + tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" depends on OF select SPI_FSL_LIB select SPI_FSL_CPM if FSL_SOC @@ -292,7 +302,6 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" - select SPI_BITBANG select REGMAP_MMIO depends on SOC_VF610 || COMPILE_TEST help @@ -300,7 +309,7 @@ config SPI_FSL_DSPI mode. VF610 platform uses the controller. config SPI_FSL_ESPI - bool "Freescale eSPI controller" + tristate "Freescale eSPI controller" depends on FSL_SOC select SPI_FSL_LIB help @@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ config SPI_S3C64XX tristate "Samsung S3C64XX series type SPI" depends on (PLAT_SAMSUNG || ARCH_EXYNOS) - select S3C64XX_PL080 if ARCH_S3C64XX help SPI driver for Samsung S3C64XX and newer SoCs. @@ -503,6 +511,13 @@ config SPI_SIRF help SPI driver for CSR SiRFprimaII SoCs +config SPI_ST_SSC4 + tristate "STMicroelectronics SPI SSC-based driver" + depends on ARCH_STI + help + STMicroelectronics SoCs support for SPI. If you say yes to + this option, support will be included for the SSC driven SPI. + config SPI_SUN4I tristate "Allwinner A10 SoCs SPI controller" depends on ARCH_SUNXI || COMPILE_TEST @@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA 16 bit words in SPI mode 0, automatically asserting CS on transfer start and deasserting on end. 
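Two Kconfig changes above, SPI_FSL_SPI and SPI_FSL_ESPI moving from bool to tristate, are what drive the EXPORT_SYMBOL_GPL() and MODULE_LICENSE() additions to spi-fsl-lib.c and spi-fsl-cpm.c further down: once those drivers can be built as modules, the shared library code they link against must export its symbols. An illustrative fragment (the helper name is made up):

	#include <linux/module.h>

	/* Hypothetical shared helper: without the export, a now-modular
	 * spi-fsl-spi.ko could not resolve the symbol at load time. */
	int example_fsl_lib_helper(int v)
	{
		return v + 1;
	}
	EXPORT_SYMBOL_GPL(example_fsl_lib_helper);

	/* Required once the code can be loaded as a module of its own. */
	MODULE_LICENSE("GPL");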
- config SPI_NUC900 tristate "Nuvoton NUC900 series SPI" depends on ARCH_W90X900 diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6b9d2ac629cc..d8cbf654976b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o +obj-$(CONFIG_SPI_DLN2) += spi-dln2.o obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o @@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o obj-$(CONFIG_SPI_SIRF) += spi-sirf.o +obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 23d8f5f56579..9af7841f2e8c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master, struct atmel_spi_device *asd; int timeout; int ret; + unsigned long dma_timeout; as = spi_master_get_devdata(master); @@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master, /* interrupts are disabled, so free the lock for schedule */ atmel_spi_unlock(as); - ret = wait_for_completion_timeout(&as->xfer_completion, - SPI_DMA_TIMEOUT); + dma_timeout = wait_for_completion_timeout(&as->xfer_completion, + SPI_DMA_TIMEOUT); atmel_spi_lock(as); - if (WARN_ON(ret == 0)) { - dev_err(&spi->dev, - "spi trasfer timeout, err %d\n", ret); + if (WARN_ON(dma_timeout == 0)) { + dev_err(&spi->dev, "spi transfer timeout\n"); as->done_status = -EIO; - } else { - ret = 0; } if (as->done_status) diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c index 326f47973684..f45e085c01a6 100644 --- a/drivers/spi/spi-au1550.c +++ b/drivers/spi/spi-au1550.c @@ -15,10 +15,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 98aab457b24d..419a782ab6d5 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -17,10 +17,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clk.h> diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index c20530982e26..e73e2b052c9c 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -13,10 +13,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, */ #include <linux/kernel.h> diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index dc7d2c2d643e..5ef6638d5e8a 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -10,10 +10,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/spinlock.h> diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c index ee4f91ccd8fd..9a95862986c8 100644 --- a/drivers/spi/spi-butterfly.c +++ b/drivers/spi/spi-butterfly.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index 41b5dc4445f6..688956ff5095 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c @@ -12,11 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA - * */ #include <linux/kernel.h> diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index b3707badb1e5..5e991065f5b0 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c new file mode 100644 index 000000000000..3b7d91d94fea --- /dev/null +++ b/drivers/spi/spi-dln2.c @@ -0,0 +1,881 @@ +/* + * Driver for the Diolan DLN-2 USB-SPI adapter + * + * Copyright (c) 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2. 
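The new spi-dln2.c beginning here drives the adapter through numbered commands; every DLN2_SPI_* ID in the table below is produced by DLN2_SPI_CMD(cmd), which stamps the SPI module ID into the command word via the MFD core's DLN2_CMD() macro from <linux/mfd/dln2.h>. A hedged sketch of the idea, assuming for illustration only that the module ID lands in the high byte:

	#include <linux/types.h>

	/* Assumed encoding, for illustration: module ID in the high byte,
	 * per-module command code in the low byte. */
	#define EX_DLN2_CMD(cmd, id)	((u16)(((id) << 8) | (cmd)))
	#define EX_SPI_MODULE_ID	0x02

	#define EX_SPI_ENABLE		EX_DLN2_CMD(0x11, EX_SPI_MODULE_ID)	/* 0x0211 */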
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/mfd/dln2.h> +#include <linux/spi/spi.h> +#include <linux/pm_runtime.h> +#include <asm/unaligned.h> + +#define DLN2_SPI_MODULE_ID 0x02 +#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID) + +/* SPI commands */ +#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00) +#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11) +#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12) +#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13) +#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14) +#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15) +#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16) +#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17) +#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18) +#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19) +#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A) +#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B) +#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C) +#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20) +#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21) +#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22) +#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23) +#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24) +#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25) +#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26) +#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27) +#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28) +#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B) +#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C) +#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D) +#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E) +#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F) +#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30) +#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31) +#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32) +#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33) +#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34) +#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35) +#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36) +#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37) +#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38) +#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39) +#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A) +#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40) +#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41) +#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42) +#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43) +#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44) +#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45) +#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46) +#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47) +#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48) +#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49) +#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A) +#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B) +#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C) + +#define DLN2_SPI_MAX_XFER_SIZE 256 +#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16) +#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0) +#define DLN2_TRANSFERS_WAIT_COMPLETE 1 +#define DLN2_TRANSFERS_CANCEL 0 +#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000 + +struct dln2_spi { + struct platform_device *pdev; + struct spi_master *master; + u8 port; + + /* + * This buffer will be used mainly for read/write operations. 
Since + * they're quite large, we cannot use the stack. Protection is not + * needed because all SPI communication is serialized by the SPI core. + */ + void *buf; + + u8 bpw; + u32 speed; + u16 mode; + u8 cs; +}; + +/* + * Enable/Disable SPI module. The disable command will wait for transfers to + * complete first. + */ +static int dln2_spi_enable(struct dln2_spi *dln2, bool enable) +{ + u16 cmd; + struct { + u8 port; + u8 wait_for_completion; + } tx; + unsigned len = sizeof(tx); + + tx.port = dln2->port; + + if (enable) { + cmd = DLN2_SPI_ENABLE; + len -= sizeof(tx.wait_for_completion); + } else { + tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE; + cmd = DLN2_SPI_DISABLE; + } + + return dln2_transfer_tx(dln2->pdev, cmd, &tx, len); +} + +/* + * Select/unselect multiple CS lines. The selected lines will be automatically + * toggled LOW/HIGH by the board firmware during transfers, provided they're + * enabled first. + * + * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation + * will toggle the lines LOW/HIGH automatically. + */ +static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask) +{ + struct { + u8 port; + u8 cs; + } tx; + + tx.port = dln2->port; + + /* + * According to Diolan docs, "a slave device can be selected by changing + * the corresponding bit value to 0". The rest must be set to 1. Hence + * the bitwise NOT in front. + */ + tx.cs = ~cs_mask; + + return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx)); +} + +/* + * Select one CS line. The other lines will be un-selected. + */ +static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs) +{ + return dln2_spi_cs_set(dln2, BIT(cs)); +} + +/* + * Enable/disable CS lines for usage. The module has to be disabled first. + */ +static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable) +{ + struct { + u8 port; + u8 cs; + } tx; + u16 cmd; + + tx.port = dln2->port; + tx.cs = cs_mask; + cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE; + + return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx)); +} + +static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable) +{ + u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0); + + return dln2_spi_cs_enable(dln2, cs_mask, enable); +} + +static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num) +{ + int ret; + struct { + u8 port; + } tx; + struct { + __le16 cs_count; + } rx; + unsigned rx_len = sizeof(rx); + + tx.port = dln2->port; + ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx), + &rx, &rx_len); + if (ret < 0) + return ret; + if (rx_len < sizeof(rx)) + return -EPROTO; + + *cs_num = le16_to_cpu(rx.cs_count); + + dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num); + + return 0; +} + +static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq) +{ + int ret; + struct { + u8 port; + } tx; + struct { + __le32 speed; + } rx; + unsigned rx_len = sizeof(rx); + + tx.port = dln2->port; + + ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len); + if (ret < 0) + return ret; + if (rx_len < sizeof(rx)) + return -EPROTO; + + *freq = le32_to_cpu(rx.speed); + + return 0; +} + +/* + * Get bus min/max frequencies. 
+ */ +static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax) +{ + int ret; + + ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin); + if (ret < 0) + return ret; + + ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax); + if (ret < 0) + return ret; + + dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n", + *fmin, *fmax); + + return 0; +} + +/* + * Set the bus speed. The module will automatically round down to the closest + * available frequency and returns it. The module has to be disabled first. + */ +static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed) +{ + int ret; + struct { + u8 port; + __le32 speed; + } __packed tx; + struct { + __le32 speed; + } rx; + int rx_len = sizeof(rx); + + tx.port = dln2->port; + tx.speed = cpu_to_le32(speed); + + ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx), + &rx, &rx_len); + if (ret < 0) + return ret; + if (rx_len < sizeof(rx)) + return -EPROTO; + + return 0; +} + +/* + * Change CPOL & CPHA. The module has to be disabled first. + */ +static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode) +{ + struct { + u8 port; + u8 mode; + } tx; + + tx.port = dln2->port; + tx.mode = mode; + + return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx)); +} + +/* + * Change frame size. The module has to be disabled first. + */ +static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw) +{ + struct { + u8 port; + u8 bpw; + } tx; + + tx.port = dln2->port; + tx.bpw = bpw; + + return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE, + &tx, sizeof(tx)); +} + +static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2, + u32 *bpw_mask) +{ + int ret; + struct { + u8 port; + } tx; + struct { + u8 count; + u8 frame_sizes[36]; + } *rx = dln2->buf; + unsigned rx_len = sizeof(*rx); + int i; + + tx.port = dln2->port; + + ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES, + &tx, sizeof(tx), rx, &rx_len); + if (ret < 0) + return ret; + if (rx_len < sizeof(*rx)) + return -EPROTO; + if (rx->count > ARRAY_SIZE(rx->frame_sizes)) + return -EPROTO; + + *bpw_mask = 0; + for (i = 0; i < rx->count; i++) + *bpw_mask |= BIT(rx->frame_sizes[i] - 1); + + dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask); + + return 0; +} + +/* + * Copy the data to DLN2 buffer and change the byte order to LE, requested by + * DLN2 module. SPI core makes sure that the data length is a multiple of word + * size. + */ +static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw) +{ +#ifdef __LITTLE_ENDIAN + memcpy(dln2_buf, src, len); +#else + if (bpw <= 8) { + memcpy(dln2_buf, src, len); + } else if (bpw <= 16) { + __le16 *d = (__le16 *)dln2_buf; + u16 *s = (u16 *)src; + + len = len / 2; + while (len--) + *d++ = cpu_to_le16p(s++); + } else { + __le32 *d = (__le32 *)dln2_buf; + u32 *s = (u32 *)src; + + len = len / 4; + while (len--) + *d++ = cpu_to_le32p(s++); + } +#endif + + return 0; +} + +/* + * Copy the data from DLN2 buffer and convert to CPU byte order since the DLN2 + * buffer is LE ordered. SPI core makes sure that the data length is a multiple + * of word size. The RX dln2_buf is 2 byte aligned so, for BE, we have to make + * sure we avoid unaligned accesses for 32 bit case. 
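dln2_spi_get_supported_frame_sizes() above folds the device's list of word lengths into the mask format the SPI core expects: bit (n - 1) set means n-bit words are supported, the same convention as SPI_BPW_MASK(). The loop in isolation:

	#include <linux/bitops.h>
	#include <linux/types.h>

	static u32 ex_frame_sizes_to_bpw_mask(const u8 *sizes, int count)
	{
		u32 mask = 0;
		int i;

		for (i = 0; i < count; i++)
			mask |= BIT(sizes[i] - 1);	/* n-bit words -> bit n-1 */

		return mask;
	}

For example, a device reporting frame sizes {8, 16} yields BIT(7) | BIT(15) = 0x8080.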
+ */ +static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw) +{ +#ifdef __LITTLE_ENDIAN + memcpy(dest, dln2_buf, len); +#else + if (bpw <= 8) { + memcpy(dest, dln2_buf, len); + } else if (bpw <= 16) { + u16 *d = (u16 *)dest; + __le16 *s = (__le16 *)dln2_buf; + + len = len / 2; + while (len--) + *d++ = le16_to_cpup(s++); + } else { + u32 *d = (u32 *)dest; + __le32 *s = (__le32 *)dln2_buf; + + len = len / 4; + while (len--) + *d++ = get_unaligned_le32(s++); + } +#endif + + return 0; +} + +/* + * Perform one write operation. + */ +static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data, + u16 data_len, u8 attr) +{ + struct { + u8 port; + __le16 size; + u8 attr; + u8 buf[DLN2_SPI_MAX_XFER_SIZE]; + } __packed *tx = dln2->buf; + unsigned tx_len; + + BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE); + + if (data_len > DLN2_SPI_MAX_XFER_SIZE) + return -EINVAL; + + tx->port = dln2->port; + tx->size = cpu_to_le16(data_len); + tx->attr = attr; + + dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw); + + tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE; + return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len); +} + +/* + * Perform one read operation. + */ +static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data, + u16 data_len, u8 attr) +{ + int ret; + struct { + u8 port; + __le16 size; + u8 attr; + } __packed tx; + struct { + __le16 size; + u8 buf[DLN2_SPI_MAX_XFER_SIZE]; + } __packed *rx = dln2->buf; + unsigned rx_len = sizeof(*rx); + + BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE); + + if (data_len > DLN2_SPI_MAX_XFER_SIZE) + return -EINVAL; + + tx.port = dln2->port; + tx.size = cpu_to_le16(data_len); + tx.attr = attr; + + ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx), + rx, &rx_len); + if (ret < 0) + return ret; + if (rx_len < sizeof(rx->size) + data_len) + return -EPROTO; + if (le16_to_cpu(rx->size) != data_len) + return -EPROTO; + + dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw); + + return 0; +} + +/* + * Perform one write & read operation. + */ +static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data, + u8 *rx_data, u16 data_len, u8 attr) +{ + int ret; + struct { + u8 port; + __le16 size; + u8 attr; + u8 buf[DLN2_SPI_MAX_XFER_SIZE]; + } __packed *tx; + struct { + __le16 size; + u8 buf[DLN2_SPI_MAX_XFER_SIZE]; + } __packed *rx; + unsigned tx_len, rx_len; + + BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE || + sizeof(*rx) > DLN2_SPI_BUF_SIZE); + + if (data_len > DLN2_SPI_MAX_XFER_SIZE) + return -EINVAL; + + /* + * Since this is a pseudo full-duplex communication, we're perfectly + * safe to use the same buffer for both tx and rx. When DLN2 sends the + * response back, with the rx data, we don't need the tx buffer anymore. + */ + tx = dln2->buf; + rx = dln2->buf; + + tx->port = dln2->port; + tx->size = cpu_to_le16(data_len); + tx->attr = attr; + + dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw); + + tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE; + rx_len = sizeof(*rx); + + ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len, + rx, &rx_len); + if (ret < 0) + return ret; + if (rx_len < sizeof(rx->size) + data_len) + return -EPROTO; + if (le16_to_cpu(rx->size) != data_len) + return -EPROTO; + + dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw); + + return 0; +} + +/* + * Read/Write wrapper. It will automatically split an operation into multiple + * single ones due to device buffer constraints. 
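One line in dln2_spi_write_one() above rewards a second look: tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE sends only the used portion of the fixed-size buf[] array. With the __packed header (u8 port + __le16 size + u8 attr = 4 bytes), a 10-byte payload goes out as 14 bytes on the wire. The same arithmetic, spelled out:

	#include <linux/compiler.h>
	#include <linux/types.h>

	#define EX_MAX_XFER	256

	struct ex_tx {
		u8 port;
		__le16 size;
		u8 attr;
		u8 buf[EX_MAX_XFER];
	} __packed;

	/* Bytes actually placed on the wire for a given payload length. */
	static unsigned int ex_wire_len(u16 data_len)
	{
		return sizeof(struct ex_tx) - EX_MAX_XFER + data_len;	/* 4 + data_len */
	}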
+ */ +static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data, + u8 *rx_data, u16 data_len, u8 attr) { + int ret; + u16 len; + u8 temp_attr; + u16 remaining = data_len; + u16 offset; + + do { + if (remaining > DLN2_SPI_MAX_XFER_SIZE) { + len = DLN2_SPI_MAX_XFER_SIZE; + temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW; + } else { + len = remaining; + temp_attr = attr; + } + + offset = data_len - remaining; + + if (tx_data && rx_data) { + ret = dln2_spi_read_write_one(dln2, + tx_data + offset, + rx_data + offset, + len, temp_attr); + } else if (tx_data) { + ret = dln2_spi_write_one(dln2, + tx_data + offset, + len, temp_attr); + } else if (rx_data) { + ret = dln2_spi_read_one(dln2, + rx_data + offset, + len, temp_attr); + } else { + return -EINVAL; + } + + if (ret < 0) + return ret; + + remaining -= len; + } while (remaining); + + return 0; +} + +static int dln2_spi_prepare_message(struct spi_master *master, + struct spi_message *message) +{ + int ret; + struct dln2_spi *dln2 = spi_master_get_devdata(master); + struct spi_device *spi = message->spi; + + if (dln2->cs != spi->chip_select) { + ret = dln2_spi_cs_set_one(dln2, spi->chip_select); + if (ret < 0) + return ret; + + dln2->cs = spi->chip_select; + } + + return 0; +} + +static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed, + u8 bpw, u8 mode) +{ + int ret; + bool bus_setup_change; + + bus_setup_change = dln2->speed != speed || dln2->mode != mode || + dln2->bpw != bpw; + + if (!bus_setup_change) + return 0; + + ret = dln2_spi_enable(dln2, false); + if (ret < 0) + return ret; + + if (dln2->speed != speed) { + ret = dln2_spi_set_speed(dln2, speed); + if (ret < 0) + return ret; + + dln2->speed = speed; + } + + if (dln2->mode != mode) { + ret = dln2_spi_set_mode(dln2, mode & 0x3); + if (ret < 0) + return ret; + + dln2->mode = mode; + } + + if (dln2->bpw != bpw) { + ret = dln2_spi_set_bpw(dln2, bpw); + if (ret < 0) + return ret; + + dln2->bpw = bpw; + } + + return dln2_spi_enable(dln2, true); +} + +static int dln2_spi_transfer_one(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct dln2_spi *dln2 = spi_master_get_devdata(master); + int status; + u8 attr = 0; + + status = dln2_spi_transfer_setup(dln2, xfer->speed_hz, + xfer->bits_per_word, + spi->mode); + if (status < 0) { + dev_err(&dln2->pdev->dev, "Cannot setup transfer\n"); + return status; + } + + if (!xfer->cs_change && !spi_transfer_is_last(master, xfer)) + attr = DLN2_SPI_ATTR_LEAVE_SS_LOW; + + status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf, + xfer->len, attr); + if (status < 0) + dev_err(&dln2->pdev->dev, "write/read failed!\n"); + + return status; +} + +static int dln2_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct dln2_spi *dln2; + struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev); + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(*dln2)); + if (!master) + return -ENOMEM; + + platform_set_drvdata(pdev, master); + + dln2 = spi_master_get_devdata(master); + + dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL); + if (!dln2->buf) { + ret = -ENOMEM; + goto exit_free_master; + } + + dln2->master = master; + dln2->pdev = pdev; + dln2->port = pdata->port; + /* cs/mode can never be 0xff, so the first transfer will set them */ + dln2->cs = 0xff; + dln2->mode = 0xff; + + /* disable SPI module before continuing with the setup */ + ret = dln2_spi_enable(dln2, false); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to disable SPI module\n"); + goto 
exit_free_master; + } + + ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get number of CS pins\n"); + goto exit_free_master; + } + + ret = dln2_spi_get_speed_range(dln2, + &master->min_speed_hz, + &master->max_speed_hz); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to read bus min/max freqs\n"); + goto exit_free_master; + } + + ret = dln2_spi_get_supported_frame_sizes(dln2, + &master->bits_per_word_mask); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to read supported frame sizes\n"); + goto exit_free_master; + } + + ret = dln2_spi_cs_enable_all(dln2, true); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to enable CS pins\n"); + goto exit_free_master; + } + + master->bus_num = -1; + master->mode_bits = SPI_CPOL | SPI_CPHA; + master->prepare_message = dln2_spi_prepare_message; + master->transfer_one = dln2_spi_transfer_one; + master->auto_runtime_pm = true; + + /* enable SPI module, we're good to go */ + ret = dln2_spi_enable(dln2, true); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to enable SPI module\n"); + goto exit_free_master; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, + DLN2_RPM_AUTOSUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register master\n"); + goto exit_register; + } + + return ret; + +exit_register: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + + if (dln2_spi_enable(dln2, false) < 0) + dev_err(&pdev->dev, "Failed to disable SPI module\n"); +exit_free_master: + spi_master_put(master); + + return ret; +} + +static int dln2_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct dln2_spi *dln2 = spi_master_get_devdata(master); + + pm_runtime_disable(&pdev->dev); + + if (dln2_spi_enable(dln2, false) < 0) + dev_err(&pdev->dev, "Failed to disable SPI module\n"); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dln2_spi_suspend(struct device *dev) +{ + int ret; + struct spi_master *master = dev_get_drvdata(dev); + struct dln2_spi *dln2 = spi_master_get_devdata(master); + + ret = spi_master_suspend(master); + if (ret < 0) + return ret; + + if (!pm_runtime_suspended(dev)) { + ret = dln2_spi_enable(dln2, false); + if (ret < 0) + return ret; + } + + /* + * USB power may be cut off during sleep. Resetting the following + * parameters will force the board to be set up before first transfer. 
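The sentinel assignments that follow (cs = 0xff, speed = 0, and so on) feed dln2_spi_transfer_setup() above, which skips the disable/program/enable round-trip whenever the cached speed, mode, and word size already match the transfer. Writing impossible values into the cache guarantees a mismatch, so the first transfer after resume reprograms the hardware from scratch. The idiom in isolation:

	#include <linux/types.h>

	struct ex_cache {
		u32 speed;
		u8 bpw;
		u8 mode;
	};

	/* Returns true when the hardware must be reprogrammed. */
	static bool ex_setup_changed(struct ex_cache *c, u32 speed, u8 bpw, u8 mode)
	{
		if (c->speed == speed && c->bpw == bpw && c->mode == mode)
			return false;

		c->speed = speed;
		c->bpw = bpw;
		c->mode = mode;
		return true;
	}

	/* After a possible power loss: force a full reprogram next time. */
	static void ex_invalidate(struct ex_cache *c)
	{
		c->speed = 0;
		c->bpw = 0;
		c->mode = 0xff;
	}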
+ */ + dln2->cs = 0xff; + dln2->speed = 0; + dln2->bpw = 0; + dln2->mode = 0xff; + + return 0; +} + +static int dln2_spi_resume(struct device *dev) +{ + int ret; + struct spi_master *master = dev_get_drvdata(dev); + struct dln2_spi *dln2 = spi_master_get_devdata(master); + + if (!pm_runtime_suspended(dev)) { + ret = dln2_spi_cs_enable_all(dln2, true); + if (ret < 0) + return ret; + + ret = dln2_spi_enable(dln2, true); + if (ret < 0) + return ret; + } + + return spi_master_resume(master); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int dln2_spi_runtime_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct dln2_spi *dln2 = spi_master_get_devdata(master); + + return dln2_spi_enable(dln2, false); +} + +static int dln2_spi_runtime_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct dln2_spi *dln2 = spi_master_get_devdata(master); + + return dln2_spi_enable(dln2, true); +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops dln2_spi_pm = { + SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume) + SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend, + dln2_spi_runtime_resume, NULL) +}; + +static struct platform_driver spi_dln2_driver = { + .driver = { + .name = "dln2-spi", + .pm = &dln2_spi_pm, + }, + .probe = dln2_spi_probe, + .remove = dln2_spi_remove, +}; +module_platform_driver(spi_dln2_driver); + +MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface"); +MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dln2-spi"); diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a67d37c7e3c0..a0197fd4e95c 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = { /* Some specific info for SPI0 controller on Intel MID */ -/* HW info for MRST CLk Control Unit, one 32b reg */ +/* HW info for MRST Clk Control Unit, 32b reg per controller */ #define MRST_SPI_CLK_BASE 100000000 /* 100m */ -#define MRST_CLK_SPI0_REG 0xff11d86c +#define MRST_CLK_SPI_REG 0xff11d86c #define CLK_SPI_BDIV_OFFSET 0 #define CLK_SPI_BDIV_MASK 0x00000007 #define CLK_SPI_CDIV_OFFSET 9 @@ -261,16 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws) void __iomem *clk_reg; u32 clk_cdiv; - clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); + clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16); if (!clk_reg) return -ENOMEM; - /* get SPI controller operating freq info */ - clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; + /* Get SPI controller operating freq info */ + clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32)); + clk_cdiv &= CLK_SPI_CDIV_MASK; + clk_cdiv >>= CLK_SPI_CDIV_OFFSET; dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); - iounmap(clk_reg); - dws->num_cs = 16; + iounmap(clk_reg); #ifdef CONFIG_SPI_DW_MID_DMA dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index ba68da12cdf0..5ba331047cbe 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c @@ -30,10 +30,20 @@ struct dw_spi_pci { struct spi_pci_desc { int (*setup)(struct dw_spi *); + u16 num_cs; + u16 bus_num; }; -static struct spi_pci_desc spi_pci_mid_desc = { +static struct spi_pci_desc spi_pci_mid_desc_1 = { .setup = dw_spi_mid_init, + .num_cs = 32, + .bus_num = 0, +}; + +static struct spi_pci_desc spi_pci_mid_desc_2 = { + .setup = dw_spi_mid_init, + .num_cs = 4, + .bus_num = 1, }; static int 
spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dws->regs = pcim_iomap_table(pdev)[pci_bar]; - dws->bus_num = 0; - dws->num_cs = 4; dws->irq = pdev->irq; /* * Specific handling for platforms, like dma setup, * clock rate, FIFO depth. */ - if (desc && desc->setup) { - ret = desc->setup(dws); - if (ret) - return ret; + if (desc) { + dws->num_cs = desc->num_cs; + dws->bus_num = desc->bus_num; + + if (desc->setup) { + ret = desc->setup(dws); + if (ret) + return ret; + } + } else { + return -ENODEV; } ret = dw_spi_add_host(&pdev->dev, dws); @@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume); static const struct pci_device_id pci_ids[] = { /* Intel MID platform SPI controller 0 */ - { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc}, + /* + * The access to the device 8086:0801 is disabled by HW, since it's + * exclusively used by SCU to communicate with MSIC. + */ + /* Intel MID platform SPI controller 1 */ + { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1}, + /* Intel MID platform SPI controller 2 */ + { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2}, {}, }; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 8edcd1b84562..5a97a62b298a 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi) } /* Restart the controller, disable all interrupts, clean rx fifo */ -static void spi_hw_init(struct dw_spi *dws) +static void spi_hw_init(struct device *dev, struct dw_spi *dws) { spi_enable_chip(dws, 0); spi_mask_intr(dws, 0xff); @@ -626,9 +626,10 @@ static void spi_hw_init(struct dw_spi *dws) if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) break; } + dw_writew(dws, DW_SPI_TXFLTR, 0); dws->fifo_len = (fifo == 2) ?
0 : fifo - 1; - dw_writew(dws, DW_SPI_TXFLTR, 0); + dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); } } @@ -668,7 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) master->dev.of_node = dev->of_node; /* Basic HW init */ - spi_hw_init(dws); + spi_hw_init(dev, dws); if (dws->dma_ops && dws->dma_ops->dma_init) { ret = dws->dma_ops->dma_init(dws); @@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws) { int ret; - spi_hw_init(dws); + spi_hw_init(&dws->master->dev, dws); ret = spi_master_resume(dws->master); if (ret) dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c index 912b9037e9cf..286b2c81fc6b 100644 --- a/drivers/spi/spi-falcon.c +++ b/drivers/spi/spi-falcon.c @@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi) return 0; } -static int falcon_sflash_prepare_xfer(struct spi_master *master) -{ - return 0; -} - -static int falcon_sflash_unprepare_xfer(struct spi_master *master) -{ - return 0; -} - static int falcon_sflash_xfer_one(struct spi_master *master, struct spi_message *m) { @@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev) master->mode_bits = SPI_MODE_3; master->flags = SPI_MASTER_HALF_DUPLEX; master->setup = falcon_sflash_setup; - master->prepare_transfer_hardware = falcon_sflash_prepare_xfer; master->transfer_one_message = falcon_sflash_xfer_one; - master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer; master->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_master(&pdev->dev, master); diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index e85ab1cb17a2..9c46a3058743 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -20,6 +20,7 @@ #include <linux/dma-mapping.h> #include <linux/fsl_devices.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/spi/spi.h> #include <linux/types.h> @@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) } } } +EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx); static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) { @@ -162,6 +164,7 @@ err_rx_dma: dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); return -ENOMEM; } +EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs); void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { @@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); mspi->xfer_in_progress = NULL; } +EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete); void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { @@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) else complete(&mspi->done); } +EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq); static void *fsl_spi_alloc_dummy_rx(void) { @@ -375,6 +380,7 @@ err_pram: fsl_spi_free_dummy_rx(); return -ENOMEM; } +EXPORT_SYMBOL_GPL(fsl_spi_cpm_init); void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { @@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) cpm_muram_free(cpm_muram_offset(mspi->pram)); fsl_spi_free_dummy_rx(); } +EXPORT_SYMBOL_GPL(fsl_spi_cpm_free); + +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 4cda994d3f40..d1a39249704a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -106,7 +106,7 @@ struct chip_data { }; struct fsl_dspi { - struct spi_bitbang bitbang; + struct spi_master *master; struct platform_device *pdev; 
struct regmap *regmap; @@ -114,6 +114,7 @@ struct fsl_dspi { struct clk *clk; struct spi_transfer *cur_transfer; + struct spi_message *cur_msg; struct chip_data *cur_chip; size_t len; void *tx; @@ -123,6 +124,7 @@ struct fsl_dspi { char dataflags; u8 cs; u16 void_write_data; + u32 cs_change; wait_queue_head_t waitq; u32 waitflags; @@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi) if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { /* last transfer in the transfer */ dspi_pushr |= SPI_PUSHR_EOQ; + if ((dspi->cs_change) && (!dspi->len)) + dspi_pushr &= ~SPI_PUSHR_CONT; } else if (tx_word && (dspi->len == 1)) dspi_pushr |= SPI_PUSHR_EOQ; @@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi) int rx_count = 0; int rx_word = is_double_byte_mode(dspi); u16 d; + while ((dspi->rx < dspi->rx_end) && (rx_count < DSPI_FIFO_SIZE)) { if (rx_word) { @@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi) return rx_count; } -static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t) +static int dspi_transfer_one_message(struct spi_master *master, + struct spi_message *message) { - struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); - dspi->cur_transfer = t; - dspi->cur_chip = spi_get_ctldata(spi); - dspi->cs = spi->chip_select; - dspi->void_write_data = dspi->cur_chip->void_write_data; - - dspi->dataflags = 0; - dspi->tx = (void *)t->tx_buf; - dspi->tx_end = dspi->tx + t->len; - dspi->rx = t->rx_buf; - dspi->rx_end = dspi->rx + t->len; - dspi->len = t->len; - - if (!dspi->rx) - dspi->dataflags |= TRAN_STATE_RX_VOID; - - if (!dspi->tx) - dspi->dataflags |= TRAN_STATE_TX_VOID; - - regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); - regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val); - regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); - - if (t->speed_hz) + struct fsl_dspi *dspi = spi_master_get_devdata(master); + struct spi_device *spi = message->spi; + struct spi_transfer *transfer; + int status = 0; + message->actual_length = 0; + + list_for_each_entry(transfer, &message->transfers, transfer_list) { + dspi->cur_transfer = transfer; + dspi->cur_msg = message; + dspi->cur_chip = spi_get_ctldata(spi); + dspi->cs = spi->chip_select; + if (dspi->cur_transfer->transfer_list.next + == &dspi->cur_msg->transfers) + transfer->cs_change = 1; + dspi->cs_change = transfer->cs_change; + dspi->void_write_data = dspi->cur_chip->void_write_data; + + dspi->dataflags = 0; + dspi->tx = (void *)transfer->tx_buf; + dspi->tx_end = dspi->tx + transfer->len; + dspi->rx = transfer->rx_buf; + dspi->rx_end = dspi->rx + transfer->len; + dspi->len = transfer->len; + + if (!dspi->rx) + dspi->dataflags |= TRAN_STATE_RX_VOID; + + if (!dspi->tx) + dspi->dataflags |= TRAN_STATE_TX_VOID; + + regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, + SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val); + if (transfer->speed_hz) + regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), + dspi->cur_chip->ctar_val); - dspi_transfer_write(dspi); + regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); + message->actual_length += dspi_transfer_write(dspi); - if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) - dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); - dspi->waitflags = 0; - - return t->len - dspi->len; -} + if (wait_event_interruptible(dspi->waitq, 
dspi->waitflags)) + dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); + dspi->waitflags = 0; -static void dspi_chipselect(struct spi_device *spi, int value) -{ - struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); - unsigned int pushr; - - regmap_read(dspi->regmap, SPI_PUSHR, &pushr); - - switch (value) { - case BITBANG_CS_ACTIVE: - pushr |= SPI_PUSHR_CONT; - break; - case BITBANG_CS_INACTIVE: - pushr &= ~SPI_PUSHR_CONT; - break; + if (transfer->delay_usecs) + udelay(transfer->delay_usecs); } - regmap_write(dspi->regmap, SPI_PUSHR, pushr); + message->status = status; + spi_finalize_current_message(master); + + return status; } -static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +static int dspi_setup(struct spi_device *spi) { struct chip_data *chip; struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); unsigned char br = 0, pbr = 0, fmsz = 0; + if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { + fmsz = spi->bits_per_word - 1; + } else { + pr_err("Invalid wordsize\n"); + return -ENODEV; + } + /* Only alloc on first setup */ chip = spi_get_ctldata(spi); if (chip == NULL) { - chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), - GFP_KERNEL); + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; } chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; - if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { - fmsz = spi->bits_per_word - 1; - } else { - pr_err("Invalid wordsize\n"); - return -ENODEV; - } chip->void_write_data = 0; @@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) return 0; } -static int dspi_setup(struct spi_device *spi) +static void dspi_cleanup(struct spi_device *spi) { - if (!spi->max_speed_hz) - return -EINVAL; + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); + + dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n", + spi->master->bus_num, spi->chip_select); - return dspi_setup_transfer(spi, NULL); + kfree(chip); } static irqreturn_t dspi_interrupt(int irq, void *dev_id) { struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; - regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); + struct spi_message *msg = dspi->cur_msg; + regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); dspi_transfer_read(dspi); if (!dspi->len) { if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), - SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); + SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); dspi->waitflags = 1; wake_up_interruptible(&dspi->waitq); - } else { - dspi_transfer_write(dspi); - - return IRQ_HANDLED; - } + } else + msg->actual_length += dspi_transfer_write(dspi); return IRQ_HANDLED; } @@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev) dspi = spi_master_get_devdata(master); dspi->pdev = pdev; - dspi->bitbang.master = master; - dspi->bitbang.chipselect = dspi_chipselect; - dspi->bitbang.setup_transfer = dspi_setup_transfer; - dspi->bitbang.txrx_bufs = dspi_txrx_transfer; - dspi->bitbang.master->setup = dspi_setup; - dspi->bitbang.master->dev.of_node = pdev->dev.of_node; + dspi->master = master; + + master->transfer = NULL; + master->setup = dspi_setup; + master->transfer_one_message = dspi_transfer_one_message; + master->dev.of_node = pdev->dev.of_node; + master->cleanup = dspi_cleanup; master->mode_bits = SPI_CPOL | SPI_CPHA; master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | SPI_BPW_MASK(16); @@ -525,7 +534,7 @@ static int 
dspi_probe(struct platform_device *pdev) init_waitqueue_head(&dspi->waitq); platform_set_drvdata(pdev, master); - ret = spi_bitbang_start(&dspi->bitbang); + ret = spi_register_master(master); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI master\n"); goto out_clk_put; @@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev) struct fsl_dspi *dspi = spi_master_get_devdata(master); /* Disconnect from the SPI framework */ - spi_bitbang_stop(&dspi->bitbang); clk_disable_unprepare(dspi->clk); - spi_master_put(dspi->bitbang.master); + spi_unregister_master(dspi->master); + spi_master_put(dspi->master); return 0; } diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index 446b737e1532..cb35d2f0d0e6 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/of_platform.h> #include <linux/spi/spi.h> #ifdef CONFIG_FSL_SOC @@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ type *rx = mpc8xxx_spi->rx; \ *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ mpc8xxx_spi->rx = rx; \ -} +} \ +EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type); #define MPC8XXX_SPI_TX_BUF(type) \ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ @@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ data = *tx++ << mpc8xxx_spi->tx_shift; \ mpc8xxx_spi->tx = tx; \ return data; \ -} +} \ +EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type); MPC8XXX_SPI_RX_BUF(u8) MPC8XXX_SPI_RX_BUF(u16) @@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) { return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); } +EXPORT_SYMBOL_GPL(to_of_pinfo); const char *mpc8xxx_spi_strmode(unsigned int flags) { @@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags) } return "CPU"; } +EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode); void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) @@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, mpc8xxx_spi->rx_shift = 0; mpc8xxx_spi->tx_shift = 0; - init_completion(&mpc8xxx_spi->done); - master->bus_num = pdata->bus_num; master->num_chipselect = pdata->max_chipselect; init_completion(&mpc8xxx_spi->done); } +EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe); int mpc8xxx_spi_remove(struct device *dev) { @@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove); int of_mpc8xxx_spi_probe(struct platform_device *ofdev) { @@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev) return 0; } +EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe); + +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h index b4ed04e8862f..1326a392adca 100644 --- a/drivers/spi/spi-fsl-lib.h +++ b/drivers/spi/spi-fsl-lib.h @@ -28,7 +28,7 @@ struct mpc8xxx_spi { /* rx & tx bufs from the spi_transfer */ const void *tx; void *rx; -#ifdef CONFIG_SPI_FSL_ESPI +#if IS_ENABLED(CONFIG_SPI_FSL_ESPI) int len; #endif @@ -68,7 +68,7 @@ struct mpc8xxx_spi { unsigned int flags; -#ifdef CONFIG_SPI_FSL_SPI +#if IS_ENABLED(CONFIG_SPI_FSL_SPI) int type; int native_chipselects; u8 max_bits_per_word; diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index aee4e7589568..1c34c9314c8a 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -12,10 +12,6 @@ * but WITHOUT ANY 
WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> @@ -92,7 +88,7 @@ struct spi_gpio { /*----------------------------------------------------------------------*/ -static inline struct spi_gpio * __pure +static inline struct spi_gpio *__pure spi_to_spi_gpio(const struct spi_device *spi) { const struct spi_bitbang *bang; @@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi) return spi_gpio; } -static inline struct spi_gpio_platform_data * __pure +static inline struct spi_gpio_platform_data *__pure spi_to_pdata(const struct spi_device *spi) { return &spi_to_spi_gpio(spi)->pdata; diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index aad6683db81b..c01567d53581 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, unsigned int count = 0; u32 status; - while (count < max) { + while (count < max / 4) { spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); if (status & SPFI_INTERRUPT_SDFUL) break; - spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA); - count += 4; + spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA); + count++; } - return count; + return count * 4; } static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, @@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf, unsigned int count = 0; u32 status; - while (count < max) { + while (count < max / 4) { spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, SPFI_INTERRUPT_CLEAR); status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); if (!(status & SPFI_INTERRUPT_GDEX32BIT)) break; - buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); - count += 4; + buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); + count++; } - return count; + return count * 4; } static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, @@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master, time_before(jiffies, timeout)) { unsigned int tx_count, rx_count; - switch (xfer->bits_per_word) { - case 32: + if (tx_bytes >= 4) tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes); - rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); - break; - case 8: - default: + else tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes); + + if (rx_bytes >= 4) + rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); + else rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes); - break; - } tx_buf += tx_count; rx_buf += rx_count; @@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master, if (xfer->rx_buf) { rxconf.direction = DMA_DEV_TO_MEM; - switch (xfer->bits_per_word) { - case 32: + if (xfer->len % 4 == 0) { rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; rxconf.src_addr_width = 4; rxconf.src_maxburst = 4; - break; - case 8: - default: + } else { rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; rxconf.src_addr_width = 1; rxconf.src_maxburst = 4; @@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master, if (xfer->tx_buf) { txconf.direction = DMA_MEM_TO_DEV; - switch (xfer->bits_per_word) 
{ - case 32: + if (xfer->len % 4 == 0) { txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; txconf.dst_addr_width = 4; txconf.dst_maxburst = 4; - break; - case 8: - default: + } else { txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; txconf.dst_addr_width = 1; txconf.dst_maxburst = 4; - break; } dmaengine_slave_config(spfi->tx_ch, &txconf); @@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable) static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) { - if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE) - return true; - if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE) + if (xfer->len > SPFI_32BIT_FIFO_SIZE) return true; return false; } diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 961b97d43b43..6fea4af51c41 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -89,7 +89,6 @@ struct spi_imx_data { struct completion xfer_done; void __iomem *base; - int irq; struct clk *clk_per; struct clk *clk_ipg; unsigned long spi_clk; @@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, struct dma_slave_config slave_config = {}; int ret; + /* use pio mode for i.mx6dl chip TKT238285 */ + if (of_machine_is_compatible("fsl,imx6dl")) + return 0; + /* Prepare for TX DMA: */ master->dma_tx = dma_request_slave_channel(dev, "tx"); if (!master->dma_tx) { @@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, { struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; int ret; + unsigned long timeout; u32 dma; int left; struct spi_master *master = spi_imx->bitbang.master; @@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, dma_async_issue_pending(master->dma_tx); dma_async_issue_pending(master->dma_rx); /* Wait SDMA to finish the data transfer.*/ - ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion, + timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion, IMX_DMA_TIMEOUT); - if (!ret) { + if (!timeout) { pr_warn("%s %s: I/O Error in DMA TX\n", dev_driver_string(&master->dev), dev_name(&master->dev)); dmaengine_terminate_all(master->dma_tx); } else { - ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion, - IMX_DMA_TIMEOUT); - if (!ret) { + timeout = wait_for_completion_timeout( + &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT); + if (!timeout) { pr_warn("%s %s: I/O Error in DMA RX\n", dev_driver_string(&master->dev), dev_name(&master->dev)); @@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, spi_imx->dma_finished = 1; spi_imx->devtype_data->trigger(spi_imx); - if (!ret) + if (!timeout) ret = -ETIMEDOUT; - else if (ret > 0) + else ret = transfer->len; return ret; @@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev) struct spi_master *master; struct spi_imx_data *spi_imx; struct resource *res; - int i, ret, num_cs; + int i, ret, num_cs, irq; if (!np && !mxc_platform_info) { dev_err(&pdev->dev, "can't get the platform data\n"); @@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev) goto out_master_put; } - spi_imx->irq = platform_get_irq(pdev, 0); - if (spi_imx->irq < 0) { - ret = spi_imx->irq; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; goto out_master_put; } - ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0, + ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0, dev_name(&pdev->dev), spi_imx); if (ret) { 
- dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); + dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); goto out_master_put; } diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c index 41c5765be746..ba72347cb99d 100644 --- a/drivers/spi/spi-lm70llp.c +++ b/drivers/spi/spi-lm70llp.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 1bbac0378bf7..5468fc70dbf8 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c @@ -85,7 +85,7 @@ struct meson_spifc { struct device *dev; }; -static struct regmap_config spifc_regmap_config = { +static const struct regmap_config spifc_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 4045a1e580e1..5b0e9a3e83f6 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, dmaengine_submit(desc); dma_async_issue_pending(ssp->dmach); - ret = wait_for_completion_timeout(&spi->c, - msecs_to_jiffies(SSP_TIMEOUT)); - if (!ret) { + if (!wait_for_completion_timeout(&spi->c, + msecs_to_jiffies(SSP_TIMEOUT))) { dev_err(ssp->dev, "DMA transfer timeout\n"); ret = -ETIMEDOUT; dmaengine_terminate_all(ssp->dmach); diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index 79399ae9c84c..d890d309dff9 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -16,11 +16,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #include <linux/kernel.h> #include <linux/init.h> diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c index daf1ada5cd11..3c0844457c07 100644 --- a/drivers/spi/spi-omap-uwire.c +++ b/drivers/spi/spi-omap-uwire.c @@ -28,10 +28,6 @@ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 3bc3cbabbbc0..4df8942058de 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -14,11 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
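
/*
 * Aside on the spi-meson-spifc change above: a driver-wide
 * configuration table that is never written can be declared const so
 * it lands in read-only data. A minimal standalone model of the idiom;
 * 'fake_regmap_config' is an illustrative stand-in, not the regmap API.
 */
    #include <stdio.h>

    struct fake_regmap_config {
            int reg_bits;
            int val_bits;
            int reg_stride;
    };

    /* consumers only read the table, so a const pointer suffices */
    static void describe(const struct fake_regmap_config *cfg)
    {
            printf("%d-bit regs, %d-bit values, stride %d\n",
                   cfg->reg_bits, cfg->val_bits, cfg->reg_stride);
    }

    static const struct fake_regmap_config spifc_like_config = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
    };

    int main(void)
    {
            describe(&spifc_like_config);
            return 0;
    }
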
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #include <linux/kernel.h> diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 3dec9e0b99b8..861664776672 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -28,7 +28,12 @@ /* Runtime PM autosuspend timeout: PM is fairly light on this driver */ #define SPI_AUTOSUSPEND_TIMEOUT 200 -#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ +/* Some SoCs using this driver support up to 8 chip selects. + * It is up to the implementer to only use the chip selects + * that are available. + */ +#define ORION_NUM_CHIPSELECTS 8 + #define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ #define ORION_SPI_IF_CTRL_REG 0x00 @@ -44,6 +49,10 @@ #define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF #define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ ORION_SPI_MODE_CPHA) +#define ORION_SPI_CS_MASK 0x1C +#define ORION_SPI_CS_SHIFT 2 +#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \ + ORION_SPI_CS_MASK) enum orion_spi_type { ORION_SPI, @@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) return 0; } -static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) +static void orion_spi_set_cs(struct spi_device *spi, bool enable) { - if (enable) + struct orion_spi *orion_spi; + + orion_spi = spi_master_get_devdata(spi->master); + + orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK); + orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, + ORION_SPI_CS(spi->chip_select)); + + /* Chip select logic is inverted from spi_set_cs */ + if (!enable) orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); else orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); @@ -332,64 +350,31 @@ out: return xfer->len - count; } -static int orion_spi_transfer_one_message(struct spi_master *master, - struct spi_message *m) +static int orion_spi_transfer_one(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) { - struct orion_spi *orion_spi = spi_master_get_devdata(master); - struct spi_device *spi = m->spi; - struct spi_transfer *t = NULL; - int par_override = 0; int status = 0; - int cs_active = 0; - - /* Load defaults */ - status = orion_spi_setup_transfer(spi, NULL); + status = orion_spi_setup_transfer(spi, t); if (status < 0) - goto msg_done; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (par_override || t->speed_hz || t->bits_per_word) { - par_override = 1; - status = orion_spi_setup_transfer(spi, t); - if (status < 0) - break; - if (!t->speed_hz && !t->bits_per_word) - par_override = 0; - } - - if (!cs_active) { - orion_spi_set_cs(orion_spi, 1); - cs_active = 1; - } + return status; - if (t->len) - m->actual_length += orion_spi_write_read(spi, t); + if (t->len) + orion_spi_write_read(spi, t); - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (t->cs_change) { - orion_spi_set_cs(orion_spi, 0); - cs_active = 0; - } - } - -msg_done: - if (cs_active) - orion_spi_set_cs(orion_spi, 0); - - m->status = status; - spi_finalize_current_message(master); + return status; +} - return 0; +static int orion_spi_setup(struct spi_device *spi) +{ + return orion_spi_setup_transfer(spi, NULL); } static int orion_spi_reset(struct orion_spi *orion_spi) { /* Verify that the CS is deasserted */ - orion_spi_set_cs(orion_spi, 0); - + orion_spi_clrbits(orion_spi, 
ORION_SPI_IF_CTRL_REG, 0x1); return 0; } @@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev) /* we support only mode 0, and no options */ master->mode_bits = SPI_CPHA | SPI_CPOL; - - master->transfer_one_message = orion_spi_transfer_one_message; + master->set_cs = orion_spi_set_cs; + master->transfer_one = orion_spi_transfer_one; master->num_chipselect = ORION_NUM_CHIPSELECTS; + master->setup = orion_spi_setup; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->auto_runtime_pm = true; diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 62a9297e96ac..66a173939be8 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, * by using ->dma_running. */ if (atomic_dec_and_test(&drv_data->dma_running)) { - void __iomem *reg = drv_data->ioaddr; - /* * If the other CPU is still handling the ROR interrupt we * might not know about the error yet. So we re-check the * ROR bit here before we clear the status register. */ if (!error) { - u32 status = read_SSSR(reg) & drv_data->mask_sr; + u32 status = pxa2xx_spi_read(drv_data, SSSR) + & drv_data->mask_sr; error = status & SSSR_ROR; } /* Clear status & disable interrupts */ - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + pxa2xx_spi_write(drv_data, SSCR1, + pxa2xx_spi_read(drv_data, SSCR1) + & ~drv_data->dma_cr1); write_SSSR_CS(drv_data, drv_data->clear_sr); if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); + pxa2xx_spi_write(drv_data, SSTO, 0); if (!error) { pxa2xx_spi_unmap_dma_buffers(drv_data); @@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, msg->state = pxa2xx_spi_next_transfer(drv_data); } else { /* In case we got an error we disable the SSP now */ - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) + & ~SSCR0_SSE); msg->state = ERROR_STATE; } @@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) { u32 status; - status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr; + status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr; if (status & SSSR_ROR) { dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c index e8a26f25d5c0..2e0796a0003f 100644 --- a/drivers/spi/spi-pxa2xx-pxadma.c +++ b/drivers/spi/spi-pxa2xx-pxadma.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
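
/*
 * Worked example (not from the patch) of the new orion chip-select
 * encoding above: ORION_SPI_CS() places the chip-select number in
 * bits 4:2 of the interface-control register, and bit 0 is set when
 * 'enable' is false, matching the "inverted from spi_set_cs" comment
 * in the driver. if_ctrl_value() is a plain illustrative helper, not
 * the driver's clrbits/setbits primitives.
 */
    #include <stdio.h>
    #include <stdbool.h>

    #define ORION_SPI_CS_MASK   0x1C
    #define ORION_SPI_CS_SHIFT  2
    #define ORION_SPI_CS(cs)    (((cs) << ORION_SPI_CS_SHIFT) & ORION_SPI_CS_MASK)

    static unsigned int if_ctrl_value(unsigned int reg, unsigned int cs,
                                      bool enable)
    {
            reg &= ~ORION_SPI_CS_MASK;      /* clear the old CS field */
            reg |= ORION_SPI_CS(cs);        /* select the new line */
            if (!enable)
                    reg |= 0x1;
            else
                    reg &= ~0x1;
            return reg;
    }

    int main(void)
    {
            for (unsigned int cs = 0; cs < 8; cs++)
                    printf("cs=%u enable=0 -> IF_CTRL=0x%02x\n",
                           cs, if_ctrl_value(0, cs, false));
            return 0;
    }
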
*/ #include <linux/delay.h> @@ -25,6 +21,7 @@ #include <linux/spi/spi.h> #include <linux/spi/pxa2xx_spi.h> +#include <mach/dma.h> #include "spi-pxa2xx.h" #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) @@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data) drv_data->dma_mapped = 0; } -static int wait_ssp_rx_stall(void const __iomem *ioaddr) +static int wait_ssp_rx_stall(struct driver_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; - while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) + while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit) cpu_relax(); return limit; @@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel) static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, const char *msg) { - void __iomem *reg = drv_data->ioaddr; - /* Stop and reset */ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; write_SSSR_CS(drv_data, drv_data->clear_sr); - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + pxa2xx_spi_write(drv_data, SSCR1, + pxa2xx_spi_read(drv_data, SSCR1) + & ~drv_data->dma_cr1); if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); + pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); pxa2xx_spi_unmap_dma_buffers(drv_data); @@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; struct spi_message *msg = drv_data->cur_msg; /* Clear and disable interrupts on SSP and DMA channels*/ - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + pxa2xx_spi_write(drv_data, SSCR1, + pxa2xx_spi_read(drv_data, SSCR1) + & ~drv_data->dma_cr1); write_SSSR_CS(drv_data, drv_data->clear_sr); DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; @@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data) && (drv_data->ssp_type == PXA25x_SSP)) { /* Wait for rx to stall */ - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) + if (wait_ssp_rx_stall(drv_data) == 0) dev_err(&drv_data->pdev->dev, "dma_handler: ssp rx stall failed\n"); @@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data) irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) { u32 irq_status; - void __iomem *reg = drv_data->ioaddr; - irq_status = read_SSSR(reg) & drv_data->mask_sr; + irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr; if (irq_status & SSSR_ROR) { pxa2xx_spi_dma_error_stop(drv_data, "dma_transfer: fifo overrun"); @@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) /* Check for false positive timeout */ if ((irq_status & SSSR_TINT) && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { - write_SSSR(SSSR_TINT, reg); + pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT); return IRQ_HANDLED; } @@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) /* Clear and disable timeout interrupt, do the rest in * dma_transfer_complete */ if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); + pxa2xx_spi_write(drv_data, SSTO, 0); /* finish this transfer, start the next */ pxa2xx_spi_dma_transfer_complete(drv_data); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 23822e7df6c1..6f72ad01e041 100644 --- 
a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> @@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-spi"); -#define MAX_BUSES 3 - #define TIMOUT_DFLT 1000 /* @@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data) static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; u32 mask; switch (drv_data->ssp_type) { @@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) break; } - return (read_SSSR(reg) & mask) == mask; + return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask; } static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data, @@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data) unsigned offset = 0x400; u32 value, orig; - if (!is_lpss_ssp(drv_data)) - return; - /* * Perform auto-detection of the LPSS SSP private registers. They * can be either at 1k or 2k offset from the base address. @@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) { u32 value; - if (!is_lpss_ssp(drv_data)) - return; - value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); if (enable) value &= ~SPI_CS_CONTROL_CS_HIGH; @@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data) struct chip_data *chip = drv_data->cur_chip; if (drv_data->ssp_type == CE4100_SSP) { - write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm); return; } @@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data) return; } - lpss_ssp_cs_control(drv_data, true); + if (is_lpss_ssp(drv_data)) + lpss_ssp_cs_control(drv_data, true); } static void cs_deassert(struct driver_data *drv_data) @@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data) return; } - lpss_ssp_cs_control(drv_data, false); + if (is_lpss_ssp(drv_data)) + lpss_ssp_cs_control(drv_data, false); } int pxa2xx_spi_flush(struct driver_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; - void __iomem *reg = drv_data->ioaddr; - do { - while (read_SSSR(reg) & SSSR_RNE) { - read_SSDR(reg); - } - } while ((read_SSSR(reg) & SSSR_BSY) && --limit); + while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) + pxa2xx_spi_read(drv_data, SSDR); + } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit); write_SSSR_CS(drv_data, SSSR_ROR); return limit; @@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) static int null_writer(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; u8 n_bytes = drv_data->n_bytes; if (pxa2xx_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) return 0; - write_SSDR(0, reg); + pxa2xx_spi_write(drv_data, SSDR, 0); drv_data->tx += n_bytes; return 1; @@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data) static int null_reader(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; u8 n_bytes = drv_data->n_bytes; - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < 
drv_data->rx_end)) { - read_SSDR(reg); + while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + pxa2xx_spi_read(drv_data, SSDR); drv_data->rx += n_bytes; } @@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data) static int u8_writer(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - if (pxa2xx_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) return 0; - write_SSDR(*(u8 *)(drv_data->tx), reg); + pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx)); ++drv_data->tx; return 1; @@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data) static int u8_reader(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - *(u8 *)(drv_data->rx) = read_SSDR(reg); + while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR); ++drv_data->rx; } @@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data) static int u16_writer(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - if (pxa2xx_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) return 0; - write_SSDR(*(u16 *)(drv_data->tx), reg); + pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx)); drv_data->tx += 2; return 1; @@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data) static int u16_reader(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - *(u16 *)(drv_data->rx) = read_SSDR(reg); + while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR); drv_data->rx += 2; } @@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data) static int u32_writer(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - if (pxa2xx_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) return 0; - write_SSDR(*(u32 *)(drv_data->tx), reg); + pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx)); drv_data->tx += 4; return 1; @@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data) static int u32_reader(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - *(u32 *)(drv_data->rx) = read_SSDR(reg); + while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR); drv_data->rx += 4; } @@ -552,27 +524,25 @@ static void giveback(struct driver_data *drv_data) static void reset_sccr1(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; struct chip_data *chip = drv_data->cur_chip; u32 sccr1_reg; - sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; + sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; sccr1_reg &= ~SSCR1_RFT; sccr1_reg |= chip->threshold; - write_SSCR1(sccr1_reg, reg); + pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); } static void int_error_stop(struct driver_data *drv_data, const char* msg) { - void __iomem *reg = drv_data->ioaddr; - /* Stop and reset SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); reset_sccr1(drv_data); if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); + pxa2xx_spi_write(drv_data, SSTO, 0); 
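
/*
 * Model (outside the patch) of the pxa2xx accessor consolidation seen
 * throughout these hunks: one offset-based read helper and one write
 * helper replace the per-register read_SSCR0()/write_SSDR()-style
 * macros. The register offsets match the patch; the uint32_t array
 * below merely simulates the MMIO window.
 */
    #include <stdio.h>
    #include <stdint.h>

    #define SSCR0   0x00
    #define SSCR1   0x04
    #define SSSR    0x08
    #define SSDR    0x10

    struct fake_drv_data {
            uint32_t regs[16];      /* simulated registers, byte offset / 4 */
    };

    static uint32_t spi_read(struct fake_drv_data *d, unsigned int reg)
    {
            return d->regs[reg / 4];
    }

    static void spi_write(struct fake_drv_data *d, unsigned int reg,
                          uint32_t val)
    {
            d->regs[reg / 4] = val;
    }

    int main(void)
    {
            struct fake_drv_data d = { { 0 } };

            /* read-modify-write, as in the converted hunks */
            spi_write(&d, SSCR1, spi_read(&d, SSCR1) | 0x10);
            printf("SSCR1=0x%08x\n", spi_read(&d, SSCR1));
            return 0;
    }
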
pxa2xx_spi_flush(drv_data); - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); dev_err(&drv_data->pdev->dev, "%s\n", msg); @@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) static void int_transfer_complete(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; - /* Stop SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); reset_sccr1(drv_data); if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); + pxa2xx_spi_write(drv_data, SSTO, 0); /* Update total byte transferred return count actual bytes read */ drv_data->cur_msg->actual_length += drv_data->len - @@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data) static irqreturn_t interrupt_transfer(struct driver_data *drv_data) { - void __iomem *reg = drv_data->ioaddr; + u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ? + drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; - u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? - drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; - - u32 irq_status = read_SSSR(reg) & irq_mask; + u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask; if (irq_status & SSSR_ROR) { int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); @@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) } if (irq_status & SSSR_TINT) { - write_SSSR(SSSR_TINT, reg); + pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT); if (drv_data->read(drv_data)) { int_transfer_complete(drv_data); return IRQ_HANDLED; @@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) u32 bytes_left; u32 sccr1_reg; - sccr1_reg = read_SSCR1(reg); + sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1); sccr1_reg &= ~SSCR1_TIE; /* @@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre); } - write_SSCR1(sccr1_reg, reg); + pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); } /* We did something */ @@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) static irqreturn_t ssp_int(int irq, void *dev_id) { struct driver_data *drv_data = dev_id; - void __iomem *reg = drv_data->ioaddr; u32 sccr1_reg; u32 mask = drv_data->mask_sr; u32 status; @@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id) * are all set to one. That means that the device is already * powered off. 
*/ - status = read_SSSR(reg); + status = pxa2xx_spi_read(drv_data, SSSR); if (status == ~0) return IRQ_NONE; - sccr1_reg = read_SSCR1(reg); + sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1); /* Ignore possible writes if we don't need to write */ if (!(sccr1_reg & SSCR1_TIE)) @@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id) if (!drv_data->cur_msg) { - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); - write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) + & ~SSCR0_SSE); + pxa2xx_spi_write(drv_data, SSCR1, + pxa2xx_spi_read(drv_data, SSCR1) + & ~drv_data->int_cr1); if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); + pxa2xx_spi_write(drv_data, SSTO, 0); write_SSSR_CS(drv_data, drv_data->clear_sr); dev_err(&drv_data->pdev->dev, @@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data) struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct chip_data *chip = NULL; - void __iomem *reg = drv_data->ioaddr; u32 clk_div = 0; u8 bits = 0; u32 speed = 0; @@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data) /* Clear status and start DMA engine */ cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; - write_SSSR(drv_data->clear_sr, reg); + pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr); pxa2xx_spi_dma_start(drv_data); } else { @@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data) } if (is_lpss_ssp(drv_data)) { - if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) - write_SSIRF(chip->lpss_rx_threshold, reg); - if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) - write_SSITF(chip->lpss_tx_threshold, reg); + if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff) + != chip->lpss_rx_threshold) + pxa2xx_spi_write(drv_data, SSIRF, + chip->lpss_rx_threshold); + if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff) + != chip->lpss_tx_threshold) + pxa2xx_spi_write(drv_data, SSITF, + chip->lpss_tx_threshold); } if (is_quark_x1000_ssp(drv_data) && - (read_DDS_RATE(reg) != chip->dds_rate)) - write_DDS_RATE(chip->dds_rate, reg); + (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate)) + pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate); /* see if we need to reload the config registers */ - if ((read_SSCR0(reg) != cr0) || - (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) { - + if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0) + || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) + != (cr1 & change_mask)) { /* stop the SSP, and update the other bits */ - write_SSCR0(cr0 & ~SSCR0_SSE, reg); + pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(chip->timeout, reg); + pxa2xx_spi_write(drv_data, SSTO, chip->timeout); /* first set CR1 without interrupt and service enables */ - write_SSCR1(cr1 & change_mask, reg); + pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask); /* restart the SSP */ - write_SSCR0(cr0, reg); + pxa2xx_spi_write(drv_data, SSCR0, cr0); } else { if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(chip->timeout, reg); + pxa2xx_spi_write(drv_data, SSTO, chip->timeout); } cs_assert(drv_data); /* after chip select, release the data by enabling service * requests and interrupts, without changing any mode bits */ - write_SSCR1(cr1, reg); + pxa2xx_spi_write(drv_data, SSCR1, cr1); } static int pxa2xx_spi_transfer_one_message(struct spi_master *master, @@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) struct driver_data *drv_data = spi_master_get_devdata(master); /* 
Disable the SSP now */ - write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, - drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); return 0; } @@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) struct driver_data *drv_data; struct ssp_device *ssp; int status; + u32 tmp; platform_info = dev_get_platdata(dev); if (!platform_info) { @@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->max_clk_rate = clk_get_rate(ssp->clk); /* Load default SSP configuration */ - write_SSCR0(0, drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSCR0, 0); switch (drv_data->ssp_type) { case QUARK_X1000_SSP: - write_SSCR1(QUARK_X1000_SSCR1_RxTresh( - RX_THRESH_QUARK_X1000_DFLT) | - QUARK_X1000_SSCR1_TxTresh( - TX_THRESH_QUARK_X1000_DFLT), - drv_data->ioaddr); + tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) + | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT); + pxa2xx_spi_write(drv_data, SSCR1, tmp); /* using the Motorola SPI protocol and use 8 bit frame */ - write_SSCR0(QUARK_X1000_SSCR0_Motorola - | QUARK_X1000_SSCR0_DataSize(8), - drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSCR0, + QUARK_X1000_SSCR0_Motorola + | QUARK_X1000_SSCR0_DataSize(8)); break; default: - write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | - SSCR1_TxTresh(TX_THRESH_DFLT), - drv_data->ioaddr); - write_SSCR0(SSCR0_SCR(2) - | SSCR0_Motorola - | SSCR0_DataSize(8), - drv_data->ioaddr); + tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | + SSCR1_TxTresh(TX_THRESH_DFLT); + pxa2xx_spi_write(drv_data, SSCR1, tmp); + tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); + pxa2xx_spi_write(drv_data, SSCR0, tmp); break; } if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSTO, 0); if (!is_quark_x1000_ssp(drv_data)) - write_SSPSP(0, drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSPSP, 0); - lpss_ssp_setup(drv_data); + if (is_lpss_ssp(drv_data)) + lpss_ssp_setup(drv_data); tasklet_init(&drv_data->pump_transfers, pump_transfers, (unsigned long)drv_data); @@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); /* Disable the SSP at the peripheral and SOC level */ - write_SSCR0(0, drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSCR0, 0); clk_disable_unprepare(ssp->clk); /* Release DMA */ @@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev) status = spi_master_suspend(drv_data->master); if (status != 0) return status; - write_SSCR0(0, drv_data->ioaddr); + pxa2xx_spi_write(drv_data, SSCR0, 0); if (!pm_runtime_suspended(dev)) clk_disable_unprepare(ssp->clk); @@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev) clk_prepare_enable(ssp->clk); /* Restore LPSS private register bits */ - lpss_ssp_setup(drv_data); + if (is_lpss_ssp(drv_data)) + lpss_ssp_setup(drv_data); /* Start the queue running */ status = spi_master_resume(drv_data->master); diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 6bec59c90cd4..85a58c906869 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -115,23 +115,17 @@ struct chip_data { void (*cs_control)(u32 command); }; -#define DEFINE_SSP_REG(reg, off) \ -static inline u32 read_##reg(void const __iomem *p) \ -{ return __raw_readl(p + (off)); } \ -\ -static inline void write_##reg(u32 v, void __iomem *p) \ -{ __raw_writel(v, p + (off)); } - -DEFINE_SSP_REG(SSCR0, 0x00) -DEFINE_SSP_REG(SSCR1, 0x04) -DEFINE_SSP_REG(SSSR, 0x08) 
-DEFINE_SSP_REG(SSITR, 0x0c) -DEFINE_SSP_REG(SSDR, 0x10) -DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */ -DEFINE_SSP_REG(SSTO, 0x28) -DEFINE_SSP_REG(SSPSP, 0x2c) -DEFINE_SSP_REG(SSITF, SSITF) -DEFINE_SSP_REG(SSIRF, SSIRF) +static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, + unsigned reg) +{ + return __raw_readl(drv_data->ioaddr + reg); +} + +static inline void pxa2xx_spi_write(const struct driver_data *drv_data, + unsigned reg, u32 val) +{ + __raw_writel(val, drv_data->ioaddr + reg); +} #define START_STATE ((void *)0) #define RUNNING_STATE ((void *)1) @@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data) static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) { - void __iomem *reg = drv_data->ioaddr; - if (drv_data->ssp_type == CE4100_SSP || drv_data->ssp_type == QUARK_X1000_SSP) - val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; + val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK; - write_SSSR(val, reg); + pxa2xx_spi_write(drv_data, SSSR, val); } extern int pxa2xx_spi_flush(struct driver_data *drv_data); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index e7fb5a0d2e8d..ff9cdbdb6672 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) { struct spi_qup *controller = spi_master_get_devdata(spi->master); - u32 config, iomode, mode; + u32 config, iomode, mode, control; int ret, n_words, w_size; if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { @@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); + control = readl_relaxed(controller->base + SPI_IO_CONTROL); + + if (spi->mode & SPI_CPOL) + control |= SPI_IO_C_CLK_IDLE_HIGH; + else + control &= ~SPI_IO_C_CLK_IDLE_HIGH; + + writel_relaxed(control, controller->base + SPI_IO_CONTROL); + config = readl_relaxed(controller->base + SPI_CONFIG); if (spi->mode & SPI_LOOP) diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index daabbabd26b0..1a777dc261d6 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) rs->state &= ~TXBUSY; spin_unlock_irqrestore(&rs->lock, flags); + rxdesc = NULL; if (rs->rx) { rxconf.direction = rs->dma_rx.direction; rxconf.src_addr = rs->dma_rx.addr; @@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) rxdesc->callback_param = rs; } + txdesc = NULL; if (rs->tx) { txconf.direction = rs->dma_tx.direction; txconf.dst_addr = rs->dma_tx.addr; @@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) } /* rx must be started before tx due to spi instinct */ - if (rs->rx) { + if (rxdesc) { spin_lock_irqsave(&rs->lock, flags); rs->state |= RXBUSY; spin_unlock_irqrestore(&rs->lock, flags); @@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) dma_async_issue_pending(rs->dma_rx.ch); } - if (rs->tx) { + if (txdesc) { spin_lock_irqsave(&rs->lock, flags); rs->state |= TXBUSY; spin_unlock_irqrestore(&rs->lock, flags); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 2071f788c6fb..46ce47076e63 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -15,11 +15,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or 
FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #include <linux/module.h> diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 37b19836f5cb..9231c34b5a5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index 237f2e7a7179..5a56acf8a43e 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index fc29233d0650..20e800e70442 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c @@ -16,11 +16,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
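
/*
 * Sketch of the control-flow fix in the spi-rockchip hunk above
 * (illustrative stubs, not the dmaengine API): rxdesc/txdesc start as
 * NULL and the issue step keys on the descriptor pointers rather than
 * on the rx/tx buffers, so a requested direction whose descriptor was
 * never successfully prepared is not started.
 */
    #include <stdio.h>
    #include <stddef.h>

    struct desc { int id; };

    /* stand-in for dmaengine_prep_slave_sg(), which may return NULL */
    static struct desc *prep_desc(struct desc *storage, int ok)
    {
            return ok ? storage : NULL;
    }

    int main(void)
    {
            struct desc rx_storage = { 1 }, tx_storage = { 2 };
            struct desc *rxdesc, *txdesc;
            int have_rx = 1, rx_prep_ok = 0;   /* rx requested, prep fails */
            int have_tx = 1, tx_prep_ok = 1;

            rxdesc = NULL;
            if (have_rx)
                    rxdesc = prep_desc(&rx_storage, rx_prep_ok);

            txdesc = NULL;
            if (have_tx)
                    txdesc = prep_desc(&tx_storage, tx_prep_ok);

            /* rx must still be issued before tx when both exist */
            if (rxdesc)
                    printf("issue rx desc %d\n", rxdesc->id);
            if (txdesc)
                    printf("issue tx desc %d\n", txdesc->id);
            return 0;
    }
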
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #include <linux/clk.h> diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 3ab7a21445fc..e57eec0b2f46 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -82,6 +82,8 @@ struct sh_msiof_spi_priv { #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ +#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ +#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ #define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ #define MDR1_FLD_SHIFT 2 #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ @@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) static struct { unsigned short div; - unsigned short scr; -} const sh_msiof_spi_clk_table[] = { - { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 }, - { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 }, - { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 }, - { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 }, - { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 }, - { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 }, - { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 }, - { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 }, - { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 }, - { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 }, - { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 }, + unsigned short brdv; +} const sh_msiof_spi_div_table[] = { + { 1, SCR_BRDV_DIV_1 }, + { 2, SCR_BRDV_DIV_2 }, + { 4, SCR_BRDV_DIV_4 }, + { 8, SCR_BRDV_DIV_8 }, + { 16, SCR_BRDV_DIV_16 }, + { 32, SCR_BRDV_DIV_32 }, }; static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, unsigned long parent_rate, u32 spi_hz) { unsigned long div = 1024; + u32 brps, scr; size_t k; if (!WARN_ON(!spi_hz || !parent_rate)) div = DIV_ROUND_UP(parent_rate, spi_hz); - /* TODO: make more fine grained */ - - for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { - if (sh_msiof_spi_clk_table[k].div >= div) + for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) { + brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div); + if (brps <= 32) /* max of brdv is 32 */ break; } - k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); + k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); - sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); + scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); + sh_msiof_write(p, TSCR, scr); if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX)) - sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); + sh_msiof_write(p, RSCR, scr); +} + +static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl) +{ + /* + * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl + * b'000 : 0 + * b'001 : 100 + * b'010 : 200 + * b'011 (SYNCDL only) : 300 + * b'101 : 50 + * b'110 : 150 + */ + if (dtdl_or_syncdl % 100) + return dtdl_or_syncdl / 100 + 5; + else + return dtdl_or_syncdl / 100; +} + +static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) +{ + u32 val; + + if (!p->info) + return 0; + + /* check if DTDL and SYNCDL is allowed value */ + if (p->info->dtdl > 200 || p->info->syncdl > 300) { + dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n"); + return 0; + } + + /* check if the sum of DTDL and SYNCDL becomes an integer value */ + if ((p->info->dtdl + p->info->syncdl) % 100) { + dev_warn(&p->pdev->dev, 
"the sum of DTDL/SYNCDL is not good\n"); + return 0; + } + + val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT; + val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT; + + return val; } static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, @@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; tmp |= !cs_high << MDR1_SYNCAC_SHIFT; tmp |= lsb_first << MDR1_BITLSB_SHIFT; + tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON); if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) { /* These bits are reserved if RX needs TX */ @@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); - pm_runtime_put_sync(&p->pdev->dev); + pm_runtime_put(&p->pdev->dev); return 0; } @@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, } /* wait for tx fifo to be emptied / rx fifo to be filled */ - ret = wait_for_completion_timeout(&p->done, HZ); - if (!ret) { + if (!wait_for_completion_timeout(&p->done, HZ)) { dev_err(&p->pdev->dev, "PIO timeout\n"); ret = -ETIMEDOUT; goto stop_reset; @@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* wait for tx fifo to be emptied / rx fifo to be filled */ - ret = wait_for_completion_timeout(&p->done, HZ); - if (!ret) { + if (!wait_for_completion_timeout(&p->done, HZ)) { dev_err(&p->pdev->dev, "DMA timeout\n"); ret = -ETIMEDOUT; goto stop_reset; @@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) &info->tx_fifo_override); of_property_read_u32(np, "renesas,rx-fifo-size", &info->rx_fifo_override); + of_property_read_u32(np, "renesas,dtdl", &info->dtdl); + of_property_read_u32(np, "renesas,syncdl", &info->syncdl); info->num_chipselect = num_cs; diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c index 1cfc906dd174..502501187c9e 100644 --- a/drivers/spi/spi-sh.c +++ b/drivers/spi/spi-sh.c @@ -14,11 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #include <linux/module.h> diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index d075191476f0..f5715c9f68b0 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend, static const struct of_device_id spi_sirfsoc_of_match[] = { { .compatible = "sirf,prima2-spi", }, - { .compatible = "sirf,marco-spi", }, {} }; MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c new file mode 100644 index 000000000000..2faeaa7b57a8 --- /dev/null +++ b/drivers/spi/spi-st-ssc4.c @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2008-2014 STMicroelectronics Limited + * + * Author: Angus Clark <Angus.Clark@st.com> + * Patrice Chotard <patrice.chotard@st.com> + * Lee Jones <lee.jones@linaro.org> + * + * SPI master mode controller driver, used in STMicroelectronics devices. + * + * May be copied or modified under the terms of the GNU General Public + * License Version 2.0 only. 
See linux/COPYING for more information. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_irq.h> +#include <linux/pm_runtime.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> + +/* SSC registers */ +#define SSC_BRG 0x000 +#define SSC_TBUF 0x004 +#define SSC_RBUF 0x008 +#define SSC_CTL 0x00C +#define SSC_IEN 0x010 +#define SSC_I2C 0x018 + +/* SSC Control */ +#define SSC_CTL_DATA_WIDTH_9 0x8 +#define SSC_CTL_DATA_WIDTH_MSK 0xf +#define SSC_CTL_BM 0xf +#define SSC_CTL_HB BIT(4) +#define SSC_CTL_PH BIT(5) +#define SSC_CTL_PO BIT(6) +#define SSC_CTL_SR BIT(7) +#define SSC_CTL_MS BIT(8) +#define SSC_CTL_EN BIT(9) +#define SSC_CTL_LPB BIT(10) +#define SSC_CTL_EN_TX_FIFO BIT(11) +#define SSC_CTL_EN_RX_FIFO BIT(12) +#define SSC_CTL_EN_CLST_RX BIT(13) + +/* SSC Interrupt Enable */ +#define SSC_IEN_TEEN BIT(2) + +#define FIFO_SIZE 8 + +struct spi_st { + /* SSC SPI Controller */ + void __iomem *base; + struct clk *clk; + struct device *dev; + + /* SSC SPI current transaction */ + const u8 *tx_ptr; + u8 *rx_ptr; + u16 bytes_per_word; + unsigned int words_remaining; + unsigned int baud; + struct completion done; +}; + +static int spi_st_clk_enable(struct spi_st *spi_st) +{ + /* + * Current platforms use one of the core clocks for SPI and I2C. + * If we attempt to disable the clock, the system will hang. + * + * TODO: Remove this when platform supports power domains. + */ + return 0; + + return clk_prepare_enable(spi_st->clk); +} + +static void spi_st_clk_disable(struct spi_st *spi_st) +{ + /* + * Current platforms use one of the core clocks for SPI and I2C. + * If we attempt to disable the clock, the system will hang. + * + * TODO: Remove this when platform supports power domains. 
+ */ + return; + + clk_disable_unprepare(spi_st->clk); +} + +/* Load the TX FIFO */ +static void ssc_write_tx_fifo(struct spi_st *spi_st) +{ + unsigned int count, i; + uint32_t word = 0; + + if (spi_st->words_remaining > FIFO_SIZE) + count = FIFO_SIZE; + else + count = spi_st->words_remaining; + + for (i = 0; i < count; i++) { + if (spi_st->tx_ptr) { + if (spi_st->bytes_per_word == 1) { + word = *spi_st->tx_ptr++; + } else { + word = *spi_st->tx_ptr++; + word = *spi_st->tx_ptr++ | (word << 8); + } + } + writel_relaxed(word, spi_st->base + SSC_TBUF); + } +} + +/* Read the RX FIFO */ +static void ssc_read_rx_fifo(struct spi_st *spi_st) +{ + unsigned int count, i; + uint32_t word = 0; + + if (spi_st->words_remaining > FIFO_SIZE) + count = FIFO_SIZE; + else + count = spi_st->words_remaining; + + for (i = 0; i < count; i++) { + word = readl_relaxed(spi_st->base + SSC_RBUF); + + if (spi_st->rx_ptr) { + if (spi_st->bytes_per_word == 1) { + *spi_st->rx_ptr++ = (uint8_t)word; + } else { + *spi_st->rx_ptr++ = (word >> 8); + *spi_st->rx_ptr++ = word & 0xff; + } + } + } + spi_st->words_remaining -= count; +} + +static int spi_st_transfer_one(struct spi_master *master, + struct spi_device *spi, struct spi_transfer *t) +{ + struct spi_st *spi_st = spi_master_get_devdata(master); + uint32_t ctl = 0; + + /* Setup transfer */ + spi_st->tx_ptr = t->tx_buf; + spi_st->rx_ptr = t->rx_buf; + + if (spi->bits_per_word > 8) { + /* + * Anything greater than 8 bits-per-word requires 2 + * bytes-per-word in the RX/TX buffers + */ + spi_st->bytes_per_word = 2; + spi_st->words_remaining = t->len / 2; + + } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) { + /* + * If transfer is even-length, and 8 bits-per-word, then + * implement as half-length 16 bits-per-word transfer + */ + spi_st->bytes_per_word = 2; + spi_st->words_remaining = t->len / 2; + + /* Set SSC_CTL to 16 bits-per-word */ + ctl = readl_relaxed(spi_st->base + SSC_CTL); + writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL); + + readl_relaxed(spi_st->base + SSC_RBUF); + + } else { + spi_st->bytes_per_word = 1; + spi_st->words_remaining = t->len; + } + + reinit_completion(&spi_st->done); + + /* Start transfer by writing to the TX FIFO */ + ssc_write_tx_fifo(spi_st); + writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN); + + /* Wait for transfer to complete */ + wait_for_completion(&spi_st->done); + + /* Restore SSC_CTL if necessary */ + if (ctl) + writel_relaxed(ctl, spi_st->base + SSC_CTL); + + spi_finalize_current_transfer(spi->master); + + return t->len; +} + +static void spi_st_cleanup(struct spi_device *spi) +{ + int cs = spi->cs_gpio; + + if (gpio_is_valid(cs)) + devm_gpio_free(&spi->dev, cs); +} + +/* the spi->mode bits understood by this driver: */ +#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH) +static int spi_st_setup(struct spi_device *spi) +{ + struct spi_st *spi_st = spi_master_get_devdata(spi->master); + u32 spi_st_clk, sscbrg, var; + u32 hz = spi->max_speed_hz; + int cs = spi->cs_gpio; + int ret; + + if (!hz) { + dev_err(&spi->dev, "max_speed_hz unspecified\n"); + return -EINVAL; + } + + if (!gpio_is_valid(cs)) { + dev_err(&spi->dev, "%d is not a valid gpio\n", cs); + return -EINVAL; + } + + if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) { + dev_err(&spi->dev, "could not request gpio:%d\n", cs); + return -EINVAL; + } + + ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); + if (ret) + return ret; + + spi_st_clk = clk_get_rate(spi_st->clk); + + /* Set SSC_BRF */ + sscbrg = spi_st_clk / (2 
* hz); + if (sscbrg < 0x07 || sscbrg > BIT(16)) { + dev_err(&spi->dev, + "baudrate %d outside valid range %d\n", sscbrg, hz); + return -EINVAL; + } + + spi_st->baud = spi_st_clk / (2 * sscbrg); + if (sscbrg == BIT(16)) /* 16-bit counter wraps */ + sscbrg = 0x0; + + writel_relaxed(sscbrg, spi_st->base + SSC_BRG); + + dev_dbg(&spi->dev, + "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n", + hz, spi_st->baud, sscbrg); + + /* Set SSC_CTL and enable SSC */ + var = readl_relaxed(spi_st->base + SSC_CTL); + var |= SSC_CTL_MS; + + if (spi->mode & SPI_CPOL) + var |= SSC_CTL_PO; + else + var &= ~SSC_CTL_PO; + + if (spi->mode & SPI_CPHA) + var |= SSC_CTL_PH; + else + var &= ~SSC_CTL_PH; + + if ((spi->mode & SPI_LSB_FIRST) == 0) + var |= SSC_CTL_HB; + else + var &= ~SSC_CTL_HB; + + if (spi->mode & SPI_LOOP) + var |= SSC_CTL_LPB; + else + var &= ~SSC_CTL_LPB; + + var &= ~SSC_CTL_DATA_WIDTH_MSK; + var |= (spi->bits_per_word - 1); + + var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO; + var |= SSC_CTL_EN; + + writel_relaxed(var, spi_st->base + SSC_CTL); + + /* Clear the status register */ + readl_relaxed(spi_st->base + SSC_RBUF); + + return 0; +} + +/* Interrupt fired when TX shift register becomes empty */ +static irqreturn_t spi_st_irq(int irq, void *dev_id) +{ + struct spi_st *spi_st = (struct spi_st *)dev_id; + + /* Read RX FIFO */ + ssc_read_rx_fifo(spi_st); + + /* Fill TX FIFO */ + if (spi_st->words_remaining) { + ssc_write_tx_fifo(spi_st); + } else { + /* TX/RX complete */ + writel_relaxed(0x0, spi_st->base + SSC_IEN); + /* + * read SSC_IEN to ensure that this bit is set + * before re-enabling interrupt + */ + readl(spi_st->base + SSC_IEN); + complete(&spi_st->done); + } + + return IRQ_HANDLED; +} + +static int spi_st_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct spi_master *master; + struct resource *res; + struct spi_st *spi_st; + int irq, ret = 0; + u32 var; + + master = spi_alloc_master(&pdev->dev, sizeof(*spi_st)); + if (!master) + return -ENOMEM; + + master->dev.of_node = np; + master->mode_bits = MODEBITS; + master->setup = spi_st_setup; + master->cleanup = spi_st_cleanup; + master->transfer_one = spi_st_transfer_one; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->auto_runtime_pm = true; + master->bus_num = pdev->id; + spi_st = spi_master_get_devdata(master); + + spi_st->clk = devm_clk_get(&pdev->dev, "ssc"); + if (IS_ERR(spi_st->clk)) { + dev_err(&pdev->dev, "Unable to request clock\n"); + return PTR_ERR(spi_st->clk); + } + + ret = spi_st_clk_enable(spi_st); + if (ret) + return ret; + + init_completion(&spi_st->done); + + /* Get resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spi_st->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spi_st->base)) { + ret = PTR_ERR(spi_st->base); + goto clk_disable; + } + + /* Disable I2C and Reset SSC */ + writel_relaxed(0x0, spi_st->base + SSC_I2C); + var = readw_relaxed(spi_st->base + SSC_CTL); + var |= SSC_CTL_SR; + writel_relaxed(var, spi_st->base + SSC_CTL); + + udelay(1); + var = readl_relaxed(spi_st->base + SSC_CTL); + var &= ~SSC_CTL_SR; + writel_relaxed(var, spi_st->base + SSC_CTL); + + /* Set SSC into slave mode before reconfiguring PIO pins */ + var = readl_relaxed(spi_st->base + SSC_CTL); + var &= ~SSC_CTL_MS; + writel_relaxed(var, spi_st->base + SSC_CTL); + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + dev_err(&pdev->dev, "IRQ missing or invalid\n"); + ret = -EINVAL; + goto clk_disable; + } + + ret = 
devm_request_irq(&pdev->dev, irq, spi_st_irq, 0, + pdev->name, spi_st); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq %d\n", irq); + goto clk_disable; + } + + /* by default the device is on */ + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + platform_set_drvdata(pdev, master); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + dev_err(&pdev->dev, "Failed to register master\n"); + goto clk_disable; + } + + return 0; + +clk_disable: + spi_st_clk_disable(spi_st); + + return ret; +} + +static int spi_st_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct spi_st *spi_st = spi_master_get_devdata(master); + + spi_st_clk_disable(spi_st); + + pinctrl_pm_select_sleep_state(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM +static int spi_st_runtime_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct spi_st *spi_st = spi_master_get_devdata(master); + + writel_relaxed(0, spi_st->base + SSC_IEN); + pinctrl_pm_select_sleep_state(dev); + + spi_st_clk_disable(spi_st); + + return 0; +} + +static int spi_st_runtime_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct spi_st *spi_st = spi_master_get_devdata(master); + int ret; + + ret = spi_st_clk_enable(spi_st); + pinctrl_pm_select_default_state(dev); + + return ret; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int spi_st_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + int ret; + + ret = spi_master_suspend(master); + if (ret) + return ret; + + return pm_runtime_force_suspend(dev); +} + +static int spi_st_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + int ret; + + ret = spi_master_resume(master); + if (ret) + return ret; + + return pm_runtime_force_resume(dev); +} +#endif + +static const struct dev_pm_ops spi_st_pm = { + SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) + SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) +}; + +static struct of_device_id stm_spi_match[] = { + { .compatible = "st,comms-ssc4-spi", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm_spi_match); + +static struct platform_driver spi_st_driver = { + .driver = { + .name = "spi-st", + .pm = &spi_st_pm, + .of_match_table = of_match_ptr(stm_spi_match), + }, + .probe = spi_st_probe, + .remove = spi_st_remove, +}; +module_platform_driver(spi_st_driver); + +MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>"); +MODULE_DESCRIPTION("STM SSC SPI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 6146c4cd6583..884a716e50cb 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) { - int wlen, count, ret; + int wlen, count; unsigned int cmd; const u8 *txbuf; @@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) } ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); - ret = wait_for_completion_timeout(&qspi->transfer_complete, - QSPI_COMPLETION_TIMEOUT); - if (ret == 0) { + if (!wait_for_completion_timeout(&qspi->transfer_complete, + QSPI_COMPLETION_TIMEOUT)) { dev_err(qspi->dev, "write timed out\n"); return -ETIMEDOUT; } @@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) static int qspi_read_msg(struct ti_qspi *qspi, struct 
spi_transfer *t) { - int wlen, count, ret; + int wlen, count; unsigned int cmd; u8 *rxbuf; @@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); - ret = wait_for_completion_timeout(&qspi->transfer_complete, - QSPI_COMPLETION_TIMEOUT); - if (ret == 0) { + if (!wait_for_completion_timeout(&qspi->transfer_complete, + QSPI_COMPLETION_TIMEOUT)) { dev_err(qspi->dev, "read timed out\n"); return -ETIMEDOUT; } diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index be692ad50442..93dfcee0f987 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/delay.h> diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index 79bd84f43430..133f53a9c1d4 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -22,6 +22,8 @@ #include <linux/spi/xilinx_spi.h> #include <linux/io.h> +#define XILINX_SPI_MAX_CS 32 + #define XILINX_SPI_NAME "xilinx_spi" /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) @@ -34,7 +36,8 @@ #define XSPI_CR_MASTER_MODE 0x04 #define XSPI_CR_CPOL 0x08 #define XSPI_CR_CPHA 0x10 -#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) +#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \ + XSPI_CR_LSB_FIRST | XSPI_CR_LOOP) #define XSPI_CR_TXFIFO_RESET 0x20 #define XSPI_CR_RXFIFO_RESET 0x40 #define XSPI_CR_MANUAL_SSELECT 0x80 @@ -85,12 +88,11 @@ struct xilinx_spi { u8 *rx_ptr; /* pointer in the Tx buffer */ const u8 *tx_ptr; /* pointer in the Rx buffer */ - int remaining_bytes; /* the number of bytes left to transfer */ - u8 bits_per_word; + u8 bytes_per_word; + int buffer_size; /* buffer size in words */ + u32 cs_inactive; /* Level of the CS pins when inactive*/ unsigned int (*read_fn)(void __iomem *); void (*write_fn)(u32, void __iomem *); - void (*tx_fn)(struct xilinx_spi *); - void (*rx_fn)(struct xilinx_spi *); }; static void xspi_write32(u32 val, void __iomem *addr) @@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr) return ioread32be(addr); } -static void xspi_tx8(struct xilinx_spi *xspi) +static void xilinx_spi_tx(struct xilinx_spi *xspi) { - xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); - xspi->tx_ptr++; -} - -static void xspi_tx16(struct xilinx_spi *xspi) -{ - xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); - xspi->tx_ptr += 2; -} + u32 data = 0; -static void xspi_tx32(struct xilinx_spi *xspi) -{ - xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); - xspi->tx_ptr += 4; -} - -static void xspi_rx8(struct xilinx_spi *xspi) -{ - u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *xspi->rx_ptr = data & 0xff; - xspi->rx_ptr++; + if (!xspi->tx_ptr) { + xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); + return; } -} -static void xspi_rx16(struct xilinx_spi *xspi) -{ - u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *(u16 *)(xspi->rx_ptr) = data & 0xffff; - 
xspi->rx_ptr += 2; + switch (xspi->bytes_per_word) { + case 1: + data = *(u8 *)(xspi->tx_ptr); + break; + case 2: + data = *(u16 *)(xspi->tx_ptr); + break; + case 4: + data = *(u32 *)(xspi->tx_ptr); + break; } + + xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET); + xspi->tx_ptr += xspi->bytes_per_word; } -static void xspi_rx32(struct xilinx_spi *xspi) +static void xilinx_spi_rx(struct xilinx_spi *xspi) { u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { + + if (!xspi->rx_ptr) + return; + + switch (xspi->bytes_per_word) { + case 1: + *(u8 *)(xspi->rx_ptr) = data; + break; + case 2: + *(u16 *)(xspi->rx_ptr) = data; + break; + case 4: *(u32 *)(xspi->rx_ptr) = data; - xspi->rx_ptr += 4; + break; } + + xspi->rx_ptr += xspi->bytes_per_word; } static void xspi_init_hw(struct xilinx_spi *xspi) @@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi) /* Reset the SPI device */ xspi->write_fn(XIPIF_V123B_RESET_MASK, regs_base + XIPIF_V123B_RESETR_OFFSET); - /* Disable all the interrupts just in case */ - xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); - /* Enable the global IPIF interrupt */ - xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, - regs_base + XIPIF_V123B_DGIER_OFFSET); + /* Enable the transmit empty interrupt, which we use to determine + * progress on the transmission. + */ + xspi->write_fn(XSPI_INTR_TX_EMPTY, + regs_base + XIPIF_V123B_IIER_OFFSET); + /* Disable the global IPIF interrupt */ + xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET); /* Deselect the slave on the SPI bus */ xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); /* Disable the transmitter, enable Manual Slave Select Assertion, * put SPI controller into master mode, and enable it */ - xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | - XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | - XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); + xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE | + XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET, + regs_base + XSPI_CR_OFFSET); } static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) { struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); + u16 cr; + u32 cs; if (is_on == BITBANG_CS_INACTIVE) { /* Deselect the slave on the SPI bus */ - xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); - } else if (is_on == BITBANG_CS_ACTIVE) { - /* Set the SPI clock phase and polarity */ - u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) - & ~XSPI_CR_MODE_MASK; - if (spi->mode & SPI_CPHA) - cr |= XSPI_CR_CPHA; - if (spi->mode & SPI_CPOL) - cr |= XSPI_CR_CPOL; - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - - /* We do not check spi->max_speed_hz here as the SPI clock - * frequency is not software programmable (the IP block design - * parameter) - */ - - /* Activate the chip select */ - xspi->write_fn(~(0x0001 << spi->chip_select), - xspi->regs + XSPI_SSR_OFFSET); + xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET); + return; } + + /* Set the SPI clock phase and polarity */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK; + if (spi->mode & SPI_CPHA) + cr |= XSPI_CR_CPHA; + if (spi->mode & SPI_CPOL) + cr |= XSPI_CR_CPOL; + if (spi->mode & SPI_LSB_FIRST) + cr |= XSPI_CR_LSB_FIRST; + if (spi->mode & SPI_LOOP) + cr |= XSPI_CR_LOOP; + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + + /* We do not check spi->max_speed_hz here as the SPI clock + * frequency is not software programmable (the IP block design + * parameter) + */ + 
+ cs = xspi->cs_inactive; + cs ^= BIT(spi->chip_select); + + /* Activate the chip select */ + xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET); } /* spi_bitbang requires custom setup_transfer() to be defined if there is a @@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) static int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { - return 0; -} + struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); -static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) -{ - u8 sr; + if (spi->mode & SPI_CS_HIGH) + xspi->cs_inactive &= ~BIT(spi->chip_select); + else + xspi->cs_inactive |= BIT(spi->chip_select); - /* Fill the Tx FIFO with as many bytes as possible */ - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { - if (xspi->tx_ptr) - xspi->tx_fn(xspi); - else - xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); - xspi->remaining_bytes -= xspi->bits_per_word / 8; - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - } + return 0; } static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) { struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); - u32 ipif_ier; + int remaining_words; /* the number of words left to transfer */ + bool use_irq = false; + u16 cr = 0; /* We get here with transmitter inhibited */ xspi->tx_ptr = t->tx_buf; xspi->rx_ptr = t->rx_buf; - xspi->remaining_bytes = t->len; + remaining_words = t->len / xspi->bytes_per_word; reinit_completion(&xspi->done); + if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) { + use_irq = true; + xspi->write_fn(XSPI_INTR_TX_EMPTY, + xspi->regs + XIPIF_V123B_IISR_OFFSET); + /* Enable the global IPIF interrupt */ + xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, + xspi->regs + XIPIF_V123B_DGIER_OFFSET); + /* Inhibit irq to avoid spurious irqs on tx_empty*/ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + xspi->regs + XSPI_CR_OFFSET); + } - /* Enable the transmit empty interrupt, which we use to determine - * progress on the transmission. - */ - ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET); - xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, - xspi->regs + XIPIF_V123B_IIER_OFFSET); + while (remaining_words) { + int n_words, tx_words, rx_words; - for (;;) { - u16 cr; - u8 sr; + n_words = min(remaining_words, xspi->buffer_size); - xilinx_spi_fill_tx_fifo(xspi); + tx_words = n_words; + while (tx_words--) + xilinx_spi_tx(xspi); /* Start the transfer by not inhibiting the transmitter any * longer */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & - ~XSPI_CR_TRANS_INHIBIT; - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - wait_for_completion(&xspi->done); + if (use_irq) { + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + wait_for_completion(&xspi->done); + } else + while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) & + XSPI_SR_TX_EMPTY_MASK)) + ; /* A transmit has just completed. Process received data and * check for more data to transmit. Always inhibit the * transmitter while the Isr refills the transmit register/FIFO, * or make sure it is stopped if we're done. 
*/ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); - xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + if (use_irq) + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, xspi->regs + XSPI_CR_OFFSET); /* Read out all the data from the Rx FIFO */ - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { - xspi->rx_fn(xspi); - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - } - - /* See if there is more data to send */ - if (xspi->remaining_bytes <= 0) - break; + rx_words = n_words; + while (rx_words--) + xilinx_spi_rx(xspi); + + remaining_words -= n_words; } - /* Disable the transmit empty interrupt */ - xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); + if (use_irq) + xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET); - return t->len - xspi->remaining_bytes; + return t->len; } @@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi) +{ + u8 sr; + int n_words = 0; + + /* + * Before the buffer_size detection we reset the core + * to make sure we start with a clean state. + */ + xspi->write_fn(XIPIF_V123B_RESET_MASK, + xspi->regs + XIPIF_V123B_RESETR_OFFSET); + + /* Fill the Tx FIFO with as many words as possible */ + do { + xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + n_words++; + } while (!(sr & XSPI_SR_TX_FULL_MASK)); + + return n_words; +} + static const struct of_device_id xilinx_spi_of_match[] = { { .compatible = "xlnx,xps-spi-2.00.a", }, { .compatible = "xlnx,xps-spi-2.00.b", }, @@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev) return -EINVAL; } + if (num_cs > XILINX_SPI_MAX_CS) { + dev_err(&pdev->dev, "Invalid number of spi slaves\n"); + return -EINVAL; + } + master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); if (!master) return -ENODEV; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | + SPI_CS_HIGH; xspi = spi_master_get_devdata(master); + xspi->cs_inactive = 0xffffffff; xspi->bitbang.master = master; xspi->bitbang.chipselect = xilinx_spi_chipselect; xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; @@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev) } master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); - xspi->bits_per_word = bits_per_word; - if (xspi->bits_per_word == 8) { - xspi->tx_fn = xspi_tx8; - xspi->rx_fn = xspi_rx8; - } else if (xspi->bits_per_word == 16) { - xspi->tx_fn = xspi_tx16; - xspi->rx_fn = xspi_rx16; - } else if (xspi->bits_per_word == 32) { - xspi->tx_fn = xspi_tx32; - xspi->rx_fn = xspi_rx32; - } else { - ret = -EINVAL; - goto put_master; - } - - /* SPI controller initializations */ - xspi_init_hw(xspi); + xspi->bytes_per_word = bits_per_word / 8; + xspi->buffer_size = xilinx_spi_find_buffer_size(xspi); xspi->irq = platform_get_irq(pdev, 0); - if (xspi->irq < 0) { - ret = xspi->irq; - goto put_master; + if (xspi->irq >= 0) { + /* Register for SPI Interrupt */ + ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, + dev_name(&pdev->dev), xspi); + if (ret) + goto put_master; } - /* Register for SPI Interrupt */ - ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, - dev_name(&pdev->dev), xspi); - if (ret) - goto put_master; + /* SPI controller initializations */ + xspi_init_hw(xspi); ret = spi_bitbang_start(&xspi->bitbang); if 
(ret) { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 66a70e9bc743..c64a3e59fce3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -13,10 +13,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> @@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master, struct spi_transfer *xfer; bool keep_cs = false; int ret = 0; - int ms = 1; + unsigned long ms = 1; spi_set_cs(msg->spi, true); @@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master) EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); /** - * spi_pump_messages - kthread work function which processes spi message queue - * @work: pointer to kthread work struct contained in the master struct + * __spi_pump_messages - function which processes spi message queue + * @master: master to process queue for + * @in_kthread: true if we are in the context of the message pump thread * * This function checks if there is any spi message in the queue that * needs processing and if so call out to the driver to initialize hardware * and transfer each message. * + * Note that it is called both from the kthread itself and also from + * inside spi_sync(); the queue extraction handling at the top of the + * function should deal with this safely. */ -static void spi_pump_messages(struct kthread_work *work) +static void __spi_pump_messages(struct spi_master *master, bool in_kthread) { - struct spi_master *master = - container_of(work, struct spi_master, pump_messages); unsigned long flags; bool was_busy = false; int ret; - /* Lock queue and check for queue work */ + /* Lock queue */ spin_lock_irqsave(&master->queue_lock, flags); + + /* Make sure we are not already running a message */ + if (master->cur_msg) { + spin_unlock_irqrestore(&master->queue_lock, flags); + return; + } + + /* If another context is idling the device then defer */ + if (master->idling) { + queue_kthread_work(&master->kworker, &master->pump_messages); + spin_unlock_irqrestore(&master->queue_lock, flags); + return; + } + + /* Check if the queue is idle */ if (list_empty(&master->queue) || !master->running) { if (!master->busy) { spin_unlock_irqrestore(&master->queue_lock, flags); return; } + + /* Only do teardown in the thread */ + if (!in_kthread) { + queue_kthread_work(&master->kworker, + &master->pump_messages); + spin_unlock_irqrestore(&master->queue_lock, flags); + return; + } + master->busy = false; + master->idling = true; spin_unlock_irqrestore(&master->queue_lock, flags); + kfree(master->dummy_rx); master->dummy_rx = NULL; kfree(master->dummy_tx); @@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work) pm_runtime_put_autosuspend(master->dev.parent); } trace_spi_master_idle(master); - return; - } - /* Make sure we are not already running a message */ - if (master->cur_msg) { + spin_lock_irqsave(&master->queue_lock, flags); + master->idling = false; spin_unlock_irqrestore(&master->queue_lock, flags); return; } + /* Extract head of queue */ master->cur_msg = list_first_entry(&master->queue, struct spi_message, queue); @@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work) } } +/** + * spi_pump_messages - kthread work 
function which processes spi message queue + * @work: pointer to kthread work struct contained in the master struct + */ +static void spi_pump_messages(struct kthread_work *work) +{ + struct spi_master *master = + container_of(work, struct spi_master, pump_messages); + + __spi_pump_messages(master, true); +} + static int spi_init_queue(struct spi_master *master) { struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; - INIT_LIST_HEAD(&master->queue); - spin_lock_init(&master->queue_lock); - master->running = false; master->busy = false; @@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master) return 0; } -/** - * spi_queued_transfer - transfer function for queued transfers - * @spi: spi device which is requesting transfer - * @msg: spi message which is to be queued to the driver queue - */ -static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) +static int __spi_queued_transfer(struct spi_device *spi, + struct spi_message *msg, + bool need_pump) { struct spi_master *master = spi->master; unsigned long flags; @@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) msg->status = -EINPROGRESS; list_add_tail(&msg->queue, &master->queue); - if (!master->busy) + if (!master->busy && need_pump) queue_kthread_work(&master->kworker, &master->pump_messages); spin_unlock_irqrestore(&master->queue_lock, flags); return 0; } +/** + * spi_queued_transfer - transfer function for queued transfers + * @spi: spi device which is requesting transfer + * @msg: spi message which is to be queued to the driver queue + */ +static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) +{ + return __spi_queued_transfer(spi, msg, true); +} + static int spi_master_initialize_queue(struct spi_master *master) { int ret; @@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master) dynamic = 1; } + INIT_LIST_HEAD(&master->queue); + spin_lock_init(&master->queue_lock); spin_lock_init(&master->bus_lock_spinlock); mutex_init(&master->bus_lock_mutex); master->bus_lock_flag = 0; @@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message, DECLARE_COMPLETION_ONSTACK(done); int status; struct spi_master *master = spi->master; + unsigned long flags; + + status = __spi_validate(spi, message); + if (status != 0) + return status; message->complete = spi_complete; message->context = &done; + message->spi = spi; if (!bus_locked) mutex_lock(&master->bus_lock_mutex); - status = spi_async_locked(spi, message); + /* If we're not using the legacy transfer method then we will + * try to transfer in the calling context so special case. + * This code would be less tricky if we could remove the + * support for driver implemented message queues. + */ + if (master->transfer == spi_queued_transfer) { + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + + trace_spi_message_submit(message); + + status = __spi_queued_transfer(spi, message, false); + + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + } else { + status = spi_async_locked(spi, message); + } if (!bus_locked) mutex_unlock(&master->bus_lock_mutex); if (status == 0) { + /* Push out the messages in the calling context if we + * can.
+ */ + if (master->transfer == spi_queued_transfer) + __spi_pump_messages(master, false); + wait_for_completion(&done); status = message->status; } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 6941e04afb8c..4eb7a980e670 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -14,10 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> @@ -317,6 +313,37 @@ done: return status; } +static struct spi_ioc_transfer * +spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc, + unsigned *n_ioc) +{ + struct spi_ioc_transfer *ioc; + u32 tmp; + + /* Check type, command number and direction */ + if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC + || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) + || _IOC_DIR(cmd) != _IOC_WRITE) + return ERR_PTR(-ENOTTY); + + tmp = _IOC_SIZE(cmd); + if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) + return ERR_PTR(-EINVAL); + *n_ioc = tmp / sizeof(struct spi_ioc_transfer); + if (*n_ioc == 0) + return NULL; + + /* copy into scratch area */ + ioc = kmalloc(tmp, GFP_KERNEL); + if (!ioc) + return ERR_PTR(-ENOMEM); + if (__copy_from_user(ioc, u_ioc, tmp)) { + kfree(ioc); + return ERR_PTR(-EFAULT); + } + return ioc; +} + static long spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) default: /* segmented and/or full-duplex I/O request */ - if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) - || _IOC_DIR(cmd) != _IOC_WRITE) { - retval = -ENOTTY; - break; - } - - tmp = _IOC_SIZE(cmd); - if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { - retval = -EINVAL; - break; - } - n_ioc = tmp / sizeof(struct spi_ioc_transfer); - if (n_ioc == 0) - break; - - /* copy into scratch area */ - ioc = kmalloc(tmp, GFP_KERNEL); - if (!ioc) { - retval = -ENOMEM; - break; - } - if (__copy_from_user(ioc, (void __user *)arg, tmp)) { - kfree(ioc); - retval = -EFAULT; + /* Check message and copy into scratch area */ + ioc = spidev_get_ioc_message(cmd, + (struct spi_ioc_transfer __user *)arg, &n_ioc); + if (IS_ERR(ioc)) { + retval = PTR_ERR(ioc); break; } + if (!ioc) + break; /* n_ioc is also 0 */ /* translate to spi_message, execute */ retval = spidev_message(spidev, ioc, n_ioc); @@ -496,8 +506,67 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT static long +spidev_compat_ioc_message(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct spi_ioc_transfer __user *u_ioc; + int retval = 0; + struct spidev_data *spidev; + struct spi_device *spi; + unsigned n_ioc, n; + struct spi_ioc_transfer *ioc; + + u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg); + if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd))) + return -EFAULT; + + /* guard against device removal before, or while, + * we issue this ioctl. 
+ */ + spidev = filp->private_data; + spin_lock_irq(&spidev->spi_lock); + spi = spi_dev_get(spidev->spi); + spin_unlock_irq(&spidev->spi_lock); + + if (spi == NULL) + return -ESHUTDOWN; + + /* SPI_IOC_MESSAGE needs the buffer locked "normally" */ + mutex_lock(&spidev->buf_lock); + + /* Check message and copy into scratch area */ + ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc); + if (IS_ERR(ioc)) { + retval = PTR_ERR(ioc); + goto done; + } + if (!ioc) + goto done; /* n_ioc is also 0 */ + + /* Convert buffer pointers */ + for (n = 0; n < n_ioc; n++) { + ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf); + ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf); + } + + /* translate to spi_message, execute */ + retval = spidev_message(spidev, ioc, n_ioc); + kfree(ioc); + +done: + mutex_unlock(&spidev->buf_lock); + spi_dev_put(spi); + return retval; +} + +static long spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { + if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC + && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0)) + && _IOC_DIR(cmd) == _IOC_WRITE) + return spidev_compat_ioc_message(filp, cmd, arg); + return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #else diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 930f6010203e..65d610abe06e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) return 0; } - if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { + if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); return -EFAULT; } diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index 093535c6217b..120b70d72d79 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c @@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle; static const struct mfd_cell nvec_devices[] = { { .name = "nvec-kbd", - .id = 1, }, { .name = "nvec-mouse", - .id = 1, }, { .name = "nvec-power", - .id = 1, + .id = 0, }, { .name = "nvec-power", - .id = 2, + .id = 1, }, { .name = "nvec-paz00", - .id = 1, }, }; @@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) nvec_msg_free(nvec, msg); } - ret = mfd_add_devices(nvec->dev, -1, nvec_devices, + ret = mfd_add_devices(nvec->dev, 0, nvec_devices, ARRAY_SIZE(nvec_devices), NULL, 0, NULL); if (ret) dev_err(nvec->dev, "error adding subdevices\n"); diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h index de0c9c9d7091..a6315abe7b7c 100644 --- a/drivers/usb/core/otg_whitelist.h +++ b/drivers/usb/core/otg_whitelist.h @@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev) le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) return 0; + /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */ + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && + le16_to_cpu(dev->descriptor.idProduct) == 0x0200)) + return 1; + /* NOTE: can't use usb_match_id() since interface caches * aren't set up yet. this is cut/paste from that code. 
*/ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0ffb4ed0a945..41e510ae8c83 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0b05, 0x17e0), .driver_info = USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Protocol and OTG Electrical Test Device */ + { USB_DEVICE(0x1a0a, 0x0200), .driver_info = + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + { } /* terminating entry must be last */ }; diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index ad43c5bc1ef1..02e3e2d4ea56 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) u32 gintsts; irqreturn_t retval = IRQ_NONE; + spin_lock(&hsotg->lock); + if (!dwc2_is_controller_alive(hsotg)) { dev_warn(hsotg->dev, "Controller is dead\n"); goto out; } - spin_lock(&hsotg->lock); - gintsts = dwc2_read_common_intr(hsotg); if (gintsts & ~GINTSTS_PRTINT) retval = IRQ_HANDLED; @@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) } } - spin_unlock(&hsotg->lock); out: + spin_unlock(&hsotg->lock); return retval; } EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index ccfdfb24b240..2f9735b35338 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c @@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list, return phy; } - return ERR_PTR(-EPROBE_DEFER); + return ERR_PTR(-ENODEV); } static struct usb_phy *__usb_find_phy_dev(struct device *dev, diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 11c7a9676441..d684b4b8108f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100, UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, "SCM Microsystems", "eUSB SCSI Adapter (Bus Powered)", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init, US_FL_SCM_MULT_TARG ), UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, @@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), +/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */ +UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) * and Mac USB Dock USB-SCSI */ UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 6df4357d9ee3..dbc00e56c7f5 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, "External HDD", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_UAS), + +/* Reported-by: Richard Henderson <rth@redhat.com> */ +UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, + "SimpleTech", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index d415d69dc237..9484d5652ca5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net) break; } /* TODO: Should check and handle checksum. 
*/ + + hdr.num_buffers = cpu_to_vhost16(vq, headcount); if (likely(mergeable) && - memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, + memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers, offsetof(typeof(hdr), num_buffers), sizeof hdr.num_buffers)) { vq_err(vq, "Failed num_buffers write"); @@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx, long ret = 0; int copy_ret; + /* + * The mutex can block and wake us up and that will cause + * wait_event_interruptible_hrtimeout() to schedule without sleeping + * and repeat. This should be rare enough that it doesn't cause + * performance issues. See the comment in read_events() for more detail. + */ + sched_annotate_sleep(); mutex_lock(&ctx->ring_lock); /* Access to ->ring_pages here is protected by ctx->ring_lock. */ diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index a66768ebc8d1..80e9c18ea64f 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -8,6 +8,7 @@ config BTRFS_FS select LZO_DECOMPRESS select RAID6_PQ select XOR_BLOCKS + select SRCU help Btrfs is a general purpose copy-on-write filesystem with extents, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2f0fbc374e87..e427cb7ee12c 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, path->search_commit_root = 1; path->skip_locking = 1; + ppath->search_commit_root = 1; + ppath->skip_locking = 1; /* * trigger the readahead for extent tree csum tree and wait for * completion. During readahead, the scrub is officially paused diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a02da16f2be..1a9585d4380a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, } if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { + blk_finish_plug(&plug); mutex_unlock(&log_root_tree->log_mutex); ret = root_log_ctx.log_ret; goto out; diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 9c56ef776407..7febcf2475c5 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags) *flags = CIFSSEC_MUST_NTLMV2; else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM) *flags = CIFSSEC_MUST_NTLM; - else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) + else if (CIFSSEC_MUST_LANMAN && + (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN) *flags = CIFSSEC_MUST_LANMAN; - else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) + else if (CIFSSEC_MUST_PLNTXT && + (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT) *flags = CIFSSEC_MUST_PLNTXT; *flags |= signflags; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 96b7e9b7706d..74f12877493a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; + bool oplock_break_cancelled; spin_lock(&cifs_file_list_lock); if (--cifs_file->count > 0) { @@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) } spin_unlock(&cifs_file_list_lock); - cancel_work_sync(&cifs_file->oplock_break); + oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); if (!tcon->need_reconnect && !cifs_file->invalidHandle) { struct TCP_Server_Info *server = tcon->ses->server; @@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) _free_xid(xid); } + if
(oplock_break_cancelled) + cifs_done_oplock_break(cifsi); + cifs_del_pending_open(&open); /* diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 6c1566366a66..a4232ec4f2ba 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, } rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); - memset(wpwd, 0, 129 * sizeof(__le16)); + memzero_explicit(wpwd, sizeof(wpwd)); return rc; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index c8b148bbdc8b..3e193cb36996 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, s64 change, struct gfs2_quota_data *qd, - struct fs_disk_quota *fdq) + struct qc_dqblk *fdq) { struct inode *inode = &ip->i_inode; struct gfs2_sbd *sdp = GFS2_SB(inode); @@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, be64_add_cpu(&q.qu_value, change); qd->qd_qb.qb_value = q.qu_value; if (fdq) { - if (fdq->d_fieldmask & FS_DQ_BSOFT) { - q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); + if (fdq->d_fieldmask & QC_SPC_SOFT) { + q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); qd->qd_qb.qb_warn = q.qu_warn; } - if (fdq->d_fieldmask & FS_DQ_BHARD) { - q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); + if (fdq->d_fieldmask & QC_SPC_HARD) { + q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); qd->qd_qb.qb_limit = q.qu_limit; } - if (fdq->d_fieldmask & FS_DQ_BCOUNT) { - q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); + if (fdq->d_fieldmask & QC_SPACE) { + q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); qd->qd_qb.qb_value = q.qu_value; } } @@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb, } static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, - struct fs_disk_quota *fdq) + struct qc_dqblk *fdq) { struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_quota_lvb *qlvb; @@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, struct gfs2_holder q_gh; int error; - memset(fdq, 0, sizeof(struct fs_disk_quota)); + memset(fdq, 0, sizeof(*fdq)); if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return -ESRCH; /* Crazy XFS error code */ @@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, goto out; qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; - fdq->d_version = FS_DQUOT_VERSION; - fdq->d_flags = (qid.type == USRQUOTA) ? 
FS_USER_QUOTA : FS_GROUP_QUOTA; - fdq->d_id = from_kqid_munged(current_user_ns(), qid); - fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift; - fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift; - fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift; + fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; + fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; + fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; gfs2_glock_dq_uninit(&q_gh); out: @@ -1536,10 +1533,10 @@ out: } /* GFS2 only supports a subset of the XFS fields */ -#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT) +#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE) static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, - struct fs_disk_quota *fdq) + struct qc_dqblk *fdq) { struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); @@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, goto out_i; /* If nothing has changed, this is a no-op */ - if ((fdq->d_fieldmask & FS_DQ_BSOFT) && - ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) - fdq->d_fieldmask ^= FS_DQ_BSOFT; + if ((fdq->d_fieldmask & QC_SPC_SOFT) && + ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) + fdq->d_fieldmask ^= QC_SPC_SOFT; - if ((fdq->d_fieldmask & FS_DQ_BHARD) && - ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) - fdq->d_fieldmask ^= FS_DQ_BHARD; + if ((fdq->d_fieldmask & QC_SPC_HARD) && + ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) + fdq->d_fieldmask ^= QC_SPC_HARD; - if ((fdq->d_fieldmask & FS_DQ_BCOUNT) && - ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value))) - fdq->d_fieldmask ^= FS_DQ_BCOUNT; + if ((fdq->d_fieldmask & QC_SPACE) && + ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) + fdq->d_fieldmask ^= QC_SPACE; if (fdq->d_fieldmask == 0) goto out_i; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 10bf07280f4a..294692ff83b1 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, */ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) { + struct inode *inode = iocb->ki_filp->f_mapping->host; + + /* we only support swap file calling nfs_direct_IO */ + if (!IS_SWAPFILE(inode)) + return 0; + #ifndef CONFIG_NFS_SWAP dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", iocb->ki_filp, (long long) pos, iter->nr_segs); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 4bffe637ea32..2211f6ba8736 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st nfs_attr_check_mountpoint(sb, fattr); - if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && - !nfs_attr_use_mounted_on_fileid(fattr)) + if (nfs_attr_use_mounted_on_fileid(fattr)) + fattr->fileid = fattr->mounted_on_fileid; + else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) goto out_no_inode; if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) goto out_no_inode; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index efaa31c70fbe..b6f34bfa6fe8 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -31,8 +31,6 @@ static inline int 
nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr) (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) return 0; - - fattr->fileid = fattr->mounted_on_fileid; return 1; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 953daa44a282..706ad10b8186 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new, prev = pos; status = nfs_wait_client_init_complete(pos); - if (status == 0) { + if (pos->cl_cons_state == NFS_CS_SESSION_INITING) { nfs4_schedule_lease_recovery(pos); status = nfs4_wait_clnt_recover(pos); } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 91093cd74f0d..385704027575 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -141,7 +141,6 @@ enum { * @ti_save: Backup of journal_info field of task_struct * @ti_flags: Flags * @ti_count: Nest level - * @ti_garbage: List of inode to be put when releasing semaphore */ struct nilfs_transaction_info { u32 ti_magic; @@ -150,7 +149,6 @@ struct nilfs_transaction_info { one of other filesystems has a bug. */ unsigned short ti_flags; unsigned short ti_count; - struct list_head ti_garbage; }; /* ti_magic */ diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 7ef18fc656c2..469086b9f99b 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb, ti->ti_count = 0; ti->ti_save = cur_ti; ti->ti_magic = NILFS_TI_MAGIC; - INIT_LIST_HEAD(&ti->ti_garbage); current->journal_info = ti; for (;;) { @@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb) up_write(&nilfs->ns_segctor_sem); current->journal_info = ti->ti_save; - if (!list_empty(&ti->ti_garbage)) - nilfs_dispose_list(nilfs, &ti->ti_garbage, 0); } static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, @@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs, } } +static void nilfs_iput_work_func(struct work_struct *work) +{ + struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, + sc_iput_work); + struct the_nilfs *nilfs = sci->sc_super->s_fs_info; + + nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0); +} + static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, struct nilfs_root *root) { @@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { - struct nilfs_transaction_info *ti = current->journal_info; struct nilfs_inode_info *ii, *n; + int defer_iput = false; spin_lock(&nilfs->ns_inode_lock); list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { @@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, clear_bit(NILFS_I_BUSY, &ii->i_state); brelse(ii->i_bh); ii->i_bh = NULL; - list_move_tail(&ii->i_dirty, &ti->ti_garbage); + list_del_init(&ii->i_dirty); + if (!ii->vfs_inode.i_nlink) { + /* + * Defer calling iput() to avoid a deadlock + * over I_SYNC flag for inodes with i_nlink == 0 + */ + list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); + defer_iput = true; + } else { + spin_unlock(&nilfs->ns_inode_lock); + iput(&ii->vfs_inode); + spin_lock(&nilfs->ns_inode_lock); + } } spin_unlock(&nilfs->ns_inode_lock); + + if (defer_iput) + schedule_work(&sci->sc_iput_work); } /* @@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, INIT_LIST_HEAD(&sci->sc_segbufs); 
INIT_LIST_HEAD(&sci->sc_write_logs); INIT_LIST_HEAD(&sci->sc_gc_inodes); + INIT_LIST_HEAD(&sci->sc_iput_queue); + INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func); init_timer(&sci->sc_timer); sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; @@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) ret = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_transaction_unlock(sci->sc_super); + flush_work(&sci->sc_iput_work); + } while (ret && retrycount-- > 0); } @@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) || sci->sc_seq_request != sci->sc_seq_done); spin_unlock(&sci->sc_state_lock); + if (flush_work(&sci->sc_iput_work)) + flag = true; + if (flag || !nilfs_segctor_confirm(sci)) nilfs_segctor_write_out(sci); @@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); } + if (!list_empty(&sci->sc_iput_queue)) { + nilfs_warning(sci->sc_super, __func__, + "iput queue is not empty\n"); + nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); + } + WARN_ON(!list_empty(&sci->sc_segbufs)); WARN_ON(!list_empty(&sci->sc_write_logs)); diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 38a1d0013314..a48d6de1e02c 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -26,6 +26,7 @@ #include <linux/types.h> #include <linux/fs.h> #include <linux/buffer_head.h> +#include <linux/workqueue.h> #include <linux/nilfs2_fs.h> #include "nilfs.h" @@ -92,6 +93,8 @@ struct nilfs_segsum_pointer { * @sc_nblk_inc: Block count of current generation * @sc_dirty_files: List of files to be written * @sc_gc_inodes: List of GC inodes having blocks to be written + * @sc_iput_queue: list of inodes for which iput should be done + * @sc_iput_work: work struct to defer iput call * @sc_freesegs: array of segment numbers to be freed * @sc_nfreesegs: number of segments on @sc_freesegs * @sc_dsync_inode: inode whose data pages are written for a sync operation @@ -135,6 +138,8 @@ struct nilfs_sc_info { struct list_head sc_dirty_files; struct list_head sc_gc_inodes; + struct list_head sc_iput_queue; + struct work_struct sc_iput_work; __u64 *sc_freesegs; size_t sc_nfreesegs; diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig index 22c629eedd82..2a24249b30af 100644 --- a/fs/notify/Kconfig +++ b/fs/notify/Kconfig @@ -1,5 +1,6 @@ config FSNOTIFY def_bool n + select SRCU source "fs/notify/dnotify/Kconfig" source "fs/notify/inotify/Kconfig" diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index c51df1dd237e..4a09975aac90 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig @@ -5,6 +5,7 @@ config QUOTA bool "Quota support" select QUOTACTL + select SRCU help If you say Y here, you will be able to set per user limits for disk usage (also called disk quotas). Currently, it works for the diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 8f0acef3d184..69df5b239844 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space) } /* Generic routine for getting common part of quota structure */ -static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di) +static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) { struct mem_dqblk *dm = &dquot->dq_dqb; memset(di, 0, sizeof(*di)); - di->d_version = FS_DQUOT_VERSION; - di->d_flags = dquot->dq_id.type == USRQUOTA ? 
- FS_USER_QUOTA : FS_GROUP_QUOTA; - di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); - spin_lock(&dq_data_lock); - di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); - di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit); + di->d_spc_hardlimit = dm->dqb_bhardlimit; + di->d_spc_softlimit = dm->dqb_bsoftlimit; di->d_ino_hardlimit = dm->dqb_ihardlimit; di->d_ino_softlimit = dm->dqb_isoftlimit; - di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace; - di->d_icount = dm->dqb_curinodes; - di->d_btimer = dm->dqb_btime; - di->d_itimer = dm->dqb_itime; + di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; + di->d_ino_count = dm->dqb_curinodes; + di->d_spc_timer = dm->dqb_btime; + di->d_ino_timer = dm->dqb_itime; spin_unlock(&dq_data_lock); } int dquot_get_dqblk(struct super_block *sb, struct kqid qid, - struct fs_disk_quota *di) + struct qc_dqblk *di) { struct dquot *dquot; @@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid, } EXPORT_SYMBOL(dquot_get_dqblk); -#define VFS_FS_DQ_MASK \ - (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ - FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \ - FS_DQ_BTIMER | FS_DQ_ITIMER) +#define VFS_QC_MASK \ + (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ + QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ + QC_SPC_TIMER | QC_INO_TIMER) /* Generic routine for setting common part of quota structure */ -static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) +static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) { struct mem_dqblk *dm = &dquot->dq_dqb; int check_blim = 0, check_ilim = 0; struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; - if (di->d_fieldmask & ~VFS_FS_DQ_MASK) + if (di->d_fieldmask & ~VFS_QC_MASK) return -EINVAL; - if (((di->d_fieldmask & FS_DQ_BSOFT) && - (di->d_blk_softlimit > dqi->dqi_maxblimit)) || - ((di->d_fieldmask & FS_DQ_BHARD) && - (di->d_blk_hardlimit > dqi->dqi_maxblimit)) || - ((di->d_fieldmask & FS_DQ_ISOFT) && + if (((di->d_fieldmask & QC_SPC_SOFT) && + stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) || + ((di->d_fieldmask & QC_SPC_HARD) && + stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) || + ((di->d_fieldmask & QC_INO_SOFT) && (di->d_ino_softlimit > dqi->dqi_maxilimit)) || - ((di->d_fieldmask & FS_DQ_IHARD) && + ((di->d_fieldmask & QC_INO_HARD) && (di->d_ino_hardlimit > dqi->dqi_maxilimit))) return -ERANGE; spin_lock(&dq_data_lock); - if (di->d_fieldmask & FS_DQ_BCOUNT) { - dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace; + if (di->d_fieldmask & QC_SPACE) { + dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; check_blim = 1; set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); } - if (di->d_fieldmask & FS_DQ_BSOFT) - dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit); - if (di->d_fieldmask & FS_DQ_BHARD) - dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit); - if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) { + if (di->d_fieldmask & QC_SPC_SOFT) + dm->dqb_bsoftlimit = di->d_spc_softlimit; + if (di->d_fieldmask & QC_SPC_HARD) + dm->dqb_bhardlimit = di->d_spc_hardlimit; + if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { check_blim = 1; set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); } - if (di->d_fieldmask & FS_DQ_ICOUNT) { - dm->dqb_curinodes = di->d_icount; + if (di->d_fieldmask & QC_INO_COUNT) { + dm->dqb_curinodes = di->d_ino_count; check_ilim = 1; set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); } - if (di->d_fieldmask & FS_DQ_ISOFT) + if (di->d_fieldmask & QC_INO_SOFT) dm->dqb_isoftlimit = di->d_ino_softlimit; - if (di->d_fieldmask & 
FS_DQ_IHARD) + if (di->d_fieldmask & QC_INO_HARD) dm->dqb_ihardlimit = di->d_ino_hardlimit; - if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) { + if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { check_ilim = 1; set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); } - if (di->d_fieldmask & FS_DQ_BTIMER) { - dm->dqb_btime = di->d_btimer; + if (di->d_fieldmask & QC_SPC_TIMER) { + dm->dqb_btime = di->d_spc_timer; check_blim = 1; set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); } - if (di->d_fieldmask & FS_DQ_ITIMER) { - dm->dqb_itime = di->d_itimer; + if (di->d_fieldmask & QC_INO_TIMER) { + dm->dqb_itime = di->d_ino_timer; check_ilim = 1; set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); } @@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) dm->dqb_curspace < dm->dqb_bsoftlimit) { dm->dqb_btime = 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); - } else if (!(di->d_fieldmask & FS_DQ_BTIMER)) + } else if (!(di->d_fieldmask & QC_SPC_TIMER)) /* Set grace only if user hasn't provided his own... */ dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; } @@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) dm->dqb_curinodes < dm->dqb_isoftlimit) { dm->dqb_itime = 0; clear_bit(DQ_INODES_B, &dquot->dq_flags); - } else if (!(di->d_fieldmask & FS_DQ_ITIMER)) + } else if (!(di->d_fieldmask & QC_INO_TIMER)) /* Set grace only if user hasn't provided his own... */ dm->dqb_itime = get_seconds() + dqi->dqi_igrace; } @@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) } int dquot_set_dqblk(struct super_block *sb, struct kqid qid, - struct fs_disk_quota *di) + struct qc_dqblk *di) { struct dquot *dquot; int rc; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 2aa4151f99d2..6f3856328eea 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr) return sb->s_qcop->set_info(sb, type, &info); } -static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src) +static inline qsize_t qbtos(qsize_t blocks) +{ + return blocks << QIF_DQBLKSIZE_BITS; +} + +static inline qsize_t stoqb(qsize_t space) +{ + return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; +} + +static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) { memset(dst, 0, sizeof(*dst)); - dst->dqb_bhardlimit = src->d_blk_hardlimit; - dst->dqb_bsoftlimit = src->d_blk_softlimit; - dst->dqb_curspace = src->d_bcount; + dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); + dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); + dst->dqb_curspace = src->d_space; dst->dqb_ihardlimit = src->d_ino_hardlimit; dst->dqb_isoftlimit = src->d_ino_softlimit; - dst->dqb_curinodes = src->d_icount; - dst->dqb_btime = src->d_btimer; - dst->dqb_itime = src->d_itimer; + dst->dqb_curinodes = src->d_ino_count; + dst->dqb_btime = src->d_spc_timer; + dst->dqb_itime = src->d_ino_timer; dst->dqb_valid = QIF_ALL; } @@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct kqid qid; - struct fs_disk_quota fdq; + struct qc_dqblk fdq; struct if_dqblk idq; int ret; @@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id, return 0; } -static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src) +static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) { - dst->d_blk_hardlimit = src->dqb_bhardlimit; - 
dst->d_blk_softlimit = src->dqb_bsoftlimit; - dst->d_bcount = src->dqb_curspace; + dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); + dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); + dst->d_space = src->dqb_curspace; dst->d_ino_hardlimit = src->dqb_ihardlimit; dst->d_ino_softlimit = src->dqb_isoftlimit; - dst->d_icount = src->dqb_curinodes; - dst->d_btimer = src->dqb_btime; - dst->d_itimer = src->dqb_itime; + dst->d_ino_count = src->dqb_curinodes; + dst->d_spc_timer = src->dqb_btime; + dst->d_ino_timer = src->dqb_itime; dst->d_fieldmask = 0; if (src->dqb_valid & QIF_BLIMITS) - dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD; + dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; if (src->dqb_valid & QIF_SPACE) - dst->d_fieldmask |= FS_DQ_BCOUNT; + dst->d_fieldmask |= QC_SPACE; if (src->dqb_valid & QIF_ILIMITS) - dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD; + dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; if (src->dqb_valid & QIF_INODES) - dst->d_fieldmask |= FS_DQ_ICOUNT; + dst->d_fieldmask |= QC_INO_COUNT; if (src->dqb_valid & QIF_BTIME) - dst->d_fieldmask |= FS_DQ_BTIMER; + dst->d_fieldmask |= QC_SPC_TIMER; if (src->dqb_valid & QIF_ITIME) - dst->d_fieldmask |= FS_DQ_ITIMER; + dst->d_fieldmask |= QC_INO_TIMER; } static int quota_setquota(struct super_block *sb, int type, qid_t id, void __user *addr) { - struct fs_disk_quota fdq; + struct qc_dqblk fdq; struct if_dqblk idq; struct kqid qid; @@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr) return ret; } +/* + * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them + * out of there as xfsprogs rely on definitions being in that header file. So + * just define same functions here for quota purposes. + */ +#define XFS_BB_SHIFT 9 + +static inline u64 quota_bbtob(u64 blocks) +{ + return blocks << XFS_BB_SHIFT; +} + +static inline u64 quota_btobb(u64 bytes) +{ + return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; +} + +static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) +{ + dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit); + dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit); + dst->d_ino_hardlimit = src->d_ino_hardlimit; + dst->d_ino_softlimit = src->d_ino_softlimit; + dst->d_space = quota_bbtob(src->d_bcount); + dst->d_ino_count = src->d_icount; + dst->d_ino_timer = src->d_itimer; + dst->d_spc_timer = src->d_btimer; + dst->d_ino_warns = src->d_iwarns; + dst->d_spc_warns = src->d_bwarns; + dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit); + dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit); + dst->d_rt_space = quota_bbtob(src->d_rtbcount); + dst->d_rt_spc_timer = src->d_rtbtimer; + dst->d_rt_spc_warns = src->d_rtbwarns; + dst->d_fieldmask = 0; + if (src->d_fieldmask & FS_DQ_ISOFT) + dst->d_fieldmask |= QC_INO_SOFT; + if (src->d_fieldmask & FS_DQ_IHARD) + dst->d_fieldmask |= QC_INO_HARD; + if (src->d_fieldmask & FS_DQ_BSOFT) + dst->d_fieldmask |= QC_SPC_SOFT; + if (src->d_fieldmask & FS_DQ_BHARD) + dst->d_fieldmask |= QC_SPC_HARD; + if (src->d_fieldmask & FS_DQ_RTBSOFT) + dst->d_fieldmask |= QC_RT_SPC_SOFT; + if (src->d_fieldmask & FS_DQ_RTBHARD) + dst->d_fieldmask |= QC_RT_SPC_HARD; + if (src->d_fieldmask & FS_DQ_BTIMER) + dst->d_fieldmask |= QC_SPC_TIMER; + if (src->d_fieldmask & FS_DQ_ITIMER) + dst->d_fieldmask |= QC_INO_TIMER; + if (src->d_fieldmask & FS_DQ_RTBTIMER) + dst->d_fieldmask |= QC_RT_SPC_TIMER; + if (src->d_fieldmask & FS_DQ_BWARNS) + dst->d_fieldmask |= QC_SPC_WARNS; + if (src->d_fieldmask & 
FS_DQ_IWARNS) + dst->d_fieldmask |= QC_INO_WARNS; + if (src->d_fieldmask & FS_DQ_RTBWARNS) + dst->d_fieldmask |= QC_RT_SPC_WARNS; + if (src->d_fieldmask & FS_DQ_BCOUNT) + dst->d_fieldmask |= QC_SPACE; + if (src->d_fieldmask & FS_DQ_ICOUNT) + dst->d_fieldmask |= QC_INO_COUNT; + if (src->d_fieldmask & FS_DQ_RTBCOUNT) + dst->d_fieldmask |= QC_RT_SPACE; +} + static int quota_setxquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct fs_disk_quota fdq; + struct qc_dqblk qdq; struct kqid qid; if (copy_from_user(&fdq, addr, sizeof(fdq))) @@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id, qid = make_kqid(current_user_ns(), type, id); if (!qid_valid(qid)) return -EINVAL; - return sb->s_qcop->set_dqblk(sb, qid, &fdq); + copy_from_xfs_dqblk(&qdq, &fdq); + return sb->s_qcop->set_dqblk(sb, qid, &qdq); +} + +static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, + int type, qid_t id) +{ + memset(dst, 0, sizeof(*dst)); + dst->d_version = FS_DQUOT_VERSION; + dst->d_id = id; + if (type == USRQUOTA) + dst->d_flags = FS_USER_QUOTA; + else if (type == PRJQUOTA) + dst->d_flags = FS_PROJ_QUOTA; + else + dst->d_flags = FS_GROUP_QUOTA; + dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit); + dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit); + dst->d_ino_hardlimit = src->d_ino_hardlimit; + dst->d_ino_softlimit = src->d_ino_softlimit; + dst->d_bcount = quota_btobb(src->d_space); + dst->d_icount = src->d_ino_count; + dst->d_itimer = src->d_ino_timer; + dst->d_btimer = src->d_spc_timer; + dst->d_iwarns = src->d_ino_warns; + dst->d_bwarns = src->d_spc_warns; + dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit); + dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit); + dst->d_rtbcount = quota_btobb(src->d_rt_space); + dst->d_rtbtimer = src->d_rt_spc_timer; + dst->d_rtbwarns = src->d_rt_spc_warns; } static int quota_getxquota(struct super_block *sb, int type, qid_t id, void __user *addr) { struct fs_disk_quota fdq; + struct qc_dqblk qdq; struct kqid qid; int ret; @@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id, qid = make_kqid(current_user_ns(), type, id); if (!qid_valid(qid)) return -EINVAL; - ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); - if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) + ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); + if (ret) + return ret; + copy_to_xfs_dqblk(&fdq, &qdq, type, id); + if (copy_to_user(addr, &fdq, sizeof(fdq))) return -EFAULT; return ret; } diff --git a/fs/udf/file.c b/fs/udf/file.c index bb15771b92ae..08f3555fbeac 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -224,7 +224,7 @@ out: static int udf_release_file(struct inode *inode, struct file *filp) { if (filp->f_mode & FMODE_WRITE && - atomic_read(&inode->i_writecount) > 1) { + atomic_read(&inode->i_writecount) == 1) { /* * Grab i_mutex to avoid races with writes changing i_size * while we are running. 
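The quota interface rework above moves ->get_dqblk()/->set_dqblk() from the XFS-flavoured struct fs_disk_quota, which counts space in 512-byte "basic blocks", to the byte-based struct qc_dqblk, so each boundary now converts units explicitly: fs/quota/quota.c uses quota_bbtob()/quota_btobb() for the XFS ioctl path and qbtos()/stoqb() for the VFS if_dqblk path. A minimal standalone sketch of that arithmetic follows; the shift widths (10 for 1 KiB VFS quota blocks, 9 for XFS basic blocks) are assumptions based on the mainline headers rather than anything visible in these hunks.

	/* Standalone sketch of the qc_dqblk unit conversions; the block-size
	 * constants are assumptions from the mainline headers, not this diff. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define QIF_DQBLKSIZE_BITS 10	/* VFS quota blocks: 1 KiB */
	#define XFS_BB_SHIFT 9		/* XFS basic blocks: 512 bytes */

	static uint64_t qbtos(uint64_t blocks)	/* quota blocks -> bytes */
	{
		return blocks << QIF_DQBLKSIZE_BITS;
	}

	static uint64_t stoqb(uint64_t space)	/* bytes -> quota blocks, round up */
	{
		return (space + (1ULL << QIF_DQBLKSIZE_BITS) - 1) >> QIF_DQBLKSIZE_BITS;
	}

	static uint64_t quota_bbtob(uint64_t blocks)	/* basic blocks -> bytes */
	{
		return blocks << XFS_BB_SHIFT;
	}

	static uint64_t quota_btobb(uint64_t bytes)	/* bytes -> basic blocks, round up */
	{
		return (bytes + (1ULL << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
	}

	int main(void)
	{
		assert(stoqb(4100) == 5);	/* 4100 bytes round up to 5 quota blocks */
		assert(quota_btobb(4100) == 9);	/* ...and to 9 basic blocks */
		assert(stoqb(qbtos(123)) == 123);	/* whole blocks round-trip losslessly */
		assert(quota_btobb(quota_bbtob(456)) == 456);
		printf("qc_dqblk conversion sketch OK\n");
		return 0;
	}

Rounding up in the bytes-to-blocks direction is deliberate: a limit never shrinks when it round-trips through the ioctl interface, which is why both stoqb() and quota_btobb() add block-size minus one before shifting.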
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 3a07a937e232..41f6c0b9d51c 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint); /* quota ops */ extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, - uint, struct fs_disk_quota *); + uint, struct qc_dqblk *); extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, - struct fs_disk_quota *); + struct qc_dqblk *); extern int xfs_qm_scall_getqstat(struct xfs_mount *, struct fs_quota_stat *); extern int xfs_qm_scall_getqstatv(struct xfs_mount *, diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 74fca68e43b6..cb6168ec92c9 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, uint); STATIC uint xfs_qm_export_flags(uint); -STATIC uint xfs_qm_export_qtype_flags(uint); /* * Turn off quota accounting and/or enforcement for all udquots and/or @@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv( return 0; } -#define XFS_DQ_MASK \ - (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) +#define XFS_QC_MASK \ + (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) /* * Adjust quota limits, and start/stop timers accordingly. @@ -584,7 +583,7 @@ xfs_qm_scall_setqlim( struct xfs_mount *mp, xfs_dqid_t id, uint type, - fs_disk_quota_t *newlim) + struct qc_dqblk *newlim) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_disk_dquot *ddq; @@ -593,9 +592,9 @@ xfs_qm_scall_setqlim( int error; xfs_qcnt_t hard, soft; - if (newlim->d_fieldmask & ~XFS_DQ_MASK) + if (newlim->d_fieldmask & ~XFS_QC_MASK) return -EINVAL; - if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) + if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) return 0; /* @@ -633,11 +632,11 @@ xfs_qm_scall_setqlim( /* * Make sure that hardlimits are >= soft limits before changing. */ - hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : + hard = (newlim->d_fieldmask & QC_SPC_HARD) ? + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : be64_to_cpu(ddq->d_blk_hardlimit); - soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : + soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : be64_to_cpu(ddq->d_blk_softlimit); if (hard == 0 || hard >= soft) { ddq->d_blk_hardlimit = cpu_to_be64(hard); @@ -650,11 +649,11 @@ xfs_qm_scall_setqlim( } else { xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); } - hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : + hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : be64_to_cpu(ddq->d_rtb_hardlimit); - soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : + soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : be64_to_cpu(ddq->d_rtb_softlimit); if (hard == 0 || hard >= soft) { ddq->d_rtb_hardlimit = cpu_to_be64(hard); @@ -667,10 +666,10 @@ xfs_qm_scall_setqlim( xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); } - hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? + hard = (newlim->d_fieldmask & QC_INO_HARD) ? 
(xfs_qcnt_t) newlim->d_ino_hardlimit : be64_to_cpu(ddq->d_ino_hardlimit); - soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? + soft = (newlim->d_fieldmask & QC_INO_SOFT) ? (xfs_qcnt_t) newlim->d_ino_softlimit : be64_to_cpu(ddq->d_ino_softlimit); if (hard == 0 || hard >= soft) { @@ -687,12 +686,12 @@ xfs_qm_scall_setqlim( /* * Update warnings counter(s) if requested */ - if (newlim->d_fieldmask & FS_DQ_BWARNS) - ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); - if (newlim->d_fieldmask & FS_DQ_IWARNS) - ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); - if (newlim->d_fieldmask & FS_DQ_RTBWARNS) - ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); + if (newlim->d_fieldmask & QC_SPC_WARNS) + ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); + if (newlim->d_fieldmask & QC_INO_WARNS) + ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); + if (newlim->d_fieldmask & QC_RT_SPC_WARNS) + ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); if (id == 0) { /* @@ -702,24 +701,24 @@ xfs_qm_scall_setqlim( * soft and hard limit values (already done, above), and * for warnings. */ - if (newlim->d_fieldmask & FS_DQ_BTIMER) { - q->qi_btimelimit = newlim->d_btimer; - ddq->d_btimer = cpu_to_be32(newlim->d_btimer); + if (newlim->d_fieldmask & QC_SPC_TIMER) { + q->qi_btimelimit = newlim->d_spc_timer; + ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); } - if (newlim->d_fieldmask & FS_DQ_ITIMER) { - q->qi_itimelimit = newlim->d_itimer; - ddq->d_itimer = cpu_to_be32(newlim->d_itimer); + if (newlim->d_fieldmask & QC_INO_TIMER) { + q->qi_itimelimit = newlim->d_ino_timer; + ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); } - if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { - q->qi_rtbtimelimit = newlim->d_rtbtimer; - ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); + if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { + q->qi_rtbtimelimit = newlim->d_rt_spc_timer; + ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); } - if (newlim->d_fieldmask & FS_DQ_BWARNS) - q->qi_bwarnlimit = newlim->d_bwarns; - if (newlim->d_fieldmask & FS_DQ_IWARNS) - q->qi_iwarnlimit = newlim->d_iwarns; - if (newlim->d_fieldmask & FS_DQ_RTBWARNS) - q->qi_rtbwarnlimit = newlim->d_rtbwarns; + if (newlim->d_fieldmask & QC_SPC_WARNS) + q->qi_bwarnlimit = newlim->d_spc_warns; + if (newlim->d_fieldmask & QC_INO_WARNS) + q->qi_iwarnlimit = newlim->d_ino_warns; + if (newlim->d_fieldmask & QC_RT_SPC_WARNS) + q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; } else { /* * If the user is now over quota, start the timelimit. 
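The limit handling above repeats one pattern per resource (blocks, realtime blocks, inodes): take a value from the request only when its QC_* bit is set in d_fieldmask, otherwise keep the on-disk value, then apply the pair only when the hard limit is zero or at least the soft limit. A condensed sketch of that pattern — the helper name and signature are invented, only the semantics come from the code above:

    #include <stdint.h>

    /* Hypothetical helper mirroring the per-resource checks in
     * xfs_qm_scall_setqlim(), minus the on-disk byte swapping.
     */
    static int apply_limit_pair(uint64_t *hard_out, uint64_t *soft_out,
                                uint64_t disk_hard, uint64_t disk_soft,
                                unsigned int fieldmask,
                                unsigned int hard_bit, uint64_t req_hard,
                                unsigned int soft_bit, uint64_t req_soft)
    {
            uint64_t hard = (fieldmask & hard_bit) ? req_hard : disk_hard;
            uint64_t soft = (fieldmask & soft_bit) ? req_soft : disk_soft;

            if (hard != 0 && hard < soft)
                    return -1;      /* inverted pair: old limits stay in place */

            *hard_out = hard;
            *soft_out = soft;
            return 0;
    }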
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota( struct xfs_mount *mp, xfs_dqid_t id, uint type, - struct fs_disk_quota *dst) + struct qc_dqblk *dst) { struct xfs_dquot *dqp; int error; @@ -848,28 +847,25 @@ xfs_qm_scall_getquota( } memset(dst, 0, sizeof(*dst)); - dst->d_version = FS_DQUOT_VERSION; - dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); - dst->d_id = be32_to_cpu(dqp->q_core.d_id); - dst->d_blk_hardlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); - dst->d_blk_softlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); + dst->d_spc_hardlimit = + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); + dst->d_spc_softlimit = + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); - dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); - dst->d_icount = dqp->q_res_icount; - dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); - dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); - dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); - dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); - dst->d_rtb_hardlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); - dst->d_rtb_softlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); - dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); - dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); - dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); + dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); + dst->d_ino_count = dqp->q_res_icount; + dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); + dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); + dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); + dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); + dst->d_rt_spc_hardlimit = + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); + dst->d_rt_spc_softlimit = + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); + dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); + dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); + dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); /* * Internally, we don't reset all the timers when quota enforcement @@ -882,23 +878,23 @@ xfs_qm_scall_getquota( dqp->q_core.d_flags == XFS_DQ_GROUP) || (!XFS_IS_PQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_PROJ)) { - dst->d_btimer = 0; - dst->d_itimer = 0; - dst->d_rtbtimer = 0; + dst->d_spc_timer = 0; + dst->d_ino_timer = 0; + dst->d_rt_spc_timer = 0; } #ifdef DEBUG - if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || - (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || - (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && - dst->d_id != 0) { - if ((dst->d_bcount > dst->d_blk_softlimit) && - (dst->d_blk_softlimit > 0)) { - ASSERT(dst->d_btimer != 0); + if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || + (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || + (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && + id != 0) { + if ((dst->d_space > dst->d_spc_softlimit) && + (dst->d_spc_softlimit > 0)) { + ASSERT(dst->d_spc_timer != 0); } - if ((dst->d_icount > dst->d_ino_softlimit) && + if ((dst->d_ino_count > dst->d_ino_softlimit) && (dst->d_ino_softlimit > 0)) { - ASSERT(dst->d_itimer != 0); + ASSERT(dst->d_ino_timer != 0); } } #endif @@ -908,26 +904,6 @@ out_put: } STATIC uint -xfs_qm_export_qtype_flags( - uint flags) -{ - /* - * Can't be 
more than one, or none. - */ - ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != - (FS_PROJ_QUOTA | FS_USER_QUOTA)); - ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != - (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); - ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != - (FS_USER_QUOTA | FS_GROUP_QUOTA)); - ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); - - return (flags & XFS_DQ_USER) ? - FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? - FS_PROJ_QUOTA : FS_GROUP_QUOTA; -} - -STATIC uint xfs_qm_export_flags( uint flags) { diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 7542bbeca6a1..801a84c1cdc3 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c @@ -131,7 +131,7 @@ STATIC int xfs_fs_get_dqblk( struct super_block *sb, struct kqid qid, - struct fs_disk_quota *fdq) + struct qc_dqblk *qdq) { struct xfs_mount *mp = XFS_M(sb); @@ -141,14 +141,14 @@ xfs_fs_get_dqblk( return -ESRCH; return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), - xfs_quota_type(qid.type), fdq); + xfs_quota_type(qid.type), qdq); } STATIC int xfs_fs_set_dqblk( struct super_block *sb, struct kqid qid, - struct fs_disk_quota *fdq) + struct qc_dqblk *qdq) { struct xfs_mount *mp = XFS_M(sb); @@ -160,7 +160,7 @@ xfs_fs_set_dqblk( return -ESRCH; return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), - xfs_quota_type(qid.type), fdq); + xfs_quota_type(qid.type), qdq); } const struct quotactl_ops xfs_quotactl_operations = { diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 33063f872ee3..176bf816875e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -385,7 +385,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s /* Is this type a native word size -- useful for atomic operations */ #ifndef __native_word -# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) #endif /* Compile time object size, -1 for unknown */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 0bebb5c348b8..d36f68b08acc 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -595,7 +595,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); extern void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs *regs, int *rctxp); + struct pt_regs **regs, int *rctxp); static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e3a1721c8354..7c7695940ddd 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -228,7 +228,9 @@ struct i2c_client { struct device dev; /* the device structure */ int irq; /* irq issued by device */ struct list_head detected; +#if IS_ENABLED(CONFIG_I2C_SLAVE) i2c_slave_cb_t slave_cb; /* callback for slave mode */ +#endif }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) /* I2C slave support */ +#if IS_ENABLED(CONFIG_I2C_SLAVE) enum i2c_slave_event { I2C_SLAVE_REQ_READ_START, I2C_SLAVE_REQ_READ_END, @@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client, { return client->slave_cb(client, event, val); } +#endif /** * struct i2c_board_info - 
template for device creation @@ -404,8 +408,10 @@ struct i2c_algorithm { /* To determine what the adapter supports */ u32 (*functionality) (struct i2c_adapter *); +#if IS_ENABLED(CONFIG_I2C_SLAVE) int (*reg_slave)(struct i2c_client *client); int (*unreg_slave)(struct i2c_client *client); +#endif }; /** diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 515a35e2a48a..960e666c51e4 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) /** * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query + * @type: first vlan protocol + * @depth: buffer to store length of eth and vlan tags in bytes * * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, + int *depth) { - __be16 protocol = 0; - - if (vlan_tx_tag_present(skb) || - skb->protocol != cpu_to_be16(ETH_P_8021Q)) - protocol = skb->protocol; - else { - __be16 proto, *protop; - protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, - h_vlan_encapsulated_proto), - sizeof(proto), &proto); - if (likely(protop)) - protocol = *protop; + unsigned int vlan_depth = skb->mac_len; + + /* if type is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at + * ETH_HLEN otherwise + */ + if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + if (vlan_depth) { + if (WARN_ON(vlan_depth < VLAN_HLEN)) + return 0; + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + do { + struct vlan_hdr *vh; + + if (unlikely(!pskb_may_pull(skb, + vlan_depth + VLAN_HLEN))) + return 0; + + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } while (type == htons(ETH_P_8021Q) || + type == htons(ETH_P_8021AD)); } - return protocol; + if (depth) + *depth = vlan_depth; + + return type; +} + +/** + * vlan_get_protocol - get protocol EtherType. + * @skb: skbuff to query + * + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. 
+ */ +static inline __be16 vlan_get_protocol(struct sk_buff *skb) +{ + return __vlan_get_protocol(skb, skb->protocol, NULL); } static inline void vlan_set_encap_proto(struct sk_buff *skb, diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5449d2f4a1ef..64ce58bee6f5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -176,7 +176,7 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) -# define sched_annotate_sleep() __set_current_state(TASK_RUNNING) +# define sched_annotate_sleep() (current->task_state_change = 0) #else static inline void ___might_sleep(const char *file, int line, int preempt_offset) { } diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 25c791e295fd..5f3a9aa7225d 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -97,7 +97,7 @@ enum { MLX4_MAX_NUM_PF = 16, MLX4_MAX_NUM_VF = 126, MLX4_MAX_NUM_VF_P_PORT = 64, - MLX4_MFUNC_MAX = 80, + MLX4_MFUNC_MAX = 128, MLX4_MAX_EQ_NUM = 1024, MLX4_MFUNC_EQ_NUM = 4, MLX4_MFUNC_MAX_EQES = 8, diff --git a/include/linux/mm.h b/include/linux/mm.h index 80fc92a49649..dd5ea3016fc4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ +#define VM_FAULT_SIGSEGV 0x0040 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ @@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ - VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ + VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ + VM_FAULT_FALLBACK) /* Encode hstate index for a hwpoisoned large page */ #define VM_FAULT_SET_HINDEX(x) ((x) << 12) diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 90230d5811c5..3a6490e81b28 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h @@ -5,8 +5,11 @@ * An MCS like lock especially tailored for optimistic spinning for sleeping * lock implementations (mutex, rwsem, etc). */ - -#define OSQ_UNLOCKED_VAL (0) +struct optimistic_spin_node { + struct optimistic_spin_node *next, *prev; + int locked; /* 1 if lock acquired */ + int cpu; /* encoded CPU # + 1 value */ +}; struct optimistic_spin_queue { /* @@ -16,6 +19,8 @@ struct optimistic_spin_queue { atomic_t tail; }; +#define OSQ_UNLOCKED_VAL (0) + /* Init macro and function. 
*/ #define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } @@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock) atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); } +extern bool osq_lock(struct optimistic_spin_queue *lock); +extern void osq_unlock(struct optimistic_spin_queue *lock); + #endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4f7a61ca4b39..5cad0e6f3552 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -450,11 +450,6 @@ struct perf_event { #endif /* CONFIG_PERF_EVENTS */ }; -enum perf_event_context_type { - task_context, - cpu_context, -}; - /** * struct perf_event_context - event context structure * @@ -462,7 +457,6 @@ enum perf_event_context_type { */ struct perf_event_context { struct pmu *pmu; - enum perf_event_context_type type; /* * Protect the states of the events in the list, * nr_active, and the list: @@ -475,6 +469,7 @@ struct perf_event_context { */ struct mutex mutex; + struct list_head active_ctx_list; struct list_head pinned_groups; struct list_head flexible_groups; struct list_head event_list; @@ -525,7 +520,6 @@ struct perf_cpu_context { int exclusive; struct hrtimer hrtimer; ktime_t hrtimer_interval; - struct list_head rotation_list; struct pmu *unique_pmu; struct perf_cgroup *cgrp; }; @@ -665,6 +659,7 @@ static inline int is_software_event(struct perf_event *event) extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; +extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs @@ -689,14 +684,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs) static __always_inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { - struct pt_regs hot_regs; + if (static_key_false(&perf_swevent_enabled[event_id])) + __perf_sw_event(event_id, nr, regs, addr); +} + +DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); +/* + * 'Special' version for the scheduler, it hard assumes no recursion, + * which is guaranteed by us not actually scheduling inside other swevents + * because those disable preemption. 
+ */ +static __always_inline void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) +{ if (static_key_false(&perf_swevent_enabled[event_id])) { - if (!regs) { - perf_fetch_caller_regs(&hot_regs); - regs = &hot_regs; - } - __perf_sw_event(event_id, nr, regs, addr); + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + + perf_fetch_caller_regs(regs); + ___perf_sw_event(event_id, nr, regs, addr); } } @@ -712,7 +718,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { - perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); + perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); if (static_key_false(&perf_sched_events.key)) __perf_event_task_sched_out(prev, next); @@ -823,6 +829,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh) static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } +static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 77aed9ea1d26..dab545bb66b3 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -37,6 +37,7 @@ #define SSDR (0x10) /* SSP Data Write/Data Read Register */ #define SSTO (0x28) /* SSP Time Out Register */ +#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */ #define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ #define SSTSA (0x30) /* SSP Tx Timeslot Active */ #define SSRSA (0x34) /* SSP Rx Timeslot Active */ diff --git a/include/linux/quota.h b/include/linux/quota.h index 50978b781a19..097d7eb2441e 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -321,6 +321,49 @@ struct dquot_operations { struct path; +/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */ +struct qc_dqblk { + int d_fieldmask; /* mask of fields to change in ->set_dqblk() */ + u64 d_spc_hardlimit; /* absolute limit on used space */ + u64 d_spc_softlimit; /* preferred limit on used space */ + u64 d_ino_hardlimit; /* maximum # allocated inodes */ + u64 d_ino_softlimit; /* preferred inode limit */ + u64 d_space; /* Space owned by the user */ + u64 d_ino_count; /* # inodes owned by the user */ + s64 d_ino_timer; /* zero if within inode limits */ + /* if not, we refuse service */ + s64 d_spc_timer; /* similar to above; for space */ + int d_ino_warns; /* # warnings issued wrt num inodes */ + int d_spc_warns; /* # warnings issued wrt used space */ + u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */ + u64 d_rt_spc_softlimit; /* preferred limit on RT space */ + u64 d_rt_space; /* realtime space owned */ + s64 d_rt_spc_timer; /* similar to above; for RT space */ + int d_rt_spc_warns; /* # warnings issued wrt RT space */ +}; + +/* Field specifiers for ->set_dqblk() in struct qc_dqblk */ +#define QC_INO_SOFT (1<<0) +#define QC_INO_HARD (1<<1) +#define QC_SPC_SOFT (1<<2) +#define QC_SPC_HARD (1<<3) +#define QC_RT_SPC_SOFT (1<<4) +#define QC_RT_SPC_HARD (1<<5) +#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \ + QC_RT_SPC_SOFT | QC_RT_SPC_HARD) +#define QC_SPC_TIMER (1<<6) +#define QC_INO_TIMER (1<<7) +#define QC_RT_SPC_TIMER (1<<8) +#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER) +#define QC_SPC_WARNS (1<<9) +#define QC_INO_WARNS (1<<10) +#define 
QC_RT_SPC_WARNS (1<<11) +#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS) +#define QC_SPACE (1<<12) +#define QC_INO_COUNT (1<<13) +#define QC_RT_SPACE (1<<14) +#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE) + /* Operations handling requests from userspace */ struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); @@ -329,8 +372,8 @@ struct quotactl_ops { int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); - int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); - int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f23538a6e411..29e3455f7d41 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type); int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); int dquot_get_dqblk(struct super_block *sb, struct kqid id, - struct fs_disk_quota *di); + struct qc_dqblk *di); int dquot_set_dqblk(struct super_block *sb, struct kqid id, - struct fs_disk_quota *di); + struct qc_dqblk *di); int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); int dquot_transfer(struct inode *inode, struct iattr *iattr); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 529bc946f450..a18b16f1dc0e 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu(pos, member) \ - for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ - typeof(*(pos)), member); \ + for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ - typeof(*(pos)), member)) + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point @@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @member: the name of the hlist_node within the struct. 
*/ #define hlist_for_each_entry_continue_rcu_bh(pos, member) \ - for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ - typeof(*(pos)), member); \ + for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ - typeof(*(pos)), member)) + pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ed4f5939a452..78097491cd99 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void) extern struct srcu_struct tasks_rcu_exit_srcu; #define rcu_note_voluntary_context_switch(t) \ do { \ + rcu_all_qs(); \ if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ } while (0) #else /* #ifdef CONFIG_TASKS_RCU */ #define TASKS_RCU(x) do { } while (0) -#define rcu_note_voluntary_context_switch(t) do { } while (0) +#define rcu_note_voluntary_context_switch(t) rcu_all_qs() #endif /* #else #ifdef CONFIG_TASKS_RCU */ /** @@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void) }) #define __rcu_dereference_check(p, c, space) \ ({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ + /* Dependency order vs. p above. */ \ + typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ rcu_dereference_sparse(p, space); \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ - ((typeof(*p) __force __kernel *)(_________p1)); \ + ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ @@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void) }) #define __rcu_dereference_index_check(p, c) \ ({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ + /* Dependency order vs. p above. */ \ + typeof(p) _________p1 = lockless_dereference(p); \ rcu_lockdep_assert(c, \ "suspicious rcu_dereference_index_check() usage"); \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ (_________p1); \ }) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 0e5366200154..937edaeb150d 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu) } /* - * Return the number of grace periods. + * Return the number of grace periods started. */ -static inline long rcu_batches_completed(void) +static inline unsigned long rcu_batches_started(void) { return 0; } /* - * Return the number of bottom-half grace periods. + * Return the number of bottom-half grace periods started. */ -static inline long rcu_batches_completed_bh(void) +static inline unsigned long rcu_batches_started_bh(void) +{ + return 0; +} + +/* + * Return the number of sched grace periods started. + */ +static inline unsigned long rcu_batches_started_sched(void) +{ + return 0; +} + +/* + * Return the number of grace periods completed. + */ +static inline unsigned long rcu_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of bottom-half grace periods completed. + */ +static inline unsigned long rcu_batches_completed_bh(void) +{ + return 0; +} + +/* + * Return the number of sched grace periods completed. 
+ */ +static inline unsigned long rcu_batches_completed_sched(void) { return 0; } @@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void) return true; } - #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ +static inline void rcu_all_qs(void) +{ +} + #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 52953790dcca..d2e583a6aaca 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate); extern unsigned long rcutorture_testseq; extern unsigned long rcutorture_vernum; -long rcu_batches_completed(void); -long rcu_batches_completed_bh(void); -long rcu_batches_completed_sched(void); +unsigned long rcu_batches_started(void); +unsigned long rcu_batches_started_bh(void); +unsigned long rcu_batches_started_sched(void); +unsigned long rcu_batches_completed(void); +unsigned long rcu_batches_completed_bh(void); +unsigned long rcu_batches_completed_sched(void); void show_rcu_gp_kthreads(void); void rcu_force_quiescent_state(void); @@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly; bool rcu_is_watching(void); +void rcu_all_qs(void); + #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 4419b99d8d6e..116655d92269 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg, * * @reg: Offset of the register within the regmap bank * @lsb: lsb of the register field. - * @reg: msb of the register field. + * @msb: msb of the register field. * @id_size: port size if it has some ports * @id_offset: address offset for each ports */ diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index 5479394fefce..5dd65acc2a69 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -32,6 +32,8 @@ struct da9211_pdata { * 2 : 2 phase 2 buck */ int num_buck; + int gpio_ren[DA9211_MAX_REGULATORS]; + struct device_node *reg_node[DA9211_MAX_REGULATORS]; struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; }; #endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 5f1e9ca47417..d4ad5b5a02bb 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -21,6 +21,7 @@ struct regmap; struct regulator_dev; +struct regulator_config; struct regulator_init_data; struct regulator_enable_gpio; @@ -205,6 +206,15 @@ enum regulator_type { * @supply_name: Identifying the regulator supply * @of_match: Name used to identify regulator in DT. * @regulators_node: Name of node containing regulator definitions in DT. + * @of_parse_cb: Optional callback called only if of_match is present. + * Will be called for each regulator parsed from DT, during + * init_data parsing. + * The regulator_config passed as argument to the callback will + * be a copy of config passed to regulator_register, valid only + * for this particular call. Callback may freely change the + * config but it cannot store it for later usage. + * Callback should return 0 on success or negative ERRNO + * indicating failure. * @id: Numerical identifier for the regulator. * @ops: Regulator operations table. * @irq: Interrupt number for the regulator. 
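The kernel-doc just added above describes the new of_parse_cb hook; the member itself lands in struct regulator_desc in the next hunk. A hypothetical driver-side implementation, kept to the documented contract — the property name and "foo" prefix are made up, only the callback signature comes from the patch:

    #include <linux/gpio.h>
    #include <linux/of.h>
    #include <linux/regulator/driver.h>

    static int foo_of_parse_cb(struct device_node *np,
                               const struct regulator_desc *desc,
                               struct regulator_config *config)
    {
            /* config is a transient copy valid only for this call:
             * it may be modified freely but must not be stored.
             */
            if (of_property_read_bool(np, "foo,gpio-open-drain"))
                    config->ena_gpio_flags |= GPIOF_OPEN_DRAIN;

            return 0;       /* a negative errno fails the registration */
    }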
@@ -251,6 +261,9 @@ struct regulator_desc { const char *supply_name; const char *of_match; const char *regulators_node; + int (*of_parse_cb)(struct device_node *, + const struct regulator_desc *, + struct regulator_config *); int id; bool continuous_voltage_range; unsigned n_voltages; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 0b08d05d470b..b07562e082c4 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -191,15 +191,22 @@ struct regulator_init_data { void *driver_data; /* core does not touch this */ }; -int regulator_suspend_prepare(suspend_state_t state); -int regulator_suspend_finish(void); - #ifdef CONFIG_REGULATOR void regulator_has_full_constraints(void); +int regulator_suspend_prepare(suspend_state_t state); +int regulator_suspend_finish(void); #else static inline void regulator_has_full_constraints(void) { } +static inline int regulator_suspend_prepare(suspend_state_t state) +{ + return 0; +} +static inline int regulator_suspend_finish(void) +{ + return 0; +} #endif #endif diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h new file mode 100644 index 000000000000..30cc5963e265 --- /dev/null +++ b/include/linux/regulator/mt6397-regulator.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu <flora.fu@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_REGULATOR_MT6397_H +#define __LINUX_REGULATOR_MT6397_H + +enum { + MT6397_ID_VPCA15 = 0, + MT6397_ID_VPCA7, + MT6397_ID_VSRAMCA15, + MT6397_ID_VSRAMCA7, + MT6397_ID_VCORE, + MT6397_ID_VGPU, + MT6397_ID_VDRM, + MT6397_ID_VIO18 = 7, + MT6397_ID_VTCXO, + MT6397_ID_VA28, + MT6397_ID_VCAMA, + MT6397_ID_VIO28, + MT6397_ID_VUSB, + MT6397_ID_VMC, + MT6397_ID_VMCH, + MT6397_ID_VEMC3V3, + MT6397_ID_VGP1, + MT6397_ID_VGP2, + MT6397_ID_VGP3, + MT6397_ID_VGP4, + MT6397_ID_VGP5, + MT6397_ID_VGP6, + MT6397_ID_VIBR, + MT6397_ID_RG_MAX, +}; + +#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX +#define MT6397_REGULATOR_ID97 0x97 +#define MT6397_REGULATOR_ID91 0x91 + +#endif /* __LINUX_REGULATOR_MT6397_H */ diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h index 364f7a7c43db..70c6c66c5bcf 100644 --- a/include/linux/regulator/pfuze100.h +++ b/include/linux/regulator/pfuze100.h @@ -49,6 +49,20 @@ #define PFUZE200_VGEN5 11 #define PFUZE200_VGEN6 12 +#define PFUZE3000_SW1A 0 +#define PFUZE3000_SW1B 1 +#define PFUZE3000_SW2 2 +#define PFUZE3000_SW3 3 +#define PFUZE3000_SWBST 4 +#define PFUZE3000_VSNVS 5 +#define PFUZE3000_VREFDDR 6 +#define PFUZE3000_VLDO1 7 +#define PFUZE3000_VLDO2 8 +#define PFUZE3000_VCCSD 9 +#define PFUZE3000_V33 10 +#define PFUZE3000_VLDO3 11 +#define PFUZE3000_VLDO4 12 + struct regulator_init_data; struct pfuze_regulator_platform_data { diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index b2b1afbb3202..cd519a11c2c6 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> */ diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h index bc8677c8eba9..e69e9b51b21a 100644 --- a/include/linux/spi/l4f00242t03.h +++ b/include/linux/spi/l4f00242t03.h @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h index 555d254e6606..fdd1d1d51da5 100644 --- a/include/linux/spi/lms283gf05.h +++ b/include/linux/spi/lms283gf05.h @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h index 4835486f58e5..381d368b91b4 100644 --- a/include/linux/spi/mxs-spi.h +++ b/include/linux/spi/mxs-spi.h @@ -15,10 +15,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef __LINUX_SPI_MXS_SPI_H__ diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index d5a316550177..6d36dacec4ba 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -10,10 +10,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __linux_pxa2xx_spi_h #define __linux_pxa2xx_spi_h @@ -57,7 +53,6 @@ struct pxa2xx_spi_chip { #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) #include <linux/clk.h> -#include <mach/dma.h> extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h index e546b2ceb623..a693188cc08b 100644 --- a/include/linux/spi/rspi.h +++ b/include/linux/spi/rspi.h @@ -11,11 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #ifndef __LINUX_SPI_RENESAS_SPI_H__ diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h index a1121f872ac1..aa0d440ab4f0 100644 --- a/include/linux/spi/sh_hspi.h +++ b/include/linux/spi/sh_hspi.h @@ -9,10 +9,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef SH_HSPI_H #define SH_HSPI_H diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h index 88a14d81c49e..b087a85f5f72 100644 --- a/include/linux/spi/sh_msiof.h +++ b/include/linux/spi/sh_msiof.h @@ -7,6 +7,8 @@ struct sh_msiof_spi_info { u16 num_chipselect; unsigned int dma_tx_id; unsigned int dma_rx_id; + u32 dtdl; + u32 syncdl; }; #endif /* __SPI_SH_MSIOF_H__ */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a6ef2a8e6de4..ed9489d893a4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -10,10 +10,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_SPI_H @@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue + * @idling: the device is entering idle state * @cur_msg: the currently in-flight message * @cur_msg_prepared: spi_prepare_message was called for the currently * in-flight message @@ -425,6 +422,7 @@ struct spi_master { spinlock_t queue_lock; struct list_head queue; struct spi_message *cur_msg; + bool idling; bool busy; bool running; bool rt; diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h index 60b59187e590..414c6fddfcf0 100644 --- a/include/linux/spi/tle62x0.h +++ b/include/linux/spi/tle62x0.h @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ struct tle62x0_pdata { diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h index 8f721e465e05..563b3b1799a8 100644 --- a/include/linux/spi/tsc2005.h +++ b/include/linux/spi/tsc2005.h @@ -12,11 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #ifndef _LINUX_SPI_TSC2005_H diff --git a/include/linux/srcu.h b/include/linux/srcu.h index a2783cb5d275..9cfd9623fb03 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -45,7 +45,7 @@ struct rcu_batch { #define RCU_BATCH_INIT(name) { NULL, &(name.head) } struct srcu_struct { - unsigned completed; + unsigned long completed; struct srcu_struct_array __percpu *per_cpu_ref; spinlock_t queue_lock; /* protect ->batch_queue, ->running */ bool running; @@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work); * define and init a srcu struct at build time. * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. */ -#define DEFINE_SRCU(name) \ +#define __DEFINE_SRCU(name, is_static) \ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ - struct srcu_struct name = __SRCU_STRUCT_INIT(name); - -#define DEFINE_STATIC_SRCU(name) \ - static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ - static struct srcu_struct name = __SRCU_STRUCT_INIT(name); + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) +#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) +#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) /** * call_srcu() - Queue a callback for invocation after an SRCU grace period @@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); void synchronize_srcu(struct srcu_struct *sp); void synchronize_srcu_expedited(struct srcu_struct *sp); -long srcu_batches_completed(struct srcu_struct *sp); +unsigned long srcu_batches_completed(struct srcu_struct *sp); void srcu_barrier(struct srcu_struct *sp); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index e08e21e5f601..c72851328ca9 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -173,7 +173,7 @@ extern void syscall_unregfunc(void); TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond),,); \ - if (IS_ENABLED(CONFIG_LOCKDEP)) { \ + if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ rcu_read_lock_sched_notrace(); \ rcu_dereference_sched(__tracepoint_##name.funcs);\ rcu_read_unlock_sched_notrace(); \ diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 7ee2df083542..dc8fd81412bf 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h @@ -22,9 +22,9 @@ struct flow_keys { __be32 ports; __be16 port16[2]; }; - u16 thoff; - u16 n_proto; - u8 ip_proto; + u16 thoff; + __be16 n_proto; + u8 ip_proto; }; bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, diff --git a/include/net/ip.h b/include/net/ip.h index 0bb620702929..09cf5aebb283 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -39,11 +39,12 @@ struct inet_skb_parm { struct ip_options opt; /* Compiled IP options */ unsigned char flags; -#define IPSKB_FORWARDED 1 -#define IPSKB_XFRM_TUNNEL_SIZE 2 -#define IPSKB_XFRM_TRANSFORMED 4 -#define IPSKB_FRAG_COMPLETE 8 -#define IPSKB_REROUTED 16 +#define IPSKB_FORWARDED BIT(0) +#define IPSKB_XFRM_TUNNEL_SIZE BIT(1) +#define IPSKB_XFRM_TRANSFORMED BIT(2) +#define IPSKB_FRAG_COMPLETE BIT(3) +#define IPSKB_REROUTED BIT(4) +#define IPSKB_DOREDIRECT BIT(5) u16 frag_max_size; }; @@ -180,7 +181,7 @@ static inline __u8 
ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; } -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4292929392b0..6e416f6d3e3c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } +u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, + struct in6_addr *src); +void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); void ipv6_proxy_select_ident(struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); @@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel) { if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { - __be32 hash; + u32 hash; hash = skb_get_hash(skb); @@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, */ hash ^= hash >> 12; - flowlabel = hash & IPV6_FLOWLABEL_MASK; + flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; } return flowlabel; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 3ae969e3acf0..9eaaa7884586 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -530,6 +530,8 @@ enum nft_chain_type { int nft_chain_validate_dependency(const struct nft_chain *chain, enum nft_chain_type type); +int nft_chain_validate_hooks(const struct nft_chain *chain, + unsigned int hook_flags); struct nft_stats { u64 bytes; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 24945cefc4fd..0ffef1a38efc 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -52,6 +52,7 @@ struct netns_ipv4 { struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; unsigned int tcp_metrics_hash_log; + struct sock * __percpu *tcp_sk; struct netns_frags frags; #ifdef CONFIG_NETFILTER struct xt_table *iptable_filter; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 3d282cbb66bf..c605d305c577 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -79,6 +79,9 @@ struct Qdisc { struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; + struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct gnet_stats_queue __percpu *cpu_qstats; + struct Qdisc *next_sched; struct sk_buff *gso_skb; /* @@ -86,15 +89,9 @@ struct Qdisc { */ unsigned long state; struct sk_buff_head q; - union { - struct gnet_stats_basic_packed bstats; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; - } __packed; + struct gnet_stats_basic_packed bstats; unsigned int __state; - union { - struct gnet_stats_queue qstats; - struct gnet_stats_queue __percpu *cpu_qstats; - } __packed; + struct gnet_stats_queue qstats; struct rcu_head rcu_head; int padded; atomic_t refcnt; diff --git a/include/net/tcp.h b/include/net/tcp.h index f50f29faf76f..9d9111ef43ae 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); int 
tcp_set_congestion_control(struct sock *sk, const char *name); -void tcp_slow_start(struct tcp_sock *tp, u32 acked); -void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); +u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); +void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); u32 tcp_reno_ssthresh(struct sock *sk); void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0d74f1de99aa..65994a19e840 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) { - size_t copy_sz; - - copy_sz = min_t(size_t, len, udata->outlen); - return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0; + return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; } /** diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h index 2609048c1d44..3a34f6edc2d1 100644 --- a/include/sound/ak4113.h +++ b/include/sound/ak4113.h @@ -286,7 +286,7 @@ struct ak4113 { ak4113_write_t *write; ak4113_read_t *read; void *private_data; - unsigned int init:1; + atomic_t wq_processing; spinlock_t lock; unsigned char regmap[AK4113_WRITABLE_REGS]; struct snd_kcontrol *kctls[AK4113_CONTROLS]; diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h index 52f02a60dba7..069299a88915 100644 --- a/include/sound/ak4114.h +++ b/include/sound/ak4114.h @@ -168,7 +168,7 @@ struct ak4114 { ak4114_write_t * write; ak4114_read_t * read; void * private_data; - unsigned int init: 1; + atomic_t wq_processing; spinlock_t lock; unsigned char regmap[6]; unsigned char txcsb[5]; diff --git a/include/sound/soc.h b/include/sound/soc.h index b4fca9aed2a2..ac8b333acb4d 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -498,6 +498,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg, unsigned int mask, unsigned int value); #ifdef CONFIG_SND_SOC_AC97_BUS +struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec); struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h index 13391d288107..0e7635765153 100644 --- a/include/trace/events/tlb.h +++ b/include/trace/events/tlb.h @@ -13,11 +13,13 @@ { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \ { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" } -TRACE_EVENT(tlb_flush, +TRACE_EVENT_CONDITION(tlb_flush, TP_PROTO(int reason, unsigned long pages), TP_ARGS(reason, pages), + TP_CONDITION(cpu_online(smp_processor_id())), + TP_STRUCT__entry( __field( int, reason) __field(unsigned long, pages) diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 139b5067345b..27609dfcce25 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -763,7 +763,7 @@ perf_trace_##call(void *__data, proto) \ struct ftrace_event_call *event_call = __data; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_raw_##call *entry; \ - struct pt_regs __regs; \ + struct pt_regs *__regs; \ u64 __addr = 0, __count = 1; \ struct task_struct *__task = NULL; \ struct hlist_head *head; \ @@ -782,18 +782,19 @@ perf_trace_##call(void *__data, proto) \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - perf_fetch_caller_regs(&__regs); \ entry = perf_trace_buf_prepare(__entry_size, \ event_call->event.type, &__regs, &rctx); \ if (!entry) \ return; \ 
\ + perf_fetch_caller_regs(__regs); \ + \ tstruct \ \ { assign; } \ \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, &__regs, head, __task); \ + __count, __regs, head, __task); \ } /* diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 4275b961bf60..867cc5084afb 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -90,7 +90,6 @@ enum { }; enum { - IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, IB_USER_VERBS_EX_CMD_DESTROY_FLOW, }; @@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp { __u8 reserved[4]; }; -enum { - IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0, -}; - -struct ib_uverbs_ex_query_device { - __u32 comp_mask; - __u32 reserved; -}; - -struct ib_uverbs_odp_caps { - __u64 general_caps; - struct { - __u32 rc_odp_caps; - __u32 uc_odp_caps; - __u32 ud_odp_caps; - } per_transport_caps; - __u32 reserved; -}; - -struct ib_uverbs_ex_query_device_resp { - struct ib_uverbs_query_device_resp base; - __u32 comp_mask; - __u32 reserved; - struct ib_uverbs_odp_caps odp_caps; -}; - struct ib_uverbs_query_port { __u64 response; __u8 port_num; diff --git a/init/Kconfig b/init/Kconfig index 9afb971497f4..1354ac09b516 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -470,7 +470,6 @@ choice config TREE_RCU bool "Tree-based hierarchical RCU" depends on !PREEMPT && SMP - select IRQ_WORK help This option selects the RCU implementation that is designed for very large SMP system with hundreds or @@ -480,7 +479,6 @@ config TREE_RCU config PREEMPT_RCU bool "Preemptible tree-based hierarchical RCU" depends on PREEMPT - select IRQ_WORK help This option selects the RCU implementation that is designed for very large SMP systems with hundreds or @@ -501,9 +499,17 @@ config TINY_RCU endchoice +config SRCU + bool + help + This option selects the sleepable version of RCU. This version + permits arbitrary sleeping or blocking within RCU read-side critical + sections. + config TASKS_RCU bool "Task_based RCU implementation using voluntary context switch" default n + select SRCU help This option enables a task-based RCU implementation that uses only voluntary context switch (not preemption!), idle, and @@ -668,9 +674,10 @@ config RCU_BOOST config RCU_KTHREAD_PRIO int "Real-time priority to use for RCU worker threads" - range 1 99 - depends on RCU_BOOST - default 1 + range 1 99 if RCU_BOOST + range 0 99 if !RCU_BOOST + default 1 if RCU_BOOST + default 0 if !RCU_BOOST help This option specifies the SCHED_FIFO priority value that will be assigned to the rcuc/n and rcub/n threads and is also the value @@ -1595,6 +1602,7 @@ config PERF_EVENTS depends on HAVE_PERF_EVENTS select ANON_INODES select IRQ_WORK + select SRCU help Enable kernel support for various performance events provided by software and hardware. 
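With SRCU now an explicit Kconfig symbol, an option whose code uses the SRCU API has to pull the implementation in the same way TASKS_RCU and PERF_EVENTS do above, rather than assuming it is always built in. An illustrative fragment for a hypothetical option (FOO_DRIVER does not exist in the tree):

    config FOO_DRIVER
            tristate "Hypothetical driver using SRCU"
            select SRCU
            help
              Any code calling call_srcu(), synchronize_srcu() or using
              DEFINE_SRCU() needs "select SRCU" once the implementation
              is no longer unconditionally present.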
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index 76768ee812b2..08561f1acd13 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -231,6 +231,10 @@ config RWSEM_SPIN_ON_OWNER def_bool y depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW +config LOCK_SPIN_ON_OWNER + def_bool y + depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER + config ARCH_USE_QUEUE_RWLOCK bool diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 088ac0b1b106..536edc2be307 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr) int ufd = attr->map_fd; struct fd f = fdget(ufd); struct bpf_map *map; - void *key, *value; + void *key, *value, *ptr; int err; if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) @@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr) if (copy_from_user(key, ukey, map->key_size) != 0) goto free_key; - err = -ENOENT; - rcu_read_lock(); - value = map->ops->map_lookup_elem(map, key); + err = -ENOMEM; + value = kmalloc(map->value_size, GFP_USER); if (!value) - goto err_unlock; + goto free_key; + + rcu_read_lock(); + ptr = map->ops->map_lookup_elem(map, key); + if (ptr) + memcpy(value, ptr, map->value_size); + rcu_read_unlock(); + + err = -ENOENT; + if (!ptr) + goto free_value; err = -EFAULT; if (copy_to_user(uvalue, value, map->value_size) != 0) - goto err_unlock; + goto free_value; err = 0; -err_unlock: - rcu_read_unlock(); +free_value: + kfree(value); free_key: kfree(key); err_put: diff --git a/kernel/cpu.c b/kernel/cpu.c index 5d220234b3ca..1972b161c61e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -58,22 +58,23 @@ static int cpu_hotplug_disabled; static struct { struct task_struct *active_writer; - struct mutex lock; /* Synchronizes accesses to refcount, */ + /* wait queue to wake up the active_writer */ + wait_queue_head_t wq; + /* verifies that no writer will get active while readers are active */ + struct mutex lock; /* * Also blocks the new readers during * an ongoing cpu hotplug operation. */ - int refcount; - /* And allows lockless put_online_cpus(). 
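The bpf map_lookup_elem() rework above exists because copy_to_user() can fault, and therefore sleep, which is forbidden inside an RCU read-side critical section. The fix snapshots the value into a kernel bounce buffer under rcu_read_lock() and does the user copy afterwards. Reduced to its essentials (lookup(), size, ubuf and err are placeholders, not the real bpf identifiers):

	void *value = kmalloc(size, GFP_USER);	/* bounce buffer */
	void *ptr;

	if (!value)
		return -ENOMEM;

	rcu_read_lock();
	ptr = lookup(map, key);			/* RCU-protected element */
	if (ptr)
		memcpy(value, ptr, size);	/* kernel memcpy: cannot fault */
	rcu_read_unlock();

	if (ptr && copy_to_user(ubuf, value, size))	/* may sleep: now legal */
		err = -EFAULT;
	kfree(value);

The cost is one allocation and copy per lookup; the benefit is that the RCU section no longer contains anything that can block.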
*/ - atomic_t puts_pending; + atomic_t refcount; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif } cpu_hotplug = { .active_writer = NULL, + .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), - .refcount = 0, #ifdef CONFIG_DEBUG_LOCK_ALLOC .dep_map = {.name = "cpu_hotplug.lock" }, #endif @@ -86,15 +87,6 @@ static struct { #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) -static void apply_puts_pending(int max) -{ - int delta; - - if (atomic_read(&cpu_hotplug.puts_pending) >= max) { - delta = atomic_xchg(&cpu_hotplug.puts_pending, 0); - cpu_hotplug.refcount -= delta; - } -} void get_online_cpus(void) { @@ -103,8 +95,7 @@ void get_online_cpus(void) return; cpuhp_lock_acquire_read(); mutex_lock(&cpu_hotplug.lock); - apply_puts_pending(65536); - cpu_hotplug.refcount++; + atomic_inc(&cpu_hotplug.refcount); mutex_unlock(&cpu_hotplug.lock); } EXPORT_SYMBOL_GPL(get_online_cpus); @@ -116,8 +107,7 @@ bool try_get_online_cpus(void) if (!mutex_trylock(&cpu_hotplug.lock)) return false; cpuhp_lock_acquire_tryread(); - apply_puts_pending(65536); - cpu_hotplug.refcount++; + atomic_inc(&cpu_hotplug.refcount); mutex_unlock(&cpu_hotplug.lock); return true; } @@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus); void put_online_cpus(void) { + int refcount; + if (cpu_hotplug.active_writer == current) return; - if (!mutex_trylock(&cpu_hotplug.lock)) { - atomic_inc(&cpu_hotplug.puts_pending); - cpuhp_lock_release(); - return; - } - if (WARN_ON(!cpu_hotplug.refcount)) - cpu_hotplug.refcount++; /* try to fix things up */ + refcount = atomic_dec_return(&cpu_hotplug.refcount); + if (WARN_ON(refcount < 0)) /* try to fix things up */ + atomic_inc(&cpu_hotplug.refcount); + + if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq)) + wake_up(&cpu_hotplug.wq); - if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) - wake_up_process(cpu_hotplug.active_writer); - mutex_unlock(&cpu_hotplug.lock); cpuhp_lock_release(); } @@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus); */ void cpu_hotplug_begin(void) { - cpu_hotplug.active_writer = current; + DEFINE_WAIT(wait); + cpu_hotplug.active_writer = current; cpuhp_lock_acquire(); + for (;;) { mutex_lock(&cpu_hotplug.lock); - apply_puts_pending(1); - if (likely(!cpu_hotplug.refcount)) - break; - __set_current_state(TASK_UNINTERRUPTIBLE); + prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); + if (likely(!atomic_read(&cpu_hotplug.refcount))) + break; mutex_unlock(&cpu_hotplug.lock); schedule(); } + finish_wait(&cpu_hotplug.wq, &wait); } void cpu_hotplug_done(void) diff --git a/kernel/events/core.c b/kernel/events/core.c index 882f835a0d85..7f2fbb8b5069 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -872,22 +872,32 @@ void perf_pmu_enable(struct pmu *pmu) pmu->pmu_enable(pmu); } -static DEFINE_PER_CPU(struct list_head, rotation_list); +static DEFINE_PER_CPU(struct list_head, active_ctx_list); /* - * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized - * because they're strictly cpu affine and rotate_start is called with IRQs - * disabled, while rotate_context is called from IRQ context. 
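The cpu_hotplug rework above replaces the mutex-protected reader count (and its lockless "puts_pending" escape hatch) with a plain atomic counter plus a waitqueue: readers increment under the mutex, decrement locklessly, and wake the writer on the 1 -> 0 transition, while the writer loops in prepare_to_wait() until the count drains. A runnable user-space analogue of the same shape, using POSIX threads (all names invented; the kernel version additionally skips the wakeup path entirely when no writer is waiting, via waitqueue_active()):

	#include <pthread.h>
	#include <stdatomic.h>

	static atomic_int refcount;
	static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

	void get_ref(void)			/* cf. get_online_cpus() */
	{
		pthread_mutex_lock(&gate);	/* blocks while a writer is in */
		atomic_fetch_add(&refcount, 1);
		pthread_mutex_unlock(&gate);
	}

	void put_ref(void)			/* cf. put_online_cpus() */
	{
		if (atomic_fetch_sub(&refcount, 1) == 1) {
			pthread_mutex_lock(&gate);
			pthread_cond_signal(&drained);	/* last reader out */
			pthread_mutex_unlock(&gate);
		}
	}

	void begin_write(void)			/* cf. cpu_hotplug_begin() */
	{
		pthread_mutex_lock(&gate);
		while (atomic_load(&refcount) != 0)
			pthread_cond_wait(&drained, &gate);
		/* gate stays held until the writer is done */
	}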
+ * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and + * perf_event_task_tick() are fully serialized because they're strictly cpu + * affine and perf_event_ctx{activate,deactivate} are called with IRQs + * disabled, while perf_event_task_tick is called from IRQ context. */ -static void perf_pmu_rotate_start(struct pmu *pmu) +static void perf_event_ctx_activate(struct perf_event_context *ctx) { - struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); - struct list_head *head = this_cpu_ptr(&rotation_list); + struct list_head *head = this_cpu_ptr(&active_ctx_list); WARN_ON(!irqs_disabled()); - if (list_empty(&cpuctx->rotation_list)) - list_add(&cpuctx->rotation_list, head); + WARN_ON(!list_empty(&ctx->active_ctx_list)); + + list_add(&ctx->active_ctx_list, head); +} + +static void perf_event_ctx_deactivate(struct perf_event_context *ctx) +{ + WARN_ON(!irqs_disabled()); + + WARN_ON(list_empty(&ctx->active_ctx_list)); + + list_del_init(&ctx->active_ctx_list); } static void get_ctx(struct perf_event_context *ctx) @@ -907,6 +917,84 @@ static void put_ctx(struct perf_event_context *ctx) } /* + * Because of perf_event::ctx migration in sys_perf_event_open::move_group and + * perf_pmu_migrate_context() we need some magic. + * + * Those places that change perf_event::ctx will hold both + * perf_event_ctx::mutex of the 'old' and 'new' ctx value. + * + * Lock ordering is by mutex address. There is one other site where + * perf_event_context::mutex nests and that is put_event(). But remember that + * that is a parent<->child context relation, and migration does not affect + * children, therefore these two orderings should not interact. + * + * The change in perf_event::ctx does not affect children (as claimed above) + * because the sys_perf_event_open() case will install a new event and break + * the ctx parent<->child relation, and perf_pmu_migrate_context() is only + * concerned with cpuctx and that doesn't have children. + * + * The places that change perf_event::ctx will issue: + * + * perf_remove_from_context(); + * synchronize_rcu(); + * perf_install_in_context(); + * + * to effect the change. The remove_from_context() + synchronize_rcu() should + * quiesce the event, after which we can install it in the new location. This + * means that only external vectors (perf_fops, prctl) can perturb the event + * while in transit. Therefore all such accessors should also acquire + * perf_event_context::mutex to serialize against this. + * + * However, because event->ctx can change while we're waiting to acquire + * ctx->mutex we must be careful and use the below perf_event_ctx_lock() + * function.
+ * + * Lock order: + * task_struct::perf_event_mutex + * perf_event_context::mutex + * perf_event_context::lock + * perf_event::child_mutex; + * perf_event::mmap_mutex + * mmap_sem + */ +static struct perf_event_context * +perf_event_ctx_lock_nested(struct perf_event *event, int nesting) +{ + struct perf_event_context *ctx; + +again: + rcu_read_lock(); + ctx = ACCESS_ONCE(event->ctx); + if (!atomic_inc_not_zero(&ctx->refcount)) { + rcu_read_unlock(); + goto again; + } + rcu_read_unlock(); + + mutex_lock_nested(&ctx->mutex, nesting); + if (event->ctx != ctx) { + mutex_unlock(&ctx->mutex); + put_ctx(ctx); + goto again; + } + + return ctx; +} + +static inline struct perf_event_context * +perf_event_ctx_lock(struct perf_event *event) +{ + return perf_event_ctx_lock_nested(event, 0); +} + +static void perf_event_ctx_unlock(struct perf_event *event, + struct perf_event_context *ctx) +{ + mutex_unlock(&ctx->mutex); + put_ctx(ctx); +} + +/* * This must be done under the ctx->lock, such as to serialize against * context_equiv(), therefore we cannot call put_ctx() since that might end up * calling scheduler related locks and ctx->lock nests inside those. @@ -1155,8 +1243,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) ctx->nr_branch_stack++; list_add_rcu(&event->event_entry, &ctx->event_list); - if (!ctx->nr_events) - perf_pmu_rotate_start(ctx->pmu); ctx->nr_events++; if (event->attr.inherit_stat) ctx->nr_stat++; @@ -1275,6 +1361,8 @@ static void perf_group_attach(struct perf_event *event) if (group_leader == event) return; + WARN_ON_ONCE(group_leader->ctx != event->ctx); + if (group_leader->group_flags & PERF_GROUP_SOFTWARE && !is_software_event(event)) group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; @@ -1296,6 +1384,10 @@ static void list_del_event(struct perf_event *event, struct perf_event_context *ctx) { struct perf_cpu_context *cpuctx; + + WARN_ON_ONCE(event->ctx != ctx); + lockdep_assert_held(&ctx->lock); + /* * We can have double detach due to exit/hot-unplug + close. */ @@ -1380,6 +1472,8 @@ static void perf_group_detach(struct perf_event *event) /* Inherit group flags from the previous leader */ sibling->group_flags = event->group_flags; + + WARN_ON_ONCE(sibling->ctx != event->ctx); } out: @@ -1442,6 +1536,10 @@ event_sched_out(struct perf_event *event, { u64 tstamp = perf_event_time(event); u64 delta; + + WARN_ON_ONCE(event->ctx != ctx); + lockdep_assert_held(&ctx->lock); + /* * An event which could not be activated because of * filter mismatch still needs to have its timings @@ -1471,7 +1569,8 @@ event_sched_out(struct perf_event *event, if (!is_software_event(event)) cpuctx->active_oncpu--; - ctx->nr_active--; + if (!--ctx->nr_active) + perf_event_ctx_deactivate(ctx); if (event->attr.freq && event->attr.sample_freq) ctx->nr_freq--; if (event->attr.exclusive || !cpuctx->active_oncpu) @@ -1654,7 +1753,7 @@ int __perf_event_disable(void *info) * is the current context on this CPU and preemption is disabled, * hence we can't get into perf_event_task_sched_out for this context. */ -void perf_event_disable(struct perf_event *event) +static void _perf_event_disable(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct task_struct *task = ctx->task; @@ -1695,6 +1794,19 @@ retry: } raw_spin_unlock_irq(&ctx->lock); } + +/* + * Strictly speaking kernel users cannot create groups and therefore this + * interface does not need the perf_event_ctx_lock() magic. 
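perf_event_ctx_lock_nested() above is an instance of a general "pin, lock, revalidate" idiom for locking an object that is only reachable through a pointer that can be switched while you sleep on its mutex. Stripped of the perf specifics, for a hypothetical object type (the rcu_* calls, atomic_inc_not_zero() and the mutex primitives are the real kernel API; everything else is illustrative):

	struct parent {
		struct mutex lock;
		atomic_t refcount;
	};

	struct obj {
		struct parent __rcu *parent;	/* may be re-pointed at runtime */
	};

	static struct parent *obj_lock_parent(struct obj *o)
	{
		struct parent *p;

	again:
		rcu_read_lock();
		p = rcu_dereference(o->parent);
		if (!atomic_inc_not_zero(&p->refcount)) {	/* p is dying */
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();

		mutex_lock(&p->lock);
		if (rcu_access_pointer(o->parent) != p) {	/* lost a race */
			mutex_unlock(&p->lock);
			parent_put(p);		/* drop the pinned reference */
			goto again;
		}
		return p;	/* locked, and still the current parent */
	}

The reference pinned via atomic_inc_not_zero() is what keeps p (and therefore p->lock) valid across the sleep in mutex_lock(); the recheck afterwards is what makes the returned object meaningful.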
+ */ +void perf_event_disable(struct perf_event *event) +{ + struct perf_event_context *ctx; + + ctx = perf_event_ctx_lock(event); + _perf_event_disable(event); + perf_event_ctx_unlock(event, ctx); +} EXPORT_SYMBOL_GPL(perf_event_disable); static void perf_set_shadow_time(struct perf_event *event, @@ -1782,7 +1894,8 @@ event_sched_in(struct perf_event *event, if (!is_software_event(event)) cpuctx->active_oncpu++; - ctx->nr_active++; + if (!ctx->nr_active++) + perf_event_ctx_activate(ctx); if (event->attr.freq && event->attr.sample_freq) ctx->nr_freq++; @@ -2158,7 +2271,7 @@ unlock: * perf_event_for_each_child or perf_event_for_each as described * for perf_event_disable. */ -void perf_event_enable(struct perf_event *event) +static void _perf_event_enable(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct task_struct *task = ctx->task; @@ -2214,9 +2327,21 @@ retry: out: raw_spin_unlock_irq(&ctx->lock); } + +/* + * See perf_event_disable(); + */ +void perf_event_enable(struct perf_event *event) +{ + struct perf_event_context *ctx; + + ctx = perf_event_ctx_lock(event); + _perf_event_enable(event); + perf_event_ctx_unlock(event, ctx); +} EXPORT_SYMBOL_GPL(perf_event_enable); -int perf_event_refresh(struct perf_event *event, int refresh) +static int _perf_event_refresh(struct perf_event *event, int refresh) { /* * not supported on inherited events @@ -2225,10 +2350,25 @@ int perf_event_refresh(struct perf_event *event, int refresh) return -EINVAL; atomic_add(refresh, &event->event_limit); - perf_event_enable(event); + _perf_event_enable(event); return 0; } + +/* + * See perf_event_disable() + */ +int perf_event_refresh(struct perf_event *event, int refresh) +{ + struct perf_event_context *ctx; + int ret; + + ctx = perf_event_ctx_lock(event); + ret = _perf_event_refresh(event, refresh); + perf_event_ctx_unlock(event, ctx); + + return ret; +} EXPORT_SYMBOL_GPL(perf_event_refresh); static void ctx_sched_out(struct perf_event_context *ctx, @@ -2612,12 +2752,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, perf_pmu_enable(ctx->pmu); perf_ctx_unlock(cpuctx, ctx); - - /* - * Since these rotations are per-cpu, we need to ensure the - * cpu-context we got scheduled on is actually rotating. - */ - perf_pmu_rotate_start(ctx->pmu); } /* @@ -2905,25 +3039,18 @@ static void rotate_ctx(struct perf_event_context *ctx) list_rotate_left(&ctx->flexible_groups); } -/* - * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized - * because they're strictly cpu affine and rotate_start is called with IRQs - * disabled, while rotate_context is called from IRQ context. 
- */ static int perf_rotate_context(struct perf_cpu_context *cpuctx) { struct perf_event_context *ctx = NULL; - int rotate = 0, remove = 1; + int rotate = 0; if (cpuctx->ctx.nr_events) { - remove = 0; if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) rotate = 1; } ctx = cpuctx->task_ctx; if (ctx && ctx->nr_events) { - remove = 0; if (ctx->nr_events != ctx->nr_active) rotate = 1; } @@ -2947,8 +3074,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx) perf_pmu_enable(cpuctx->ctx.pmu); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); done: - if (remove) - list_del_init(&cpuctx->rotation_list); return rotate; } @@ -2966,9 +3091,8 @@ bool perf_event_can_stop_tick(void) void perf_event_task_tick(void) { - struct list_head *head = this_cpu_ptr(&rotation_list); - struct perf_cpu_context *cpuctx, *tmp; - struct perf_event_context *ctx; + struct list_head *head = this_cpu_ptr(&active_ctx_list); + struct perf_event_context *ctx, *tmp; int throttled; WARN_ON(!irqs_disabled()); @@ -2976,14 +3100,8 @@ void perf_event_task_tick(void) __this_cpu_inc(perf_throttled_seq); throttled = __this_cpu_xchg(perf_throttled_count, 0); - list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { - ctx = &cpuctx->ctx; + list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) perf_adjust_freq_unthr_context(ctx, throttled); - - ctx = cpuctx->task_ctx; - if (ctx) - perf_adjust_freq_unthr_context(ctx, throttled); - } } static int event_enable_on_exec(struct perf_event *event, @@ -3142,6 +3260,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx) { raw_spin_lock_init(&ctx->lock); mutex_init(&ctx->mutex); + INIT_LIST_HEAD(&ctx->active_ctx_list); INIT_LIST_HEAD(&ctx->pinned_groups); INIT_LIST_HEAD(&ctx->flexible_groups); INIT_LIST_HEAD(&ctx->event_list); @@ -3421,7 +3540,16 @@ static void perf_remove_from_owner(struct perf_event *event) rcu_read_unlock(); if (owner) { - mutex_lock(&owner->perf_event_mutex); + /* + * If we're here through perf_event_exit_task() we're already + * holding ctx->mutex which would be an inversion wrt. the + * normal lock order. + * + * However we can safely take this lock because its the child + * ctx->mutex. + */ + mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); + /* * We have to re-check the event->owner field, if it is cleared * we raced with perf_event_exit_task(), acquiring the mutex @@ -3440,7 +3568,7 @@ static void perf_remove_from_owner(struct perf_event *event) */ static void put_event(struct perf_event *event) { - struct perf_event_context *ctx = event->ctx; + struct perf_event_context *ctx; if (!atomic_long_dec_and_test(&event->refcount)) return; @@ -3448,7 +3576,6 @@ static void put_event(struct perf_event *event) if (!is_kernel_event(event)) perf_remove_from_owner(event); - WARN_ON_ONCE(ctx->parent_ctx); /* * There are two ways this annotation is useful: * @@ -3461,7 +3588,8 @@ static void put_event(struct perf_event *event) * the last filedesc died, so there is no possibility * to trigger the AB-BA case. 
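With the per-CPU rotation list gone, membership on active_ctx_list is driven purely by the 0 <-> 1 transitions of ctx->nr_active shown in the event_sched_in()/event_sched_out() hunks above, so perf_event_task_tick() only ever walks contexts that actually have active events. The bookkeeping in miniature (a sketch, not the literal kernel code):

	/* First event scheduled in: 0 -> 1 joins the per-CPU active list. */
	if (!ctx->nr_active++)
		perf_event_ctx_activate(ctx);	/* list_add() */

	/* Last event scheduled out: 1 -> 0 leaves it again. */
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);	/* list_del_init() */

list_del_init() leaves the node self-linked rather than poisoned, which is what lets the WARN_ON(list_empty())/WARN_ON(!list_empty()) sanity checks in the helpers stay meaningful across repeated activate/deactivate cycles.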
*/ - mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); + ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); + WARN_ON_ONCE(ctx->parent_ctx); perf_remove_from_context(event, true); mutex_unlock(&ctx->mutex); @@ -3547,12 +3675,13 @@ static int perf_event_read_group(struct perf_event *event, u64 read_format, char __user *buf) { struct perf_event *leader = event->group_leader, *sub; - int n = 0, size = 0, ret = -EFAULT; struct perf_event_context *ctx = leader->ctx; - u64 values[5]; + int n = 0, size = 0, ret; u64 count, enabled, running; + u64 values[5]; + + lockdep_assert_held(&ctx->mutex); - mutex_lock(&ctx->mutex); count = perf_event_read_value(leader, &enabled, &running); values[n++] = 1 + leader->nr_siblings; @@ -3567,7 +3696,7 @@ static int perf_event_read_group(struct perf_event *event, size = n * sizeof(u64); if (copy_to_user(buf, values, size)) - goto unlock; + return -EFAULT; ret = size; @@ -3581,14 +3710,11 @@ static int perf_event_read_group(struct perf_event *event, size = n * sizeof(u64); if (copy_to_user(buf + ret, values, size)) { - ret = -EFAULT; - goto unlock; + return -EFAULT; } ret += size; } -unlock: - mutex_unlock(&ctx->mutex); return ret; } @@ -3660,8 +3786,14 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct perf_event *event = file->private_data; + struct perf_event_context *ctx; + int ret; + + ctx = perf_event_ctx_lock(event); + ret = perf_read_hw(event, buf, count); + perf_event_ctx_unlock(event, ctx); - return perf_read_hw(event, buf, count); + return ret; } static unsigned int perf_poll(struct file *file, poll_table *wait) @@ -3687,7 +3819,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) return events; } -static void perf_event_reset(struct perf_event *event) +static void _perf_event_reset(struct perf_event *event) { (void)perf_event_read(event); local64_set(&event->count, 0); @@ -3706,6 +3838,7 @@ static void perf_event_for_each_child(struct perf_event *event, struct perf_event *child; WARN_ON_ONCE(event->ctx->parent_ctx); + mutex_lock(&event->child_mutex); func(event); list_for_each_entry(child, &event->child_list, child_list) @@ -3719,14 +3852,13 @@ static void perf_event_for_each(struct perf_event *event, struct perf_event_context *ctx = event->ctx; struct perf_event *sibling; - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); + lockdep_assert_held(&ctx->mutex); + event = event->group_leader; perf_event_for_each_child(event, func); list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each_child(sibling, func); - mutex_unlock(&ctx->mutex); } static int perf_event_period(struct perf_event *event, u64 __user *arg) @@ -3796,25 +3928,24 @@ static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event); static int perf_event_set_filter(struct perf_event *event, void __user *arg); -static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) { - struct perf_event *event = file->private_data; void (*func)(struct perf_event *); u32 flags = arg; switch (cmd) { case PERF_EVENT_IOC_ENABLE: - func = perf_event_enable; + func = _perf_event_enable; break; case PERF_EVENT_IOC_DISABLE: - func = perf_event_disable; + func = _perf_event_disable; break; case PERF_EVENT_IOC_RESET: - func = perf_event_reset; + func = _perf_event_reset; break; case PERF_EVENT_IOC_REFRESH: - return perf_event_refresh(event, arg); + return 
_perf_event_refresh(event, arg); case PERF_EVENT_IOC_PERIOD: return perf_event_period(event, (u64 __user *)arg); @@ -3861,6 +3992,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; } +static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct perf_event *event = file->private_data; + struct perf_event_context *ctx; + long ret; + + ctx = perf_event_ctx_lock(event); + ret = _perf_ioctl(event, cmd, arg); + perf_event_ctx_unlock(event, ctx); + + return ret; +} + #ifdef CONFIG_COMPAT static long perf_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -3883,11 +4027,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd, int perf_event_task_enable(void) { + struct perf_event_context *ctx; struct perf_event *event; mutex_lock(&current->perf_event_mutex); - list_for_each_entry(event, &current->perf_event_list, owner_entry) - perf_event_for_each_child(event, perf_event_enable); + list_for_each_entry(event, &current->perf_event_list, owner_entry) { + ctx = perf_event_ctx_lock(event); + perf_event_for_each_child(event, _perf_event_enable); + perf_event_ctx_unlock(event, ctx); + } mutex_unlock(&current->perf_event_mutex); return 0; @@ -3895,11 +4043,15 @@ int perf_event_task_enable(void) int perf_event_task_disable(void) { + struct perf_event_context *ctx; struct perf_event *event; mutex_lock(&current->perf_event_mutex); - list_for_each_entry(event, &current->perf_event_list, owner_entry) - perf_event_for_each_child(event, perf_event_disable); + list_for_each_entry(event, &current->perf_event_list, owner_entry) { + ctx = perf_event_ctx_lock(event); + perf_event_for_each_child(event, _perf_event_disable); + perf_event_ctx_unlock(event, ctx); + } mutex_unlock(&current->perf_event_mutex); return 0; @@ -5889,6 +6041,8 @@ end: rcu_read_unlock(); } +DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); + int perf_swevent_get_recursion_context(void) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); @@ -5904,21 +6058,30 @@ inline void perf_swevent_put_recursion_context(int rctx) put_recursion_context(swhash->recursion, rctx); } -void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) +void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { struct perf_sample_data data; - int rctx; - preempt_disable_notrace(); - rctx = perf_swevent_get_recursion_context(); - if (rctx < 0) + if (WARN_ON_ONCE(!regs)) return; perf_sample_data_init(&data, addr, 0); - do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); +} + +void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) +{ + int rctx; + + preempt_disable_notrace(); + rctx = perf_swevent_get_recursion_context(); + if (unlikely(rctx < 0)) + goto fail; + + ___perf_sw_event(event_id, nr, regs, addr); perf_swevent_put_recursion_context(rctx); +fail: preempt_enable_notrace(); } @@ -6776,12 +6939,10 @@ skip_type: __perf_event_init_context(&cpuctx->ctx); lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); - cpuctx->ctx.type = cpu_context; cpuctx->ctx.pmu = pmu; __perf_cpu_hrtimer_init(cpuctx, cpu); - INIT_LIST_HEAD(&cpuctx->rotation_list); cpuctx->unique_pmu = pmu; } @@ -6854,6 +7015,20 @@ void perf_pmu_unregister(struct pmu *pmu) } EXPORT_SYMBOL_GPL(perf_pmu_unregister); +static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) +{ + int ret; + + if (!try_module_get(pmu->module)) + return -ENODEV; + event->pmu = pmu; + ret = pmu->event_init(event); + if (ret)
module_put(pmu->module); + + return ret; +} + struct pmu *perf_init_event(struct perf_event *event) { struct pmu *pmu = NULL; @@ -6866,24 +7041,14 @@ struct pmu *perf_init_event(struct perf_event *event) pmu = idr_find(&pmu_idr, event->attr.type); rcu_read_unlock(); if (pmu) { - if (!try_module_get(pmu->module)) { - pmu = ERR_PTR(-ENODEV); - goto unlock; - } - event->pmu = pmu; - ret = pmu->event_init(event); + ret = perf_try_init_event(pmu, event); if (ret) pmu = ERR_PTR(ret); goto unlock; } list_for_each_entry_rcu(pmu, &pmus, entry) { - if (!try_module_get(pmu->module)) { - pmu = ERR_PTR(-ENODEV); - goto unlock; - } - event->pmu = pmu; - ret = pmu->event_init(event); + ret = perf_try_init_event(pmu, event); if (!ret) goto unlock; @@ -7247,6 +7412,15 @@ out: return ret; } +static void mutex_lock_double(struct mutex *a, struct mutex *b) +{ + if (b < a) + swap(a, b); + + mutex_lock(a); + mutex_lock_nested(b, SINGLE_DEPTH_NESTING); +} + /** * sys_perf_event_open - open a performance event, associate it to a task/cpu * @@ -7262,7 +7436,7 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event *group_leader = NULL, *output_event = NULL; struct perf_event *event, *sibling; struct perf_event_attr attr; - struct perf_event_context *ctx; + struct perf_event_context *ctx, *uninitialized_var(gctx); struct file *event_file = NULL; struct fd group = {NULL, 0}; struct task_struct *task = NULL; @@ -7420,7 +7594,19 @@ SYSCALL_DEFINE5(perf_event_open, * task or CPU context: */ if (move_group) { - if (group_leader->ctx->type != ctx->type) + /* + * Make sure we're both on the same task, or both + * per-cpu events. + */ + if (group_leader->ctx->task != ctx->task) + goto err_context; + + /* + * Make sure we're both events for the same CPU; + * grouping events for different CPUs is broken, since + * you can never concurrently schedule them anyhow. + */ + if (group_leader->cpu != event->cpu) goto err_context; } else { if (group_leader->ctx != ctx) @@ -7448,43 +7634,68 @@ SYSCALL_DEFINE5(perf_event_open, } if (move_group) { - struct perf_event_context *gctx = group_leader->ctx; - - mutex_lock(&gctx->mutex); - perf_remove_from_context(group_leader, false); + gctx = group_leader->ctx; /* - * Removing from the context ends up with disabled - * event. What we want here is event in the initial - * startup state, ready to be add into new context. + * See perf_event_ctx_lock() for comments on the details + * of swizzling perf_event::ctx. */ - perf_event__state_init(group_leader); + mutex_lock_double(&gctx->mutex, &ctx->mutex); + + perf_remove_from_context(group_leader, false); + list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_remove_from_context(sibling, false); - perf_event__state_init(sibling); put_ctx(gctx); } - mutex_unlock(&gctx->mutex); - put_ctx(gctx); + } else { + mutex_lock(&ctx->mutex); } WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); if (move_group) { + /* + * Wait for everybody to stop referencing the events through + * the old lists, before installing them on the new lists. + */ synchronize_rcu(); - perf_install_in_context(ctx, group_leader, group_leader->cpu); - get_ctx(ctx); + + /* + * Install the group siblings before the group leader. + * + * Because a group leader will try and install the entire group + * (through the sibling list, which is still intact), we can + * end up with siblings installed in the wrong context. + * + * By installing siblings first we NO-OP because they're not + * reachable through the group lists.
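mutex_lock_double() above encodes the classic ABBA-avoidance rule: when two tasks may need the same pair of locks, everyone takes them in one global order, here ascending address. A runnable user-space rendition with POSIX threads (the uintptr_t casts sidestep the fact that ordered comparison of unrelated pointers is formally undefined in ISO C):

	#include <pthread.h>
	#include <stdint.h>

	/* Acquire two mutexes in ascending-address order so that any two
	 * threads locking the same pair can never deadlock. */
	static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if ((uintptr_t)b < (uintptr_t)a) {
			pthread_mutex_t *tmp = a;

			a = b;
			b = tmp;
		}
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	}

	static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		pthread_mutex_unlock(a);	/* release order is irrelevant */
		pthread_mutex_unlock(b);
	}

The kernel version additionally has to tell lockdep that the second acquisition is deliberate nesting within the same lock class, hence the mutex_lock_nested(..., SINGLE_DEPTH_NESTING).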
+ */ list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { + perf_event__state_init(sibling); perf_install_in_context(ctx, sibling, sibling->cpu); get_ctx(ctx); } + + /* + * Removing from the context ends up with disabled + * event. What we want here is event in the initial + * startup state, ready to be added into new context. + */ + perf_event__state_init(group_leader); + perf_install_in_context(ctx, group_leader, group_leader->cpu); + get_ctx(ctx); } perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); + + if (move_group) { + mutex_unlock(&gctx->mutex); + put_ctx(gctx); + } mutex_unlock(&ctx->mutex); put_online_cpus(); @@ -7592,7 +7803,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; - mutex_lock(&src_ctx->mutex); + /* + * See perf_event_ctx_lock() for comments on the details + * of swizzling perf_event::ctx. + */ + mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); list_for_each_entry_safe(event, tmp, &src_ctx->event_list, event_entry) { perf_remove_from_context(event, false); @@ -7600,11 +7815,36 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) put_ctx(src_ctx); list_add(&event->migrate_entry, &events); } - mutex_unlock(&src_ctx->mutex); + /* + * Wait for the events to quiesce before re-instating them. + */ synchronize_rcu(); - mutex_lock(&dst_ctx->mutex); + /* + * Re-instate events in 2 passes. + * + * Skip over group leaders and only install siblings on this first + * pass; siblings will not get enabled without a leader, however a + * leader will enable its siblings, even if those are still on the old + * context. + */ + list_for_each_entry_safe(event, tmp, &events, migrate_entry) { + if (event->group_leader == event) + continue; + + list_del(&event->migrate_entry); + if (event->state >= PERF_EVENT_STATE_OFF) + event->state = PERF_EVENT_STATE_INACTIVE; + account_event_cpu(event, dst_cpu); + perf_install_in_context(dst_ctx, event, dst_cpu); + get_ctx(dst_ctx); + } + + /* + * Once all the siblings are set up properly, install the group leaders + * to make it go. + */ list_for_each_entry_safe(event, tmp, &events, migrate_entry) { list_del(&event->migrate_entry); if (event->state >= PERF_EVENT_STATE_OFF) @@ -7614,6 +7854,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) get_ctx(dst_ctx); } mutex_unlock(&dst_ctx->mutex); + mutex_unlock(&src_ctx->mutex); } EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); @@ -7800,14 +8041,19 @@ static void perf_free_event(struct perf_event *event, put_event(parent); + raw_spin_lock_irq(&ctx->lock); perf_group_detach(event); list_del_event(event, ctx); + raw_spin_unlock_irq(&ctx->lock); free_event(event); } /* - * free an unexposed, unused context as created by inheritance by + * Free an unexposed, unused context as created by inheritance by * perf_event_init_task below, used by fork() in case of fail. + * + * Not all locks are strictly required, but take them anyway to be nice and + * help out with the lockdep assertions.
*/ void perf_event_free_task(struct task_struct *task) { @@ -8126,7 +8372,7 @@ static void __init perf_event_init_all_cpus(void) for_each_possible_cpu(cpu) { swhash = &per_cpu(swevent_htable, cpu); mutex_init(&swhash->hlist_mutex); - INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); + INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu)); } } @@ -8147,22 +8393,11 @@ static void perf_event_init_cpu(int cpu) } #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC -static void perf_pmu_rotate_stop(struct pmu *pmu) -{ - struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); - - WARN_ON(!irqs_disabled()); - - list_del_init(&cpuctx->rotation_list); -} - static void __perf_event_exit_context(void *__info) { struct remove_event re = { .detach_group = true }; struct perf_event_context *ctx = __info; - perf_pmu_rotate_stop(ctx->pmu); - rcu_read_lock(); list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) __perf_remove_from_context(&re); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 146a5792b1d2..eadb95ce7aac 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -13,12 +13,13 @@ #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/circ_buf.h> +#include <linux/poll.h> #include "internal.h" static void perf_output_wakeup(struct perf_output_handle *handle) { - atomic_set(&handle->rb->poll, POLL_IN); + atomic_set(&handle->rb->poll, POLLIN); handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending); diff --git a/kernel/futex.c b/kernel/futex.c index 63678b573d61..4eeb63de7e54 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2258,7 +2258,7 @@ static long futex_wait_restart(struct restart_block *restart) * if there are waiters then it will block, it does PI, etc. (Due to * races the kernel might see a 0 value of the futex too.) 
*/ -static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, +static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; @@ -2953,11 +2953,11 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, case FUTEX_WAKE_OP: return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); case FUTEX_LOCK_PI: - return futex_lock_pi(uaddr, flags, val, timeout, 0); + return futex_lock_pi(uaddr, flags, timeout, 0); case FUTEX_UNLOCK_PI: return futex_unlock_pi(uaddr, flags); case FUTEX_TRYLOCK_PI: - return futex_lock_pi(uaddr, flags, 0, timeout, 1); + return futex_lock_pi(uaddr, flags, NULL, 1); case FUTEX_WAIT_REQUEUE_PI: val3 = FUTEX_BITSET_MATCH_ANY; return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 8541bfdfd232..4ca8eb151975 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -1,5 +1,5 @@ -obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o +obj-y += mutex.o semaphore.o rwsem.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = -pg @@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o endif obj-$(CONFIG_SMP) += spinlock.o +obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o obj-$(CONFIG_SMP) += lglock.o obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_RT_MUTEXES) += rtmutex.o diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 4d60986fcbee..d1fe2ba5bac9 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) arch_mcs_spin_unlock_contended(&next->locked); } -/* - * Cancellable version of the MCS lock above. - * - * Intended for adaptive spinning of sleeping locks: - * mutex_lock()/rwsem_down_{read,write}() etc. - */ - -struct optimistic_spin_node { - struct optimistic_spin_node *next, *prev; - int locked; /* 1 if lock acquired */ - int cpu; /* encoded CPU # value */ -}; - -extern bool osq_lock(struct optimistic_spin_queue *lock); -extern void osq_unlock(struct optimistic_spin_queue *lock); - #endif /* __LINUX_MCS_SPINLOCK_H */ diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 5b49f652a3cf..94674e5919cb 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, } /* - * after acquiring lock with fastpath or when we lost out in contested + * After acquiring lock with fastpath or when we lost out in contested * slowpath, set ctx and wake up any waiters so they can recheck. * * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set, @@ -191,19 +191,32 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, spin_unlock_mutex(&lock->base.wait_lock, flags); } - -#ifdef CONFIG_MUTEX_SPIN_ON_OWNER /* - * In order to avoid a stampede of mutex spinners from acquiring the mutex - * more or less simultaneously, the spinners need to acquire a MCS lock - * first before spinning on the owner field. + * After acquiring lock in the slowpath set ctx and wake up any + * waiters so they can recheck. * + * Callers must hold the mutex wait_lock. 
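The ww_mutex hunks here factor the slowpath "set context and wake waiters" logic into ww_mutex_set_context_slowpath(). The wakeup matters because wound/wait waiters must re-check whether they have been asked to back off. For context, the usual acquire pattern on the caller side, sketched from the ww_mutex API of this period (ww_mutex_lock(), ww_mutex_lock_slow() and the ww_acquire_* calls are real; the two objects and the single-retry structure are illustrative, a full implementation retries in a loop):

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);

	ret = ww_mutex_lock(&a->lock, &ctx);	/* assume success */
	ret = ww_mutex_lock(&b->lock, &ctx);
	if (ret == -EDEADLK) {
		/* An older transaction needs our locks: back off, then
		 * sleep until the contended lock can be ours. The wakeup
		 * delivered by the slowpath code above is what prompts a
		 * sleeping waiter to notice it must back off like this. */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		ret = ww_mutex_lock(&a->lock, &ctx);
	}
	ww_acquire_done(&ctx);
	/* ... use a and b ... */
	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);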
*/ +static __always_inline void +ww_mutex_set_context_slowpath(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx) +{ + struct mutex_waiter *cur; -/* - * Mutex spinning code migrated from kernel/sched/core.c - */ + ww_mutex_lock_acquired(lock, ctx); + lock->ctx = ctx; + + /* + * Give any possible sleeping processes the chance to wake up, + * so they can recheck if they have to back off. + */ + list_for_each_entry(cur, &lock->base.wait_list, list) { + debug_mutex_wake_waiter(&lock->base, cur); + wake_up_process(cur->task); + } +} +#ifdef CONFIG_MUTEX_SPIN_ON_OWNER static inline bool owner_running(struct mutex *lock, struct task_struct *owner) { if (lock->owner != owner) @@ -307,6 +320,11 @@ static bool mutex_optimistic_spin(struct mutex *lock, if (!mutex_can_spin_on_owner(lock)) goto done; + /* + * In order to avoid a stampede of mutex spinners trying to + * acquire the mutex all at once, the spinners need to take a + * MCS (queued) lock first before spinning on the owner field. + */ if (!osq_lock(&lock->osq)) goto done; @@ -469,7 +487,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock) EXPORT_SYMBOL(ww_mutex_unlock); static inline int __sched -__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) +__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) { struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); @@ -557,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, } if (use_ww_ctx && ww_ctx->acquired > 0) { - ret = __mutex_lock_check_stamp(lock, ww_ctx); + ret = __ww_mutex_lock_check_stamp(lock, ww_ctx); if (ret) goto err; } @@ -569,6 +587,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, schedule_preempt_disabled(); spin_lock_mutex(&lock->wait_lock, flags); } + __set_task_state(task, TASK_RUNNING); + mutex_remove_waiter(lock, &waiter, current_thread_info()); /* set it to 0 if there are no waiters left: */ if (likely(list_empty(&lock->wait_list))) @@ -582,23 +602,7 @@ skip_wait: if (use_ww_ctx) { struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); - struct mutex_waiter *cur; - - /* - * This branch gets optimized out for the common case, - * and is only important for ww_mutex_lock. - */ - ww_mutex_lock_acquired(ww, ww_ctx); - ww->ctx = ww_ctx; - - /* - * Give any possible sleeping processes the chance to wake up, - * so they can recheck if they have to back off. - */ - list_for_each_entry(cur, &lock->wait_list, list) { - debug_mutex_wake_waiter(lock, cur); - wake_up_process(cur->task); - } + ww_mutex_set_context_slowpath(ww, ww_ctx); } spin_unlock_mutex(&lock->wait_lock, flags); diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/osq_lock.c index 9887a905a762..c112d00341b0 100644 --- a/kernel/locking/mcs_spinlock.c +++ b/kernel/locking/osq_lock.c @@ -1,8 +1,6 @@ #include <linux/percpu.h> #include <linux/sched.h> -#include "mcs_spinlock.h" - -#ifdef CONFIG_SMP +#include <linux/osq_lock.h> /* * An MCS like lock especially tailored for optimistic spinning for sleeping @@ -111,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) * cmpxchg in an attempt to undo our queueing. */ - while (!smp_load_acquire(&node->locked)) { + while (!ACCESS_ONCE(node->locked)) { /* * If we need to reschedule bail... so we can block. 
*/ @@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock) if (next) ACCESS_ONCE(next->locked) = 1; } - -#endif - diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 7c98873a3077..3059bc2f022d 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1130,6 +1130,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, set_current_state(state); } + __set_current_state(TASK_RUNNING); return ret; } @@ -1188,10 +1189,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); if (likely(!ret)) + /* sleep on the mutex */ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); - set_current_state(TASK_RUNNING); - if (unlikely(ret)) { remove_waiter(lock, &waiter); rt_mutex_handle_deadlock(ret, chwalk, &waiter); @@ -1626,10 +1626,9 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, set_current_state(TASK_INTERRUPTIBLE); + /* sleep on the mutex */ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); - set_current_state(TASK_RUNNING); - if (unlikely(ret)) remove_waiter(lock, waiter); diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index 2c93571162cb..2555ae15ec14 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c @@ -154,7 +154,7 @@ void __sched __down_read(struct rw_semaphore *sem) set_task_state(tsk, TASK_UNINTERRUPTIBLE); } - tsk->state = TASK_RUNNING; + __set_task_state(tsk, TASK_RUNNING); out: ; } diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 7628c3fc37ca..2f7cc4076f50 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -242,8 +242,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) schedule(); } - tsk->state = TASK_RUNNING; - + __set_task_state(tsk, TASK_RUNNING); return sem; } EXPORT_SYMBOL(rwsem_down_read_failed); diff --git a/kernel/notifier.c b/kernel/notifier.c index 4803da6eab62..ae9fc7cc360e 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh, } EXPORT_SYMBOL_GPL(raw_notifier_call_chain); +#ifdef CONFIG_SRCU /* * SRCU notifier chain routines. Registration and unregistration * use a mutex, and call_chain is synchronized by SRCU (no locks). @@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh) } EXPORT_SYMBOL_GPL(srcu_init_notifier_head); +#endif /* CONFIG_SRCU */ + static ATOMIC_NOTIFIER_HEAD(die_chain); int notrace notify_die(enum die_val val, const char *str, diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 48b28d387c7f..7e01f78f0417 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -251,6 +251,7 @@ config APM_EMULATION config PM_OPP bool + select SRCU ---help--- SOCs have a standard set of tuples consisting of frequency and voltage pairs that the device will support per voltage domain. 
This diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index e6fae503d1bc..50a808424b06 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile @@ -1,4 +1,5 @@ -obj-y += update.o srcu.o +obj-y += update.o +obj-$(CONFIG_SRCU) += srcu.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_TREE_RCU) += tree.o obj-$(CONFIG_PREEMPT_RCU) += tree.o diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 07bb02eda844..80adef7d4c3d 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void); void rcu_early_boot_tests(void); +/* + * This function really isn't for public consumption, but RCU is special in + * that context switches can allow the state machine to make progress. + */ +extern void resched_cpu(int cpu); + #endif /* __LINUX_RCU_H */ diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 4d559baf06e0..30d42aa55d83 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -244,7 +244,8 @@ struct rcu_torture_ops { int (*readlock)(void); void (*read_delay)(struct torture_random_state *rrsp); void (*readunlock)(int idx); - int (*completed)(void); + unsigned long (*started)(void); + unsigned long (*completed)(void); void (*deferred_free)(struct rcu_torture *p); void (*sync)(void); void (*exp_sync)(void); @@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU) rcu_read_unlock(); } -static int rcu_torture_completed(void) -{ - return rcu_batches_completed(); -} - /* * Update callback in the pipe. This should be invoked after a grace period. */ @@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p) cur_ops->deferred_free(rp); } -static int rcu_no_completed(void) +static unsigned long rcu_no_completed(void) { return 0; } @@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = { .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, .readunlock = rcu_torture_read_unlock, - .completed = rcu_torture_completed, + .started = rcu_batches_started, + .completed = rcu_batches_completed, .deferred_free = rcu_torture_deferred_free, .sync = synchronize_rcu, .exp_sync = synchronize_rcu_expedited, @@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH) rcu_read_unlock_bh(); } -static int rcu_bh_torture_completed(void) -{ - return rcu_batches_completed_bh(); -} - static void rcu_bh_torture_deferred_free(struct rcu_torture *p) { call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); @@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = { .readlock = rcu_bh_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = rcu_bh_torture_read_unlock, - .completed = rcu_bh_torture_completed, + .started = rcu_batches_started_bh, + .completed = rcu_batches_completed_bh, .deferred_free = rcu_bh_torture_deferred_free, .sync = synchronize_rcu_bh, .exp_sync = synchronize_rcu_bh_expedited, @@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = { .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ .readunlock = rcu_torture_read_unlock, + .started = rcu_no_completed, .completed = rcu_no_completed, .deferred_free = rcu_busted_torture_deferred_free, .sync = synchronize_rcu_busted, @@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) srcu_read_unlock(&srcu_ctl, idx); } -static int srcu_torture_completed(void) +static unsigned long srcu_torture_completed(void) { return srcu_batches_completed(&srcu_ctl); } @@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = { .readlock = srcu_torture_read_lock, .read_delay = srcu_read_delay, .readunlock = srcu_torture_read_unlock, + .started = NULL, .completed = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, @@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = { .readlock = sched_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = sched_torture_read_unlock, - .completed = rcu_no_completed, + .started = rcu_batches_started_sched, + .completed = rcu_batches_completed_sched, .deferred_free = rcu_sched_torture_deferred_free, .sync = synchronize_sched, .exp_sync = synchronize_sched_expedited, @@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = { .readlock = tasks_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = tasks_torture_read_unlock, + .started = rcu_no_completed, .completed = rcu_no_completed, .deferred_free = rcu_tasks_torture_deferred_free, .sync = synchronize_rcu_tasks, @@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void) static void rcu_torture_timer(unsigned long unused) { int idx; - int completed; - int completed_end; + unsigned long started; + unsigned long completed; static DEFINE_TORTURE_RANDOM(rand); static DEFINE_SPINLOCK(rand_lock); struct rcu_torture *p; @@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused) unsigned long long ts; idx = cur_ops->readlock(); - completed = cur_ops->completed(); + if (cur_ops->started) + started = cur_ops->started(); + else + started = cur_ops->completed(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, rcu_read_lock_bh_held() || @@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused) /* Should not happen, but... */ pipe_count = RCU_TORTURE_PIPE_LEN; } - completed_end = cur_ops->completed(); + completed = cur_ops->completed(); if (pipe_count > 1) { do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, - completed, completed_end); + started, completed); rcutorture_trace_dump(); } __this_cpu_inc(rcu_torture_count[pipe_count]); - completed = completed_end - completed; + completed = completed - started; + if (cur_ops->started) + completed++; if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... 
*/ completed = RCU_TORTURE_PIPE_LEN; @@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused) static int rcu_torture_reader(void *arg) { - int completed; - int completed_end; + unsigned long started; + unsigned long completed; int idx; DEFINE_TORTURE_RANDOM(rand); struct rcu_torture *p; @@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg) mod_timer(&t, jiffies + 1); } idx = cur_ops->readlock(); - completed = cur_ops->completed(); + if (cur_ops->started) + started = cur_ops->started(); + else + started = cur_ops->completed(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, rcu_read_lock_bh_held() || @@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg) /* Should not happen, but... */ pipe_count = RCU_TORTURE_PIPE_LEN; } - completed_end = cur_ops->completed(); + completed = cur_ops->completed(); if (pipe_count > 1) { do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, - ts, completed, completed_end); + ts, started, completed); rcutorture_trace_dump(); } __this_cpu_inc(rcu_torture_count[pipe_count]); - completed = completed_end - completed; + completed = completed - started; + if (cur_ops->started) + completed++; if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... */ completed = RCU_TORTURE_PIPE_LEN; @@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg) cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { n_rcu_torture_barrier_error++; + pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", + atomic_read(&barrier_cbs_invoked), + n_barrier_cbs); WARN_ON_ONCE(1); } n_barrier_successes++; diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index e037f3eb2f7b..445bf8ffe3fb 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier); * Report the number of batches, correlated with, but not necessarily * precisely the same as, the number of grace periods that have elapsed. */ -long srcu_batches_completed(struct srcu_struct *sp) +unsigned long srcu_batches_completed(struct srcu_struct *sp) { return sp->completed; } diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 0db5649f8817..cc9ceca7bde1 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_ctrlblk *rcp); -static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; - #include "tiny_plugin.h" -/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */ -static void rcu_idle_enter_common(long long newval) -{ - if (newval) { - RCU_TRACE(trace_rcu_dyntick(TPS("--="), - rcu_dynticks_nesting, newval)); - rcu_dynticks_nesting = newval; - return; - } - RCU_TRACE(trace_rcu_dyntick(TPS("Start"), - rcu_dynticks_nesting, newval)); - if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) { - struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); - - RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"), - rcu_dynticks_nesting, newval)); - ftrace_dump(DUMP_ALL); - WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", - current->pid, current->comm, - idle->pid, idle->comm); /* must be idle task! */ - } - rcu_sched_qs(); /* implies rcu_bh_inc() */ - barrier(); - rcu_dynticks_nesting = newval; -} - /* * Enter idle, which is an extended quiescent state if we have fully - * entered that mode (i.e., if the new value of dynticks_nesting is zero). + * entered that mode. 
*/ void rcu_idle_enter(void) { - unsigned long flags; - long long newval; - - local_irq_save(flags); - WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); - if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == - DYNTICK_TASK_NEST_VALUE) - newval = 0; - else - newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE; - rcu_idle_enter_common(newval); - local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_idle_enter); @@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter); */ void rcu_irq_exit(void) { - unsigned long flags; - long long newval; - - local_irq_save(flags); - newval = rcu_dynticks_nesting - 1; - WARN_ON_ONCE(newval < 0); - rcu_idle_enter_common(newval); - local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_irq_exit); -/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */ -static void rcu_idle_exit_common(long long oldval) -{ - if (oldval) { - RCU_TRACE(trace_rcu_dyntick(TPS("++="), - oldval, rcu_dynticks_nesting)); - return; - } - RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting)); - if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) { - struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); - - RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"), - oldval, rcu_dynticks_nesting)); - ftrace_dump(DUMP_ALL); - WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", - current->pid, current->comm, - idle->pid, idle->comm); /* must be idle task! */ - } -} - /* * Exit idle, so that we are no longer in an extended quiescent state. */ void rcu_idle_exit(void) { - unsigned long flags; - long long oldval; - - local_irq_save(flags); - oldval = rcu_dynticks_nesting; - WARN_ON_ONCE(rcu_dynticks_nesting < 0); - if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) - rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE; - else - rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; - rcu_idle_exit_common(oldval); - local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_idle_exit); @@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit); */ void rcu_irq_enter(void) { - unsigned long flags; - long long oldval; - - local_irq_save(flags); - oldval = rcu_dynticks_nesting; - rcu_dynticks_nesting++; - WARN_ON_ONCE(rcu_dynticks_nesting == 0); - rcu_idle_exit_common(oldval); - local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_irq_enter); @@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter); */ bool notrace __rcu_is_watching(void) { - return rcu_dynticks_nesting; + return true; } EXPORT_SYMBOL(__rcu_is_watching); #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ /* - * Test whether the current CPU was interrupted from idle. Nested - * interrupts don't count, we must be running at the first interrupt - * level. - */ -static int rcu_is_cpu_rrupt_from_idle(void) -{ - return rcu_dynticks_nesting <= 1; -} - -/* * Helper function for rcu_sched_qs() and rcu_bh_qs(). * Also irqs are disabled to avoid confusion due to interrupt handlers * invoking call_rcu(). 
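A unifying detail in the RCU changes here: grace-period counters become unsigned long (the rcutorture and srcu hunks above) and stall deadlines are compared with ULONG_CMP_GE() (the tiny_plugin.h hunk just below), both relying on wrap-safe modular arithmetic instead of ordinary comparison. A small runnable demonstration; the macro body matches the kernel's definition:

	#include <limits.h>
	#include <stdio.h>

	/* "a is at or after b" for free-running unsigned counters; valid
	 * while the two values are within ULONG_MAX/2 of each other. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

	int main(void)
	{
		unsigned long js = ULONG_MAX - 10;	/* deadline, pre-wrap */
		unsigned long j = 5;			/* "now", post-wrap */

		/* Plain j >= js says 0; the modular form correctly says 1,
		 * because (j - js) wraps around to the small value 16. */
		printf("%d\n", ULONG_CMP_GE(j, js));
		return 0;
	}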
@@ -250,7 +150,7 @@ void rcu_bh_qs(void) void rcu_check_callbacks(int user) { RCU_TRACE(check_cpu_stalls()); - if (user || rcu_is_cpu_rrupt_from_idle()) + if (user) rcu_sched_qs(); else if (!in_softirq()) rcu_bh_qs(); @@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head, rcp->curtail = &head->next; RCU_TRACE(rcp->qlen++); local_irq_restore(flags); + + if (unlikely(is_idle_task(current))) { + /* force scheduling for rcu_sched_qs() */ + resched_cpu(0); + } } /* @@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); + RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk)); + RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk)); rcu_early_boot_tests(); } diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h index 858c56569127..f94e209a10d6 100644 --- a/kernel/rcu/tiny_plugin.h +++ b/kernel/rcu/tiny_plugin.h @@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp) rcp->ticks_this_gp++; j = jiffies; js = ACCESS_ONCE(rcp->jiffies_stall); - if (*rcp->curtail && ULONG_CMP_GE(j, js)) { + if (rcp->rcucblist && ULONG_CMP_GE(j, js)) { pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", - rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting, + rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE, jiffies - rcp->gp_start, rcp->qlen); dump_stack(); - } - if (*rcp->curtail && ULONG_CMP_GE(j, js)) ACCESS_ONCE(rcp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; - else if (ULONG_CMP_GE(j, js)) + } else if (ULONG_CMP_GE(j, js)) { ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); + } } static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 7680fc275036..48d640ca1a05 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) static void invoke_rcu_core(void); static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); +/* rcuc/rcub kthread realtime priority */ +static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; +module_param(kthread_prio, int, 0644); + /* * Track the rcutorture test sequence number and the update version * number within a given test. The rcutorture_testseq is incremented @@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ }; +DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr); +EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr); + /* * Let the RCU core know that this CPU has gone through the scheduler, * which is a quiescent state. This is called when the need for a @@ -284,6 +291,22 @@ void rcu_note_context_switch(void) } EXPORT_SYMBOL_GPL(rcu_note_context_switch); +/* + * Register a quiescent state for all RCU flavors. If there is an + * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight + * dyntick-idle quiescent state visible to other CPUs (but only for those + * RCU flavors in desperate need of a quiescent state, which will normally + * be none of them). Either way, do a lightweight quiescent state for + * all RCU flavors. + */ +void rcu_all_qs(void) +{ + if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) + rcu_momentary_dyntick_idle(); + this_cpu_inc(rcu_qs_ctr); +} +EXPORT_SYMBOL_GPL(rcu_all_qs); + static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ static long qhimark = 10000; /* If this many pending, ignore blimit.
*/ static long qlowmark = 100; /* Once only this many pending, use blimit. */ @@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp); static int rcu_pending(void); /* - * Return the number of RCU-sched batches processed thus far for debug & stats. + * Return the number of RCU batches started thus far for debug & stats. + */ +unsigned long rcu_batches_started(void) +{ + return rcu_state_p->gpnum; +} +EXPORT_SYMBOL_GPL(rcu_batches_started); + +/* + * Return the number of RCU-sched batches started thus far for debug & stats. + */ +unsigned long rcu_batches_started_sched(void) +{ + return rcu_sched_state.gpnum; +} +EXPORT_SYMBOL_GPL(rcu_batches_started_sched); + +/* + * Return the number of RCU BH batches started thus far for debug & stats. */ -long rcu_batches_completed_sched(void) +unsigned long rcu_batches_started_bh(void) +{ + return rcu_bh_state.gpnum; +} +EXPORT_SYMBOL_GPL(rcu_batches_started_bh); + +/* + * Return the number of RCU batches completed thus far for debug & stats. + */ +unsigned long rcu_batches_completed(void) +{ + return rcu_state_p->completed; +} +EXPORT_SYMBOL_GPL(rcu_batches_completed); + +/* + * Return the number of RCU-sched batches completed thus far for debug & stats. + */ +unsigned long rcu_batches_completed_sched(void) { return rcu_sched_state.completed; } EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); /* - * Return the number of RCU BH batches processed thus far for debug & stats. + * Return the number of RCU BH batches completed thus far for debug & stats. */ -long rcu_batches_completed_bh(void) +unsigned long rcu_batches_completed_bh(void) { return rcu_bh_state.completed; } @@ -759,39 +818,71 @@ void rcu_irq_enter(void) /** * rcu_nmi_enter - inform RCU of entry to NMI context * - * If the CPU was idle with dynamic ticks active, and there is no - * irq handler running, this updates rdtp->dynticks_nmi to let the - * RCU grace-period handling know that the CPU is active. + * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and + * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know + * that the CPU is active. This implementation permits nested NMIs, as + * long as the nesting level does not overflow an int. (You will probably + * run out of stack space first.) */ void rcu_nmi_enter(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + int incby = 2; - if (rdtp->dynticks_nmi_nesting == 0 && - (atomic_read(&rdtp->dynticks) & 0x1)) - return; - rdtp->dynticks_nmi_nesting++; - smp_mb__before_atomic(); /* Force delay from prior write. */ - atomic_inc(&rdtp->dynticks); - /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ - smp_mb__after_atomic(); /* See above. */ - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); + /* Complain about underflow. */ + WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0); + + /* + * If idle from RCU viewpoint, atomically increment ->dynticks + * to mark non-idle and increment ->dynticks_nmi_nesting by one. + * Otherwise, increment ->dynticks_nmi_nesting by two. This means + * if ->dynticks_nmi_nesting is equal to one, we are guaranteed + * to be in the outermost NMI handler that interrupted an RCU-idle + * period (observation due to Andy Lutomirski). + */ + if (!(atomic_read(&rdtp->dynticks) & 0x1)) { + smp_mb__before_atomic(); /* Force delay from prior write. */ + atomic_inc(&rdtp->dynticks); + /* atomic_inc() before later RCU read-side crit sects */ + smp_mb__after_atomic(); /* See above. 
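As a worked illustration of the nesting arithmetic just above (values mine, derived from the code shown): starting from RCU-idle, the outermost rcu_nmi_enter() sees an even ->dynticks, increments it to odd, and uses incby = 1, so ->dynticks_nmi_nesting goes 0 -> 1; nested rcu_nmi_enter() calls see odd ->dynticks and add 2, giving 3, 5, and so on. On the way out, rcu_nmi_exit() subtracts 2 while the nesting level is not 1; the handler that finds it equal to 1 is guaranteed to be the outermost one that interrupted an RCU-idle period, so it zeroes the level and increments ->dynticks back to even. An NMI arriving on a non-idle CPU starts at 2 instead, so an odd nesting level means precisely that the outermost handler interrupted RCU idle.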
*/ + WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); + incby = 1; + } + rdtp->dynticks_nmi_nesting += incby; + barrier(); } /** * rcu_nmi_exit - inform RCU of exit from NMI context * - * If the CPU was idle with dynamic ticks active, and there is no - * irq handler running, this updates rdtp->dynticks_nmi to let the - * RCU grace-period handling know that the CPU is no longer active. + * If we are returning from the outermost NMI handler that interrupted an + * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting + * to let the RCU grace-period handling know that the CPU is back to + * being RCU-idle. */ void rcu_nmi_exit(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - if (rdtp->dynticks_nmi_nesting == 0 || - --rdtp->dynticks_nmi_nesting != 0) + /* + * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. + * (We are exiting an NMI handler, so RCU better be paying attention + * to us!) + */ + WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0); + WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); + + /* + * If the nesting level is not 1, the CPU wasn't RCU-idle, so + * leave it in non-RCU-idle state. + */ + if (rdtp->dynticks_nmi_nesting != 1) { + rdtp->dynticks_nmi_nesting -= 2; return; + } + + /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ + rdtp->dynticks_nmi_nesting = 0; /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ smp_mb__before_atomic(); /* See above. */ atomic_inc(&rdtp->dynticks); @@ -898,17 +989,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp, trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); return 1; } else { + if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4, + rdp->mynode->gpnum)) + ACCESS_ONCE(rdp->gpwrap) = true; return 0; } } /* - * This function really isn't for public consumption, but RCU is special in - * that context switches can allow the state machine to make progress. - */ -extern void resched_cpu(int cpu); - -/* * Return true if the specified CPU has passed through a quiescent * state by virtue of being in or having passed through a dynticks * idle state since the last call to dyntick_save_progress_counter() @@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp) j1 = rcu_jiffies_till_stall_check(); ACCESS_ONCE(rsp->jiffies_stall) = j + j1; rsp->jiffies_resched = j + j1 / 2; + rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs); +} + +/* + * Complain about starvation of grace-period kthread. + */ +static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp) +{ + unsigned long gpa; + unsigned long j; + + j = jiffies; + gpa = ACCESS_ONCE(rsp->gp_activity); + if (j - gpa > 2 * HZ) + pr_err("%s kthread starved for %ld jiffies!\n", + rsp->name, j - gpa); } /* @@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp) } } -static void print_other_cpu_stall(struct rcu_state *rsp) +static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum) { int cpu; long delta; unsigned long flags; + unsigned long gpa; + unsigned long j; int ndetected = 0; struct rcu_node *rnp = rcu_get_root(rsp); long totqlen = 0; @@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp) raw_spin_unlock_irqrestore(&rnp->lock, flags); } - /* - * Now rat on any tasks that got kicked up to the root rcu_node - * due to CPU offlining. 
- */ - rnp = rcu_get_root(rsp); - raw_spin_lock_irqsave(&rnp->lock, flags); - ndetected += rcu_print_task_stall(rnp); - raw_spin_unlock_irqrestore(&rnp->lock, flags); - print_cpu_stall_info_end(); for_each_possible_cpu(cpu) totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", smp_processor_id(), (long)(jiffies - rsp->gp_start), (long)rsp->gpnum, (long)rsp->completed, totqlen); - if (ndetected == 0) - pr_err("INFO: Stall ended before state dump start\n"); - else + if (ndetected) { rcu_dump_cpu_stacks(rsp); + } else { + if (ACCESS_ONCE(rsp->gpnum) != gpnum || + ACCESS_ONCE(rsp->completed) == gpnum) { + pr_err("INFO: Stall ended before state dump start\n"); + } else { + j = jiffies; + gpa = ACCESS_ONCE(rsp->gp_activity); + pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n", + rsp->name, j - gpa, j, gpa, + jiffies_till_next_fqs); + /* In this case, the current CPU might be at fault. */ + sched_show_task(current); + } + } /* Complain about tasks blocking the grace period. */ - rcu_print_detail_task_stall(rsp); + rcu_check_gp_kthread_starvation(rsp); + force_quiescent_state(rsp); /* Kick them all. */ } @@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp) pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", jiffies - rsp->gp_start, (long)rsp->gpnum, (long)rsp->completed, totqlen); + + rcu_check_gp_kthread_starvation(rsp); + rcu_dump_cpu_stacks(rsp); raw_spin_lock_irqsave(&rnp->lock, flags); @@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { /* They had a few time units to dump stack, so complain. */ - print_other_cpu_stall(rsp); + print_other_cpu_stall(rsp, gpnum); } } @@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, bool ret; /* Handle the ends of any preceding grace periods first. */ - if (rdp->completed == rnp->completed) { + if (rdp->completed == rnp->completed && + !unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* No grace period end, so just accelerate recent callbacks. */ ret = rcu_accelerate_cbs(rsp, rnp, rdp); @@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); } - if (rdp->gpnum != rnp->gpnum) { + if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* * If the current grace period is waiting for this CPU, * set up to detect a quiescent state, otherwise don't @@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, rdp->gpnum = rnp->gpnum; trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); rdp->passed_quiesce = 0; + rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); zero_cpu_stall_ticks(rdp); + ACCESS_ONCE(rdp->gpwrap) = false; } return ret; } @@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) local_irq_save(flags); rnp = rdp->mynode; if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && - rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */ + rdp->completed == ACCESS_ONCE(rnp->completed) && + !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */ !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. 
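An aside on the wrap-tolerant comparisons used by the new ->gpwrap logic: ULONG_CMP_GE() and ULONG_CMP_LT() are defined in include/linux/rcupdate.h (at the time of this patch) as #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) and #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)), effectively a signed comparison of the unsigned difference. The dyntick_save_progress_counter() check above therefore sets rdp->gpwrap once the CPU's cached ->gpnum trails rnp->gpnum by more than ULONG_MAX / 4 grace periods, safely before the unsigned counters wrap far enough for ordinary comparisons to go wrong.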
*/ local_irq_restore(flags); return; @@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp) struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(rsp); + ACCESS_ONCE(rsp->gp_activity) = jiffies; rcu_bind_gp_kthread(); raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); @@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp) rnp->grphi, rnp->qsmask); raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); + ACCESS_ONCE(rsp->gp_activity) = jiffies; } mutex_unlock(&rsp->onoff_mutex); @@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) unsigned long maxj; struct rcu_node *rnp = rcu_get_root(rsp); + ACCESS_ONCE(rsp->gp_activity) = jiffies; rsp->n_force_qs++; if (fqs_state == RCU_SAVE_DYNTICK) { /* Collect dyntick-idle snapshots. */ @@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(rsp); + ACCESS_ONCE(rsp->gp_activity) = jiffies; raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); gp_duration = jiffies - rsp->gp_start; @@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) nocb += rcu_future_gp_cleanup(rsp, rnp); raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); + ACCESS_ONCE(rsp->gp_activity) = jiffies; } rnp = rcu_get_root(rsp); raw_spin_lock_irq(&rnp->lock); @@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg) if (rcu_gp_init(rsp)) break; cond_resched_rcu_qs(); + ACCESS_ONCE(rsp->gp_activity) = jiffies; WARN_ON(signal_pending(current)); trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), @@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg) ACCESS_ONCE(rsp->gpnum), TPS("fqsend")); cond_resched_rcu_qs(); + ACCESS_ONCE(rsp->gp_activity) = jiffies; } else { /* Deal with stray signal. */ cond_resched_rcu_qs(); + ACCESS_ONCE(rsp->gp_activity) = jiffies; WARN_ON(signal_pending(current)); trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), @@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) rnp = rdp->mynode; raw_spin_lock_irqsave(&rnp->lock, flags); smp_mb__after_unlock_lock(); - if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || - rnp->completed == rnp->gpnum) { + if ((rdp->passed_quiesce == 0 && + rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) || + rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum || + rdp->gpwrap) { /* * The grace period in which this quiescent state was @@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) * within the current grace period. */ rdp->passed_quiesce = 0; /* need qs for new gp. */ + rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); raw_spin_unlock_irqrestore(&rnp->lock, flags); return; } @@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) * Was there a quiescent state since the beginning of the grace * period? If no, then exit and wait for the next call. */ - if (!rdp->passed_quiesce) + if (!rdp->passed_quiesce && + rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) return; /* @@ -2195,6 +2324,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) } /* + * All CPUs for the specified rcu_node structure have gone offline, + * and all tasks that were preempted within an RCU read-side critical + * section while running on one of those CPUs have since exited their RCU + * read-side critical section. 
Some other CPU is reporting this fact with + * the specified rcu_node structure's ->lock held and interrupts disabled. + * This function therefore goes up the tree of rcu_node structures, + * clearing the corresponding bits in the ->qsmaskinit fields. Note that + * the leaf rcu_node structure's ->qsmaskinit field has already been + * updated + * + * This function does check that the specified rcu_node structure has + * all CPUs offline and no blocked tasks, so it is OK to invoke it + * prematurely. That said, invoking it after the fact will cost you + * a needless lock acquisition. So once it has done its work, don't + * invoke it again. + */ +static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) +{ + long mask; + struct rcu_node *rnp = rnp_leaf; + + if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) + return; + for (;;) { + mask = rnp->grpmask; + rnp = rnp->parent; + if (!rnp) + break; + raw_spin_lock(&rnp->lock); /* irqs already disabled. */ + smp_mb__after_unlock_lock(); /* GP memory ordering. */ + rnp->qsmaskinit &= ~mask; + if (rnp->qsmaskinit) { + raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ + return; + } + raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ + } +} + +/* * The CPU has been completely removed, and some other CPU is reporting * this fact from process context. Do the remainder of the cleanup, * including orphaning the outgoing CPU's RCU callbacks, and also @@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) { unsigned long flags; - unsigned long mask; - int need_report = 0; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ @@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); rcu_adopt_orphan_cbs(rsp, flags); + raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags); - /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ - mask = rdp->grpmask; /* rnp->grplo is constant. */ - do { - raw_spin_lock(&rnp->lock); /* irqs already disabled. */ - smp_mb__after_unlock_lock(); - rnp->qsmaskinit &= ~mask; - if (rnp->qsmaskinit != 0) { - if (rnp != rdp->mynode) - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ - break; - } - if (rnp == rdp->mynode) - need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); - else - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ - mask = rnp->grpmask; - rnp = rnp->parent; - } while (rnp != NULL); - - /* - * We still hold the leaf rcu_node structure lock here, and - * irqs are still disabled. The reason for this subterfuge is - * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock - * held leads to deadlock. - */ - raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */ - rnp = rdp->mynode; - if (need_report & RCU_OFL_TASKS_NORM_GP) - rcu_report_unblock_qs_rnp(rnp, flags); - else - raw_spin_unlock_irqrestore(&rnp->lock, flags); - if (need_report & RCU_OFL_TASKS_EXP_GP) - rcu_report_exp_rnp(rsp, rnp, true); + /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ + raw_spin_lock_irqsave(&rnp->lock, flags); + smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. 
*/ + rnp->qsmaskinit &= ~rdp->grpmask; + if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp)) + rcu_cleanup_dead_rnp(rnp); + rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */ WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", cpu, rdp->qlen, rdp->nxtlist); @@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) { } +static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) +{ +} + static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) { } @@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp, } raw_spin_unlock_irqrestore(&rnp->lock, flags); } - rnp = rcu_get_root(rsp); - if (rnp->qsmask == 0) { - raw_spin_lock_irqsave(&rnp->lock, flags); - smp_mb__after_unlock_lock(); - rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ - } } /* @@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused) * Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, * otherwise wake up the per-CPU kernel kthread. Note that because we - * are running on the current CPU with interrupts disabled, the + * are running on the current CPU with softirqs disabled, the * rcu_cpu_kthread_task cannot disappear out from under us. */ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) @@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) /* Is the RCU core waiting for a quiescent state from this CPU? */ if (rcu_scheduler_fully_active && - rdp->qs_pending && !rdp->passed_quiesce) { + rdp->qs_pending && !rdp->passed_quiesce && + rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) { rdp->n_rp_qs_pending++; - } else if (rdp->qs_pending && rdp->passed_quiesce) { + } else if (rdp->qs_pending && + (rdp->passed_quiesce || + rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) { rdp->n_rp_report_qs++; return 1; } @@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) } /* Has a new RCU grace period started? */ - if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ + if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum || + unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */ rdp->n_rp_gp_started++; return 1; } @@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp) } else { _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, rsp->n_barrier_done); + smp_mb__before_atomic(); atomic_inc(&rsp->barrier_cpu_count); __call_rcu(&rdp->barrier_head, rcu_barrier_callback, rsp, cpu, 0); @@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) /* Set up local state, ensuring consistent view of global state. 
*/ raw_spin_lock_irqsave(&rnp->lock, flags); rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); - init_callback_list(rdp); - rdp->qlen_lazy = 0; - ACCESS_ONCE(rdp->qlen) = 0; rdp->dynticks = &per_cpu(rcu_dynticks, cpu); WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); @@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) rdp->gpnum = rnp->completed; rdp->completed = rnp->completed; rdp->passed_quiesce = 0; + rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); rdp->qs_pending = 0; trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); } @@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self, static int __init rcu_spawn_gp_kthread(void) { unsigned long flags; + int kthread_prio_in = kthread_prio; struct rcu_node *rnp; struct rcu_state *rsp; + struct sched_param sp; struct task_struct *t; + /* Force priority into range. */ + if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) + kthread_prio = 1; + else if (kthread_prio < 0) + kthread_prio = 0; + else if (kthread_prio > 99) + kthread_prio = 99; + if (kthread_prio != kthread_prio_in) + pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", + kthread_prio, kthread_prio_in); + rcu_scheduler_fully_active = 1; for_each_rcu_flavor(rsp) { - t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); + t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name); BUG_ON(IS_ERR(t)); rnp = rcu_get_root(rsp); raw_spin_lock_irqsave(&rnp->lock, flags); rsp->gp_kthread = t; + if (kthread_prio) { + sp.sched_priority = kthread_prio; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + } + wake_up_process(t); raw_spin_unlock_irqrestore(&rnp->lock, flags); } rcu_spawn_nocb_kthreads(); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 8e7b1843896e..119de399eb2f 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -27,7 +27,6 @@ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/seqlock.h> -#include <linux/irq_work.h> /* * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and @@ -172,11 +171,6 @@ struct rcu_node { /* queued on this rcu_node structure that */ /* are blocking the current grace period, */ /* there can be no such task. */ - struct completion boost_completion; - /* Used to ensure that the rt_mutex used */ - /* to carry out the boosting is fully */ - /* released with no future boostee accesses */ - /* before that rt_mutex is re-initialized. */ struct rt_mutex boost_mtx; /* Used only for the priority-boosting */ /* side effect, not as a lock. */ @@ -257,9 +251,12 @@ struct rcu_data { /* in order to detect GP end. */ unsigned long gpnum; /* Highest gp number that this CPU */ /* is aware of having started. */ + unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */ + /* for rcu_all_qs() invocations. */ bool passed_quiesce; /* User-mode/idle loop etc. */ bool qs_pending; /* Core waits for quiesc state. */ bool beenonline; /* CPU online at least once. */ + bool gpwrap; /* Possible gpnum/completed wrap. */ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ unsigned long grpmask; /* Mask to apply to leaf qsmask. */ #ifdef CONFIG_RCU_CPU_STALL_INFO @@ -340,14 +337,10 @@ struct rcu_data { #ifdef CONFIG_RCU_NOCB_CPU struct rcu_head *nocb_head; /* CBs waiting for kthread. */ struct rcu_head **nocb_tail; - atomic_long_t nocb_q_count; /* # CBs waiting for kthread */ - atomic_long_t nocb_q_count_lazy; /* (approximate). 
*/ + atomic_long_t nocb_q_count; /* # CBs waiting for nocb */ + atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ struct rcu_head **nocb_follower_tail; - atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */ - atomic_long_t nocb_follower_count_lazy; /* (approximate). */ - int nocb_p_count; /* # CBs being invoked by kthread */ - int nocb_p_count_lazy; /* (approximate). */ wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ struct task_struct *nocb_kthread; int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ @@ -356,8 +349,6 @@ struct rcu_data { struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp; /* CBs waiting for GP. */ struct rcu_head **nocb_gp_tail; - long nocb_gp_count; - long nocb_gp_count_lazy; bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ struct rcu_data *nocb_next_follower; /* Next follower in wakeup chain. */ @@ -488,10 +479,14 @@ struct rcu_state { /* due to no GP active. */ unsigned long gp_start; /* Time at which GP started, */ /* but in jiffies. */ + unsigned long gp_activity; /* Time of last GP kthread */ + /* activity in jiffies. */ unsigned long jiffies_stall; /* Time at which to check */ /* for CPU stalls. */ unsigned long jiffies_resched; /* Time at which to resched */ /* a reluctant CPU. */ + unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */ + /* GP start. */ unsigned long gp_max; /* Maximum GP duration in */ /* jiffies. */ const char *name; /* Name of structure. */ @@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors; #define for_each_rcu_flavor(rsp) \ list_for_each_entry((rsp), &rcu_struct_flavors, flavors) -/* Return values for rcu_preempt_offline_tasks(). */ - -#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */ - /* GP were moved to root. */ -#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */ - /* GP were moved to root. 
*/ - /* * RCU implementation internal declarations: */ @@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); /* Forward declarations for rcutree_plugin.h */ static void rcu_bootup_announce(void); -long rcu_batches_completed(void); static void rcu_preempt_note_context_switch(void); static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU -static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, - unsigned long flags); +static bool rcu_preempt_has_tasks(struct rcu_node *rnp); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_print_detail_task_stall(struct rcu_state *rsp); static int rcu_print_task_stall(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); -#ifdef CONFIG_HOTPLUG_CPU -static int rcu_preempt_offline_tasks(struct rcu_state *rsp, - struct rcu_node *rnp, - struct rcu_data *rdp); -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_preempt_check_callbacks(void); void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); -#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) -static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, - bool wake); -#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */ static void __init __rcu_init_preempt(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); @@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void); #endif /* #ifndef RCU_TREE_NONCORE */ #ifdef CONFIG_RCU_TRACE -#ifdef CONFIG_RCU_NOCB_CPU -/* Sum up queue lengths for tracing. */ +/* Read out queue lengths for tracing. */ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) { - *ql = atomic_long_read(&rdp->nocb_q_count) + - rdp->nocb_p_count + - atomic_long_read(&rdp->nocb_follower_count) + - rdp->nocb_p_count + rdp->nocb_gp_count; - *qll = atomic_long_read(&rdp->nocb_q_count_lazy) + - rdp->nocb_p_count_lazy + - atomic_long_read(&rdp->nocb_follower_count_lazy) + - rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy; -} +#ifdef CONFIG_RCU_NOCB_CPU + *ql = atomic_long_read(&rdp->nocb_q_count); + *qll = atomic_long_read(&rdp->nocb_q_count_lazy); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ -static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) -{ *ql = 0; *qll = 0; -} #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ +} #endif /* #ifdef CONFIG_RCU_TRACE */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 3ec85cb5d544..2e850a51bb8f 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -34,10 +34,6 @@ #include "../locking/rtmutex_common.h" -/* rcuc/rcub kthread realtime priority */ -static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; -module_param(kthread_prio, int, 0644); - /* * Control variables for per-CPU and per-rcu_node kthreads. These * handle all flavors of RCU. @@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); static struct rcu_state *rcu_state_p = &rcu_preempt_state; static int rcu_preempted_readers_exp(struct rcu_node *rnp); +static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, + bool wake); /* * Tell them what RCU they are running. @@ -114,25 +112,6 @@ static void __init rcu_bootup_announce(void) } /* - * Return the number of RCU-preempt batches processed thus far - * for debug and statistics. 
- */ -static long rcu_batches_completed_preempt(void) -{ - return rcu_preempt_state.completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt); - -/* - * Return the number of RCU batches processed thus far for debug & stats. - */ -long rcu_batches_completed(void) -{ - return rcu_batches_completed_preempt(); -} -EXPORT_SYMBOL_GPL(rcu_batches_completed); - -/* * Record a preemptible-RCU quiescent state for the specified CPU. Note * that this just means that the task currently running on the CPU is * not in a quiescent state. There might be any number of tasks blocked @@ -307,15 +286,25 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t, } /* + * Return true if the specified rcu_node structure has tasks that were + * preempted within an RCU read-side critical section. + */ +static bool rcu_preempt_has_tasks(struct rcu_node *rnp) +{ + return !list_empty(&rnp->blkd_tasks); +} + +/* * Handle special cases during rcu_read_unlock(), such as needing to * notify RCU core processing or task having blocked during the RCU * read-side critical section. */ void rcu_read_unlock_special(struct task_struct *t) { - int empty; - int empty_exp; - int empty_exp_now; + bool empty; + bool empty_exp; + bool empty_norm; + bool empty_exp_now; unsigned long flags; struct list_head *np; #ifdef CONFIG_RCU_BOOST @@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t) break; raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ } - empty = !rcu_preempt_blocked_readers_cgp(rnp); + empty = !rcu_preempt_has_tasks(rnp); + empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); empty_exp = !rcu_preempted_readers_exp(rnp); smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ np = rcu_next_node_entry(t, rnp); @@ -387,13 +377,21 @@ void rcu_read_unlock_special(struct task_struct *t) #endif /* #ifdef CONFIG_RCU_BOOST */ /* + * If this was the last task on the list, go see if we + * need to propagate ->qsmaskinit bit clearing up the + * rcu_node tree. + */ + if (!empty && !rcu_preempt_has_tasks(rnp)) + rcu_cleanup_dead_rnp(rnp); + + /* * If this was the last task on the current list, and if * we aren't waiting on any CPUs, report the quiescent state. * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, * so we must take a snapshot of the expedited state. */ empty_exp_now = !rcu_preempted_readers_exp(rnp); - if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { + if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { trace_rcu_quiescent_state_report(TPS("preempt_rcu"), rnp->gpnum, 0, rnp->qsmask, @@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t) #ifdef CONFIG_RCU_BOOST /* Unboost if we were boosted. */ - if (drop_boost_mutex) { + if (drop_boost_mutex) rt_mutex_unlock(&rnp->boost_mtx); - complete(&rnp->boost_completion); - } #endif /* #ifdef CONFIG_RCU_BOOST */ /* @@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp) static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) { WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); - if (!list_empty(&rnp->blkd_tasks)) + if (rcu_preempt_has_tasks(rnp)) rnp->gp_tasks = rnp->blkd_tasks.next; WARN_ON_ONCE(rnp->qsmask); } #ifdef CONFIG_HOTPLUG_CPU -/* - * Handle tasklist migration for case in which all CPUs covered by the - * specified rcu_node have gone offline. Move them up to the root - * rcu_node. 
The reason for not just moving them to the immediate - * parent is to remove the need for rcu_read_unlock_special() to - * make more than two attempts to acquire the target rcu_node's lock. - * Returns true if there were tasks blocking the current RCU grace - * period. - * - * Returns 1 if there was previously a task blocking the current grace - * period on the specified rcu_node structure. - * - * The caller must hold rnp->lock with irqs disabled. - */ -static int rcu_preempt_offline_tasks(struct rcu_state *rsp, - struct rcu_node *rnp, - struct rcu_data *rdp) -{ - struct list_head *lp; - struct list_head *lp_root; - int retval = 0; - struct rcu_node *rnp_root = rcu_get_root(rsp); - struct task_struct *t; - - if (rnp == rnp_root) { - WARN_ONCE(1, "Last CPU thought to be offlined?"); - return 0; /* Shouldn't happen: at least one CPU online. */ - } - - /* If we are on an internal node, complain bitterly. */ - WARN_ON_ONCE(rnp != rdp->mynode); - - /* - * Move tasks up to root rcu_node. Don't try to get fancy for - * this corner-case operation -- just put this node's tasks - * at the head of the root node's list, and update the root node's - * ->gp_tasks and ->exp_tasks pointers to those of this node's, - * if non-NULL. This might result in waiting for more tasks than - * absolutely necessary, but this is a good performance/complexity - * tradeoff. - */ - if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0) - retval |= RCU_OFL_TASKS_NORM_GP; - if (rcu_preempted_readers_exp(rnp)) - retval |= RCU_OFL_TASKS_EXP_GP; - lp = &rnp->blkd_tasks; - lp_root = &rnp_root->blkd_tasks; - while (!list_empty(lp)) { - t = list_entry(lp->next, typeof(*t), rcu_node_entry); - raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ - smp_mb__after_unlock_lock(); - list_del(&t->rcu_node_entry); - t->rcu_blocked_node = rnp_root; - list_add(&t->rcu_node_entry, lp_root); - if (&t->rcu_node_entry == rnp->gp_tasks) - rnp_root->gp_tasks = rnp->gp_tasks; - if (&t->rcu_node_entry == rnp->exp_tasks) - rnp_root->exp_tasks = rnp->exp_tasks; -#ifdef CONFIG_RCU_BOOST - if (&t->rcu_node_entry == rnp->boost_tasks) - rnp_root->boost_tasks = rnp->boost_tasks; -#endif /* #ifdef CONFIG_RCU_BOOST */ - raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ - } - - rnp->gp_tasks = NULL; - rnp->exp_tasks = NULL; -#ifdef CONFIG_RCU_BOOST - rnp->boost_tasks = NULL; - /* - * In case root is being boosted and leaf was not. Make sure - * that we boost the tasks blocking the current grace period - * in this case. - */ - raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ - smp_mb__after_unlock_lock(); - if (rnp_root->boost_tasks != NULL && - rnp_root->boost_tasks != rnp_root->gp_tasks && - rnp_root->boost_tasks != rnp_root->exp_tasks) - rnp_root->boost_tasks = rnp_root->gp_tasks; - raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ -#endif /* #ifdef CONFIG_RCU_BOOST */ - - return retval; -} - #endif /* #ifdef CONFIG_HOTPLUG_CPU */ /* @@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) raw_spin_lock_irqsave(&rnp->lock, flags); smp_mb__after_unlock_lock(); - if (list_empty(&rnp->blkd_tasks)) { + if (!rcu_preempt_has_tasks(rnp)) { raw_spin_unlock_irqrestore(&rnp->lock, flags); } else { rnp->exp_tasks = rnp->blkd_tasks.next; @@ -933,15 +843,6 @@ static void __init rcu_bootup_announce(void) } /* - * Return the number of RCU batches processed thus far for debug & stats. 
- */ -long rcu_batches_completed(void) -{ - return rcu_batches_completed_sched(); -} -EXPORT_SYMBOL_GPL(rcu_batches_completed); - -/* * Because preemptible RCU does not exist, we never have to check for * CPUs being in quiescent states. */ @@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) #ifdef CONFIG_HOTPLUG_CPU -/* Because preemptible RCU does not exist, no quieting of tasks. */ -static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) - __releases(rnp->lock) +/* + * Because there is no preemptible RCU, there can be no readers blocked. + */ +static bool rcu_preempt_has_tasks(struct rcu_node *rnp) { - raw_spin_unlock_irqrestore(&rnp->lock, flags); + return false; } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ @@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) WARN_ON_ONCE(rnp->qsmask); } -#ifdef CONFIG_HOTPLUG_CPU - -/* - * Because preemptible RCU does not exist, it never needs to migrate - * tasks that were blocked within RCU read-side critical sections, and - * such non-existent tasks cannot possibly have been blocking the current - * grace period. - */ -static int rcu_preempt_offline_tasks(struct rcu_state *rsp, - struct rcu_node *rnp, - struct rcu_data *rdp) -{ - return 0; -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - /* * Because preemptible RCU does not exist, it never has any callbacks * to check. @@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void) } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); -#ifdef CONFIG_HOTPLUG_CPU - -/* - * Because preemptible RCU does not exist, there is never any need to - * report on tasks preempted in RCU read-side critical sections during - * expedited RCU grace periods. - */ -static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, - bool wake) -{ -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - /* * Because preemptible RCU does not exist, rcu_barrier() is just * another name for rcu_barrier_sched(). @@ -1080,7 +951,7 @@ void exit_rcu(void) static void rcu_initiate_boost_trace(struct rcu_node *rnp) { - if (list_empty(&rnp->blkd_tasks)) + if (!rcu_preempt_has_tasks(rnp)) rnp->n_balk_blkd_tasks++; else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) rnp->n_balk_exp_gp_tasks++; @@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp) struct task_struct *t; struct list_head *tb; - if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) + if (ACCESS_ONCE(rnp->exp_tasks) == NULL && + ACCESS_ONCE(rnp->boost_tasks) == NULL) return 0; /* Nothing left to boost. */ raw_spin_lock_irqsave(&rnp->lock, flags); @@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp) */ t = container_of(tb, struct task_struct, rcu_node_entry); rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); - init_completion(&rnp->boost_completion); raw_spin_unlock_irqrestore(&rnp->lock, flags); /* Lock only for side effect: boosts task t's priority. */ rt_mutex_lock(&rnp->boost_mtx); rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ - /* Wait for boostee to be done w/boost_mtx before reinitializing. 
*/ - wait_for_completion(&rnp->boost_completion); - return ACCESS_ONCE(rnp->exp_tasks) != NULL || ACCESS_ONCE(rnp->boost_tasks) != NULL; } @@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) if ((mask & 0x1) && cpu != outgoingcpu) cpumask_set_cpu(cpu, cm); - if (cpumask_weight(cm) == 0) { + if (cpumask_weight(cm) == 0) cpumask_setall(cm); - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) - cpumask_clear_cpu(cpu, cm); - WARN_ON_ONCE(cpumask_weight(cm) == 0); - } set_cpus_allowed_ptr(t, cm); free_cpumask_var(cm); } @@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void) for_each_possible_cpu(cpu) per_cpu(rcu_cpu_has_work, cpu) = 0; BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); - rnp = rcu_get_root(rcu_state_p); - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); - if (NUM_RCU_NODES > 1) { - rcu_for_each_leaf_node(rcu_state_p, rnp) - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); - } + rcu_for_each_leaf_node(rcu_state_p, rnp) + (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } static void rcu_prepare_kthreads(int cpu) @@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) * completed since we last checked and there are * callbacks not yet ready to invoke. */ - if (rdp->completed != rnp->completed && + if ((rdp->completed != rnp->completed || + unlikely(ACCESS_ONCE(rdp->gpwrap))) && rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) note_gp_changes(rsp, rdp); @@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) ticks_value = rsp->gpnum - rdp->gpnum; } print_cpu_stall_fast_no_hz(fast_no_hz, cpu); - pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", + pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n", cpu, ticks_value, ticks_title, atomic_read(&rdtp->dynticks) & 0xfff, rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), + ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, fast_no_hz); } @@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) { struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + unsigned long ret; +#ifdef CONFIG_PROVE_RCU struct rcu_head *rhp; +#endif /* #ifdef CONFIG_PROVE_RCU */ - /* No-CBs CPUs might have callbacks on any of three lists. */ + /* + * Check count of all no-CBs callbacks awaiting invocation. + * There needs to be a barrier before this function is called, + * but associated with a prior determination that no more + * callbacks would be posted. In the worst case, the first + * barrier in _rcu_barrier() suffices (but the caller cannot + * necessarily rely on this, not a substitute for the caller + * getting the concurrency design right!). There must also be + * a barrier between the following load and posting of a callback + * (if a callback is in fact needed). This is associated with an + * atomic_inc() in the caller. 
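To restate that pairing in one place (a hedged summary, not text from the patch): the enqueue side in __call_rcu_nocb_enqueue() below performs atomic_long_add() on ->nocb_q_count before the xchg() that publishes the callback, and _rcu_barrier() issues smp_mb__before_atomic() ahead of its atomic_inc() of barrier_cpu_count, so any reader ordered after those barriers cannot observe a count too small for a callback it is obliged to wait on.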
+ */ + ret = atomic_long_read(&rdp->nocb_q_count); + +#ifdef CONFIG_PROVE_RCU rhp = ACCESS_ONCE(rdp->nocb_head); if (!rhp) rhp = ACCESS_ONCE(rdp->nocb_gp_head); @@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) cpu, rhp->func); WARN_ON_ONCE(1); } +#endif /* #ifdef CONFIG_PROVE_RCU */ - return !!rhp; + return !!ret; } /* @@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, struct task_struct *t; /* Enqueue the callback on the nocb list and update counts. */ + atomic_long_add(rhcount, &rdp->nocb_q_count); + /* rcu_barrier() relies on ->nocb_q_count add before xchg. */ old_rhpp = xchg(&rdp->nocb_tail, rhtp); ACCESS_ONCE(*old_rhpp) = rhp; - atomic_long_add(rhcount, &rdp->nocb_q_count); atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ @@ -2288,9 +2169,6 @@ wait_again: /* Move callbacks to wait-for-GP list, which is empty. */ ACCESS_ONCE(rdp->nocb_head) = NULL; rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); - rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0); - rdp->nocb_gp_count_lazy = - atomic_long_xchg(&rdp->nocb_q_count_lazy, 0); gotcbs = true; } @@ -2338,9 +2216,6 @@ wait_again: /* Append callbacks to follower's "done" list. */ tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail); *tail = rdp->nocb_gp_head; - atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count); - atomic_long_add(rdp->nocb_gp_count_lazy, - &rdp->nocb_follower_count_lazy); smp_mb__after_atomic(); /* Store *tail before wakeup. */ if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { /* @@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg) trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); ACCESS_ONCE(rdp->nocb_follower_head) = NULL; tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); - c = atomic_long_xchg(&rdp->nocb_follower_count, 0); - cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0); - rdp->nocb_p_count += c; - rdp->nocb_p_count_lazy += cl; /* Each pass through the following loop invokes a callback. */ - trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); + trace_rcu_batch_start(rdp->rsp->name, + atomic_long_read(&rdp->nocb_q_count_lazy), + atomic_long_read(&rdp->nocb_q_count), -1); c = cl = 0; while (list) { next = list->next; @@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg) list = next; } trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); - ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c; - ACCESS_ONCE(rdp->nocb_p_count_lazy) = - rdp->nocb_p_count_lazy - cl; + smp_mb__before_atomic(); /* _add after CB invocation. */ + atomic_long_add(-c, &rdp->nocb_q_count); + atomic_long_add(-cl, &rdp->nocb_q_count_lazy); rdp->n_nocbs_invoked += c; } return 0; diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c index 5cdc62e1beeb..fbb6240509ea 100644 --- a/kernel/rcu/tree_trace.c +++ b/kernel/rcu/tree_trace.c @@ -46,6 +46,8 @@ #define RCU_TREE_NONCORE #include "tree.h" +DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr); + static int r_open(struct inode *inode, struct file *file, const struct seq_operations *op) { @@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) if (!rdp->beenonline) return; - seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d", + seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' 
: ' ', ulong2long(rdp->completed), ulong2long(rdp->gpnum), - rdp->passed_quiesce, rdp->qs_pending); + rdp->passed_quiesce, + rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu), + rdp->qs_pending); seq_printf(m, " dt=%d/%llx/%d df=%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 607f852b4d04..7052d3fd4e7b 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -268,6 +268,15 @@ bool try_wait_for_completion(struct completion *x) unsigned long flags; int ret = 1; + /* + * Since x->done will need to be locked only + * in the non-blocking case, we check x->done + * first without taking the lock so we can + * return early in the blocking case. + */ + if (!ACCESS_ONCE(x->done)) + return 0; + spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; @@ -288,13 +297,6 @@ EXPORT_SYMBOL(try_wait_for_completion); */ bool completion_done(struct completion *x) { - unsigned long flags; - int ret = 1; - - spin_lock_irqsave(&x->wait.lock, flags); - if (!x->done) - ret = 0; - spin_unlock_irqrestore(&x->wait.lock, flags); - return ret; + return !!ACCESS_ONCE(x->done); } EXPORT_SYMBOL(completion_done); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b269e8a2a516..1f37fe7f77a4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1089,7 +1089,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (p->sched_class->migrate_task_rq) p->sched_class->migrate_task_rq(p, new_cpu); p->se.nr_migrations++; - perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); + perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); } __set_task_cpu(p, new_cpu); @@ -7346,13 +7346,12 @@ void __might_sleep(const char *file, int line, int preempt_offset) * since we will exit with TASK_RUNNING make sure we enter with it, * otherwise we will destroy state. 
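Stepping back to the kernel/sched/completion.c hunks above, a hedged note on why the lockless loads are safe: x->done is modified only under x->wait.lock, and from a pure observer's perspective it moves from zero to nonzero, so a racy ACCESS_ONCE() read can at worst report a stale "not done" that was true an instant earlier. try_wait_for_completion() still takes the lock before actually consuming x->done, and completion_done() is inherently a point-in-time test, so neither caller loses anything.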
*/ - if (WARN_ONCE(current->state != TASK_RUNNING, + WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, "do not call blocking ops when !TASK_RUNNING; " "state=%lx set at [<%p>] %pS\n", current->state, (void *)current->task_state_change, - (void *)current->task_state_change)) - __set_current_state(TASK_RUNNING); + (void *)current->task_state_change); ___might_sleep(file, line, preempt_offset); } diff --git a/kernel/smpboot.c b/kernel/smpboot.c index f032fb5284e3..40190f28db35 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) unsigned int cpu; int ret = 0; + get_online_cpus(); mutex_lock(&smpboot_threads_lock); for_each_online_cpu(cpu) { ret = __smpboot_create_thread(plug_thread, cpu); @@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) list_add(&plug_thread->list, &hotplug_threads); out: mutex_unlock(&smpboot_threads_lock); + put_online_cpus(); return ret; } EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread); diff --git a/kernel/softirq.c b/kernel/softirq.c index 501baa9ac1be..479e4436f787 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -114,8 +114,12 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) trace_softirqs_off(ip); raw_local_irq_restore(flags); - if (preempt_count() == cnt) + if (preempt_count() == cnt) { +#ifdef CONFIG_DEBUG_PREEMPT + current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1); +#endif trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); + } } EXPORT_SYMBOL(__local_bh_disable_ip); #endif /* CONFIG_TRACE_IRQFLAGS */ @@ -656,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu) * in the task stack here. */ __do_softirq(); - rcu_note_context_switch(); local_irq_enable(); - cond_resched(); + cond_resched_rcu_qs(); return; } local_irq_enable(); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 37e50aadd471..d8c724cda37b 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); boot = ktime_add(mono, off_boot); xtim = ktime_add(mono, off_real); - tai = ktime_add(xtim, off_tai); + tai = ktime_add(mono, off_tai); base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 4b9c114ee9de..6fa484de2ba1 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags) } void *perf_trace_buf_prepare(int size, unsigned short type, - struct pt_regs *regs, int *rctxp) + struct pt_regs **regs, int *rctxp) { struct trace_entry *entry; unsigned long flags; @@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type, if (*rctxp < 0) return NULL; + if (regs) + *regs = this_cpu_ptr(&__perf_regs[*rctxp]); raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]); /* zero the dead bytes from align to not leak stack to user */ diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 5edb518be345..296079ae6583 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1148,7 +1148,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - entry = 
perf_trace_buf_prepare(size, call->event.type, regs, &rctx); + entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx); if (!entry) return; @@ -1179,7 +1179,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); + entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx); if (!entry) return; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index c6ee36fcbf90..f97f6e3a676c 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -574,7 +574,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) size -= sizeof(u32); rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, - sys_data->enter_event->event.type, regs, &rctx); + sys_data->enter_event->event.type, NULL, &rctx); if (!rec) return; @@ -647,7 +647,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) size -= sizeof(u32); rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, - sys_data->exit_event->event.type, regs, &rctx); + sys_data->exit_event->event.type, NULL, &rctx); if (!rec) return; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 8520acc34b18..b11441321e7a 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1111,7 +1111,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu, if (hlist_empty(head)) goto out; - entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); + entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx); if (!entry) goto out; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5f2ce616c046..a2ca213c71ca 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST tristate "torture tests for RCU" depends on DEBUG_KERNEL select TORTURE_TEST + select SRCU default n help This option provides a kernel module that runs torture tests @@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT config RCU_CPU_STALL_INFO bool "Print additional diagnostics on RCU CPU stall" depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL - default n + default y help For each stalled CPU that is aware of the current RCU grace period, print out additional per-CPU diagnostic information diff --git a/lib/checksum.c b/lib/checksum.c index 129775eb6de6..8b39e86dbab5 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum) EXPORT_SYMBOL(csum_partial_copy); #ifndef csum_tcpudp_nofold +static inline u32 from64to32(u64 x) +{ + /* add up 32-bit and 32-bit for 32+c bit */ + x = (x & 0xffffffff) + (x >> 32); + /* add up carry.. 
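— a worked example (illustrative value, mine): for x = 0xffffffff00000001, the first fold gives 0x00000001 + 0xffffffff = 0x100000000, which itself carries out of 32 bits, and the second fold gives 0x00000000 + 0x00000001 = 0x00000001; a single fold truncated to 32 bits would return 0 and silently lose the end-around carry, which is exactly the csum_tcpudp_nofold() bug this hunk fixes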
*/ + x = (x & 0xffffffff) + (x >> 32); + return (u32)x; +} + __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, unsigned short proto, @@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, #else s += (proto + len) << 8; #endif - s += (s >> 32); - return (__force __wsum)s; + return (__force __wsum)from64to32(s); } EXPORT_SYMBOL(csum_tcpudp_nofold); #endif diff --git a/mm/Kconfig b/mm/Kconfig index 1d1ae6b078fd..4395b12869c8 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -325,6 +325,7 @@ config VIRT_TO_BUS config MMU_NOTIFIER bool + select SRCU config KSM bool "Enable KSM for page merging" @@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, return -ENOMEM; if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; - if (ret & VM_FAULT_SIGBUS) + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) return -EFAULT; BUG(); } @@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, return -ENOMEM; if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) return -EHWPOISON; - if (ret & VM_FAULT_SIGBUS) + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) return -EFAULT; BUG(); } @@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) else ret = VM_FAULT_WRITE; put_page(page); - } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); + } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); /* * We must loop because handle_mm_fault() may back out if there's * any difficulty e.g. if pte accessed bit gets updated concurrently. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 683b4782019b..2f6893c2f01b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5773,7 +5773,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list) * mem_cgroup_migrate - migrate a charge to another page * @oldpage: currently charged page * @newpage: page to transfer the charge to - * @lrucare: both pages might be on the LRU already + * @lrucare: either or both pages might be on the LRU already * * Migrate the charge from @oldpage to @newpage. 
* diff --git a/mm/memory.c b/mm/memory.c index 54f3a9b00956..2c3536cc6c63 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Check if we need to add a guard page to the stack */ if (check_stack_guard_page(vma, address) < 0) - return VM_FAULT_SIGBUS; + return VM_FAULT_SIGSEGV; /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { diff --git a/mm/nommu.c b/mm/nommu.c index b51eadf6d952..28bd8c4dff6f 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -59,6 +59,7 @@ #endif void *high_memory; +EXPORT_SYMBOL(high_memory); struct page *mem_map; unsigned long max_mapnr; unsigned long highest_memmap_pfn; diff --git a/mm/pagewalk.c b/mm/pagewalk.c index ad83195521f2..b264bda46e1b 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end, */ if ((vma->vm_start <= addr) && (vma->vm_flags & VM_PFNMAP)) { - next = vma->vm_end; + if (walk->pte_hole) + err = walk->pte_hole(addr, next, walk); + if (err) + break; pgd = pgd_offset(walk->mm, next); continue; } diff --git a/mm/shmem.c b/mm/shmem.c index 73ba1df7c8ba..993e6ba689cc 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1013,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, */ oldpage = newpage; } else { - mem_cgroup_migrate(oldpage, newpage, false); + mem_cgroup_migrate(oldpage, newpage, true); lru_cache_add_anon(newpage); *pagep = newpage; } diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index b0330aecbf97..3244aead0926 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -265,22 +265,12 @@ out: data[NFT_REG_VERDICT].verdict = NF_DROP; } -static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) +static int nft_reject_bridge_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) { - struct nft_base_chain *basechain; - - if (chain->flags & NFT_BASE_CHAIN) { - basechain = nft_base_chain(chain); - - switch (basechain->ops[0].hooknum) { - case NF_BR_PRE_ROUTING: - case NF_BR_LOCAL_IN: - break; - default: - return -EOPNOTSUPP; - } - } - return 0; + return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) | + (1 << NF_BR_LOCAL_IN)); } static int nft_reject_bridge_init(const struct nft_ctx *ctx, @@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx, struct nft_reject *priv = nft_expr_priv(expr); int icmp_code, err; - err = nft_reject_bridge_validate_hooks(ctx->chain); + err = nft_reject_bridge_validate(ctx, expr, NULL); if (err < 0) return err; @@ -341,13 +331,6 @@ nla_put_failure: return -1; } -static int nft_reject_bridge_validate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nft_data **data) -{ - return nft_reject_bridge_validate_hooks(ctx->chain); -} - static struct nft_expr_type nft_reject_bridge_type; static const struct nft_expr_ops nft_reject_bridge_ops = { .type = &nft_reject_bridge_type, diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 4589ff67bfa9..67a4a36febd1 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev, ASSERT_RTNL(); caifdev = netdev_priv(dev); caif_netlink_parms(data, &caifdev->conn_req); - dev_net_set(caifdev->netdev, src_net); ret = register_netdevice(dev); if (ret) diff --git 
a/net/core/dev.c b/net/core/dev.c index 171420e75b03..7fe82929f509 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help); __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { - unsigned int vlan_depth = skb->mac_len; __be16 type = skb->protocol; /* Tunnel gso handlers can set protocol to ethernet. */ @@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) type = eth->h_proto; } - /* if skb->protocol is 802.1Q/AD then the header should already be - * present at mac_len - VLAN_HLEN (if mac_len > 0), or at - * ETH_HLEN otherwise - */ - if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { - if (vlan_depth) { - if (WARN_ON(vlan_depth < VLAN_HLEN)) - return 0; - vlan_depth -= VLAN_HLEN; - } else { - vlan_depth = ETH_HLEN; - } - do { - struct vlan_hdr *vh; - - if (unlikely(!pskb_may_pull(skb, - vlan_depth + VLAN_HLEN))) - return 0; - - vh = (struct vlan_hdr *)(skb->data + vlan_depth); - type = vh->h_vlan_encapsulated_proto; - vlan_depth += VLAN_HLEN; - } while (type == htons(ETH_P_8021Q) || - type == htons(ETH_P_8021AD)); - } - - *depth = vlan_depth; - - return type; + return __vlan_get_protocol(skb, type, depth); } /** @@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev, } EXPORT_SYMBOL(netdev_upper_dev_unlink); -void netdev_adjacent_add_links(struct net_device *dev) +static void netdev_adjacent_add_links(struct net_device *dev) { struct netdev_adjacent *iter; @@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev) } } -void netdev_adjacent_del_links(struct net_device *dev) +static void netdev_adjacent_del_links(struct net_device *dev) { struct netdev_adjacent *iter; @@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) if (!queue) return NULL; netdev_init_one_queue(dev, queue, NULL); - queue->qdisc = &noop_qdisc; + RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); queue->qdisc_sleeping = &noop_qdisc; rcu_assign_pointer(dev->ingress_queue, queue); #endif diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9cf6fe9ddc0c..446cbaf81185 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags) goto errout; } + if (!skb->len) + goto errout; + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); return 0; errout: WARN_ON(err == -EMSGSIZE); kfree_skb(skb); - rtnl_set_sk_err(net, RTNLGRP_LINK, err); + if (err) + rtnl_set_sk_err(net, RTNLGRP_LINK, err); return err; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 515569ffde8a..589aafd01fc5 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", ds->index, ds->pd->sw_addr); ds->slave_mii_bus->parent = ds->master_dev; + ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 3a83ce5efa80..787b3c294ce6 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb) * We now generate an ICMP HOST REDIRECT giving the route * we calculated. 
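
The skb_network_protocol() hunk above replaces an open-coded walk over stacked 802.1Q/802.1ad tags with a call to __vlan_get_protocol(). The loop being factored out looks roughly like this standalone sketch, which assumes a flat buffer (the in-kernel version additionally handles the mac_len == 0 case and pulls headers with pskb_may_pull(), as the removed lines show):

        #include <stdint.h>
        #include <stddef.h>
        #include <arpa/inet.h>

        #define ETH_P_8021Q     0x8100
        #define ETH_P_8021AD    0x88A8
        #define VLAN_HLEN       4

        struct vlan_hdr {
                uint16_t h_vlan_TCI;
                uint16_t h_vlan_encapsulated_proto;
        };

        /* Skip VLAN tags until a non-VLAN ethertype appears, reporting
         * how deep the network header sits via *depth. */
        static uint16_t vlan_get_protocol(const uint8_t *frame, size_t len,
                                          uint16_t type, size_t *depth)
        {
                size_t off = *depth;

                while (type == htons(ETH_P_8021Q) ||
                       type == htons(ETH_P_8021AD)) {
                        const struct vlan_hdr *vh;

                        if (off + VLAN_HLEN > len)
                                return 0;       /* truncated frame */
                        vh = (const struct vlan_hdr *)(frame + off);
                        type = vh->h_vlan_encapsulated_proto;
                        off += VLAN_HLEN;
                }
                *depth = off;
                return type;
        }
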
*/ - if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) + if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr && + !skb_sec_path(skb)) ip_rt_send_redirect(skb); skb->priority = rt_tos2priority(iph->tos); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b50861b22b6b..c373c0708d97 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset, /* * Generic function to send a packet as reply to another packet. * Used to send some TCP resets/acks so far. - * - * Use a fake percpu inet socket to avoid false sharing and contention. */ -static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { - .sk = { - .__sk_common = { - .skc_refcnt = ATOMIC_INIT(1), - }, - .sk_wmem_alloc = ATOMIC_INIT(1), - .sk_allocation = GFP_ATOMIC, - .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE), - }, - .pmtudisc = IP_PMTUDISC_WANT, - .uc_ttl = -1, -}; - -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, @@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, struct ipcm_cookie ipc; struct flowi4 fl4; struct rtable *rt = skb_rtable(skb); + struct net *net = sock_net(sk); struct sk_buff *nskb; - struct sock *sk; - struct inet_sock *inet; int err; if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) @@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, if (IS_ERR(rt)) return; - inet = &get_cpu_var(unicast_sock); + inet_sk(sk)->tos = arg->tos; - inet->tos = arg->tos; - sk = &inet->sk; sk->sk_priority = skb->priority; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; - sock_net_set(sk, net); - __skb_queue_head_init(&sk->sk_write_queue); sk->sk_sndbuf = sysctl_wmem_default; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); @@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; - skb_orphan(nskb); skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); ip_push_pending_frames(sk, &fl4); } out: - put_cpu_var(unicast_sock); - ip_rt_put(rt); } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index c0d82f78d364..2a3720fb5a5f 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb) sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk != NULL) { + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + pr_debug("rcv on socket %p\n", sk); - ping_queue_rcv_skb(sk, skb_get(skb)); + if (skb2) + ping_queue_rcv_skb(sk, skb2); sock_put(sk); return true; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6a2155b02602..52e1f2bf0ca2 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (dst->dev->mtu < mtu) return; + if (rt->rt_pmtu && rt->rt_pmtu < mtu) + return; + if (mtu < ip_rt_min_pmtu) mtu = ip_rt_min_pmtu; @@ -1554,11 +1557,10 @@ static int __mkroute_input(struct sk_buff *skb, do_cache = res->fi && !itag; if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && + skb->protocol == htons(ETH_P_IP) && (IN_DEV_SHARED_MEDIA(out_dev) || - inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { - flags |= RTCF_DOREDIRECT; 
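
Earlier in this stretch of the diff, ping_rcv() switches from skb_get() (share the buffer and bump its refcount) to skb_clone() (queue a private copy). The ownership rule at stake, reduced to a userspace analogue with invented types, is that the receive queue must be handed a buffer it alone owns:

        #include <stdlib.h>
        #include <string.h>

        struct pkt { unsigned char data[64]; };

        /* Queue a private copy: the queue may modify or free what it
         * receives, so sharing the caller's buffer through a bare
         * reference count leaves two owners with different ideas
         * about its lifetime. */
        static int deliver(const struct pkt *in, int (*enqueue)(struct pkt *))
        {
                struct pkt *copy = malloc(sizeof(*copy));

                if (!copy)
                        return -1;      /* drop; caller still owns "in" */
                memcpy(copy, in, sizeof(*copy));
                return enqueue(copy);   /* queue now owns "copy" */
        }
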
- do_cache = false; - } + inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) + IPCB(skb)->flags |= IPSKB_DOREDIRECT; if (skb->protocol != htons(ETH_P_IP)) { /* Not IP (i.e. ARP). Do not create route, if it is @@ -2303,6 +2305,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; if (rt->rt_flags & RTCF_NOTIFY) r->rtm_flags |= RTM_F_NOTIFY; + if (IPCB(skb)->flags & IPSKB_DOREDIRECT) + r->rtm_flags |= RTCF_DOREDIRECT; if (nla_put_be32(skb, RTA_DST, dst)) goto nla_put_failure; diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index bb395d46a389..c037644eafb7 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) tcp_slow_start(tp, acked); else { bictcp_update(ca, tp->snd_cwnd); - tcp_cong_avoid_ai(tp, ca->cnt); + tcp_cong_avoid_ai(tp, ca->cnt, 1); } } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 27ead0dd16bc..8670e68e2ce6 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and * returns the leftover acks to adjust cwnd in congestion avoidance mode. */ -void tcp_slow_start(struct tcp_sock *tp, u32 acked) +u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) { u32 cwnd = tp->snd_cwnd + acked; if (cwnd > tp->snd_ssthresh) cwnd = tp->snd_ssthresh + 1; + acked -= cwnd - tp->snd_cwnd; tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); + + return acked; } EXPORT_SYMBOL_GPL(tcp_slow_start); -/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */ -void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w) +/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w), + * for every packet that was ACKed. + */ +void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) { + tp->snd_cwnd_cnt += acked; if (tp->snd_cwnd_cnt >= w) { - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - tp->snd_cwnd_cnt = 0; - } else { - tp->snd_cwnd_cnt++; + u32 delta = tp->snd_cwnd_cnt / w; + + tp->snd_cwnd_cnt -= delta * w; + tp->snd_cwnd += delta; } + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp); } EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); @@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked) return; /* In "safe" area, increase. */ - if (tp->snd_cwnd <= tp->snd_ssthresh) - tcp_slow_start(tp, acked); + if (tp->snd_cwnd <= tp->snd_ssthresh) { + acked = tcp_slow_start(tp, acked); + if (!acked) + return; + } /* In dangerous area, increase slowly. */ - else - tcp_cong_avoid_ai(tp, tp->snd_cwnd); + tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); } EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 6b6002416a73..4b276d1ed980 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -93,9 +93,7 @@ struct bictcp { u32 epoch_start; /* beginning of an epoch */ u32 ack_cnt; /* number of acks */ u32 tcp_cwnd; /* estimated tcp cwnd */ -#define ACK_RATIO_SHIFT 4 -#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) - u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ + u16 unused; u8 sample_cnt; /* number of samples to decide curr_rtt */ u8 found; /* the exit point is found? 
*/ u32 round_start; /* beginning of each round */ @@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca) ca->bic_K = 0; ca->delay_min = 0; ca->epoch_start = 0; - ca->delayed_ack = 2 << ACK_RATIO_SHIFT; ca->ack_cnt = 0; ca->tcp_cwnd = 0; ca->found = 0; @@ -205,23 +202,30 @@ static u32 cubic_root(u64 a) /* * Compute congestion window to use. */ -static inline void bictcp_update(struct bictcp *ca, u32 cwnd) +static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked) { u32 delta, bic_target, max_cnt; u64 offs, t; - ca->ack_cnt++; /* count the number of ACKs */ + ca->ack_cnt += acked; /* count the number of ACKed packets */ if (ca->last_cwnd == cwnd && (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32) return; + /* The CUBIC function can update ca->cnt at most once per jiffy. + * On all cwnd reduction events, ca->epoch_start is set to 0, + * which will force a recalculation of ca->cnt. + */ + if (ca->epoch_start && tcp_time_stamp == ca->last_time) + goto tcp_friendliness; + ca->last_cwnd = cwnd; ca->last_time = tcp_time_stamp; if (ca->epoch_start == 0) { ca->epoch_start = tcp_time_stamp; /* record beginning */ - ca->ack_cnt = 1; /* start counting */ + ca->ack_cnt = acked; /* start counting */ ca->tcp_cwnd = cwnd; /* syn with cubic */ if (ca->last_max_cwnd <= cwnd) { @@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) if (ca->last_max_cwnd == 0 && ca->cnt > 20) ca->cnt = 20; /* increase cwnd 5% per RTT */ +tcp_friendliness: /* TCP Friendly */ if (tcp_friendliness) { u32 scale = beta_scale; @@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) } } - ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; if (ca->cnt == 0) /* cannot be zero */ ca->cnt = 1; } @@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) if (tp->snd_cwnd <= tp->snd_ssthresh) { if (hystart && after(ack, ca->end_seq)) bictcp_hystart_reset(sk); - tcp_slow_start(tp, acked); - } else { - bictcp_update(ca, tp->snd_cwnd); - tcp_cong_avoid_ai(tp, ca->cnt); + acked = tcp_slow_start(tp, acked); + if (!acked) + return; } + bictcp_update(ca, tp->snd_cwnd, acked); + tcp_cong_avoid_ai(tp, ca->cnt, acked); } static u32 bictcp_recalc_ssthresh(struct sock *sk) @@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay) */ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) { - const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 delay; - if (icsk->icsk_ca_state == TCP_CA_Open) { - u32 ratio = ca->delayed_ack; - - ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; - ratio += cnt; - - ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT); - } - /* Some calls are for duplicates without timetamps */ if (rtt_us < 0) return; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a3f72d7fc06c..d22f54482bab 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.bound_dev_if = sk->sk_bound_dev_if; arg.tos = ip_hdr(skb)->tos; - ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), + skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); @@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, if (oif) arg.bound_dev_if = oif; arg.tos = tos; - ip_send_unicast_reply(net, skb, 
&TCP_SKB_CB(skb)->header.h4.opt, + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), + skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); @@ -2428,14 +2430,39 @@ struct proto tcp_prot = { }; EXPORT_SYMBOL(tcp_prot); +static void __net_exit tcp_sk_exit(struct net *net) +{ + int cpu; + + for_each_possible_cpu(cpu) + inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); + free_percpu(net->ipv4.tcp_sk); +} + static int __net_init tcp_sk_init(struct net *net) { + int res, cpu; + + net->ipv4.tcp_sk = alloc_percpu(struct sock *); + if (!net->ipv4.tcp_sk) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + struct sock *sk; + + res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, + IPPROTO_TCP, net); + if (res) + goto fail; + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; + } net->ipv4.sysctl_tcp_ecn = 2; return 0; -} -static void __net_exit tcp_sk_exit(struct net *net) -{ +fail: + tcp_sk_exit(net); + + return res; } static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 6824afb65d93..333bcb2415ff 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp, acked); else - tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); + tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), + 1); } static u32 tcp_scalable_ssthresh(struct sock *sk) diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index a4d2d2d88dca..112151eeee45 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) /* In the "non-congestive state", increase cwnd * every rtt. */ - tcp_cong_avoid_ai(tp, tp->snd_cwnd); + tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); } else { /* In the "congestive state", increase cwnd * every other rtt. diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index cd7273218598..17d35662930d 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) } else { /* Reno */ - tcp_cong_avoid_ai(tp, tp->snd_cwnd); + tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); } /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt. diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 7927db0a9279..4a000f1dd757 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin s_slot = cb->args[0]; num = s_num = cb->args[1]; - for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) { + for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) { struct sock *sk; struct hlist_nulls_node *node; struct udp_hslot *hslot = &table->hash[slot]; + num = 0; + if (hlist_nulls_empty(&hslot->head)) continue; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index b2d1838897c9..f1c6d5e98322 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst, return 0; } +static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, + struct net *net) +{ + if (atomic_read(&rt->rt6i_ref) != 1) { + /* This route is used as dummy address holder in some split + * nodes. It is not leaked, but it still holds other resources, + * which must be released in time. 
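
The tcp_sk_init()/tcp_sk_exit() hunk above gives every netns one control socket per possible CPU and reuses the exit routine to unwind a half-finished init. A userspace analogue of that allocate-then-unwind shape (a fixed array and calloc/free stand in for alloc_percpu and the inet_ctl_sock helpers; alloc_percpu, like calloc, hands back zeroed slots):

        #include <stdlib.h>

        #define NR_CPUS 8

        struct ctl_sock { int unused; };

        static struct ctl_sock *percpu_sk[NR_CPUS];

        static void net_exit(void)
        {
                /* Also reached from the failure path below, so it must
                 * tolerate slots that were never filled in (free(NULL)
                 * is a no-op). */
                for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                        free(percpu_sk[cpu]);
                        percpu_sk[cpu] = NULL;
                }
        }

        static int net_init(void)
        {
                for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                        percpu_sk[cpu] = calloc(1, sizeof(struct ctl_sock));
                        if (!percpu_sk[cpu]) {
                                net_exit(); /* unwind CPUs done so far */
                                return -1;
                        }
                }
                return 0;
        }
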
So, scan ascendant nodes + * and replace dummy references to this route with references + * to still alive ones. + */ + while (fn) { + if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { + fn->leaf = fib6_find_prefix(net, fn); + atomic_inc(&fn->leaf->rt6i_ref); + rt6_release(rt); + } + fn = fn->parent; + } + /* No more references are possible at this point. */ + BUG_ON(atomic_read(&rt->rt6i_ref) != 1); + } +} + /* * Insert routing information in a node. */ @@ -807,11 +830,12 @@ add: rt->dst.rt6_next = iter->dst.rt6_next; atomic_inc(&rt->rt6i_ref); inet6_rt_notify(RTM_NEWROUTE, rt, info); - rt6_release(iter); if (!(fn->fn_flags & RTN_RTINFO)) { info->nl_net->ipv6.rt6_stats->fib_route_nodes++; fn->fn_flags |= RTN_RTINFO; } + fib6_purge_rt(iter, fn, info->nl_net); + rt6_release(iter); } return 0; @@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, fn = fib6_repair_tree(net, fn); } - if (atomic_read(&rt->rt6i_ref) != 1) { - /* This route is used as dummy address holder in some split - * nodes. It is not leaked, but it still holds other resources, - * which must be released in time. So, scan ascendant nodes - * and replace dummy references to this route with references - * to still alive ones. - */ - while (fn) { - if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { - fn->leaf = fib6_find_prefix(net, fn); - atomic_inc(&fn->leaf->rt6i_ref); - rt6_release(rt); - } - fn = fn->parent; - } - /* No more references are possible at this point. */ - BUG_ON(atomic_read(&rt->rt6i_ref) != 1); - } + fib6_purge_rt(rt, fn, net); inet6_rt_notify(RTM_DELROUTE, rt, info); rt6_release(rt); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 13cda4c6313b..01ccc28a686f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (code == ICMPV6_HDR_FIELD) teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); - if (teli && teli == info - 2) { + if (teli && teli == be32_to_cpu(info) - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", @@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } break; case ICMPV6_PKT_TOOBIG: - mtu = info - offset; + mtu = be32_to_cpu(info) - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ce69a12ae48c..d28f2a2efb32 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) skb_copy_secmark(to, from); } -static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) -{ - static u32 ip6_idents_hashrnd __read_mostly; - u32 hash, id; - - net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); - - hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); - hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash); - - id = ip_idents_reserve(hash, 1); - fhdr->identification = htonl(id); -} - int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 97f41a3e68d9..54520a0bd5e3 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -9,6 +9,24 @@ #include <net/addrconf.h> #include <net/secure_seq.h> +u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr 
*src) +{ + u32 hash, id; + + hash = __ipv6_addr_jhash(dst, hashrnd); + hash = __ipv6_addr_jhash(src, hash); + + /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, + * set the hight order instead thus minimizing possible future + * collisions. + */ + id = ip_idents_reserve(hash, 1); + if (unlikely(!id)) + id = 1 << 31; + + return id; +} + /* This function exists only for tap drivers that must support broken * clients requesting UFO without specifying an IPv6 fragment ID. * @@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb) static u32 ip6_proxy_idents_hashrnd __read_mostly; struct in6_addr buf[2]; struct in6_addr *addrs; - u32 hash, id; + u32 id; addrs = skb_header_pointer(skb, skb_network_offset(skb) + @@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb) net_get_random_once(&ip6_proxy_idents_hashrnd, sizeof(ip6_proxy_idents_hashrnd)); - hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); - hash = __ipv6_addr_jhash(&addrs[0], hash); - - id = ip_idents_reserve(hash, 1); - skb_shinfo(skb)->ip6_frag_id = htonl(id); + id = __ipv6_select_ident(ip6_proxy_idents_hashrnd, + &addrs[1], &addrs[0]); + skb_shinfo(skb)->ip6_frag_id = id; } EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); +void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) +{ + static u32 ip6_idents_hashrnd __read_mostly; + u32 id; + + net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); + + id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr, + &rt->rt6i_src.addr); + fhdr->identification = htonl(id); +} +EXPORT_SYMBOL(ipv6_select_ident); + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 166e33bed222..495965358d22 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1242,12 +1242,16 @@ restart: rt = net->ipv6.ip6_null_entry; else if (rt->dst.error) { rt = net->ipv6.ip6_null_entry; - } else if (rt == net->ipv6.ip6_null_entry) { + goto out; + } + + if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); if (fn) goto restart; } +out: dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 213546bd6d5d..cdbfe5af6187 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[], if (data[IFLA_IPTUN_ENCAP_SPORT]) { ret = true; - ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); + ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]); } if (data[IFLA_IPTUN_ENCAP_DPORT]) { ret = true; - ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); + ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]); } return ret; @@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) || - nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, + nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) || - nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, + nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags)) diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index b6aa8ed18257..a56276996b72 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); + /* Set the IPv6 fragment 
id if not set yet */ + if (!skb_shinfo(skb)->ip6_frag_id) + ipv6_proxy_select_ident(skb); + segs = NULL; goto out; } @@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; - fptr->identification = skb_shinfo(skb)->ip6_frag_id; + if (skb_shinfo(skb)->ip6_frag_id) + fptr->identification = skb_shinfo(skb)->ip6_frag_id; + else + ipv6_select_ident(fptr, + (struct rt6_info *)skb_dst(skb)); /* Fragment the skb. ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 5f983644373a..48bf5a06847b 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) { struct flowi6 *fl6 = &fl->u.ip6; int onlyproto = 0; - u16 offset = skb_network_header_len(skb); const struct ipv6hdr *hdr = ipv6_hdr(skb); + u16 offset = sizeof(*hdr); struct ipv6_opt_hdr *exthdr; const unsigned char *nh = skb_network_header(skb); - u8 nexthdr = nh[IP6CB(skb)->nhoff]; + u16 nhoff = IP6CB(skb)->nhoff; int oif = 0; + u8 nexthdr; + + if (!nhoff) + nhoff = offsetof(struct ipv6hdr, nexthdr); + + nexthdr = nh[nhoff]; if (skb_dst(skb)) oif = skb_dst(skb)->dev->ifindex; diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c index 612a5ddaf93b..799bafc2af39 100644 --- a/net/llc/sysctl_net_llc.c +++ b/net/llc/sysctl_net_llc.c @@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = { { .procname = "ack", .data = &sysctl_llc2_ack_timeout, - .maxlen = sizeof(long), + .maxlen = sizeof(sysctl_llc2_ack_timeout), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "busy", .data = &sysctl_llc2_busy_timeout, - .maxlen = sizeof(long), + .maxlen = sizeof(sysctl_llc2_busy_timeout), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "p", .data = &sysctl_llc2_p_timeout, - .maxlen = sizeof(long), + .maxlen = sizeof(sysctl_llc2_p_timeout), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "rej", .data = &sysctl_llc2_rej_timeout, - .maxlen = sizeof(long), + .maxlen = sizeof(sysctl_llc2_rej_timeout), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 4c5192e0d66c..4a95fe3cffbc 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) } } - /* tear down aggregation sessions and remove STAs */ - mutex_lock(&local->sta_mtx); - list_for_each_entry(sta, &local->sta_list, list) { - if (sta->uploaded) { - enum ieee80211_sta_state state; - - state = sta->sta_state; - for (; state > IEEE80211_STA_NOTEXIST; state--) - WARN_ON(drv_sta_state(local, sta->sdata, sta, - state, state - 1)); - } - } - mutex_unlock(&local->sta_mtx); - /* remove all interfaces that were created in the driver */ list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) @@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) case NL80211_IFTYPE_STATION: ieee80211_mgd_quiesce(sdata); break; + case NL80211_IFTYPE_WDS: + /* tear down aggregation sessions and remove STAs */ + mutex_lock(&local->sta_mtx); + sta = sdata->u.wds.sta; + if (sta && sta->uploaded) { + enum ieee80211_sta_state state; + + state = sta->sta_state; + for (; state > 
IEEE80211_STA_NOTEXIST; state--) + WARN_ON(drv_sta_state(local, sta->sdata, + sta, state, + state - 1)); + } + mutex_unlock(&local->sta_mtx); + break; default: break; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 683b10f46505..d69ca513848e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, else if (rate && rate->flags & IEEE80211_RATE_ERP_G) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; else if (rate) - channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; + channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; else channel_flags |= IEEE80211_CHAN_2GHZ; put_unaligned_le16(channel_flags, pos); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 990decba1fe4..b87ca32efa0b 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) return err; } -static int ip_vs_route_me_harder(int af, struct sk_buff *skb) +static int ip_vs_route_me_harder(int af, struct sk_buff *skb, + unsigned int hooknum) { + if (!sysctl_snat_reroute(skb)) + return 0; + /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */ + if (NF_INET_LOCAL_IN == hooknum) + return 0; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { - if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0) + struct dst_entry *dst = skb_dst(skb); + + if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && + ip6_route_me_harder(skb) != 0) return 1; } else #endif - if ((sysctl_snat_reroute(skb) || - skb_rtable(skb)->rt_flags & RTCF_LOCAL) && + if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && ip_route_me_harder(skb, RTN_LOCAL) != 0) return 1; @@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, union nf_inet_addr *snet, __u8 protocol, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, - unsigned int offset, unsigned int ihl) + unsigned int offset, unsigned int ihl, + unsigned int hooknum) { unsigned int verdict = NF_DROP; @@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb, #endif ip_vs_nat_icmp(skb, pp, cp, 1); - if (ip_vs_route_me_harder(af, skb)) + if (ip_vs_route_me_harder(af, skb, hooknum)) goto out; /* do the statistics and put it back */ @@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related, snet.ip = iph->saddr; return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, - pp, ciph.len, ihl); + pp, ciph.len, ihl, hooknum); } #ifdef CONFIG_IP_VS_IPV6 @@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, snet.in6 = ciph.saddr.in6; writable = ciph.len; return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, - pp, writable, sizeof(struct ipv6hdr)); + pp, writable, sizeof(struct ipv6hdr), + hooknum); } #endif @@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb, */ static unsigned int handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, - struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph, + unsigned int hooknum) { struct ip_vs_protocol *pp = pd->pp; @@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, * if it came from this machine itself. So re-compute * the routing information. 
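
The ip_vs_route_me_harder() change above threads the hook number down so that SNAT replies handled in LOCAL_IN skip rerouting entirely (they are being delivered to the local stack), and so the IPv6 path can check for a loopback device before calling ip6_route_me_harder(). The early-exit logic, as a simplified sketch with invented names:

        #include <stdbool.h>

        enum hook { HOOK_PRE_ROUTING, HOOK_LOCAL_IN, HOOK_FORWARD,
                    HOOK_LOCAL_OUT, HOOK_POST_ROUTING };

        /* Reroute replies only when SNAT rerouting is enabled and the
         * packet is not destined for local delivery. */
        static bool needs_reroute(enum hook hooknum, bool snat_reroute)
        {
                if (!snat_reroute)
                        return false;
                if (hooknum == HOOK_LOCAL_IN)
                        return false;
                return true;
        }
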
*/ - if (ip_vs_route_me_harder(af, skb)) + if (ip_vs_route_me_harder(af, skb, hooknum)) goto drop; IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT"); @@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) cp = pp->conn_out_get(af, skb, &iph, 0); if (likely(cp)) - return handle_response(af, skb, pd, cp, &iph); + return handle_response(af, skb, pd, cp, &iph, hooknum); if (sysctl_nat_icmp_send(net) && (pp->protocol == IPPROTO_TCP || pp->protocol == IPPROTO_UDP || diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3b3ddb4fb9ee..1ff04bcd4871 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr) /* Restore old counters on this cpu, no problem. Per-cpu statistics * are not exposed to userspace. */ + preempt_disable(); stats = this_cpu_ptr(newstats); stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); + preempt_enable(); return newstats; } @@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); - if (trans == NULL) + if (trans == NULL) { + free_percpu(stats); return -ENOMEM; + } nft_trans_chain_stats(trans) = stats; nft_trans_chain_update(trans) = true; @@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, hookfn = type->hooks[hooknum]; basechain = kzalloc(sizeof(*basechain), GFP_KERNEL); - if (basechain == NULL) + if (basechain == NULL) { + module_put(type->owner); return -ENOMEM; + } if (nla[NFTA_CHAIN_COUNTERS]) { stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); @@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain, } EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); +int nft_chain_validate_hooks(const struct nft_chain *chain, + unsigned int hook_flags) +{ + struct nft_base_chain *basechain; + + if (chain->flags & NFT_BASE_CHAIN) { + basechain = nft_base_chain(chain); + + if ((1 << basechain->ops[0].hooknum) & hook_flags) + return 0; + + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL_GPL(nft_chain_validate_hooks); + /* * Loop detection - walk through the ruleset beginning at the destination chain * of a new jump until either the source chain is reached (loop) or all diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index d1ffd5eb3a9b..9aea747b43ea 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = { }; EXPORT_SYMBOL_GPL(nft_masq_policy); +int nft_masq_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + int err; + + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; + + return nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_POST_ROUTING)); +} +EXPORT_SYMBOL_GPL(nft_masq_validate); + int nft_masq_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx, struct nft_masq *priv = nft_expr_priv(expr); int err; - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); - if (err < 0) + err = nft_masq_validate(ctx, expr, NULL); + if (err) return err; if (tb[NFTA_MASQ_FLAGS] == 
NULL) @@ -60,12 +75,5 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(nft_masq_dump); -int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, - const struct nft_data **data) -{ - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); -} -EXPORT_SYMBOL_GPL(nft_masq_validate); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index aff54fb1c8a0..a0837c6c9283 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = { [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, }; -static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, - const struct nlattr * const tb[]) +static int nft_nat_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) { struct nft_nat *priv = nft_expr_priv(expr); - u32 family; int err; err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); if (err < 0) return err; + switch (priv->type) { + case NFT_NAT_SNAT: + err = nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_POST_ROUTING) | + (1 << NF_INET_LOCAL_IN)); + break; + case NFT_NAT_DNAT: + err = nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_OUT)); + break; + } + + return err; +} + +static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_nat *priv = nft_expr_priv(expr); + u32 family; + int err; + if (tb[NFTA_NAT_TYPE] == NULL || (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) @@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return -EINVAL; } + err = nft_nat_validate(ctx, expr, NULL); + if (err < 0) + return err; + if (tb[NFTA_NAT_FAMILY] == NULL) return -EINVAL; @@ -219,13 +246,6 @@ nla_put_failure: return -1; } -static int nft_nat_validate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nft_data **data) -{ - return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); -} - static struct nft_expr_type nft_nat_type; static const struct nft_expr_ops nft_nat_ops = { .type = &nft_nat_type, diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 9e8093f28311..d7e9e93a4e90 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = { }; EXPORT_SYMBOL_GPL(nft_redir_policy); +int nft_redir_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + int err; + + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; + + return nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_OUT)); +} +EXPORT_SYMBOL_GPL(nft_redir_validate); + int nft_redir_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx, struct nft_redir *priv = nft_expr_priv(expr); int err; - err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + err = nft_redir_validate(ctx, expr, NULL); if (err < 0) return err; @@ -88,12 +104,5 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(nft_redir_dump); -int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, - const struct nft_data **data) -{ - return 
nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); -} -EXPORT_SYMBOL_GPL(nft_redir_validate); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 02fdde28dada..75532efa51cd 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups, for (undo = 0; undo < group; undo++) if (test_bit(undo, &groups)) - nlk->netlink_unbind(sock_net(sk), undo); + nlk->netlink_unbind(sock_net(sk), undo + 1); } static int netlink_bind(struct socket *sock, struct sockaddr *addr, @@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, for (group = 0; group < nlk->ngroups; group++) { if (!test_bit(group, &groups)) continue; - err = nlk->netlink_bind(net, group); + err = nlk->netlink_bind(net, group + 1); if (!err) continue; netlink_undo_bind(group, groups, sk); diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index c3b0cd43eb56..c173f69e1479 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = { { .procname = "max_unacked_packets", .data = &rds_sysctl_max_unacked_packets, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "max_unacked_bytes", .data = &rds_sysctl_max_unacked_bytes, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index aad6a679fb13..baef987fe2c0 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, } EXPORT_SYMBOL(tcf_exts_change); -#define tcf_exts_first_act(ext) \ - list_first_entry(&(exts)->actions, struct tc_action, list) +#define tcf_exts_first_act(ext) \ + list_first_entry_or_null(&(exts)->actions, \ + struct tc_action, list) int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) { @@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT struct tc_action *a = tcf_exts_first_act(exts); - if (tcf_action_copy_stats(skb, a, 1) < 0) + if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) return -1; #endif return 0; diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 84c8219c3e1c..f59adf8a4cd7 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, } bpf_size = bpf_len * sizeof(*bpf_ops); + if (bpf_size != nla_len(tb[TCA_BPF_OPS])) { + ret = -EINVAL; + goto errout; + } + bpf_ops = kzalloc(bpf_size, GFP_KERNEL); if (bpf_ops == NULL) { ret = -ENOMEM; @@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, struct cls_bpf_head *head) { unsigned int i = 0x80000000; + u32 handle; do { if (++head->hgen == 0x7FFFFFFF) head->hgen = 1; } while (--i > 0 && cls_bpf_get(tp, head->hgen)); - if (i == 0) + + if (unlikely(i == 0)) { pr_err("Insufficient number of handles\n"); + handle = 0; + } else { + handle = head->hgen; + } - return i; + return handle; } static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 9b05924cc386..333cd94ba381 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct 
nlattr *opt) if (tb[TCA_FQ_FLOW_PLIMIT]) q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); - if (tb[TCA_FQ_QUANTUM]) - q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); + if (tb[TCA_FQ_QUANTUM]) { + u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); + + if (quantum > 0) + q->quantum = quantum; + else + err = -EINVAL; + } if (tb[TCA_FQ_INITIAL_QUANTUM]) q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index f791edd64d6c..26d06dbcc1c8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc, asoc->peer.peer_hmacs = new->peer.peer_hmacs; new->peer.peer_hmacs = NULL; - sctp_auth_key_put(asoc->asoc_shared_key); sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index e49e231cef52..06320c8c1c86 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2608,7 +2608,7 @@ do_addr_param: addr_param = param.v + sizeof(sctp_addip_param_t); - af = sctp_get_af_specific(param_type2af(param.p->type)); + af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (af == NULL) break; diff --git a/net/socket.c b/net/socket.c index a2c33a4dc7ba..418795caa897 100644 --- a/net/socket.c +++ b/net/socket.c @@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { - if (!is_sync_kiocb(iocb)) - BUG(); - siocb->kiocb = iocb; iocb->private = siocb; return siocb; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7ca4b5133123..8887c6e5fca8 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->get_key) return -EOPNOTSUPP; + if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) + return -ENOENT; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) goto nla_put_failure; - if (pairwise && mac_addr && - !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) - return -ENOENT; - err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, get_key_callback); @@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); - if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr && + if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; diff --git a/net/wireless/util.c b/net/wireless/util.c index d0ac795445b7..5488c3662f7d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) goto out; } + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_has_order(fc)) + hdrlen += IEEE80211_HT_CTL_LEN; + goto out; + } + if (ieee80211_is_ctl(fc)) { /* * ACK and CTS are 10 bytes, all others 16. 
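
The fq_change() hunk at the start of this stretch is a validate-before-commit fix: a zero TCA_FQ_QUANTUM used to be stored as-is and later fed into the scheduler's per-flow credit math. The corrected shape, in miniature (names invented):

        #include <errno.h>
        #include <stdint.h>

        /* Reject an invalid attribute instead of applying it; the old
         * code overwrote the quantum with whatever userspace sent. */
        static int fq_set_quantum(uint32_t *dst, uint32_t requested)
        {
                if (requested == 0)
                        return -EINVAL;
                *dst = requested;
                return 0;
        }
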
To see how diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c index e286b42307f3..6299ee95cd11 100644 --- a/samples/bpf/test_maps.c +++ b/samples/bpf/test_maps.c @@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data) /* iterate over two elements */ assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && - next_key == 2); + (next_key == 1 || next_key == 2)); assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && - next_key == 1); + (next_key == 1 || next_key == 2)); assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && errno == ENOENT); diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig index 8eb779b9d77f..604e718d68d3 100644 --- a/security/tomoyo/Kconfig +++ b/security/tomoyo/Kconfig @@ -5,6 +5,7 @@ config SECURITY_TOMOYO select SECURITYFS select SECURITY_PATH select SECURITY_NETWORK + select SRCU default n help This selects TOMOYO Linux, pathname-based access control. diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c index ec667f158f19..5d905d90d504 100644 --- a/sound/core/seq/seq_dummy.c +++ b/sound/core/seq/seq_dummy.c @@ -82,36 +82,6 @@ struct snd_seq_dummy_port { static int my_client = -1; /* - * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events - * to subscribers. - * Note: this callback is called only after all subscribers are removed. - */ -static int -dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) -{ - struct snd_seq_dummy_port *p; - int i; - struct snd_seq_event ev; - - p = private_data; - memset(&ev, 0, sizeof(ev)); - if (p->duplex) - ev.source.port = p->connect; - else - ev.source.port = p->port; - ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; - ev.type = SNDRV_SEQ_EVENT_CONTROLLER; - for (i = 0; i < 16; i++) { - ev.data.control.channel = i; - ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); - ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); - } - return 0; -} - -/* * event input callback - just redirect events to subscribers */ static int @@ -175,7 +145,6 @@ create_port(int idx, int type) | SNDRV_SEQ_PORT_TYPE_PORT; memset(&pcb, 0, sizeof(pcb)); pcb.owner = THIS_MODULE; - pcb.unuse = dummy_unuse; pcb.event_input = dummy_input; pcb.private_free = dummy_free; pcb.private_data = rec; diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c index 1a3a6fa27158..c6bba99a90b2 100644 --- a/sound/i2c/other/ak4113.c +++ b/sound/i2c/other/ak4113.c @@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg) static void snd_ak4113_free(struct ak4113 *chip) { - chip->init = 1; /* don't schedule new work */ - mb(); + atomic_inc(&chip->wq_processing); /* don't schedule new work */ cancel_delayed_work_sync(&chip->work); kfree(chip); } @@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, chip->write = write; chip->private_data = private_data; INIT_DELAYED_WORK(&chip->work, ak4113_stats); + atomic_set(&chip->wq_processing, 0); for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++) chip->regmap[reg] = pgm[reg]; @@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip) void snd_ak4113_reinit(struct ak4113 *chip) { - chip->init = 1; - mb(); - flush_delayed_work(&chip->work); + if (atomic_inc_return(&chip->wq_processing) == 1) + cancel_delayed_work_sync(&chip->work); ak4113_init_regs(chip); /* bring up statistics / event queing */ - chip->init = 0; - if (chip->kctls[0]) + if 
(atomic_dec_and_test(&chip->wq_processing)) schedule_delayed_work(&chip->work, HZ / 10); } EXPORT_SYMBOL_GPL(snd_ak4113_reinit); @@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work) { struct ak4113 *chip = container_of(work, struct ak4113, work.work); - if (!chip->init) + if (atomic_inc_return(&chip->wq_processing) == 1) snd_ak4113_check_rate_and_errors(chip, chip->check_flags); - schedule_delayed_work(&chip->work, HZ / 10); + if (atomic_dec_and_test(&chip->wq_processing)) + schedule_delayed_work(&chip->work, HZ / 10); } diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c index c7f56339415d..b70e6eccbd03 100644 --- a/sound/i2c/other/ak4114.c +++ b/sound/i2c/other/ak4114.c @@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114) static void snd_ak4114_free(struct ak4114 *chip) { - chip->init = 1; /* don't schedule new work */ - mb(); + atomic_inc(&chip->wq_processing); /* don't schedule new work */ cancel_delayed_work_sync(&chip->work); kfree(chip); } @@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card, chip->write = write; chip->private_data = private_data; INIT_DELAYED_WORK(&chip->work, ak4114_stats); + atomic_set(&chip->wq_processing, 0); for (reg = 0; reg < 6; reg++) chip->regmap[reg] = pgm[reg]; @@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip) void snd_ak4114_reinit(struct ak4114 *chip) { - chip->init = 1; - mb(); - flush_delayed_work(&chip->work); + if (atomic_inc_return(&chip->wq_processing) == 1) + cancel_delayed_work_sync(&chip->work); ak4114_init_regs(chip); /* bring up statistics / event queing */ - chip->init = 0; - if (chip->kctls[0]) + if (atomic_dec_and_test(&chip->wq_processing)) schedule_delayed_work(&chip->work, HZ / 10); } @@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work) { struct ak4114 *chip = container_of(work, struct ak4114, work.work); - if (!chip->init) + if (atomic_inc_return(&chip->wq_processing) == 1) snd_ak4114_check_rate_and_errors(chip, chip->check_flags); - - schedule_delayed_work(&chip->work, HZ / 10); + if (atomic_dec_and_test(&chip->wq_processing)) + schedule_delayed_work(&chip->work, HZ / 10); } EXPORT_SYMBOL(snd_ak4114_create); diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c index 7752860f7230..4c23381727a1 100644 --- a/sound/soc/adi/axi-i2s.c +++ b/sound/soc/adi/axi-i2s.c @@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev) if (ret) goto err_clk_disable; + return 0; + err_clk_disable: clk_disable_unprepare(i2s->clk); return ret; diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 99ff35e2a25d..35e44e463cfe 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, struct atmel_pcm_dma_params *dma_params; int dir, channels, bits; u32 tfmr, rfmr, tcmr, rcmr; - int start_event; int ret; int fslen, fslen_ext; @@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, * The SSC transmit clock is obtained from the BCLK signal on * on the TK line, and the SSC receive clock is * generated from the transmit clock. - * - * For single channel data, one sample is transferred - * on the falling edge of the LRC clock. - * For two channel data, one sample is - * transferred on both edges of the LRC clock. */ - start_event = ((channels == 1) - ? 
SSC_START_FALLING_RF - : SSC_START_EDGE_RF); - rcmr = SSC_BF(RCMR_PERIOD, 0) | SSC_BF(RCMR_STTDLY, START_DELAY) - | SSC_BF(RCMR_START, start_event) + | SSC_BF(RCMR_START, SSC_START_FALLING_RF) | SSC_BF(RCMR_CKI, SSC_CKI_RISING) | SSC_BF(RCMR_CKO, SSC_CKO_NONE) | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? @@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE) | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE) | SSC_BF(RFMR_FSLEN, 0) - | SSC_BF(RFMR_DATNB, 0) + | SSC_BF(RFMR_DATNB, (channels - 1)) | SSC_BIT(RFMR_MSBF) | SSC_BF(RFMR_LOOP, 0) | SSC_BF(RFMR_DATLEN, (bits - 1)); tcmr = SSC_BF(TCMR_PERIOD, 0) | SSC_BF(TCMR_STTDLY, START_DELAY) - | SSC_BF(TCMR_START, start_event) + | SSC_BF(TCMR_START, SSC_START_FALLING_RF) | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) | SSC_BF(TCMR_CKO, SSC_CKO_NONE) | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ? @@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, | SSC_BF(TFMR_FSDEN, 0) | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE) | SSC_BF(TFMR_FSLEN, 0) - | SSC_BF(TFMR_DATNB, 0) + | SSC_BF(TFMR_DATNB, (channels - 1)) | SSC_BIT(TFMR_MSBF) | SSC_BF(TFMR_DATDEF, 0) | SSC_BF(TFMR_DATLEN, (bits - 1)); @@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period) | SSC_BF(RCMR_STTDLY, 1) | SSC_BF(RCMR_START, SSC_START_RISING_RF) - | SSC_BF(RCMR_CKI, SSC_CKI_RISING) + | SSC_BF(RCMR_CKI, SSC_CKI_FALLING) | SSC_BF(RCMR_CKO, SSC_CKO_NONE) | SSC_BF(RCMR_CKS, SSC_CKS_DIV); @@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period) | SSC_BF(TCMR_STTDLY, 1) | SSC_BF(TCMR_START, SSC_START_RISING_RF) - | SSC_BF(TCMR_CKI, SSC_CKI_RISING) + | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS) | SSC_BF(TCMR_CKS, SSC_CKS_DIV); @@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, rcmr = SSC_BF(RCMR_PERIOD, 0) | SSC_BF(RCMR_STTDLY, START_DELAY) | SSC_BF(RCMR_START, SSC_START_RISING_RF) - | SSC_BF(RCMR_CKI, SSC_CKI_RISING) + | SSC_BF(RCMR_CKI, SSC_CKI_FALLING) | SSC_BF(RCMR_CKO, SSC_CKO_NONE) | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ? 
SSC_CKS_PIN : SSC_CKS_CLOCK); diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index e5f2fb884bf3..30c673cdc12e 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0); static const char * const pcm512x_dsp_program_texts[] = { "FIR interpolation with de-emphasis", "Low latency IIR with de-emphasis", - "Fixed process flow", "High attenuation with de-emphasis", + "Fixed process flow", "Ringing-less low latency FIR", }; diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index 2cd4fe463102..1d1c7f8a9af2 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream, RT286_I2S_CTRL1, 0x0018, d_len_code << 3); dev_dbg(codec->dev, "format val = 0x%x\n", val); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); - else - snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); + snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); + snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); return 0; } diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index c3f2decd643c..1ff726c29249 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match); static struct acpi_device_id rt5640_acpi_match[] = { { "INT33CA", 0 }, { "10EC5640", 0 }, + { "10EC5642", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match); diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index c0fbe1881439..918ada9738b0 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w, struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); switch (event) { - case SND_SOC_DAPM_POST_PMU: + case SND_SOC_DAPM_PRE_PMU: regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2); + break; + + case SND_SOC_DAPM_POST_PMU: regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0); break; + default: return 0; } @@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w, struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); switch (event) { - case SND_SOC_DAPM_POST_PMU: + case SND_SOC_DAPM_PRE_PMU: regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2); + break; + + case SND_SOC_DAPM_POST_PMU: regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0); break; + default: return 0; } @@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w, static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, - 0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU), + 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT, - 0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU), + 0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMU), /* Input Side */ /* micbias */ diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 29cf7ce610f4..aa98be32bb60 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) /* setting i2s data format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case 
SND_SOC_DAIFMT_DSP_A: - i2sctl |= SGTL5000_I2S_MODE_PCM; + i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT; break; case SND_SOC_DAIFMT_DSP_B: - i2sctl |= SGTL5000_I2S_MODE_PCM; + i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT; i2sctl |= SGTL5000_I2S_LRALIGN; break; case SND_SOC_DAIFMT_I2S: - i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; + i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT; break; case SND_SOC_DAIFMT_RIGHT_J: - i2sctl |= SGTL5000_I2S_MODE_RJ; + i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT; i2sctl |= SGTL5000_I2S_LRPOL; break; case SND_SOC_DAIFMT_LEFT_J: - i2sctl |= SGTL5000_I2S_MODE_I2S_LJ; + i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT; i2sctl |= SGTL5000_I2S_LRALIGN; break; default: @@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, if (ret) return ret; + /* Need 8 clocks before I2C accesses */ + udelay(1); + /* read chip information */ ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, ®); if (ret) diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index b7ebce054b4e..dd222b10ce13 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream, delay += aic3x->tdm_delay; /* Configure data delay */ - snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay); + snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay); return 0; } diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c index 1d1205702d23..9f2dced046de 100644 --- a/sound/soc/codecs/ts3a227e.c +++ b/sound/soc/codecs/ts3a227e.c @@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c, struct ts3a227e *ts3a227e; struct device *dev = &i2c->dev; int ret; + unsigned int acc_reg; ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL); if (ts3a227e == NULL) @@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c, INTB_DISABLE | ADC_COMPLETE_INT_DISABLE, ADC_COMPLETE_INT_DISABLE); + /* Read jack status because chip might not trigger interrupt at boot. 
*/ + regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg); + ts3a227e_new_jack_state(ts3a227e, acc_reg); + ts3a227e_jack_report(ts3a227e); + return 0; } diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index b9211b42f6e9..b115ed815db9 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c @@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c, if (wm8731 == NULL) return -ENOMEM; + mutex_init(&wm8731->lock); + wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap); if (IS_ERR(wm8731->regmap)) { ret = PTR_ERR(wm8731->regmap); diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 4d2d2b1380d5..75b87c5c0f04 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = { { "Right Capture PGA", NULL, "Right Capture Mux" }, { "Right Capture PGA", NULL, "Right Capture Inverting Mux" }, - { "AIFOUTL", "Left", "ADCL" }, - { "AIFOUTL", "Right", "ADCR" }, - { "AIFOUTR", "Left", "ADCL" }, - { "AIFOUTR", "Right", "ADCR" }, + { "AIFOUTL Mux", "Left", "ADCL" }, + { "AIFOUTL Mux", "Right", "ADCR" }, + { "AIFOUTR Mux", "Left", "ADCL" }, + { "AIFOUTR Mux", "Right", "ADCR" }, + + { "AIFOUTL", NULL, "AIFOUTL Mux" }, + { "AIFOUTR", NULL, "AIFOUTR Mux" }, { "ADCL", NULL, "CLK_DSP" }, { "ADCL", NULL, "Left Capture PGA" }, @@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = { }; static const struct snd_soc_dapm_route dac_intercon[] = { - { "DACL", "Right", "AIFINR" }, - { "DACL", "Left", "AIFINL" }, + { "DACL Mux", "Left", "AIFINL" }, + { "DACL Mux", "Right", "AIFINR" }, + + { "DACR Mux", "Left", "AIFINL" }, + { "DACR Mux", "Right", "AIFINR" }, + + { "DACL", NULL, "DACL Mux" }, { "DACL", NULL, "CLK_DSP" }, - { "DACR", "Right", "AIFINR" }, - { "DACR", "Left", "AIFINL" }, + { "DACR", NULL, "DACR Mux" }, { "DACR", NULL, "CLK_DSP" }, { "Charge pump", NULL, "SYSCLK" }, diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 031a1ae71d94..a96eb497a379 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -556,7 +556,7 @@ static struct { { 22050, 2 }, { 24000, 2 }, { 16000, 3 }, - { 11250, 4 }, + { 11025, 4 }, { 12000, 4 }, { 8000, 5 }, }; diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c index 3eddb18fefd1..5cc457ef8894 100644 --- a/sound/soc/codecs/wm9705.c +++ b/sound/soc/codecs/wm9705.c @@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec) struct snd_ac97 *ac97; int ret = 0; - ac97 = snd_soc_new_ac97_codec(codec); + ac97 = snd_soc_alloc_ac97_codec(codec); if (IS_ERR(ac97)) { ret = PTR_ERR(ac97); dev_err(codec->dev, "Failed to register AC97 codec\n"); return ret; } - snd_soc_codec_set_drvdata(codec, ac97); - ret = wm9705_reset(codec); if (ret) - goto reset_err; + goto err_put_device; + + ret = device_add(&ac97->dev); + if (ret) + goto err_put_device; + + snd_soc_codec_set_drvdata(codec, ac97); return 0; -reset_err: - snd_soc_free_ac97_codec(ac97); +err_put_device: + put_device(&ac97->dev); return ret; } diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index e04643d2bb24..9517571e820d 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec) struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); int ret = 0; - wm9712->ac97 = snd_soc_new_ac97_codec(codec); + wm9712->ac97 = snd_soc_alloc_ac97_codec(codec); if 
(IS_ERR(wm9712->ac97)) { ret = PTR_ERR(wm9712->ac97); dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret); @@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec) ret = wm9712_reset(codec, 0); if (ret < 0) - goto reset_err; + goto err_put_device; + + ret = device_add(&wm9712->ac97->dev); + if (ret) + goto err_put_device; /* set alc mux to none */ ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); return 0; -reset_err: - snd_soc_free_ac97_codec(wm9712->ac97); +err_put_device: + put_device(&wm9712->ac97->dev); return ret; } diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 71b9d5b0734d..6ab1122a3872 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c @@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); int ret = 0, reg; - wm9713->ac97 = snd_soc_new_ac97_codec(codec); + wm9713->ac97 = snd_soc_alloc_ac97_codec(codec); if (IS_ERR(wm9713->ac97)) return PTR_ERR(wm9713->ac97); @@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) wm9713_reset(codec, 0); ret = wm9713_reset(codec, 1); if (ret < 0) - goto reset_err; + goto err_put_device; + + ret = device_add(&wm9713->ac97->dev); + if (ret) + goto err_put_device; /* unmute the adc - move to kcontrol */ reg = ac97_read(codec, AC97_CD) & 0x7fff; @@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) return 0; -reset_err: - snd_soc_free_ac97_codec(wm9713->ac97); +err_put_device: + put_device(&wm9713->ac97->dev); return ret; } diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h index 91a550f4a10d..5e793bbb6b02 100644 --- a/sound/soc/fsl/fsl_esai.h +++ b/sound/soc/fsl/fsl_esai.h @@ -302,7 +302,7 @@ #define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT) #define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK) #define ESAI_xCCR_xDC_SHIFT 9 -#define ESAI_xCCR_xDC_WIDTH 4 +#define ESAI_xCCR_xDC_WIDTH 5 #define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT) #define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK) #define ESAI_xCCR_xPSR_SHIFT 8 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index a65f17d57ffb..059496ed9ad7 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev) } ssi_private->irq = platform_get_irq(pdev, 0); - if (!ssi_private->irq) { + if (ssi_private->irq < 0) { dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); - return -ENXIO; + return ssi_private->irq; } /* Are the RX and the TX clocks locked? 
*/ diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 4caacb05a623..cd146d4fa805 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c @@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev) if (ret) goto clk_fail; data->card.num_links = 1; + data->card.owner = THIS_MODULE; data->card.dai_link = &data->dai; data->card.dapm_widgets = imx_wm8962_dapm_widgets; data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index fb9240fdc9b7..7fe3009b1c43 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node, } /* Decrease the reference count of the device nodes */ -static int asoc_simple_card_unref(struct platform_device *pdev) +static int asoc_simple_card_unref(struct snd_soc_card *card) { - struct snd_soc_card *card = platform_get_drvdata(pdev); struct snd_soc_dai_link *dai_link; int num_links; @@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev) return ret; err: - asoc_simple_card_unref(pdev); + asoc_simple_card_unref(&priv->snd_card); return ret; } @@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev) snd_soc_jack_free_gpios(&simple_card_mic_jack, 1, &simple_card_mic_jack_gpio); - return asoc_simple_card_unref(pdev); + return asoc_simple_card_unref(card); } static const struct of_device_id asoc_simple_of_match[] = { diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c index ef2e8b5766a1..b3f9489794a6 100644 --- a/sound/soc/intel/sst-firmware.c +++ b/sound/soc/intel/sst-firmware.c @@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba struct list_head *block_list) { struct sst_mem_block *block, *tmp; + struct sst_block_allocator ba_tmp = *ba; u32 end = ba->offset + ba->size, block_end; int err; @@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba if (ba->offset >= block->offset && ba->offset < block_end) { /* align ba to block boundary */ - ba->size -= block_end - ba->offset; - ba->offset = block_end; - err = block_alloc_contiguous(dsp, ba, block_list); + ba_tmp.size -= block_end - ba->offset; + ba_tmp.offset = block_end; + err = block_alloc_contiguous(dsp, &ba_tmp, block_list); if (err < 0) return -ENOMEM; @@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba list_move(&block->list, &dsp->used_block_list); list_add(&block->module_list, block_list); /* align ba to block boundary */ - ba->size -= block_end - ba->offset; - ba->offset = block_end; + ba_tmp.size -= block_end - ba->offset; + ba_tmp.offset = block_end; - err = block_alloc_contiguous(dsp, ba, block_list); + err = block_alloc_contiguous(dsp, &ba_tmp, block_list); if (err < 0) return -ENOMEM; diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 3f8c48231364..8156cc1accb7 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c @@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work) } /* tell DSP that notification has been handled */ - sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD, + sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD, SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE); /* unmask busy interrupt */ - sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, 
SST_IMRX_BUSY, 0); + sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0); } static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header) @@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream) struct sst_dsp *sst = hsw->dsp; unsigned long flags; + if (!stream) { + dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n"); + return 0; + } + /* dont free DSP streams that are not commited */ if (!stream->commited) goto out; @@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream) u32 header; int ret; + if (!stream) { + dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n"); + return 0; + } + + if (stream->commited) { + dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n"); + return 0; + } + trace_ipc_request("stream alloc", stream->host_id); header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM); @@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream, { int ret; + if (!stream) { + dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n"); + return 0; + } + trace_ipc_request("stream pause", stream->reply.stream_hw_id); ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE, @@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream, { int ret; + if (!stream) { + dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n"); + return 0; + } + trace_ipc_request("stream resume", stream->reply.stream_hw_id); ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME, @@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream) { int ret, tries = 10; + if (!stream) { + dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n"); + return 0; + } + /* dont reset streams that are not commited */ if (!stream->commited) return 0; diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c index 2ac72eb5e75d..b3360139c41a 100644 --- a/sound/soc/intel/sst/sst_acpi.c +++ b/sound/soc/intel/sst/sst_acpi.c @@ -350,7 +350,7 @@ static struct sst_machines sst_acpi_bytcr[] = { /* Cherryview-based platforms: CherryTrail and Braswell */ static struct sst_machines sst_acpi_chv[] = { - {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin", + {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin", &chv_platform_data }, {}, }; diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index 8b79cafab1e2..c7eb9dd67f60 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai, case SND_SOC_DAIFMT_CBM_CFS: /* McBSP slave. 
FS clock as output */ regs->srgr2 |= FSGM; - regs->pcr0 |= FSXM; + regs->pcr0 |= FSXM | FSRM; break; case SND_SOC_DAIFMT_CBM_CFM: /* McBSP slave */ diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index 13d8507333b8..dcc26eda0539 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = { SNDRV_PCM_FMTBIT_S24_LE), }, .ops = &rockchip_i2s_dai_ops, + .symmetric_rates = 1, }; static const struct snd_soc_component_driver rockchip_i2s_component = { diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c index 2e10e9a38376..08d7259bbaab 100644 --- a/sound/soc/soc-ac97.c +++ b/sound/soc/soc-ac97.c @@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev) } /** - * snd_soc_new_ac97_codec - initailise AC97 device - * @codec: audio codec + * snd_soc_alloc_ac97_codec() - Allocate a new AC'97 device + * @codec: The CODEC for which to create the AC'97 device * - * Initialises AC97 codec resources for use by ad-hoc devices only. + * Allocates a new snd_ac97 device and initializes it, but does not yet register + * it. The caller is responsible for either calling device_add(&ac97->dev) to + * register the device, or calling put_device(&ac97->dev) to free the device. + * + * Returns: A snd_ac97 device or a PTR_ERR in case of an error. */ -struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) +struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec) { struct snd_ac97 *ac97; - int ret; ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL); if (ac97 == NULL) @@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) codec->component.card->snd_card->number, 0, codec->component.name); - ret = device_register(&ac97->dev); + device_initialize(&ac97->dev); + + return ac97; +} +EXPORT_SYMBOL(snd_soc_alloc_ac97_codec); + +/** + * snd_soc_new_ac97_codec - initialise AC97 device + * @codec: audio codec + * + * Initialises AC97 codec resources for use by ad-hoc devices only.
+ */ +struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) +{ + struct snd_ac97 *ac97; + int ret; + + ac97 = snd_soc_alloc_ac97_codec(codec); + if (IS_ERR(ac97)) + return ac97; + + ret = device_add(&ac97->dev); if (ret) { put_device(&ac97->dev); return ERR_PTR(ret); diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 590a82f01d0b..025c38fbe3c0 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) rtd->dai_link->stream_name); ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, - 1, 0, &be_pcm); + rtd->dai_link->dpcm_playback, + rtd->dai_link->dpcm_capture, &be_pcm); if (ret < 0) { dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n", rtd->dai_link->name); @@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) rtd->pcm = be_pcm; rtd->fe_compr = 1; - be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; - be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; + if (rtd->dai_link->dpcm_playback) + be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; + else if (rtd->dai_link->dpcm_capture) + be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); } else memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops)); diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c index 86ea2d7b8845..d2b18e887071 100644 --- a/tools/lib/api/fs/debugfs.c +++ b/tools/lib/api/fs/debugfs.c @@ -1,3 +1,4 @@ +#define _GNU_SOURCE #include <errno.h> #include <stdio.h> #include <stdlib.h> @@ -98,3 +99,45 @@ char *debugfs_mount(const char *mountpoint) out: return debugfs_mountpoint; } + +int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename) +{ + char sbuf[128]; + + switch (err) { + case ENOENT: + if (debugfs_found) { + snprintf(buf, size, + "Error:\tFile %s/%s not found.\n" + "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n", + debugfs_mountpoint, filename); + break; + } + snprintf(buf, size, "%s", + "Error:\tUnable to find debugfs\n" + "Hint:\tWas your kernel compiled with debugfs support?\n" + "Hint:\tIs the debugfs filesystem mounted?\n" + "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'"); + break; + case EACCES: + snprintf(buf, size, + "Error:\tNo permissions to read %s/%s\n" + "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n", + debugfs_mountpoint, filename, debugfs_mountpoint); + break; + default: + snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf))); + break; + } + + return 0; +} + +int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name) +{ + char path[PATH_MAX]; + + snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*"); + + return debugfs__strerror_open(err, buf, size, path); +} diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h index f19d3df9609d..0739881a9897 100644 --- a/tools/lib/api/fs/debugfs.h +++ b/tools/lib/api/fs/debugfs.h @@ -26,4 +26,7 @@ char *debugfs_mount(const char *mountpoint); extern char debugfs_mountpoint[]; +int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename); +int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name); + #endif /* __API_DEBUGFS_H__ */ diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore new file 
mode 100644 index 000000000000..cc0e7a9f99e3 --- /dev/null +++ b/tools/lib/lockdep/.gitignore @@ -0,0 +1 @@ +liblockdep.so.* diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile index 52f9279c6c13..4b866c54f624 100644 --- a/tools/lib/lockdep/Makefile +++ b/tools/lib/lockdep/Makefile @@ -104,7 +104,7 @@ N = export Q VERBOSE -INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) +INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) # Set compile option CFLAGS if not set elsewhere CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index cf3a44bf1ec3..afe20ed9fac8 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -32,6 +32,7 @@ #include <stdint.h> #include <limits.h> +#include <netinet/ip6.h> #include "event-parse.h" #include "event-utils.h" @@ -4149,6 +4150,324 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); } +static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf) +{ + const char *fmt; + + if (i == 'i') + fmt = "%03d.%03d.%03d.%03d"; + else + fmt = "%d.%d.%d.%d"; + + trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]); +} + +static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) +{ + return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) | + (unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL; +} + +static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr) +{ + return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE); +} + +static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr) +{ + int i, j, range; + unsigned char zerolength[8]; + int longest = 1; + int colonpos = -1; + uint16_t word; + uint8_t hi, lo; + bool needcolon = false; + bool useIPv4; + struct in6_addr in6; + + memcpy(&in6, addr, sizeof(struct in6_addr)); + + useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6); + + memset(zerolength, 0, sizeof(zerolength)); + + if (useIPv4) + range = 6; + else + range = 8; + + /* find position of longest 0 run */ + for (i = 0; i < range; i++) { + for (j = i; j < range; j++) { + if (in6.s6_addr16[j] != 0) + break; + zerolength[i]++; + } + } + for (i = 0; i < range; i++) { + if (zerolength[i] > longest) { + longest = zerolength[i]; + colonpos = i; + } + } + if (longest == 1) /* don't compress a single 0 */ + colonpos = -1; + + /* emit address */ + for (i = 0; i < range; i++) { + if (i == colonpos) { + if (needcolon || i == 0) + trace_seq_printf(s, ":"); + trace_seq_printf(s, ":"); + needcolon = false; + i += longest - 1; + continue; + } + if (needcolon) { + trace_seq_printf(s, ":"); + needcolon = false; + } + /* hex u16 without leading 0s */ + word = ntohs(in6.s6_addr16[i]); + hi = word >> 8; + lo = word & 0xff; + if (hi) + trace_seq_printf(s, "%x%02x", hi, lo); + else + trace_seq_printf(s, "%x", lo); + + needcolon = true; + } + + if (useIPv4) { + if (needcolon) + trace_seq_printf(s, ":"); + print_ip4_addr(s, 'I', &in6.s6_addr[12]); + } + + return; +} + +static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf) +{ + int j; + + for (j = 0; j < 16; j += 2) { + trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]); + if (i == 'I' && j < 14) + trace_seq_printf(s, ":"); + } +} 
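/*
 * A minimal usage sketch, not part of the patch: the trace_seq argument
 * and the sample addresses below are assumed values, chosen only to show
 * what the helpers above emit for each printk-style specifier.
 */
static inline void example_print_ip_helpers(struct trace_seq *s)
{
	unsigned char v4[4]  = { 192, 168, 1, 1 };
	unsigned char v6[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 }; /* 2001:db8::1 */

	print_ip4_addr(s, 'i', v4); /* %pi4  -> "192.168.001.001" (zero-padded) */
	print_ip4_addr(s, 'I', v4); /* %pI4  -> "192.168.1.1" */
	print_ip6_addr(s, 'i', v6); /* %pi6  -> "20010db8000000000000000000000001" */
	print_ip6_addr(s, 'I', v6); /* %pI6  -> "2001:0db8:0000:0000:0000:0000:0000:0001" */
	print_ip6c_addr(s, v6);     /* %pI6c -> "2001:db8::1" (zero run compressed) */
}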
+ +/* + * %pi4 print an IPv4 address with leading zeros + * %pI4 print an IPv4 address without leading zeros + * %pi6 print an IPv6 address without colons + * %pI6 print an IPv6 address with colons + * %pI6c print an IPv6 address in compressed form with colons + * %pISpc print an IP address based on sockaddr; p adds port. + */ +static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i, + void *data, int size, struct event_format *event, + struct print_arg *arg) +{ + unsigned char *buf; + + if (arg->type == PRINT_FUNC) { + process_defined_func(s, data, size, event, arg); + return 0; + } + + if (arg->type != PRINT_FIELD) { + trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type); + return 0; + } + + if (!arg->field.field) { + arg->field.field = + pevent_find_any_field(event, arg->field.name); + if (!arg->field.field) { + do_warning("%s: field %s not found", + __func__, arg->field.name); + return 0; + } + } + + buf = data + arg->field.field->offset; + + if (arg->field.field->size != 4) { + trace_seq_printf(s, "INVALIDIPv4"); + return 0; + } + print_ip4_addr(s, i, buf); + + return 0; +} + +static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i, + void *data, int size, struct event_format *event, + struct print_arg *arg) +{ + char have_c = 0; + unsigned char *buf; + int rc = 0; + + /* pI6c */ + if (i == 'I' && *ptr == 'c') { + have_c = 1; + ptr++; + rc++; + } + + if (arg->type == PRINT_FUNC) { + process_defined_func(s, data, size, event, arg); + return rc; + } + + if (arg->type != PRINT_FIELD) { + trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type); + return rc; + } + + if (!arg->field.field) { + arg->field.field = + pevent_find_any_field(event, arg->field.name); + if (!arg->field.field) { + do_warning("%s: field %s not found", + __func__, arg->field.name); + return rc; + } + } + + buf = data + arg->field.field->offset; + + if (arg->field.field->size != 16) { + trace_seq_printf(s, "INVALIDIPv6"); + return rc; + } + + if (have_c) + print_ip6c_addr(s, buf); + else + print_ip6_addr(s, i, buf); + + return rc; +} + +static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i, + void *data, int size, struct event_format *event, + struct print_arg *arg) +{ + char have_c = 0, have_p = 0; + unsigned char *buf; + struct sockaddr_storage *sa; + int rc = 0; + + /* pISpc */ + if (i == 'I') { + if (*ptr == 'p') { + have_p = 1; + ptr++; + rc++; + } + if (*ptr == 'c') { + have_c = 1; + ptr++; + rc++; + } + } + + if (arg->type == PRINT_FUNC) { + process_defined_func(s, data, size, event, arg); + return rc; + } + + if (arg->type != PRINT_FIELD) { + trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type); + return rc; + } + + if (!arg->field.field) { + arg->field.field = + pevent_find_any_field(event, arg->field.name); + if (!arg->field.field) { + do_warning("%s: field %s not found", + __func__, arg->field.name); + return rc; + } + } + + sa = (struct sockaddr_storage *) (data + arg->field.field->offset); + + if (sa->ss_family == AF_INET) { + struct sockaddr_in *sa4 = (struct sockaddr_in *) sa; + + if (arg->field.field->size < sizeof(struct sockaddr_in)) { + trace_seq_printf(s, "INVALIDIPv4"); + return rc; + } + + print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr); + if (have_p) + trace_seq_printf(s, ":%d", ntohs(sa4->sin_port)); + + + } else if (sa->ss_family == AF_INET6) { + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa; + + if (arg->field.field->size < sizeof(struct sockaddr_in6)) { + trace_seq_printf(s, "INVALIDIPv6"); + return rc; + } + + if 
(have_p) + trace_seq_printf(s, "["); + + buf = (unsigned char *) &sa6->sin6_addr; + if (have_c) + print_ip6c_addr(s, buf); + else + print_ip6_addr(s, i, buf); + + if (have_p) + trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port)); + } + + return rc; +} + +static int print_ip_arg(struct trace_seq *s, const char *ptr, + void *data, int size, struct event_format *event, + struct print_arg *arg) +{ + char i = *ptr; /* 'i' or 'I' */ + char ver; + int rc = 0; + + ptr++; + rc++; + + ver = *ptr; + ptr++; + rc++; + + switch (ver) { + case '4': + rc += print_ipv4_arg(s, ptr, i, data, size, event, arg); + break; + case '6': + rc += print_ipv6_arg(s, ptr, i, data, size, event, arg); + break; + case 'S': + rc += print_ipsa_arg(s, ptr, i, data, size, event, arg); + break; + default: + return 0; + } + + return rc; +} + static int is_printable_array(char *p, unsigned int len) { unsigned int i; @@ -4337,6 +4656,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event ptr++; arg = arg->next; break; + } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') { + int n; + + n = print_ip_arg(s, ptr+1, data, size, event, arg); + if (n > 0) { + ptr += n; + arg = arg->next; + break; + } } /* fall through */ diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt index fd77d81ea748..0294c57b1f5e 100644 --- a/tools/perf/Documentation/perf-buildid-cache.txt +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -38,7 +38,7 @@ OPTIONS --remove=:: Remove specified file from the cache. -M:: ---missing=:: +--missing=:: List missing build ids in the cache for the specified file. -u:: --update:: diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index cbb4f743d921..3e2aec94f806 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -89,6 +89,19 @@ raw encoding of 0x1A8 can be used: You should refer to the processor specific documentation for getting these details. Some of them are referenced in the SEE ALSO section below. +PARAMETERIZED EVENTS +-------------------- + +Some pmu events listed by 'perf-list' will be displayed with '?' in them. For +example: + + hv_gpci/dtbp_ptitc,phys_processor_idx=?/ + +This means that when provided as an event, a value for '?' must +also be supplied. For example: + + perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ... + OPTIONS ------- diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index 1d78a4064da4..43310d8661fe 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt @@ -12,11 +12,12 @@ SYNOPSIS DESCRIPTION ----------- -"perf mem -t <TYPE> record" runs a command and gathers memory operation data +"perf mem record" runs a command and gathers memory operation data from it, into perf.data. Perf record options are accepted and are passed through. -"perf mem -t <TYPE> report" displays the result. It invokes perf report with the -right set of options to display a memory access profile. +"perf mem report" displays the result. It invokes perf report with the +right set of options to display a memory access profile. By default, loads +and stores are sampled. Use the -t option to limit to loads or stores. Note that on Intel systems the memory latency reported is the use-latency, not the pure load (or store latency). 
Use latency includes any pipeline @@ -29,7 +30,7 @@ OPTIONS -t:: --type=:: - Select the memory operation type: load or store (default: load) + Select the memory operation type: load or store (default: load,store) -D:: --dump-raw-samples=:: diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index af9a54ece024..31e977459c51 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,12 +33,27 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. - - a hardware breakpoint event in the form of '\mem:addr[:access]' + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where + 'param1', 'param2', etc are defined as formats for the PMU in + /sys/bus/event_sources/devices/<pmu>/format/*. + + - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/' + + where M, N, K are numbers (in decimal, hex, octal format). Acceptable + values for each of 'config', 'config1' and 'config2' are defined by + corresponding entries in /sys/bus/event_sources/devices/<pmu>/format/* + param1 and param2 are defined as formats for the PMU in: + /sys/bus/event_sources/devices/<pmu>/format/* + + - a hardware breakpoint event in the form of '\mem:addr[/len][:access]' where addr is the address in memory you want to break in. Access is the memory access type (read, write, execute) it can - be passed as follows: '\mem:addr[:[r][w][x]]'. + be passed as follows: '\mem:addr[:[r][w][x]]'. len is the range, + number of bytes from specified addr, which the breakpoint will cover. If you want to profile read-write accesses in 0x1000, just set 'mem:0x1000:rw'. + If you want to profile write accesses in [0x1000~1008), just set + 'mem:0x1000/8:w'. --filter=<filter>:: Event filter. diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 21494806c0ab..a21eec05bc42 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -125,46 +125,46 @@ OPTIONS is equivalent to: perf script -f trace:<fields> -f sw:<fields> -f hw:<fields> - + i.e., the specified fields apply to all event types if the type string is not given. - + The arguments are processed in the order received. A later usage can reset a prior request. e.g.: - + -f trace: -f comm,tid,time,ip,sym - + The first -f suppresses trace events (field list is ""), but then the second invocation sets the fields to comm,tid,time,ip,sym. In this case a warning is given to the user: - + "Overriding previous field request for all events." - + Alternatively, consider the order: - + -f comm,tid,time,ip,sym -f trace: - + The first -f sets the fields for all events and the second -f suppresses trace events. The user is given a warning message about the override, and the result of the above is that only S/W and H/W events are displayed with the given fields. - + For the 'wildcard' option if a user selected field is invalid for an event type, a message is displayed to the user that the option is ignored for that type. For example: - + $ perf script -f comm,tid,trace 'trace' not valid for hardware events. Ignoring. 'trace' not valid for software events. Ignoring. - + Alternatively, if the type is given an invalid field is specified it is an error. For example: - + perf script -v -f sw:comm,tid,trace 'trace' not valid for software events. - + At this point usage is displayed, and perf-script exits.
- + Finally, a user may not set fields to none for all event types. i.e., -f "" is not allowed. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 29ee857c09c6..04e150d83e7d 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -25,10 +25,22 @@ OPTIONS -e:: --event=:: - Select the PMU event. Selection can be a symbolic event name - (use 'perf list' to list all events) or a raw PMU - event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. + Select the PMU event. Selection can be: + + - a symbolic event name (use 'perf list' to list all events) + + - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. + + - a symbolically formed event like 'pmu/param1=0x3,param2/' where + param1 and param2 are defined as formats for the PMU in + /sys/bus/event_sources/devices/<pmu>/format/* + + - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/' + where M, N, K are numbers (in decimal, hex, octal format). + Acceptable values for each of 'config', 'config1' and 'config2' + parameters are defined by corresponding entries in + /sys/bus/event_sources/devices/<pmu>/format/* -i:: --no-inherit:: diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h index 71f2844cf97f..7ed22ff1e1ac 100644 --- a/tools/perf/bench/futex.h +++ b/tools/perf/bench/futex.h @@ -68,4 +68,17 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak val, opflags); } +#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP +#include <pthread.h> +static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr, + size_t cpusetsize, + cpu_set_t *cpuset) +{ + attr = attr; + cpusetsize = cpusetsize; + cpuset = cpuset; + return 0; +} +#endif + #endif /* _FUTEX_H */ diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index 77d5cae54c6a..50e6b66aea1f 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -236,10 +236,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) if (errno == ENOENT) return false; - pr_warning("Problems with %s file, consider removing it from the cache\n", + pr_warning("Problems with %s file, consider removing it from the cache\n", filename); } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) { - pr_warning("Problems with %s file, consider removing it from the cache\n", + pr_warning("Problems with %s file, consider removing it from the cache\n", filename); } diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 1fd96c13f199..74aada554b12 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -390,6 +390,15 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist) } } +static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt) +{ + struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt); + void *ptr = dfmt - dfmt->idx; + struct data__file *d = container_of(ptr, struct data__file, fmt); + + return d; +} + static struct hist_entry* get_pair_data(struct hist_entry *he, struct data__file *d) { @@ -407,8 +416,7 @@ get_pair_data(struct hist_entry *he, struct data__file *d) static struct hist_entry* get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt) { - void *ptr = dfmt - dfmt->idx; - struct data__file *d = container_of(ptr, struct data__file, fmt); + struct data__file *d = fmt_to_data_file(&dfmt->fmt); return 
get_pair_data(he, d); } @@ -430,7 +438,7 @@ static void hists__baseline_only(struct hists *hists) next = rb_next(&he->rb_node_in); if (!hist_entry__next_pair(he)) { rb_erase(&he->rb_node_in, root); - hist_entry__free(he); + hist_entry__delete(he); } } } @@ -448,26 +456,30 @@ static void hists__precompute(struct hists *hists) next = rb_first(root); while (next != NULL) { struct hist_entry *he, *pair; + struct data__file *d; + int i; he = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&he->rb_node_in); - pair = get_pair_data(he, &data__files[sort_compute]); - if (!pair) - continue; + data__for_each_file_new(i, d) { + pair = get_pair_data(he, d); + if (!pair) + continue; - switch (compute) { - case COMPUTE_DELTA: - compute_delta(he, pair); - break; - case COMPUTE_RATIO: - compute_ratio(he, pair); - break; - case COMPUTE_WEIGHTED_DIFF: - compute_wdiff(he, pair); - break; - default: - BUG_ON(1); + switch (compute) { + case COMPUTE_DELTA: + compute_delta(he, pair); + break; + case COMPUTE_RATIO: + compute_ratio(he, pair); + break; + case COMPUTE_WEIGHTED_DIFF: + compute_wdiff(he, pair); + break; + default: + BUG_ON(1); + } } } } @@ -517,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, static int64_t hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, - int c) + int c, int sort_idx) { bool pairs_left = hist_entry__has_pairs(left); bool pairs_right = hist_entry__has_pairs(right); @@ -529,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, if (!pairs_left || !pairs_right) return pairs_left ? -1 : 1; - p_left = get_pair_data(left, &data__files[sort_compute]); - p_right = get_pair_data(right, &data__files[sort_compute]); + p_left = get_pair_data(left, &data__files[sort_idx]); + p_right = get_pair_data(right, &data__files[sort_idx]); if (!p_left && !p_right) return 0; @@ -546,90 +558,102 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, } static int64_t -hist_entry__cmp_nop(struct hist_entry *left __maybe_unused, +hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right, + int c, int sort_idx) +{ + struct hist_entry *p_right, *p_left; + + p_left = get_pair_data(left, &data__files[sort_idx]); + p_right = get_pair_data(right, &data__files[sort_idx]); + + if (!p_left && !p_right) + return 0; + + if (!p_left || !p_right) + return p_left ? -1 : 1; + + if (c != COMPUTE_DELTA) { + /* + * The delta can be computed without the baseline, but + * others are not. Put those entries which have no + * values below. + */ + if (left->dummy && right->dummy) + return 0; + + if (left->dummy || right->dummy) + return left->dummy ? 1 : -1; + } + + return __hist_entry__cmp_compute(p_left, p_right, c); +} + +static int64_t +hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left __maybe_unused, struct hist_entry *right __maybe_unused) { return 0; } static int64_t -hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right) +hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) { - if (sort_compute) - return 0; - if (left->stat.period == right->stat.period) return 0; return left->stat.period > right->stat.period ? 
1 : -1; } static int64_t -hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right) +hist_entry__cmp_delta(struct perf_hpp_fmt *fmt, + struct hist_entry *left, struct hist_entry *right) { - return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); + struct data__file *d = fmt_to_data_file(fmt); + + return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx); } static int64_t -hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right) +hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt, + struct hist_entry *left, struct hist_entry *right) { - return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); + struct data__file *d = fmt_to_data_file(fmt); + + return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx); } static int64_t -hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right) +hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt, + struct hist_entry *left, struct hist_entry *right) { - return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); + struct data__file *d = fmt_to_data_file(fmt); + + return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx); } -static void insert_hist_entry_by_compute(struct rb_root *root, - struct hist_entry *he, - int c) +static int64_t +hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - if (hist_entry__cmp_compute(he, iter, c) < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, root); + return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA, + sort_compute); } -static void hists__compute_resort(struct hists *hists) +static int64_t +hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) { - struct rb_root *root; - struct rb_node *next; - - if (sort__need_collapse) - root = &hists->entries_collapsed; - else - root = hists->entries_in; - - hists->entries = RB_ROOT; - next = rb_first(root); - - hists__reset_stats(hists); - hists__reset_col_len(hists); - - while (next != NULL) { - struct hist_entry *he; - - he = rb_entry(next, struct hist_entry, rb_node_in); - next = rb_next(&he->rb_node_in); - - insert_hist_entry_by_compute(&hists->entries, he, compute); - hists__inc_stats(hists, he); + return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO, + sort_compute); +} - if (!he->filtered) - hists__calc_col_len(hists, he); - } +static int64_t +hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) +{ + return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF, + sort_compute); } static void hists__process(struct hists *hists) @@ -637,12 +661,8 @@ static void hists__process(struct hists *hists) if (show_baseline_only) hists__baseline_only(hists); - if (sort_compute) { - hists__precompute(hists); - hists__compute_resort(hists); - } else { - hists__output_resort(hists, NULL); - } + hists__precompute(hists); + hists__output_resort(hists, NULL); hists__fprintf(hists, true, 0, 0, 0, stdout); } @@ -841,7 +861,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt, char pfmt[20] = " "; if (!pair) - goto dummy_print; + goto no_print; switch (comparison_method) { case COMPUTE_DELTA: @@ -850,8 
+870,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt, else diff = compute_delta(he, pair); - if (fabs(diff) < 0.01) - goto dummy_print; scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1); return percent_color_snprintf(hpp->buf, hpp->size, pfmt, diff); @@ -883,6 +901,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt, } dummy_print: return scnprintf(hpp->buf, hpp->size, "%*s", + dfmt->header_width, "N/A"); +no_print: + return scnprintf(hpp->buf, hpp->size, "%*s", dfmt->header_width, pfmt); } @@ -932,14 +953,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair, else diff = compute_delta(he, pair); - if (fabs(diff) >= 0.01) - scnprintf(buf, size, "%+4.2F%%", diff); + scnprintf(buf, size, "%+4.2F%%", diff); break; case PERF_HPP_DIFF__RATIO: /* No point for ratio number if we are dummy.. */ - if (he->dummy) + if (he->dummy) { + scnprintf(buf, size, "N/A"); break; + } if (pair->diff.computed) ratio = pair->diff.period_ratio; @@ -952,8 +974,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair, case PERF_HPP_DIFF__WEIGHTED_DIFF: /* No point for wdiff number if we are dummy.. */ - if (he->dummy) + if (he->dummy) { + scnprintf(buf, size, "N/A"); break; + } if (pair->diff.computed) wdiff = pair->diff.wdiff; @@ -1105,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx) perf_hpp__register_sort_field(fmt); } -static void ui_init(void) +static int ui_init(void) { struct data__file *d; + struct perf_hpp_fmt *fmt; int i; data__for_each_file(i, d) { @@ -1137,6 +1162,46 @@ static void ui_init(void) data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD : PERF_HPP_DIFF__PERIOD_BASELINE); } + + if (!sort_compute) + return 0; + + /* + * Prepend an fmt to sort on columns at 'sort_compute' first. + * This fmt is added only to the sort list but not to the + * output fields list. + * + * Note that this column (data) can be compared twice - one + * for this 'sort_compute' fmt and another for the normal + * diff_hpp_fmt. But it shouldn't be a problem as most entries + * will be sorted out by first try or baseline and comparing + * is not a costly operation.
+ */ + fmt = zalloc(sizeof(*fmt)); + if (fmt == NULL) { + pr_err("Memory allocation failed\n"); + return -1; + } + + fmt->cmp = hist_entry__cmp_nop; + fmt->collapse = hist_entry__cmp_nop; + + switch (compute) { + case COMPUTE_DELTA: + fmt->sort = hist_entry__cmp_delta_idx; + break; + case COMPUTE_RATIO: + fmt->sort = hist_entry__cmp_ratio_idx; + break; + case COMPUTE_WEIGHTED_DIFF: + fmt->sort = hist_entry__cmp_wdiff_idx; + break; + default: + BUG_ON(1); + } + + list_add(&fmt->sort_list, &perf_hpp__sort_list); + return 0; } static int data_init(int argc, const char **argv) @@ -1202,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) if (data_init(argc, argv) < 0) return -1; - ui_init(); + if (ui_init() < 0) + return -1; sort__mode = SORT_MODE__DIFF; diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 84df2deed988..a13641e066f5 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -343,6 +343,7 @@ static int __cmd_inject(struct perf_inject *inject) int ret = -EINVAL; struct perf_session *session = inject->session; struct perf_data_file *file_out = &inject->output; + int fd = perf_data_file__fd(file_out); signal(SIGINT, sig_handler); @@ -376,7 +377,7 @@ static int __cmd_inject(struct perf_inject *inject) } if (!file_out->is_pipe) - lseek(file_out->fd, session->header.data_offset, SEEK_SET); + lseek(fd, session->header.data_offset, SEEK_SET); ret = perf_session__process_events(session, &inject->tool); @@ -385,7 +386,7 @@ static int __cmd_inject(struct perf_inject *inject) perf_header__set_feat(&session->header, HEADER_BUILD_ID); session->header.data_size = inject->bytes_written; - perf_session__write_header(session, session->evlist, file_out->fd, true); + perf_session__write_header(session, session->evlist, fd, true); } return ret; diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 24db6ffe2957..9b5663950a4d 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -7,44 +7,47 @@ #include "util/session.h" #include "util/data.h" -#define MEM_OPERATION_LOAD "load" -#define MEM_OPERATION_STORE "store" - -static const char *mem_operation = MEM_OPERATION_LOAD; +#define MEM_OPERATION_LOAD 0x1 +#define MEM_OPERATION_STORE 0x2 struct perf_mem { struct perf_tool tool; char const *input_name; bool hide_unresolved; bool dump_raw; + int operation; const char *cpu_list; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; -static int __cmd_record(int argc, const char **argv) +static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) { int rec_argc, i = 0, j; const char **rec_argv; - char event[64]; int ret; - rec_argc = argc + 4; + rec_argc = argc + 7; /* max number of arguments */ rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (!rec_argv) return -1; - rec_argv[i++] = strdup("record"); - if (!strcmp(mem_operation, MEM_OPERATION_LOAD)) - rec_argv[i++] = strdup("-W"); - rec_argv[i++] = strdup("-d"); - rec_argv[i++] = strdup("-e"); + rec_argv[i++] = "record"; - if (strcmp(mem_operation, MEM_OPERATION_LOAD)) - sprintf(event, "cpu/mem-stores/pp"); - else - sprintf(event, "cpu/mem-loads/pp"); + if (mem->operation & MEM_OPERATION_LOAD) + rec_argv[i++] = "-W"; + + rec_argv[i++] = "-d"; + + if (mem->operation & MEM_OPERATION_LOAD) { + rec_argv[i++] = "-e"; + rec_argv[i++] = "cpu/mem-loads/pp"; + } + + if (mem->operation & MEM_OPERATION_STORE) { + rec_argv[i++] = "-e"; + rec_argv[i++] = "cpu/mem-stores/pp"; + } - rec_argv[i++] = strdup(event); for (j = 1; j < argc; j++, i++) 
rec_argv[i] = argv[j]; @@ -162,17 +165,17 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem) if (!rep_argv) return -1; - rep_argv[i++] = strdup("report"); - rep_argv[i++] = strdup("--mem-mode"); - rep_argv[i++] = strdup("-n"); /* display number of samples */ + rep_argv[i++] = "report"; + rep_argv[i++] = "--mem-mode"; + rep_argv[i++] = "-n"; /* display number of samples */ /* * there is no weight (cost) associated with stores, so don't print * the column */ - if (strcmp(mem_operation, MEM_OPERATION_LOAD)) - rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr," "dso_daddr,tlb,locked"); + if (!(mem->operation & MEM_OPERATION_LOAD)) + rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr," + "dso_daddr,tlb,locked"; for (j = 1; j < argc; j++, i++) rep_argv[i] = argv[j]; @@ -182,6 +185,75 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem) return ret; } +struct mem_mode { + const char *name; + int mode; +}; + +#define MEM_OPT(n, m) \ + { .name = n, .mode = (m) } + +#define MEM_END { .name = NULL } + +static const struct mem_mode mem_modes[]={ + MEM_OPT("load", MEM_OPERATION_LOAD), + MEM_OPT("store", MEM_OPERATION_STORE), + MEM_END +}; + +static int +parse_mem_ops(const struct option *opt, const char *str, int unset) +{ + int *mode = (int *)opt->value; + const struct mem_mode *m; + char *s, *os = NULL, *p; + int ret = -1; + + if (unset) + return 0; + + /* str may be NULL in case no arg is passed to -t */ + if (str) { + /* because str is read-only */ + s = os = strdup(str); + if (!s) + return -1; + + /* reset mode */ + *mode = 0; + + for (;;) { + p = strchr(s, ','); + if (p) + *p = '\0'; + + for (m = mem_modes; m->name; m++) { + if (!strcasecmp(s, m->name)) + break; + } + if (!m->name) { + fprintf(stderr, "unknown sampling op %s," " check man page\n", s); + goto error; + } + + *mode |= m->mode; + + if (!p) + break; + + s = p + 1; + } + } + ret = 0; + + if (*mode == 0) + *mode = MEM_OPERATION_LOAD; +error: + free(os); + return ret; +} + int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) { struct stat st; @@ -197,10 +269,15 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) .ordered_events = true, }, .input_name = "perf.data", + /* + * default to both load and store sampling + */ + .operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE, }; const struct option mem_options[] = { - OPT_STRING('t', "type", &mem_operation, - "type", "memory operations(load/store)"), + OPT_CALLBACK('t', "type", &mem.operation, + "type", "memory operations(load,store) Default load,store", + parse_mem_ops), OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw, "dump raw samples in ASCII"), OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved, @@ -225,7 +302,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation)) + if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) usage_with_options(mem_usage, mem_options); if (!mem.input_name || !strlen(mem.input_name)) { @@ -236,7 +313,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) } if (!strncmp(argv[0], "rec", 3)) - return __cmd_record(argc, argv); + return __cmd_record(argc, argv, &mem); else if (!strncmp(argv[0], "rep", 3)) return report_events(argc, argv, &mem); else diff --git a/tools/perf/builtin-record.c
b/tools/perf/builtin-record.c index 8648c6d3003d..404ab3434052 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -190,16 +190,30 @@ out: return rc; } +static int process_sample_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + struct record *rec = container_of(tool, struct record, tool); + + rec->samples++; + + return build_id__mark_dso_hit(tool, event, sample, evsel, machine); +} + static int process_buildids(struct record *rec) { struct perf_data_file *file = &rec->file; struct perf_session *session = rec->session; - u64 start = session->header.data_offset; - u64 size = lseek(file->fd, 0, SEEK_CUR); + u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR); if (size == 0) return 0; + file->size = size; + /* * During this process, it'll load kernel map and replace the * dso->long_name to a real pathname it found. In this case @@ -211,9 +225,7 @@ static int process_buildids(struct record *rec) */ symbol_conf.ignore_vmlinux_buildid = true; - return __perf_session__process_events(session, start, - size - start, - size, &build_id__mark_dso_hit_ops); + return perf_session__process_events(session, &rec->tool); } static void perf_event__synthesize_guest_os(struct machine *machine, void *data) @@ -322,6 +334,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) struct perf_data_file *file = &rec->file; struct perf_session *session; bool disabled = false, draining = false; + int fd; rec->progname = argv[0]; @@ -336,6 +349,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) return -1; } + fd = perf_data_file__fd(file); rec->session = session; record__init_features(rec); @@ -360,12 +374,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); if (file->is_pipe) { - err = perf_header__write_pipe(file->fd); + err = perf_header__write_pipe(fd); if (err < 0) goto out_child; } else { - err = perf_session__write_header(session, rec->evlist, - file->fd, false); + err = perf_session__write_header(session, rec->evlist, fd, false); if (err < 0) goto out_child; } @@ -397,7 +410,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) * return this more properly and also * propagate errors that now are calling die() */ - err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist, + err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist, process_synthesized_event); if (err <= 0) { pr_err("Couldn't record tracing data.\n"); @@ -504,19 +517,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) goto out_child; } - if (!quiet) { + if (!quiet) fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); - /* - * Approximate RIP event size: 24 bytes. 
- */ - fprintf(stderr, - "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", - (double)rec->bytes_written / 1024.0 / 1024.0, - file->path, - rec->bytes_written / 24); - } - out_child: if (forks) { int exit_status; @@ -535,13 +538,29 @@ out_child: } else status = err; + /* this will be recalculated during process_buildids() */ + rec->samples = 0; + if (!err && !file->is_pipe) { rec->session->header.data_size += rec->bytes_written; if (!rec->no_buildid) process_buildids(rec); - perf_session__write_header(rec->session, rec->evlist, - file->fd, true); + perf_session__write_header(rec->session, rec->evlist, fd, true); + } + + if (!err && !quiet) { + char samples[128]; + + if (rec->samples) + scnprintf(samples, sizeof(samples), + " (%" PRIu64 " samples)", rec->samples); + else + samples[0] = '\0'; + + fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n", + perf_data_file__size(file) / 1024.0 / 1024.0, + file->path, samples); } out_delete_session: @@ -720,6 +739,13 @@ static struct record record = { .default_per_cpu = true, }, }, + .tool = { + .sample = process_sample_event, + .fork = perf_event__process_fork, + .comm = perf_event__process_comm, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + }, }; #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: " diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 072ae8ad67fc..2f91094e228b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -86,17 +86,6 @@ static int report__config(const char *var, const char *value, void *cb) return perf_default_config(var, value, cb); } -static void report__inc_stats(struct report *rep, struct hist_entry *he) -{ - /* - * The @he is either of a newly created one or an existing one - * merging current sample. We only want to count a new one so - * checking ->nr_events being 1. - */ - if (he->stat.nr_events == 1) - rep->nr_entries++; -} - static int hist_iter__report_callback(struct hist_entry_iter *iter, struct addr_location *al, bool single, void *arg) @@ -108,8 +97,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter, struct mem_info *mi; struct branch_info *bi; - report__inc_stats(rep, he); - if (!ui__has_annotation()) return 0; @@ -499,6 +486,9 @@ static int __cmd_report(struct report *rep) report__warn_kptr_restrict(rep); + evlist__for_each(session->evlist, pos) + rep->nr_entries += evsel__hists(pos)->nr_entries; + if (use_browser == 0) { if (verbose > 3) perf_session__fprintf(session, stdout); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 891086376381..e598e4e98170 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1730,7 +1730,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) "detailed run - start a lot of events"), OPT_BOOLEAN('S', "sync", &sync_run, "call sync() before starting a run"), - OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, + OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, "print large numbers with thousands\' separators", stat__set_big_num), OPT_STRING('C', "cpu", &target.cpu_list, "cpu", diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 616f0fcb4701..c4c7eac69de4 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -165,7 +165,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip) err ? 
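[Aside: the summary rewrite above drops the old "bytes / 24" sample estimate in favour of the exact count taken during the build-id pass. A small standalone sketch of the suffix construction, with plain snprintf() standing in for perf's scnprintf():]

    #include <stdio.h>

    /* Print " (N samples)" only when a count exists; an empty suffix
     * keeps the message tidy when no samples were seen. */
    static void print_summary(double mb, const char *path,
                              unsigned long long nr_samples)
    {
            char samples[128];

            if (nr_samples)
                    snprintf(samples, sizeof(samples),
                             " (%llu samples)", nr_samples);
            else
                    samples[0] = '\0';

            fprintf(stderr,
                    "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
                    mb, path, samples);
    }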
"[unknown]" : uts.release, perf_version_string); if (use_browser <= 0) sleep(5); - + map->erange_warned = true; } diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index badfabc6a01f..7e935f1083ec 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -929,66 +929,66 @@ static struct syscall_fmt { .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), }, { .name = "close", .errmsg = true, - .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, { .name = "connect", .errmsg = true, }, { .name = "dup", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "dup2", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "dup3", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), }, { .name = "eventfd2", .errmsg = true, .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, }, { .name = "faccessat", .errmsg = true, .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = "fadvise64", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fallocate", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fchdir", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fchmod", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fchmodat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, { .name = "fchown", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fchownat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, { .name = "fcntl", .errmsg = true, .arg_scnprintf = { [0] = SCA_FD, /* fd */ [1] = SCA_STRARRAY, /* cmd */ }, .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, }, { .name = "fdatasync", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "flock", .errmsg = true, .arg_scnprintf = { [0] = SCA_FD, /* fd */ [1] = SCA_FLOCK, /* cmd */ }, }, { .name = "fsetxattr", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fstat", .errmsg = true, .alias = "newfstat", - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fstatat", .errmsg = true, .alias = "newfstatat", - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = "fstatfs", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "fsync", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "ftruncate", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = 
SCA_FD, /* fd */ }, }, { .name = "futex", .errmsg = true, .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, }, { .name = "futimesat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, { .name = "getdents", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "getdents64", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), }, { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, { .name = "ioctl", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ + .arg_scnprintf = { [0] = SCA_FD, /* fd */ #if defined(__i386__) || defined(__x86_64__) /* * FIXME: Make this available to all arches. @@ -1002,7 +1002,7 @@ static struct syscall_fmt { { .name = "kill", .errmsg = true, .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, { .name = "linkat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, { .name = "lseek", .errmsg = true, .arg_scnprintf = { [0] = SCA_FD, /* fd */ [2] = SCA_STRARRAY, /* whence */ }, @@ -1012,9 +1012,9 @@ static struct syscall_fmt { .arg_scnprintf = { [0] = SCA_HEX, /* start */ [2] = SCA_MADV_BHV, /* behavior */ }, }, { .name = "mkdirat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, { .name = "mknodat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, { .name = "mlock", .errmsg = true, .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, { .name = "mlockall", .errmsg = true, @@ -1036,9 +1036,9 @@ static struct syscall_fmt { { .name = "munmap", .errmsg = true, .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, { .name = "name_to_handle_at", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = "newfstatat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = "open", .errmsg = true, .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, }, { .name = "open_by_handle_at", .errmsg = true, @@ -1052,20 +1052,20 @@ static struct syscall_fmt { { .name = "poll", .errmsg = true, .timeout = true, }, { .name = "ppoll", .errmsg = true, .timeout = true, }, { .name = "pread", .errmsg = true, .alias = "pread64", - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "preadv", .errmsg = true, .alias = "pread", - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), }, { .name = "pwrite", .errmsg = true, .alias = "pwrite64", - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "pwritev", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "read", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "readlinkat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = 
"readv", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "recvfrom", .errmsg = true, .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, { .name = "recvmmsg", .errmsg = true, @@ -1073,7 +1073,7 @@ static struct syscall_fmt { { .name = "recvmsg", .errmsg = true, .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, }, { .name = "renameat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = "rt_sigaction", .errmsg = true, .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, }, { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), }, @@ -1091,7 +1091,7 @@ static struct syscall_fmt { { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), }, { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, { .name = "shutdown", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "socket", .errmsg = true, .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ [1] = SCA_SK_TYPE, /* type */ }, @@ -1102,7 +1102,7 @@ static struct syscall_fmt { .arg_parm = { [0] = &strarray__socket_families, /* family */ }, }, { .name = "stat", .errmsg = true, .alias = "newstat", }, { .name = "symlinkat", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, { .name = "tgkill", .errmsg = true, .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, }, { .name = "tkill", .errmsg = true, @@ -1113,9 +1113,9 @@ static struct syscall_fmt { { .name = "utimensat", .errmsg = true, .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, }, { .name = "write", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, { .name = "writev", .errmsg = true, - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, }; static int syscall_fmt__cmp(const void *name, const void *fmtp) @@ -1191,7 +1191,7 @@ static struct thread_trace *thread__trace(struct thread *thread, FILE *fp) if (thread__priv(thread) == NULL) thread__set_priv(thread, thread_trace__new()); - + if (thread__priv(thread) == NULL) goto fail; @@ -2056,23 +2056,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv) if (trace->trace_syscalls && perf_evlist__add_syscall_newtp(evlist, trace__sys_enter, trace__sys_exit)) - goto out_error_tp; + goto out_error_raw_syscalls; if (trace->trace_syscalls) perf_evlist__add_vfs_getname(evlist); if ((trace->trace_pgfaults & TRACE_PFMAJ) && - perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) - goto out_error_tp; + perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) { + goto out_error_mem; + } if ((trace->trace_pgfaults & TRACE_PFMIN) && perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN)) - goto out_error_tp; + goto out_error_mem; if (trace->sched && - perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime", - trace__sched_stat_runtime)) - goto out_error_tp; + perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime", + trace__sched_stat_runtime)) + goto out_error_sched_stat_runtime; err = perf_evlist__create_maps(evlist, &trace->opts.target); if (err < 0) { @@ -2202,8 +2203,12 @@ out: { char errbuf[BUFSIZ]; -out_error_tp: - perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf)); +out_error_sched_stat_runtime: + 
debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); + goto out_error; + +out_error_raw_syscalls: + debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); goto out_error; out_error_mmap: @@ -2217,6 +2222,9 @@ out_error: fprintf(trace->output, "%s\n", errbuf); goto out_delete_evlist; } +out_error_mem: + fprintf(trace->output, "Not enough memory to run!\n"); + goto out_delete_evlist; +} static int trace__replay(struct trace *trace) diff --git a/tools/perf/config/Makefile index 648e31ff4021..cc224080b525 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -198,6 +198,7 @@ CORE_FEATURE_TESTS = \ libpython-version \ libslang \ libunwind \ + pthread-attr-setaffinity-np \ stackprotector-all \ timerfd \ libdw-dwarf-unwind \ @@ -226,6 +227,7 @@ VF_FEATURE_TESTS = \ libelf-getphdrnum \ libelf-mmap \ libpython-version \ + pthread-attr-setaffinity-np \ stackprotector-all \ timerfd \ libunwind-debug-frame \ @@ -301,6 +303,10 @@ ifeq ($(feature-sync-compare-and-swap), 1) CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT endif +ifeq ($(feature-pthread-attr-setaffinity-np), 1) + CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP +endif + ifndef NO_BIONIC $(call feature_check,bionic) ifeq ($(feature-bionic), 1) diff --git a/tools/perf/config/feature-checks/Makefile index 53f19b5dbc37..42ac05aaf8ac 100644 --- a/tools/perf/config/feature-checks/Makefile +++ b/tools/perf/config/feature-checks/Makefile @@ -25,6 +25,7 @@ FILES= \ test-libslang.bin \ test-libunwind.bin \ test-libunwind-debug-frame.bin \ + test-pthread-attr-setaffinity-np.bin \ test-stackprotector-all.bin \ test-timerfd.bin \ test-libdw-dwarf-unwind.bin \ @@ -47,6 +48,9 @@ test-all.bin: test-hello.bin: $(BUILD) +test-pthread-attr-setaffinity-np.bin: + $(BUILD) -Werror -lpthread + test-stackprotector-all.bin: $(BUILD) -Werror -fstack-protector-all diff --git a/tools/perf/config/feature-checks/test-all.c index 652e0098eba6..6d4d09323922 100644 --- a/tools/perf/config/feature-checks/test-all.c +++ b/tools/perf/config/feature-checks/test-all.c @@ -97,6 +97,10 @@ # include "test-zlib.c" #undef main +#define main main_test_pthread_attr_setaffinity_np +# include "test-pthread-attr-setaffinity-np.c" +#undef main + int main(int argc, char *argv[]) { main_test_libpython(); @@ -121,6 +125,7 @@ int main(int argc, char *argv[]) main_test_libdw_dwarf_unwind(); main_test_sync_compare_and_swap(argc, argv); main_test_zlib(); + main_test_pthread_attr_setaffinity_np(); return 0; } diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c new file mode 100644 index 000000000000..0a0d3ecb4e8a --- /dev/null +++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c @@ -0,0 +1,14 @@ +#include <stdint.h> +#include <pthread.h> + +int main(void) +{ + int ret = 0; + pthread_attr_t thread_attr; + + pthread_attr_init(&thread_attr); + /* don't care about exact args, just the API itself in libpthread */ + ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL); + + return ret; +} diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c index 790ceba6ad3f..28431d1bbcf5 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c @@ -5,7 +5,10 @@ * ANY 
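[Aside: the relabelled error paths completed above follow the usual cascading-goto pattern, with one label per failure mode funnelling into common cleanup. A hedged, self-contained sketch with stub helpers, not the real perf functions:]

    #include <stdio.h>

    static int add_syscall_tracepoints(void) { return -1; }    /* stub */
    static int add_pgfault_event(void)       { return 0;  }    /* stub */

    static int setup_events(void)
    {
            if (add_syscall_tracepoints() < 0)
                    goto out_error_raw_syscalls;

            if (add_pgfault_event() < 0)
                    goto out_error_mem;

            return 0;

    out_error_raw_syscalls:
            /* each label prints a message specific to what failed... */
            fprintf(stderr, "could not set up raw_syscalls tracepoints\n");
            goto out_error;
    out_error_mem:
            fprintf(stderr, "not enough memory\n");
    out_error:
            /* ...then funnels into the shared failure exit */
            return -1;
    }

[And on the feature probe above: it only needs the symbol to link, hence the dummy arguments. Real use passes a cpu_set_t; a sketch of what a call site guarded by HAVE_PTHREAD_ATTR_SETAFFINITY_NP can do (build with -pthread, mirroring the -lpthread in the test rule):]

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static void *worker(void *arg)
    {
            (void)arg;
            return NULL;
    }

    int main(void)
    {
            pthread_attr_t attr;
            cpu_set_t cpus;
            pthread_t tid;
            int err;

            CPU_ZERO(&cpus);
            CPU_SET(0, &cpus);              /* pin the new thread to CPU 0 */

            pthread_attr_init(&attr);
            err = pthread_attr_setaffinity_np(&attr, sizeof(cpus), &cpus);
            if (err)
                    fprintf(stderr, "setaffinity: error %d\n", err);

            pthread_create(&tid, &attr, worker, NULL);
            pthread_join(tid, NULL);
            pthread_attr_destroy(&attr);
            return 0;
    }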
CHANGES MADE HERE WILL BE LOST! * */ - +#include <stdbool.h> +#ifndef HAS_BOOL +# define HAS_BOOL 1 +#endif #line 1 "Context.xs" /* * Context.xs. XS interfaces for perf script. diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py index c9b4b6269b51..1091bd47adfd 100644 --- a/tools/perf/tests/attr.py +++ b/tools/perf/tests/attr.py @@ -104,7 +104,6 @@ class Event(dict): continue if not self.compare_data(self[t], other[t]): log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) - # Test file description needs to have following sections: # [config] diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c index 8d110dec393e..18619966454c 100644 --- a/tools/perf/tests/hists_cumulate.c +++ b/tools/perf/tests/hists_cumulate.c @@ -140,7 +140,7 @@ static void del_hist_entries(struct hists *hists) he = rb_entry(node, struct hist_entry, rb_node); rb_erase(node, root_out); rb_erase(&he->rb_node_in, root_in); - hist_entry__free(he); + hist_entry__delete(he); } } diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c index f5547610da02..b52c9faea224 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -106,7 +106,7 @@ static void del_hist_entries(struct hists *hists) he = rb_entry(node, struct hist_entry, rb_node); rb_erase(node, root_out); rb_erase(&he->rb_node_in, root_in); - hist_entry__free(he); + hist_entry__delete(he); } } diff --git a/tools/perf/tests/make b/tools/perf/tests/make index 69a71ff84e01..75709d2b17b4 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -222,7 +222,6 @@ tarpkg: @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \ echo "- $@: $$cmd" && echo $$cmd > $@ && \ ( eval $$cmd ) >> $@ 2>&1 - all: $(run) $(run_O) tarpkg @echo OK diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 7f2f51f93619..1cdab0ce00e2 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1145,6 +1145,49 @@ static int test__pinned_group(struct perf_evlist *evlist) return 0; } +static int test__checkevent_breakpoint_len(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); + TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) == + evsel->attr.bp_type); + TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 == + evsel->attr.bp_len); + + return 0; +} + +static int test__checkevent_breakpoint_len_w(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); + TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W == + evsel->attr.bp_type); + TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 == + evsel->attr.bp_len); + + return 0; +} + +static int +test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + + return 
test__checkevent_breakpoint_rw(evlist); +} + static int count_tracepoints(void) { char events_path[PATH_MAX]; @@ -1420,6 +1463,21 @@ static struct evlist_test test__events[] = { .check = test__pinned_group, .id = 41, }, + { + .name = "mem:0/1", + .check = test__checkevent_breakpoint_len, + .id = 42, + }, + { + .name = "mem:0/2:w", + .check = test__checkevent_breakpoint_len_w, + .id = 43, + }, + { + .name = "mem:0/4:rw:u", + .check = test__checkevent_breakpoint_len_rw_modifier, + .id = 44 + }, #if defined(__s390x__) { .name = "kvm-s390:kvm_s390_create_vm", @@ -1471,7 +1529,7 @@ static int test_event(struct evlist_test *e) } else { ret = e->check(evlist); } - + perf_evlist__delete(evlist); return ret; diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c index 4908c648a597..30c02181e78b 100644 --- a/tools/perf/tests/sample-parsing.c +++ b/tools/perf/tests/sample-parsing.c @@ -110,7 +110,7 @@ static bool samples_same(const struct perf_sample *s1, if (type & PERF_SAMPLE_STACK_USER) { COMP(user_stack.size); - if (memcmp(s1->user_stack.data, s1->user_stack.data, + if (memcmp(s1->user_stack.data, s2->user_stack.data, s1->user_stack.size)) { pr_debug("Samples differ at 'user_stack'\n"); return false; diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 1e0a2fd80115..9d32e3c0cfee 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -517,7 +517,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser) } annotate_browser__set_top(browser, dl, idx); - + return true; } @@ -867,7 +867,6 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser ++browser->nr_jumps; } - } static inline int width_jumps(int n) diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 482adae3cc44..25d608394d74 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -285,7 +285,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ } #define __HPP_SORT_FN(_type, _field) \ -static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct hist_entry *a, struct hist_entry *b) \ { \ return __hpp__sort(a, b, he_get_##_field); \ } @@ -312,7 +313,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ } #define __HPP_SORT_ACC_FN(_type, _field) \ -static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct hist_entry *a, struct hist_entry *b) \ { \ return __hpp__sort_acc(a, b, he_get_acc_##_field); \ } @@ -331,7 +333,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ } #define __HPP_SORT_RAW_FN(_type, _field) \ -static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct hist_entry *a, struct hist_entry *b) \ { \ return __hpp__sort(a, b, he_get_raw_##_field); \ } @@ -361,7 +364,8 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period) HPP_RAW_FNS(samples, nr_events) HPP_RAW_FNS(period, period) -static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, +static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *a __maybe_unused, struct hist_entry *b __maybe_unused) { return 0; diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h index f34f89eb607c..717d39d3052b 100644 --- a/tools/perf/ui/progress.h +++ 
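[Aside: for context on the new test strings above, "mem:0/1:rw" asks for a read/write hardware breakpoint of length 1 at address 0. A hedged sketch of the perf_event_attr such an event ultimately produces (error handling trimmed; this is the syscall-level shape, not the actual perf code path):]

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_rw_watchpoint(unsigned long addr)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_BREAKPOINT;
            attr.bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
            attr.bp_addr = addr;
            attr.bp_len = HW_BREAKPOINT_LEN_1;      /* the new /len suffix */
            attr.sample_period = 1;

            /* monitor the calling thread on any CPU */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }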
b/tools/perf/ui/progress.h @@ -4,12 +4,12 @@ #include <linux/types.h> void ui_progress__finish(void); - + struct ui_progress { const char *title; u64 curr, next, step, total; }; - + void ui_progress__init(struct ui_progress *p, u64 total, const char *title); void ui_progress__update(struct ui_progress *p, u64 adv); diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c index 1c8b9afd5d6e..88f5143a5981 100644 --- a/tools/perf/ui/tui/helpline.c +++ b/tools/perf/ui/tui/helpline.c @@ -9,6 +9,7 @@ #include "../libslang.h" char ui_helpline__last_msg[1024]; +bool tui_helpline__set; static void tui_helpline__pop(void) { @@ -35,6 +36,8 @@ static int tui_helpline__show(const char *format, va_list ap) sizeof(ui_helpline__last_msg) - backlog, format, ap); backlog += ret; + tui_helpline__set = true; + if (ui_helpline__last_msg[backlog - 1] == '\n') { ui_helpline__puts(ui_helpline__last_msg); SLsmg_refresh(); diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 3c38f25b1695..b77e1d771363 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c @@ -17,6 +17,7 @@ static volatile int ui__need_resize; extern struct perf_error_ops perf_tui_eops; +extern bool tui_helpline__set; extern void hist_browser__init_hpp(void); @@ -159,7 +160,7 @@ out: void ui__exit(bool wait_for_ok) { - if (wait_for_ok) + if (wait_for_ok && tui_helpline__set) ui__question_window("Fatal Error", ui_helpline__last_msg, "Press any key...", 0); diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 79999ceaf2be..61bf9128e1f2 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops) goto out_free_ops; ops->locked.ins = ins__find(name); + free(name); + if (ops->locked.ins == NULL) goto out_free_ops; if (!ops->locked.ins->ops) return 0; - if (ops->locked.ins->ops->parse) - ops->locked.ins->ops->parse(ops->locked.ops); + if (ops->locked.ins->ops->parse && + ops->locked.ins->ops->parse(ops->locked.ops) < 0) + goto out_free_ops; return 0; @@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size, static void lock__delete(struct ins_operands *ops) { + struct ins *ins = ops->locked.ins; + + if (ins && ins->ops->free) + ins->ops->free(ops->locked.ops); + else + ins__delete(ops->locked.ops); + zfree(&ops->locked.ops); zfree(&ops->target.raw); zfree(&ops->target.name); @@ -229,7 +239,7 @@ static int mov__parse(struct ins_operands *ops) *s = '\0'; ops->source.raw = strdup(ops->raw); *s = ','; - + if (ops->source.raw == NULL) return -1; @@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl) if (!dl->ins->ops) return; - if (dl->ins->ops->parse) - dl->ins->ops->parse(&dl->ops); + if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0) + dl->ins = NULL; } static int disasm_line__parse(char *line, char **namep, char **rawp) diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c index f4654183d391..55355b3d4f85 100644 --- a/tools/perf/util/color.c +++ b/tools/perf/util/color.c @@ -5,132 +5,6 @@ int perf_use_color_default = -1; -static int parse_color(const char *name, int len) -{ - static const char * const color_names[] = { - "normal", "black", "red", "green", "yellow", - "blue", "magenta", "cyan", "white" - }; - char *end; - int i; - - for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) { - const char *str = color_names[i]; - if (!strncasecmp(name, str, len) && !str[len]) - return i - 1; - } - i = strtol(name, &end, 10); - if (end - 
name == len && i >= -1 && i <= 255) - return i; - return -2; -} - -static int parse_attr(const char *name, int len) -{ - static const int attr_values[] = { 1, 2, 4, 5, 7 }; - static const char * const attr_names[] = { - "bold", "dim", "ul", "blink", "reverse" - }; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(attr_names); i++) { - const char *str = attr_names[i]; - if (!strncasecmp(name, str, len) && !str[len]) - return attr_values[i]; - } - return -1; -} - -void color_parse(const char *value, const char *var, char *dst) -{ - color_parse_mem(value, strlen(value), var, dst); -} - -void color_parse_mem(const char *value, int value_len, const char *var, - char *dst) -{ - const char *ptr = value; - int len = value_len; - int attr = -1; - int fg = -2; - int bg = -2; - - if (!strncasecmp(value, "reset", len)) { - strcpy(dst, PERF_COLOR_RESET); - return; - } - - /* [fg [bg]] [attr] */ - while (len > 0) { - const char *word = ptr; - int val, wordlen = 0; - - while (len > 0 && !isspace(word[wordlen])) { - wordlen++; - len--; - } - - ptr = word + wordlen; - while (len > 0 && isspace(*ptr)) { - ptr++; - len--; - } - - val = parse_color(word, wordlen); - if (val >= -1) { - if (fg == -2) { - fg = val; - continue; - } - if (bg == -2) { - bg = val; - continue; - } - goto bad; - } - val = parse_attr(word, wordlen); - if (val < 0 || attr != -1) - goto bad; - attr = val; - } - - if (attr >= 0 || fg >= 0 || bg >= 0) { - int sep = 0; - - *dst++ = '\033'; - *dst++ = '['; - if (attr >= 0) { - *dst++ = '0' + attr; - sep++; - } - if (fg >= 0) { - if (sep++) - *dst++ = ';'; - if (fg < 8) { - *dst++ = '3'; - *dst++ = '0' + fg; - } else { - dst += sprintf(dst, "38;5;%d", fg); - } - } - if (bg >= 0) { - if (sep++) - *dst++ = ';'; - if (bg < 8) { - *dst++ = '4'; - *dst++ = '0' + bg; - } else { - dst += sprintf(dst, "48;5;%d", bg); - } - } - *dst++ = 'm'; - } - *dst = 0; - return; -bad: - die("bad color value '%.*s' for variable '%s'", value_len, value, var); -} - int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty) { if (value) { diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index 0a594b8a0c26..38146f922c54 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h @@ -30,8 +30,6 @@ extern int perf_use_color_default; int perf_color_default_config(const char *var, const char *value, void *cb); int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); -void color_parse(const char *value, const char *var, char *dst); -void color_parse_mem(const char *value, int len, const char *var, char *dst); int color_vsnprintf(char *bf, size_t size, const char *color, const char *fmt, va_list args); int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args); diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 45be944d450a..c2f7d3b90966 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -532,12 +532,8 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size) break; cache_offset = offset & DSO__DATA_CACHE_MASK; - ret = -EINVAL; - if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET)) - break; - - ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE); + ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset); if (ret <= 0) break; diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index 3782c82c6e44..ced92841ff97 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -139,6 +139,7 @@ struct dso { u32 status_seen; size_t file_size; struct list_head 
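[Aside: the dso_cache__read() change above folds a seek-plus-read pair into one pread(). A minimal sketch of why that matters: the shared file offset is never touched, so concurrent readers of one fd cannot race on lseek():]

    #include <errno.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Read len bytes at an absolute offset, retrying on signal interruption. */
    static ssize_t read_at(int fd, void *buf, size_t len, off_t off)
    {
            ssize_t ret;

            do {
                    ret = pread(fd, buf, len, off);
            } while (ret < 0 && errno == EINTR);

            return ret;
    }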
open_entry; + u64 frame_offset; } data; union { /* Tool specific area */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index cbab1fb77b1d..28b8ce86bf12 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1436,33 +1436,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) return printed + fprintf(fp, "\n"); } -int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused, - int err, char *buf, size_t size) -{ - char sbuf[128]; - - switch (err) { - case ENOENT: - scnprintf(buf, size, "%s", - "Error:\tUnable to find debugfs\n" - "Hint:\tWas your kernel was compiled with debugfs support?\n" - "Hint:\tIs the debugfs filesystem mounted?\n" - "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'"); - break; - case EACCES: - scnprintf(buf, size, - "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n" - "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n", - debugfs_mountpoint, debugfs_mountpoint); - break; - default: - scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf))); - break; - } - - return 0; -} - int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused, int err, char *buf, size_t size) { diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 0ba93f67ab94..c94a9e03ecf1 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -183,7 +183,6 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); -int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size); int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1e90c8557ede..ea51a90e20a0 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -709,6 +709,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) if (opts->sample_weight) perf_evsel__set_sample_bit(evsel, WEIGHT); + attr->task = track; attr->mmap = track; attr->mmap2 = track && !perf_missing_features.mmap2; attr->comm = track; @@ -797,6 +798,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) { + if (ncpus == 0 || nthreads == 0) + return 0; + if (evsel->system_wide) nthreads = 1; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index b20e40c74468..1f407f7352a7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2237,6 +2237,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, * - unique number to identify actual perf.data files * - encode endianness of file */ + ph->version = PERF_HEADER_VERSION_2; /* check magic number with one endianness */ if (magic == __perf_magic2) @@ -2247,7 +2248,6 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, return -1; ph->needs_swap = true; - ph->version = PERF_HEADER_VERSION_2; return 0; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 182395546ddc..70b48a65064c 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -241,6 +241,20 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) return he->stat.period == 0; } +static void hists__delete_entry(struct hists *hists, struct hist_entry *he) +{ + rb_erase(&he->rb_node, &hists->entries); + + if 
(sort__need_collapse) + rb_erase(&he->rb_node_in, &hists->entries_collapsed); + + --hists->nr_entries; + if (!he->filtered) + --hists->nr_non_filtered_entries; + + hist_entry__delete(he); +} + void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) { struct rb_node *next = rb_first(&hists->entries); @@ -258,16 +272,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) (zap_kernel && n->level != '.') || hists__decay_entry(hists, n)) && !n->used) { - rb_erase(&n->rb_node, &hists->entries); - - if (sort__need_collapse) - rb_erase(&n->rb_node_in, &hists->entries_collapsed); - - --hists->nr_entries; - if (!n->filtered) - --hists->nr_non_filtered_entries; - - hist_entry__free(n); + hists__delete_entry(hists, n); } } } @@ -281,16 +286,7 @@ void hists__delete_entries(struct hists *hists) n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &hists->entries); - - if (sort__need_collapse) - rb_erase(&n->rb_node_in, &hists->entries_collapsed); - - --hists->nr_entries; - if (!n->filtered) - --hists->nr_non_filtered_entries; - - hist_entry__free(n); + hists__delete_entry(hists, n); } } @@ -433,6 +429,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, if (!he) return NULL; + hists->nr_entries++; + rb_link_node(&he->rb_node_in, parent, p); rb_insert_color(&he->rb_node_in, hists->entries_in); out: @@ -915,7 +913,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) if (perf_hpp__should_skip(fmt)) continue; - cmp = fmt->cmp(left, right); + cmp = fmt->cmp(fmt, left, right); if (cmp) break; } @@ -933,7 +931,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) if (perf_hpp__should_skip(fmt)) continue; - cmp = fmt->collapse(left, right); + cmp = fmt->collapse(fmt, left, right); if (cmp) break; } @@ -941,7 +939,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) return cmp; } -void hist_entry__free(struct hist_entry *he) +void hist_entry__delete(struct hist_entry *he) { zfree(&he->branch_info); zfree(&he->mem_info); @@ -981,7 +979,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, iter->callchain, he->callchain); } - hist_entry__free(he); + hist_entry__delete(he); return false; } @@ -1063,7 +1061,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b) if (perf_hpp__should_skip(fmt)) continue; - cmp = fmt->sort(a, b); + cmp = fmt->sort(fmt, a, b); if (cmp) break; } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 46bd50344f85..2b690d028907 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -119,7 +119,7 @@ int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); int hist_entry__transaction_len(void); int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size, struct hists *hists); -void hist_entry__free(struct hist_entry *); +void hist_entry__delete(struct hist_entry *he); void hists__output_resort(struct hists *hists, struct ui_progress *prog); void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); @@ -195,9 +195,12 @@ struct perf_hpp_fmt { struct hist_entry *he); int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he); - int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b); - int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b); - int64_t (*sort)(struct hist_entry *a, struct hist_entry *b); + int64_t (*cmp)(struct perf_hpp_fmt *fmt, + struct 
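[Aside: the extra perf_hpp_fmt argument threaded through the cmp/collapse/sort calls above lets a single generic comparator recover per-column state, instead of needing one trampoline function per column. A self-contained sketch of the pattern with illustrative types, not the perf ones:]

    struct entry { long vals[8]; };

    struct column {
            int idx;        /* which slot of the entry this column compares */
            long (*cmp)(const struct column *col,
                        const struct entry *a, const struct entry *b);
    };

    /* One comparator serves every column: the context rides in with col.
     * (Assumes the values fit in a long without subtraction overflow.) */
    static long column_cmp(const struct column *col,
                           const struct entry *a, const struct entry *b)
    {
            return a->vals[col->idx] - b->vals[col->idx];
    }

    static const struct column samples_col = { .idx = 0, .cmp = column_cmp };
    static const struct column period_col  = { .idx = 1, .cmp = column_cmp };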
hist_entry *a, struct hist_entry *b); + int64_t (*collapse)(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b); + int64_t (*sort)(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b); struct list_head list; struct list_head sort_list; diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 6951a9d42339..0e42438b1e59 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -116,6 +116,22 @@ struct thread; #define map__for_each_symbol(map, pos, n) \ dso__for_each_symbol(map->dso, pos, n, map->type) +/* map__for_each_symbol_by_name - iterate over the symbols in the given map + * that have the given name + * + * @map: the 'struct map *' in which symbols are iterated + * @sym_name: the symbol name + * @pos: the 'struct symbol *' to use as a loop cursor + * @filter: to use when loading the DSO + */ +#define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \ + for (pos = map__find_symbol_by_name(map, sym_name, filter); \ + pos && strcmp(pos->name, sym_name) == 0; \ + pos = symbol__next_by_name(pos)) + +#define map__for_each_symbol_by_name(map, sym_name, pos) \ + __map__for_each_symbol_by_name(map, sym_name, (pos), NULL) + typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); void map__init(struct map *map, enum map_type type, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 77b43fe43d55..7f8ec6ce2823 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -526,7 +526,7 @@ do { \ } int parse_events_add_breakpoint(struct list_head *list, int *idx, - void *ptr, char *type) + void *ptr, char *type, u64 len) { struct perf_event_attr attr; @@ -536,14 +536,15 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx, if (parse_breakpoint_type(type, &attr)) return -EINVAL; - /* - * We should find a nice way to override the access length - * Provide some defaults for now - */ - if (attr.bp_type == HW_BREAKPOINT_X) - attr.bp_len = sizeof(long); - else - attr.bp_len = HW_BREAKPOINT_LEN_4; + /* Provide some defaults if len is not specified */ + if (!len) { + if (attr.bp_type == HW_BREAKPOINT_X) + len = sizeof(long); + else + len = HW_BREAKPOINT_LEN_4; + } + + attr.bp_len = len; attr.type = PERF_TYPE_BREAKPOINT; attr.sample_period = 1; @@ -1121,7 +1122,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, return; for_each_subsystem(sys_dir, sys_dirent, sys_next) { - if (subsys_glob != NULL && + if (subsys_glob != NULL && !strglobmatch(sys_dirent.d_name, subsys_glob)) continue; @@ -1132,7 +1133,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, continue; for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { - if (event_glob != NULL && + if (event_glob != NULL && !strglobmatch(evt_dirent.d_name, event_glob)) continue; @@ -1305,7 +1306,7 @@ static void print_symbol_events(const char *event_glob, unsigned type, for (i = 0; i < max; i++, syms++) { - if (event_glob != NULL && + if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) || (syms->alias && strglobmatch(syms->alias, event_glob)))) continue; @@ -1366,7 +1367,7 @@ void print_events(const char *event_glob, bool name_only) printf("\n"); printf(" %-50s [%s]\n", - "mem:<addr>[:access]", + "mem:<addr>[/len][:access]", event_type_descriptors[PERF_TYPE_BREAKPOINT]); printf("\n"); } diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index db2cf78ff0f3..ff6e1fa4111e 100644 --- a/tools/perf/util/parse-events.h 
+++ b/tools/perf/util/parse-events.h @@ -71,6 +71,7 @@ struct parse_events_term { int type_val; int type_term; struct list_head list; + bool used; }; struct parse_events_evlist { @@ -104,7 +105,7 @@ int parse_events_add_numeric(struct list_head *list, int *idx, int parse_events_add_cache(struct list_head *list, int *idx, char *type, char *op_result1, char *op_result2); int parse_events_add_breakpoint(struct list_head *list, int *idx, - void *ptr, char *type); + void *ptr, char *type, u64 len); int parse_events_add_pmu(struct list_head *list, int *idx, char *pmu , struct list_head *head_config); enum perf_pmu_event_symbol_type diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 906630bbf8eb..94eacb6c1ef7 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -159,6 +159,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE <mem>{ {modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); } : { return ':'; } +"/" { return '/'; } {num_dec} { return value(yyscanner, 10); } {num_hex} { return value(yyscanner, 16); } /* diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 93c4c9fbc922..72def077dbbf 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -326,6 +326,28 @@ PE_NAME_CACHE_TYPE } event_legacy_mem: +PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, + (void *) $2, $6, $4)); + $$ = list; +} +| +PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, + (void *) $2, NULL, $4)); + $$ = list; +} +| PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc { struct parse_events_evlist *data = _data; @@ -333,7 +355,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc ALLOC_LIST(list); ABORT_ON(parse_events_add_breakpoint(list, &data->idx, - (void *) $2, $4)); + (void *) $2, $4, 0)); $$ = list; } | @@ -344,7 +366,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc ALLOC_LIST(list); ABORT_ON(parse_events_add_breakpoint(list, &data->idx, - (void *) $2, NULL)); + (void *) $2, NULL, 0)); $$ = list; } diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c index f62dee7bd924..4a015f77e2b5 100644 --- a/tools/perf/util/parse-options.c +++ b/tools/perf/util/parse-options.c @@ -46,7 +46,7 @@ static int get_value(struct parse_opt_ctx_t *p, return opterror(opt, "is not usable", flags); if (opt->flags & PARSE_OPT_EXCLUSIVE) { - if (p->excl_opt) { + if (p->excl_opt && p->excl_opt != opt) { char msg[128]; if (((flags & OPT_SHORT) && p->excl_opt->short_name) || diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 5c9c4947cfb4..48411674da0f 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -551,31 +551,68 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v, } /* + * Term is a string term, and might be a param-term. Try to look up its value + * in the remaining terms. + * - We have a term like "base-or-format-term=param-term", + * - We need to find the value supplied for "param-term" (with param-term named + * in a config string) later on in the term list. 
+ */ +static int pmu_resolve_param_term(struct parse_events_term *term, + struct list_head *head_terms, + __u64 *value) +{ + struct parse_events_term *t; + + list_for_each_entry(t, head_terms, list) { + if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { + if (!strcmp(t->config, term->config)) { + t->used = true; + *value = t->val.num; + return 0; + } + } + } + + if (verbose) + printf("Required parameter '%s' not specified\n", term->config); + + return -1; +} + +/* * Setup one of config[12] attr members based on the * user input data - term parameter. */ static int pmu_config_term(struct list_head *formats, struct perf_event_attr *attr, struct parse_events_term *term, + struct list_head *head_terms, bool zero) { struct perf_pmu_format *format; __u64 *vp; + __u64 val; + + /* + * If this is a parameter we've already used for parameterized-eval, + * skip it in normal eval. + */ + if (term->used) + return 0; /* - * Support only for hardcoded and numnerial terms. * Hardcoded terms should be already in, so nothing * to be done for them. */ if (parse_events__is_hardcoded_term(term)) return 0; - if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) - return -EINVAL; - format = pmu_find_format(formats, term->config); - if (!format) + if (!format) { + if (verbose) + printf("Invalid event/parameter '%s'\n", term->config); return -EINVAL; + } switch (format->value) { case PERF_PMU_FORMAT_VALUE_CONFIG: @@ -592,11 +629,25 @@ static int pmu_config_term(struct list_head *formats, } /* - * XXX If we ever decide to go with string values for - * non-hardcoded terms, here's the place to translate - * them into value. + * Either directly use a numeric term, or try to translate string terms + * using event parameters. */ - pmu_format_value(format->bits, term->val.num, vp, zero); + if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) + val = term->val.num; + else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { + if (strcmp(term->val.str, "?")) { + if (verbose) + pr_info("Invalid sysfs entry %s=%s\n", + term->config, term->val.str); + return -EINVAL; + } + + if (pmu_resolve_param_term(term, head_terms, &val)) + return -EINVAL; + } else + return -EINVAL; + + pmu_format_value(format->bits, val, vp, zero); return 0; } @@ -607,9 +658,10 @@ int perf_pmu__config_terms(struct list_head *formats, { struct parse_events_term *term; - list_for_each_entry(term, head_terms, list) - if (pmu_config_term(formats, attr, term, zero)) + list_for_each_entry(term, head_terms, list) { + if (pmu_config_term(formats, attr, term, head_terms, zero)) return -EINVAL; + } return 0; } @@ -767,10 +819,36 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) set_bit(b, bits); } +static int sub_non_neg(int a, int b) +{ + if (b > a) + return 0; + return a - b; +} + static char *format_alias(char *buf, int len, struct perf_pmu *pmu, struct perf_pmu_alias *alias) { - snprintf(buf, len, "%s/%s/", pmu->name, alias->name); + struct parse_events_term *term; + int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name); + + list_for_each_entry(term, &alias->terms, list) { + if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) + used += snprintf(buf + used, sub_non_neg(len, used), + ",%s=%s", term->config, + term->val.str); + } + + if (sub_non_neg(len, used) > 0) { + buf[used] = '/'; + used++; + } + if (sub_non_neg(len, used) > 0) { + buf[used] = '\0'; + used++; + } else + buf[len - 1] = '\0'; + return buf; } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 94a717bf007d..919937eb0be2 100644 --- 
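[Aside: the sub_non_neg() helper in format_alias() above guards against snprintf()'s return convention: it returns the length it *wanted* to write, so a running total can exceed the buffer and must be clamped before being reused as a size. A hedged sketch with a hypothetical pmu/event pair:]

    #include <stdio.h>

    static int sub_non_neg(int a, int b)
    {
            return b > a ? 0 : a - b;
    }

    static void format_alias_like(char *buf, int len)
    {
            /* 'used' may exceed len if the output was truncated */
            int used = snprintf(buf, len, "%s/%s", "cpu", "some_event");

            if (used < len) /* only append while the prefix actually fit */
                    used += snprintf(buf + used, sub_non_neg(len, used),
                                     ",%s=?", "param");

            if (sub_non_neg(len, used) > 0) {
                    buf[used] = '/';
                    used++;
            }
            if (sub_non_neg(len, used) > 0)
                    buf[used] = '\0';
            else
                    buf[len - 1] = '\0';
    }

A parameterized alias formatted this way shows up with a literal "?" placeholder, and pmu_resolve_param_term() above is what later replaces that placeholder with the value the user supplied in the event string.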
a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs, } for (i = 0; i < ntevs; i++) { - if (tevs[i].point.address) { + if (tevs[i].point.address && !tevs[i].point.retprobe) { tmp = strdup(reloc_sym->name); if (!tmp) return -ENOMEM; @@ -2193,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, return ret; } -static char *looking_function_name; -static int num_matched_functions; - -static int probe_function_filter(struct map *map __maybe_unused, - struct symbol *sym) +static int find_probe_functions(struct map *map, char *name) { - if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) && - strcmp(looking_function_name, sym->name) == 0) { - num_matched_functions++; - return 0; + int found = 0; + struct symbol *sym; + + map__for_each_symbol_by_name(map, name, sym) { + if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) + found++; } - return 1; + + return found; } #define strdup_or_goto(str, label) \ @@ -2222,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, struct kmap *kmap = NULL; struct ref_reloc_sym *reloc_sym = NULL; struct symbol *sym; - struct rb_node *nd; struct probe_trace_event *tev; struct perf_probe_point *pp = &pev->point; struct probe_trace_point *tp; + int num_matched_functions; int ret, i; /* Init maps of given executable or kernel */ @@ -2242,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, * Load matched symbols: Since the different local symbols may have * same name but different addresses, this lists all the symbols. */ - num_matched_functions = 0; - looking_function_name = pp->function; - ret = map__load(map, probe_function_filter); - if (ret || num_matched_functions == 0) { + num_matched_functions = find_probe_functions(map, pp->function); + if (num_matched_functions == 0) { pr_err("Failed to find symbol %s in %s\n", pp->function, target ? : "kernel"); ret = -ENOENT; @@ -2257,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, goto out; } - if (!pev->uprobes) { + if (!pev->uprobes && !pp->retprobe) { kmap = map__kmap(map); reloc_sym = kmap->ref_reloc_sym; if (!reloc_sym) { @@ -2275,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, } ret = 0; - map__for_each_symbol(map, sym, nd) { + + map__for_each_symbol_by_name(map, pp->function, sym) { tev = (*tevs) + ret; tp = &tev->point; if (ret == num_matched_functions) { diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 3dda85ca50c1..d906d0ad5d40 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -768,7 +768,7 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, Py_DECREF(file); goto free_list; } - + Py_DECREF(file); } diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index d808a328f4dc..0c815a40a6e8 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -89,7 +89,7 @@ static void handler_call_die(const char *handler_name) /* * Insert val into into the dictionary and decrement the reference counter. - * This is necessary for dictionaries since PyDict_SetItemString() does not + * This is necessary for dictionaries since PyDict_SetItemString() does not * steal a reference, as opposed to PyTuple_SetItem(). 
*/ static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 5f0e05a76c05..0baf75f12b7c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -274,7 +274,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool) if (tool->id_index == NULL) tool->id_index = process_id_index_stub; } - + static void swap_sample_id_all(union perf_event *event, void *data) { void *end = (void *) event + event->header.size; @@ -1251,9 +1251,9 @@ fetch_mmaped_event(struct perf_session *session, #define NUM_MMAPS 128 #endif -int __perf_session__process_events(struct perf_session *session, - u64 data_offset, u64 data_size, - u64 file_size, struct perf_tool *tool) +static int __perf_session__process_events(struct perf_session *session, + u64 data_offset, u64 data_size, + u64 file_size, struct perf_tool *tool) { int fd = perf_data_file__fd(session->file); u64 head, page_offset, file_offset, file_pos, size; diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index dc26ebf60fe4..6d663dc76404 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -49,9 +49,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset, union perf_event **event_ptr, struct perf_sample *sample); -int __perf_session__process_events(struct perf_session *session, - u64 data_offset, u64 data_size, u64 size, - struct perf_tool *tool); int perf_session__process_events(struct perf_session *session, struct perf_tool *tool); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 9139dda9f9a3..7a39c1ed8d37 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1304,6 +1304,37 @@ static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); } +static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b) +{ + struct hpp_sort_entry *hse; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + return hse->se->se_cmp(a, b); +} + +static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b) +{ + struct hpp_sort_entry *hse; + int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; + return collapse_fn(a, b); +} + +static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b) +{ + struct hpp_sort_entry *hse; + int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + sort_fn = hse->se->se_sort ?: hse->se->se_cmp; + return sort_fn(a, b); +} + static struct hpp_sort_entry * __sort_dimension__alloc_hpp(struct sort_dimension *sd) { @@ -1322,9 +1353,9 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd) hse->hpp.entry = __sort__hpp_entry; hse->hpp.color = NULL; - hse->hpp.cmp = sd->entry->se_cmp; - hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp; - hse->hpp.sort = sd->entry->se_sort ? 
: hse->hpp.collapse; + hse->hpp.cmp = __sort__hpp_cmp; + hse->hpp.collapse = __sort__hpp_collapse; + hse->hpp.sort = __sort__hpp_sort; INIT_LIST_HEAD(&hse->hpp.list); INIT_LIST_HEAD(&hse->hpp.sort_list); diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 06fcd1bf98b6..b24f9d8727a8 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -574,13 +574,16 @@ static int decompress_kmodule(struct dso *dso, const char *name, const char *ext = strrchr(name, '.'); char tmpbuf[] = "/tmp/perf-kmod-XXXXXX"; - if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && - type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) || - type != dso->symtab_type) + if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && + type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP && + type != DSO_BINARY_TYPE__BUILD_ID_CACHE) return -1; - if (!ext || !is_supported_compression(ext + 1)) - return -1; + if (!ext || !is_supported_compression(ext + 1)) { + ext = strrchr(dso->name, '.'); + if (!ext || !is_supported_compression(ext + 1)) + return -1; + } fd = mkstemp(tmpbuf); if (fd < 0) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index c24c5b83156c..a69066865a55 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, const char *name) { struct rb_node *n; + struct symbol_name_rb_node *s; if (symbols == NULL) return NULL; @@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, n = symbols->rb_node; while (n) { - struct symbol_name_rb_node *s; int cmp; s = rb_entry(n, struct symbol_name_rb_node, rb_node); @@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, else if (cmp > 0) n = n->rb_right; else - return &s->sym; + break; } - return NULL; + if (n == NULL) + return NULL; + + /* return first symbol that has same name (if any) */ + for (n = rb_prev(n); n; n = rb_prev(n)) { + struct symbol_name_rb_node *tmp; + + tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); + if (strcmp(tmp->sym.name, s->sym.name)) + break; + + s = tmp; + } + + return &s->sym; } struct symbol *dso__find_symbol(struct dso *dso, @@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym) return symbols__next(sym); } +struct symbol *symbol__next_by_name(struct symbol *sym) +{ + struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); + struct rb_node *n = rb_next(&s->rb_node); + + return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; +} + + /* + * Returns the first symbol that matches @name. 
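[Aside: the rb_prev() walk added to symbols__find_by_name() above exists because a binary search lands on an *arbitrary* node among equal keys; once duplicates are allowed, the caller wants the first of the run. A generic sketch of the rewind, viewing the in-order sequence as a linked list (illustrative types):]

    #include <string.h>

    struct named_node {
            struct named_node *prev;        /* in-order predecessor */
            const char *name;
    };

    /* Given *some* node whose name matched, rewind to the first such node. */
    static struct named_node *first_with_same_name(struct named_node *hit)
    {
            struct named_node *n, *first = hit;

            for (n = hit->prev; n; n = n->prev) {
                    if (strcmp(n->name, first->name))
                            break;
                    first = n;
            }
            return first;
    }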
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 6edf535f65c2..e3c40a520a25 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -266,14 +266,17 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
 				     u64 *fde_count)
 {
 	int ret = -EINVAL, fd;
-	u64 offset;
+	u64 offset = dso->data.frame_offset;
 
-	fd = dso__data_fd(dso, machine);
-	if (fd < 0)
-		return -EINVAL;
+	if (offset == 0) {
+		fd = dso__data_fd(dso, machine);
+		if (fd < 0)
+			return -EINVAL;
 
-	/* Check the .eh_frame section for unwinding info */
-	offset = elf_section_offset(fd, ".eh_frame_hdr");
+		/* Check the .eh_frame section for unwinding info */
+		offset = elf_section_offset(fd, ".eh_frame_hdr");
+		dso->data.frame_offset = offset;
+	}
 
 	if (offset)
 		ret = unwind_spec_ehframe(dso, machine, offset,
@@ -287,14 +290,20 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
 static int read_unwind_spec_debug_frame(struct dso *dso,
 					struct machine *machine, u64 *offset)
 {
-	int fd = dso__data_fd(dso, machine);
+	int fd;
+	u64 ofs = dso->data.frame_offset;
 
-	if (fd < 0)
-		return -EINVAL;
+	if (ofs == 0) {
+		fd = dso__data_fd(dso, machine);
+		if (fd < 0)
+			return -EINVAL;
 
-	/* Check the .debug_frame section for unwinding info */
-	*offset = elf_section_offset(fd, ".debug_frame");
+		/* Check the .debug_frame section for unwinding info */
+		ofs = elf_section_offset(fd, ".debug_frame");
+		dso->data.frame_offset = ofs;
+	}
 
+	*offset = ofs;
 	if (*offset)
 		return 0;
 
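Both readers above memoize the section offset in dso->data.frame_offset, so
the DSO is opened and its ELF headers parsed at most once per object. Note
that 0 doubles as "not probed yet" and as elf_section_offset()'s "section
absent" result (the code treats a zero offset as absence via "if (offset)"),
so a DSO without unwind info is re-probed on every call. A sketch of the
caching pattern, with an extra flag that would remove that ambiguity
(illustrative only, not what the patch does):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the expensive elf_section_offset() lookup. */
	static uint64_t probe_section(const char *name)
	{
		printf("probing %s\n", name);	/* side effect shows probe count */
		return 0;			/* pretend the section is absent */
	}

	struct cache {
		uint64_t offset;	/* 0 = "unknown", like dso->data.frame_offset */
		bool probed;		/* extra flag removes the 0/absent ambiguity */
	};

	static uint64_t cached_offset(struct cache *c, const char *name)
	{
		if (!c->probed) {	/* with only "if (c->offset == 0)", an
					 * absent section would be re-probed on
					 * every call */
			c->offset = probe_section(name);
			c->probed = true;
		}
		return c->offset;
	}

	int main(void)
	{
		struct cache c = { 0 };

		cached_offset(&c, ".eh_frame_hdr");	/* probes once */
		cached_offset(&c, ".eh_frame_hdr");	/* cache hit, no probe */
		return 0;
	}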
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index abe14b7f36e9..bb99cde3f5f9 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -24,7 +24,7 @@
 
 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
 idlecpus=`mpstat | tail -1 | \
-	awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'`
+	awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
 awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
 BEGIN {
 	cpus2use = idlecpus;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index d6cc07fc137f..559e01ac86be 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -30,6 +30,7 @@ else
 	echo Unreadable results directory: $i
 	exit 1
 fi
+. tools/testing/selftests/rcutorture/bin/functions.sh
 
 configfile=`echo $i | sed -e 's/^.*\///'`
 ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
 		title="$title ($ngpsps per second)"
 	fi
 	echo $title
+	nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
+	if test -z "$nclosecalls"
+	then
+		exit 0
+	fi
+	if test "$nclosecalls" -eq 0
+	then
+		exit 0
+	fi
+	# Compute number of close calls per tenth of an hour
+	nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
+	if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
+	then
+		print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+	else
+		print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+	fi
 fi
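To make the thresholds above concrete, assuming $dur holds the run duration in
seconds (as the $(($dur/60)) "minute run" message suggests): a 30-minute run
(dur=1800) that logged nclosecalls=3 gives

	nclosecalls10 = int(3 * 36000 / 1800) = 60

so both tests pass (60 > 5 and 3 > 1) and print_bug fires; a single close call
in the same run yields nclosecalls10 = 20 but fails the "nclosecalls > 1"
test, so it is only reported via print_warning.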
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 8ca9f21f2efc..5236e073919d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -8,9 +8,9 @@
 #
 # Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
 #
-# qemu-args defaults to "-nographic", along with arguments specifying the
-#	number of CPUs and other options generated from
-#	the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
+#	arguments specifying the number of CPUs and other
+#	options generated from the underlying CPU architecture.
 # boot_args defaults to value returned by the per_version_boot_params
 # shell function.
 #
@@ -138,7 +138,7 @@ then
 fi
 
 # Generate -smp qemu argument.
-qemu_args="-nographic $qemu_args"
+qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
 cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
 vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
 	touch $resdir/buildonly
 	exit 0
 fi
+echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
 echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 499d1e598e42..a6b57622c2e5 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -26,12 +26,15 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=$1
+F=$1
 title=$2
+T=/tmp/parse-build.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
 
 . functions.sh
 
-if grep -q CC < $T
+if grep -q CC < $F
 then
 	:
 else
@@ -39,18 +42,21 @@ else
 	exit 1
 fi
 
-if grep -q "error:" < $T
+if grep -q "error:" < $F
 then
 	print_bug $title build errors:
-	grep "error:" < $T
+	grep "error:" < $F
 	exit 2
 fi
-exit 0
 
-if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+grep warning: < $F > $T/warnings
+grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
+grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
+cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
+if test -s $T/rcuwarnings
 then
 	print_warning $title build errors:
-	egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+	cat $T/rcuwarnings
 	exit 2
 fi
 exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index f962ba4cf68b..d8f35cf116be 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
 then
 	print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
 if test -s $T
 then
 	print_warning Assertion failure in $file $title