-rw-r--r--  .mailmap | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/imx8m-clock.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/renesas,du.yaml | 23
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-mt6360.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/mtd/gpmi-nand.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/can/bosch,c_can.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/pci/apple,pcie.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/serial/samsung_uart.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/ufs/cdns,ufshc.yaml | 3
-rw-r--r--  MAINTAINERS | 27
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/mips/include/asm/timex.h | 8
-rw-r--r--  arch/mips/kernel/time.c | 11
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 4
-rw-r--r--  arch/parisc/configs/generic-64bit_defconfig | 3
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 2
-rw-r--r--  arch/parisc/kernel/cache.c | 18
-rw-r--r--  arch/parisc/kernel/kprobes.c | 2
-rw-r--r--  arch/parisc/kernel/patch.c | 25
-rw-r--r--  arch/parisc/kernel/processor.c | 11
-rw-r--r--  arch/parisc/kernel/setup.c | 2
-rw-r--r--  arch/parisc/kernel/time.c | 6
-rw-r--r--  arch/parisc/kernel/traps.c | 2
-rw-r--r--  arch/parisc/math-emu/dfadd.c | 2
-rw-r--r--  arch/parisc/math-emu/dfsub.c | 2
-rw-r--r--  arch/parisc/math-emu/sfadd.c | 2
-rw-r--r--  arch/parisc/math-emu/sfsub.c | 2
-rw-r--r--  arch/powerpc/kernel/vdso/gettimeofday.S | 9
-rw-r--r--  arch/powerpc/platforms/pseries/papr_scm.c | 7
-rw-r--r--  arch/powerpc/platforms/pseries/vas-sysfs.c | 19
-rw-r--r--  arch/powerpc/platforms/pseries/vas.c | 23
-rw-r--r--  arch/powerpc/platforms/pseries/vas.h | 2
-rw-r--r--  arch/riscv/mm/init.c | 21
-rw-r--r--  arch/s390/Makefile | 10
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 11
-rw-r--r--  arch/s390/mm/gmap.c | 7
-rw-r--r--  arch/x86/kernel/fpu/core.c | 67
-rw-r--r--  arch/x86/kvm/cpuid.c | 5
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 34
-rw-r--r--  arch/x86/kvm/mmu/spte.c | 28
-rw-r--r--  arch/x86/kvm/mmu/spte.h | 4
-rw-r--r--  arch/x86/kvm/mmu/tdp_iter.h | 34
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c | 82
-rw-r--r--  arch/x86/kvm/svm/pmu.c | 28
-rw-r--r--  arch/x86/kvm/svm/sev.c | 42
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 2
-rw-r--r--  block/blk-core.c | 4
-rw-r--r--  drivers/block/ataflop.c | 10
-rw-r--r--  drivers/block/floppy.c | 18
-rw-r--r--  drivers/firewire/core-card.c | 3
-rw-r--r--  drivers/firewire/core-cdev.c | 4
-rw-r--r--  drivers/firewire/core-topology.c | 9
-rw-r--r--  drivers/firewire/core-transaction.c | 30
-rw-r--r--  drivers/firewire/sbp2.c | 13
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 7
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 4
-rw-r--r--  drivers/gpio/gpio-visconti.c | 7
-rw-r--r--  drivers/gpio/gpiolib-of.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 11
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 33
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 21
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mcast.c | 81
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 35
-rw-r--r--  drivers/infiniband/sw/siw/siw_cm.c | 7
-rw-r--r--  drivers/mmc/core/mmc.c | 23
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 42
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 5
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 3
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 20
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 10
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 68
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 11
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 11
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 9
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c | 9
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 14
-rw-r--r--  drivers/net/ethernet/sfc/ptp.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 10
-rw-r--r--  drivers/net/phy/phy.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 10
-rw-r--r--  drivers/platform/surface/aggregator/core.c | 2
-rw-r--r--  drivers/platform/surface/surface_gpe.c | 8
-rw-r--r--  drivers/platform/x86/intel/pmt/telemetry.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 175
-rw-r--r--  drivers/ptp/ptp_ocp.c | 2
-rw-r--r--  drivers/s390/block/dasd.c | 18
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 33
-rw-r--r--  drivers/s390/block/dasd_int.h | 14
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 6
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 5
-rw-r--r--  drivers/s390/net/lcs.c | 7
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 5
-rw-r--r--  fs/btrfs/disk-io.c | 11
-rw-r--r--  fs/btrfs/sysfs.c | 3
-rw-r--r--  fs/btrfs/tree-log.c | 39
-rw-r--r--  fs/btrfs/zoned.c | 34
-rw-r--r--  fs/fs-writeback.c | 4
-rw-r--r--  fs/io_uring.c | 7
-rw-r--r--  fs/nfs/nfs4proc.c | 12
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 13
-rw-r--r--  fs/udf/namei.c | 8
-rw-r--r--  include/linux/bio.h | 5
-rw-r--r--  include/linux/blk-mq.h | 1
-rw-r--r--  include/linux/netdev_features.h | 4
-rw-r--r--  include/linux/sunrpc/clnt.h | 1
-rw-r--r--  include/net/bluetooth/hci_core.h | 3
-rw-r--r--  include/net/tc_act/tc_pedit.h | 1
-rw-r--r--  include/soc/mscc/ocelot_vcap.h | 2
-rw-r--r--  include/uapi/linux/rfkill.h | 2
-rw-r--r--  include/uapi/linux/virtio_ids.h | 14
-rw-r--r--  kernel/cgroup/cpuset.c | 7
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/irq/internals.h | 2
-rw-r--r--  kernel/irq/irqdesc.c | 2
-rw-r--r--  kernel/irq/manage.c | 39
-rw-r--r--  kernel/time/timekeeping.c | 4
-rw-r--r--  lib/dim/net_dim.c | 44
-rw-r--r--  mm/readahead.c | 15
-rw-r--r--  net/batman-adv/fragmentation.c | 11
-rw-r--r--  net/bluetooth/hci_core.c | 6
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 4
-rw-r--r--  net/decnet/dn_neigh.c | 3
-rw-r--r--  net/decnet/dn_route.c | 4
-rw-r--r--  net/dsa/port.c | 1
-rw-r--r--  net/ipv4/ping.c | 12
-rw-r--r--  net/ipv4/route.c | 1
-rw-r--r--  net/mac80211/mlme.c | 6
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/netlink/af_netlink.c | 1
-rw-r--r--  net/rds/tcp.c | 12
-rw-r--r--  net/rds/tcp.h | 2
-rw-r--r--  net/rds/tcp_connect.c | 5
-rw-r--r--  net/rds/tcp_listen.c | 5
-rw-r--r--  net/sched/act_pedit.c | 26
-rw-r--r--  net/smc/smc_rx.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_upcall.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 14
-rw-r--r--  net/sunrpc/xprtsock.c | 35
-rw-r--r--  net/tls/tls_device.c | 3
-rw-r--r--  net/wireless/nl80211.c | 18
-rw-r--r--  net/wireless/scan.c | 2
-rw-r--r--  sound/firewire/fireworks/fireworks_hwdep.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 15
-rw-r--r--  sound/soc/atmel/mchp-pdmc.c | 1
-rw-r--r--  sound/soc/codecs/da7219.c | 14
-rw-r--r--  sound/soc/codecs/max98090.c | 5
-rw-r--r--  sound/soc/codecs/rt9120.c | 1
-rw-r--r--  sound/soc/codecs/wm8958-dsp2.c | 8
-rw-r--r--  sound/soc/generic/simple-card-utils.c | 2
-rw-r--r--  sound/soc/meson/aiu-acodec-ctrl.c | 2
-rw-r--r--  sound/soc/meson/aiu-codec-ctrl.c | 2
-rw-r--r--  sound/soc/meson/axg-card.c | 1
-rw-r--r--  sound/soc/meson/axg-tdm-interface.c | 26
-rw-r--r--  sound/soc/meson/g12a-tohdmitx.c | 2
-rw-r--r--  sound/soc/soc-generic-dmaengine-pcm.c | 6
-rw-r--r--  sound/soc/soc-ops.c | 20
-rw-r--r--  sound/soc/sof/sof-pci-dev.c | 5
-rw-r--r--  tools/testing/selftests/net/Makefile | 3
-rw-r--r--  tools/testing/selftests/net/bpf/Makefile | 14
-rw-r--r--  tools/testing/selftests/net/bpf/nat6to4.c | 285
-rwxr-xr-x  tools/testing/selftests/net/fcnal-test.sh | 12
-rwxr-xr-x  tools/testing/selftests/net/udpgro_frglist.sh | 101
196 files changed, 1835 insertions, 780 deletions
diff --git a/.mailmap b/.mailmap
index 93458154ce7d..ecd51ee5fa0c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -45,6 +45,7 @@ Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
+André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
@@ -204,6 +205,7 @@ Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
+Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
diff --git a/Documentation/devicetree/bindings/clock/imx8m-clock.yaml b/Documentation/devicetree/bindings/clock/imx8m-clock.yaml
index 625f573a7b90..458c7645ee68 100644
--- a/Documentation/devicetree/bindings/clock/imx8m-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx8m-clock.yaml
@@ -55,8 +55,6 @@ allOf:
then:
properties:
clocks:
- minItems: 7
- maxItems: 7
items:
- description: 32k osc
- description: 25m osc
@@ -66,8 +64,6 @@ allOf:
- description: ext3 clock input
- description: ext4 clock input
clock-names:
- minItems: 7
- maxItems: 7
items:
- const: ckil
- const: osc_25m
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
index a51baf8a4c76..bb9dbfb9beaf 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
@@ -95,7 +95,6 @@ then:
properties:
clocks:
minItems: 1
- maxItems: 4
items:
- description: Functional clock
- description: EXTAL input clock
@@ -104,7 +103,6 @@ then:
clock-names:
minItems: 1
- maxItems: 4
items:
- const: fck
# The LVDS encoder can use the EXTAL or DU_DOTCLKINx clocks.
@@ -128,12 +126,10 @@ then:
else:
properties:
clocks:
- maxItems: 1
items:
- description: Functional clock
clock-names:
- maxItems: 1
items:
- const: fck
diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml
index 56cedcd6d576..b3e588022082 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,du.yaml
@@ -109,7 +109,6 @@ allOf:
properties:
clocks:
minItems: 1
- maxItems: 3
items:
- description: Functional clock
- description: DU_DOTCLKIN0 input clock
@@ -117,7 +116,6 @@ allOf:
clock-names:
minItems: 1
- maxItems: 3
items:
- const: du.0
- pattern: '^dclkin\.[01]$'
@@ -159,7 +157,6 @@ allOf:
properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -168,7 +165,6 @@ allOf:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: du.0
- const: du.1
@@ -216,7 +212,6 @@ allOf:
properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -225,7 +220,6 @@ allOf:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: du.0
- const: du.1
@@ -271,7 +265,6 @@ allOf:
properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -280,7 +273,6 @@ allOf:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: du.0
- const: du.1
@@ -327,7 +319,6 @@ allOf:
properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -336,7 +327,6 @@ allOf:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: du.0
- const: du.1
@@ -386,7 +376,6 @@ allOf:
properties:
clocks:
minItems: 3
- maxItems: 6
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -397,7 +386,6 @@ allOf:
clock-names:
minItems: 3
- maxItems: 6
items:
- const: du.0
- const: du.1
@@ -448,7 +436,6 @@ allOf:
properties:
clocks:
minItems: 4
- maxItems: 8
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -461,7 +448,6 @@ allOf:
clock-names:
minItems: 4
- maxItems: 8
items:
- const: du.0
- const: du.1
@@ -525,7 +511,6 @@ allOf:
properties:
clocks:
minItems: 3
- maxItems: 6
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -536,7 +521,6 @@ allOf:
clock-names:
minItems: 3
- maxItems: 6
items:
- const: du.0
- const: du.1
@@ -596,7 +580,6 @@ allOf:
properties:
clocks:
minItems: 3
- maxItems: 6
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -607,7 +590,6 @@ allOf:
clock-names:
minItems: 3
- maxItems: 6
items:
- const: du.0
- const: du.1
@@ -666,14 +648,12 @@ allOf:
properties:
clocks:
minItems: 1
- maxItems: 2
items:
- description: Functional clock for DU0
- description: DU_DOTCLKIN0 input clock
clock-names:
minItems: 1
- maxItems: 2
items:
- const: du.0
- const: dclkin.0
@@ -723,7 +703,6 @@ allOf:
properties:
clocks:
minItems: 2
- maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@@ -732,7 +711,6 @@ allOf:
clock-names:
minItems: 2
- maxItems: 4
items:
- const: du.0
- const: du.1
@@ -791,7 +769,6 @@ allOf:
- description: Functional clock
clock-names:
- maxItems: 1
items:
- const: du.0
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
index 4d6074518b5c..fa8da42cb1e6 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
@@ -138,7 +138,6 @@ allOf:
- const: bus
- const: adc
minItems: 1
- maxItems: 2
interrupts:
items:
@@ -170,7 +169,6 @@ allOf:
- const: bus
- const: adc
minItems: 1
- maxItems: 2
interrupts:
items:
diff --git a/Documentation/devicetree/bindings/leds/leds-mt6360.yaml b/Documentation/devicetree/bindings/leds/leds-mt6360.yaml
index b2fe6eb89389..10f95bf1d666 100644
--- a/Documentation/devicetree/bindings/leds/leds-mt6360.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-mt6360.yaml
@@ -43,8 +43,6 @@ patternProperties:
- 4 # LED output FLASH1
- 5 # LED output FLASH2
-unevaluatedProperties: false
-
required:
- compatible
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
index f3f4d5b02744..fe0270207622 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
@@ -202,22 +202,17 @@ allOf:
clocks:
items:
- description: module clock
- minItems: 1
- maxItems: 1
else:
properties:
clocks:
items:
- description: module clock
- description: timeout clock
- minItems: 2
- maxItems: 2
+
clock-names:
items:
- const: sdhci
- const: tmclk
- minItems: 2
- maxItems: 2
required:
- clock-names
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
index 9d764e654e1d..849aeae319a9 100644
--- a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
@@ -147,8 +147,6 @@ allOf:
- description: SoC gpmi io clock
- description: SoC gpmi bch apb clock
clock-names:
- minItems: 2
- maxItems: 2
items:
- const: gpmi_io
- const: gpmi_bch_apb
diff --git a/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml
index 8bad328b184d..51aa89ac7e85 100644
--- a/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml
+++ b/Documentation/devicetree/bindings/net/can/bosch,c_can.yaml
@@ -80,8 +80,6 @@ if:
then:
properties:
interrupts:
- minItems: 4
- maxItems: 4
items:
- description: Error and status IRQ
- description: Message object IRQ
@@ -91,7 +89,6 @@ then:
else:
properties:
interrupts:
- maxItems: 1
items:
- description: Error and status IRQ
diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
index 7f01e15fc81c..daf602ac0d0f 100644
--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
@@ -142,7 +142,6 @@ examples:
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 152 0>;
- max-link-speed = <2>;
#address-cells = <3>;
#size-cells = <2>;
@@ -153,7 +152,6 @@ examples:
device_type = "pci";
reg = <0x800 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 153 0>;
- max-link-speed = <2>;
#address-cells = <3>;
#size-cells = <2>;
@@ -164,7 +162,6 @@ examples:
device_type = "pci";
reg = <0x1000 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 33 0>;
- max-link-speed = <1>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
index cb1aa325336f..435b971dfd9b 100644
--- a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
@@ -102,19 +102,17 @@ if:
then:
properties:
reg:
- maxItems: 2
+ minItems: 2
+
reg-names:
- items:
- - const: "phy"
- - const: "phy-ctrl"
+ minItems: 2
else:
properties:
reg:
maxItems: 1
+
reg-names:
maxItems: 1
- items:
- - const: "phy"
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
index 4b22a9e3a447..f5a121311f61 100644
--- a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
@@ -52,11 +52,19 @@ properties:
hardware supporting it the pull strength in Ohm.
drive-push-pull:
- type: boolean
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ deprecated: true
description: drive actively high and low
drive-open-drain:
- type: boolean
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ const: 1 # No known cases of 0
+ deprecated: true
description: drive with open drain
drive-open-source:
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
index 0b767fec39d8..6b38bd7eb3b4 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
@@ -71,7 +71,6 @@ allOf:
then:
properties:
clock-output-names:
- minItems: 1
maxItems: 1
- if:
@@ -102,7 +101,6 @@ allOf:
properties:
clock-output-names:
minItems: 3
- maxItems: 3
- if:
properties:
@@ -113,16 +111,12 @@ allOf:
then:
properties:
clocks:
- minItems: 3
- maxItems: 3
items:
- description: Bus clock for register access
- description: 24 MHz oscillator
- description: 32 kHz clock from the CCU
clock-names:
- minItems: 3
- maxItems: 3
items:
- const: bus
- const: hosc
@@ -142,7 +136,6 @@ allOf:
properties:
clocks:
minItems: 3
- maxItems: 4
items:
- description: Bus clock for register access
- description: 24 MHz oscillator
@@ -151,7 +144,6 @@ allOf:
clock-names:
minItems: 3
- maxItems: 4
items:
- const: bus
- const: hosc
@@ -174,14 +166,12 @@ allOf:
then:
properties:
interrupts:
- minItems: 1
maxItems: 1
else:
properties:
interrupts:
minItems: 2
- maxItems: 2
required:
- "#clock-cells"
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index d4688e317fc5..901c1e2cea28 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -100,7 +100,6 @@ allOf:
maxItems: 3
clock-names:
minItems: 2
- maxItems: 3
items:
- const: uart
- pattern: '^clk_uart_baud[0-1]$'
@@ -118,11 +117,8 @@ allOf:
then:
properties:
clocks:
- minItems: 2
maxItems: 2
clock-names:
- minItems: 2
- maxItems: 2
items:
- const: uart
- const: clk_uart_baud0
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
index c21c807b667c..34f6ee9de392 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
@@ -89,7 +89,6 @@ allOf:
properties:
dmas:
minItems: 1
- maxItems: 2
items:
- description: RX DMA Channel
- description: TX DMA Channel
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
index 6806f53a4aed..20ea5883b7ff 100644
--- a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
@@ -80,7 +80,6 @@ allOf:
then:
properties:
clocks:
- minItems: 6
items:
- description: AUXCLK clock for McASP used by CPB audio
- description: Parent for CPB_McASP auxclk (for 48KHz)
@@ -107,7 +106,6 @@ allOf:
then:
properties:
clocks:
- maxItems: 4
items:
- description: AUXCLK clock for McASP used by CPB audio
- description: Parent for CPB_McASP auxclk (for 48KHz)
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
index f963204e0b16..1368d90da0e8 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
@@ -67,7 +67,6 @@ then:
properties:
reg:
minItems: 2
- maxItems: 3
items:
- description: TSC1 registers
- description: TSC2 registers
diff --git a/Documentation/devicetree/bindings/ufs/cdns,ufshc.yaml b/Documentation/devicetree/bindings/ufs/cdns,ufshc.yaml
index d227dea368be..fb45f66d6454 100644
--- a/Documentation/devicetree/bindings/ufs/cdns,ufshc.yaml
+++ b/Documentation/devicetree/bindings/ufs/cdns,ufshc.yaml
@@ -43,6 +43,9 @@ properties:
- const: phy_clk
- const: ref_clk
+ power-domains:
+ maxItems: 1
+
reg:
maxItems: 1
diff --git a/MAINTAINERS b/MAINTAINERS
index de6a26a01166..69b597aa4bc7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3571,8 +3571,9 @@ M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
W: http://sourceforge.net/projects/bonding/
+F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
-F: include/net/bonding.h
+F: include/net/bond*
F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@ -5919,7 +5920,7 @@ R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
-R: John Stultz <john.stultz@linaro.org>
+R: John Stultz <jstultz@google.com>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
@@ -6589,7 +6590,7 @@ F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <xinliang.liu@linaro.org>
M: Tian Tao <tiantao6@hisilicon.com>
-R: John Stultz <john.stultz@linaro.org>
+R: John Stultz <jstultz@google.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
@@ -7501,7 +7502,7 @@ F: Documentation/hwmon/f71805f.rst
F: drivers/hwmon/f71805f.c
FADDR2LINE
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
S: Maintained
F: scripts/faddr2line
@@ -8114,7 +8115,7 @@ M: Ingo Molnar <mingo@redhat.com>
R: Peter Zijlstra <peterz@infradead.org>
R: Darren Hart <dvhart@infradead.org>
R: Davidlohr Bueso <dave@stgolabs.net>
-R: André Almeida <andrealmeid@collabora.com>
+R: André Almeida <andrealmeid@igalia.com>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@ -8387,7 +8388,7 @@ M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-gpio@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
F: Documentation/ABI/obsolete/sysfs-gpio
F: Documentation/ABI/testing/gpio-cdev
F: Documentation/admin-guide/gpio/
@@ -8849,7 +8850,7 @@ F: Documentation/devicetree/bindings/net/hisilicon*.txt
F: drivers/net/ethernet/hisilicon/
HIKEY960 ONBOARD USB GPIO HUB DRIVER
-M: John Stultz <john.stultz@linaro.org>
+M: John Stultz <jstultz@google.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/misc/hisi_hikey_usb.c
@@ -10132,7 +10133,7 @@ S: Supported
F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
-M: Luca Coelho <luciano.coelho@intel.com>
+M: Gregory Greenman <gregory.greenman@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
@@ -11349,7 +11350,7 @@ F: drivers/mmc/host/litex_mmc.c
N: litex
LIVE PATCHING
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Jiri Kosina <jikos@kernel.org>
M: Miroslav Benes <mbenes@suse.cz>
M: Petr Mladek <pmladek@suse.com>
@@ -14250,7 +14251,7 @@ F: lib/objagg.c
F: lib/test_objagg.c
OBJTOOL
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: tools/objtool/
@@ -18824,7 +18825,7 @@ F: include/dt-bindings/reset/starfive-jh7100.h
STATIC BRANCH/CALL
M: Peter Zijlstra <peterz@infradead.org>
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Jason Baron <jbaron@akamai.com>
R: Steven Rostedt <rostedt@goodmis.org>
R: Ard Biesheuvel <ardb@kernel.org>
@@ -19833,7 +19834,7 @@ F: drivers/net/wireless/ti/
F: include/linux/wl12xx.h
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
-M: John Stultz <john.stultz@linaro.org>
+M: John Stultz <jstultz@google.com>
M: Thomas Gleixner <tglx@linutronix.de>
R: Stephen Boyd <sboyd@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -21484,7 +21485,7 @@ F: arch/x86/kernel/apic/x2apic_uv_x.c
F: arch/x86/platform/uv/
X86 STACK UNWINDING
-M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: arch/x86/include/asm/unwind*.h
diff --git a/Makefile b/Makefile
index 9a820c525b86..2284d1ca2503 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Superb Owl
# *DOCUMENTATION*
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
index b05bb70a2e46..8026baf46e72 100644
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -40,9 +40,9 @@
typedef unsigned int cycles_t;
/*
- * On R4000/R4400 before version 5.0 an erratum exists such that if the
- * cycle counter is read in the exact moment that it is matching the
- * compare register, no interrupt will be generated.
+ * On R4000/R4400 an erratum exists such that if the cycle counter is
+ * read in the exact moment that it is matching the compare register,
+ * no interrupt will be generated.
*
* There is a suggested workaround and also the erratum can't strike if
* the compare interrupt isn't being used as the clock source device.
@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
if (!__builtin_constant_p(cpu_has_counter))
asm volatile("" : "=m" (cpu_data[0].options));
if (likely(cpu_has_counter &&
- prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
+ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
return 1;
else
return 0;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index caa01457dce6..ed339d7979f3 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4400MC:
/*
* The published errata for the R4400 up to 3.0 say the CPU
- * has the mfc0 from count bug.
+ * has the mfc0 from count bug. This seems the last version
+ * produced.
*/
- if ((current_cpu_data.processor_id & 0xff) <= 0x30)
- return 1;
-
- /*
- * we assume newer revisions are ok
- */
- return 0;
+ return 1;
}
return 0;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 52e550b45692..bd22578859d0 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -38,6 +38,7 @@ config PARISC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ARCH_TOPOLOGY if SMP
+ select GENERIC_CPU_DEVICES if !SMP
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index a5fee10d76ee..8ce0ae370680 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -6,6 +6,9 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
@@ -47,7 +50,6 @@ CONFIG_PARPORT=y
CONFIG_PARPORT_PC=m
CONFIG_PARPORT_1284=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=6144
CONFIG_BLK_DEV_SD=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 1b8fd80cbe7f..57501b0aed92 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -16,6 +16,7 @@ CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_CGROUP_PIDS=y
CONFIG_CPUSETS=y
+CONFIG_USER_NS=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -267,9 +268,9 @@ CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRC_CCITT=m
CONFIG_LIBCRC32C=y
CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 939db6fe620b..69765a6dbe89 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -160,7 +160,7 @@ extern void __update_cache(pte_t pte);
#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS (BITS_PER_LONG)
-#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
+#define MAX_ADDRESS (1ULL << MAX_ADDRBITS)
#define SPACEID_SHIFT 0
#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 23348199f3f8..e7911225a4f8 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -403,7 +403,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
- unsigned long threshold, threshold2;
+ unsigned long threshold;
alltime = mfctl(16);
flush_data_cache();
@@ -418,20 +418,8 @@ void __init parisc_setup_cache_timing(void)
alltime, size, rangetime);
threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
-
- /*
- * The threshold computed above isn't very reliable since the
- * flush times depend greatly on the percentage of dirty lines
- * in the flush range. Further, the whole cache time doesn't
- * include the time to refill lines that aren't in the mm/vma
- * being flushed. By timing glibc build and checks on mako cpus,
- * the following formula seems to work reasonably well. The
- * value from the timing calculation is too small, and increases
- * build and check times by almost a factor two.
- */
- threshold2 = cache_info.dc_size * num_online_cpus();
- if (threshold2 > threshold)
- threshold = threshold2;
+ if (threshold > cache_info.dc_size)
+ threshold = cache_info.dc_size;
if (threshold)
parisc_cache_flush_threshold = threshold;
printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
index 3343d2fb7889..6e0b86652f30 100644
--- a/arch/parisc/kernel/kprobes.c
+++ b/arch/parisc/kernel/kprobes.c
@@ -152,7 +152,7 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
/* for absolute branch instructions we can copy iaoq_b. for relative
* branch instructions we need to calculate the new address based on the
* difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
- * modificationt because it's based on our ainsn.insn address.
+ * modifications because it's based on our ainsn.insn address.
*/
if (p->post_handler)
diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
index e59574f65e64..80a0ab372802 100644
--- a/arch/parisc/kernel/patch.c
+++ b/arch/parisc/kernel/patch.c
@@ -40,7 +40,10 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
*need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
- raw_spin_lock_irqsave(&patch_lock, *flags);
+ if (flags)
+ raw_spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
@@ -49,7 +52,10 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
clear_fixmap(fixmap);
- raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ if (flags)
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
}
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
@@ -61,9 +67,8 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
int mapped;
/* Make sure we don't have any aliases in cache */
- flush_kernel_dcache_range_asm(start, end);
- flush_kernel_icache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_kernel_vmap_range(addr, len);
+ flush_icache_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
@@ -76,10 +81,8 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
* We're crossing a page boundary, so
* need to remap
*/
- flush_kernel_dcache_range_asm((unsigned long)fixmap,
- (unsigned long)p);
- flush_tlb_kernel_range((unsigned long)fixmap,
- (unsigned long)p);
+ flush_kernel_vmap_range((void *)fixmap,
+ (p-fixmap) * sizeof(*p));
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
@@ -87,10 +90,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
}
}
- flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
- flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
+ flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
+ flush_icache_range(start, end);
}
void __kprobes __patch_text(void *addr, u32 insn)
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index d98692115221..26eb568f8b96 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -171,6 +171,7 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
+ set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);
#ifdef CONFIG_SMP
@@ -419,8 +420,7 @@ show_cpuinfo (struct seq_file *m, void *v)
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
- seq_printf(m, "model\t\t: %s\n"
- "model name\t: %s\n",
+ seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
cpuinfo->dev ?
cpuinfo->dev->name : "Unknown");
@@ -461,6 +461,13 @@ static struct parisc_driver cpu_driver __refdata = {
*/
void __init processor_init(void)
{
+ unsigned int cpu;
+
reset_cpu_topology();
+
+ /* reset possible mask. We will mark those which are possible. */
+ for_each_possible_cpu(cpu)
+ set_cpu_possible(cpu, false);
+
register_parisc_driver(&cpu_driver);
}
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index b91cb45ffd4e..f005ddedb50e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PA11
dma_ops_init();
#endif
+
+ clear_sched_clock_stable();
}
/*
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index bb27dfeeddfc..9714fbd7c42d 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -251,13 +251,9 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
- * The cr16 interval timers are not syncronized across CPUs, even if
- * they share the same socket.
+ * The cr16 interval timers are not synchronized across CPUs.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
- /* mark sched_clock unstable */
- clear_sched_clock_stable();
-
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index a6e61cf2cad0..b78f1b9d45c1 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -469,7 +469,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
* panic notifiers, and we should call panic
* directly from the location that we wish.
* e.g. We should not call panic from
- * parisc_terminate, but rather the oter way around.
+ * parisc_terminate, but rather the other way around.
* This hack works, prints the panic message twice,
* and it enables reboot timers!
*/
diff --git a/arch/parisc/math-emu/dfadd.c b/arch/parisc/math-emu/dfadd.c
index ec487e07f004..00e561d4aa55 100644
--- a/arch/parisc/math-emu/dfadd.c
+++ b/arch/parisc/math-emu/dfadd.c
@@ -253,7 +253,7 @@ dbl_fadd(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
- * with denomalized numbers. */
+ * with denormalized numbers. */
}
else
{
diff --git a/arch/parisc/math-emu/dfsub.c b/arch/parisc/math-emu/dfsub.c
index c4f30acf2d48..4f03782284bd 100644
--- a/arch/parisc/math-emu/dfsub.c
+++ b/arch/parisc/math-emu/dfsub.c
@@ -256,7 +256,7 @@ dbl_fsub(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
- * with denomalized numbers. */
+ * with denormalized numbers. */
}
else
{
diff --git a/arch/parisc/math-emu/sfadd.c b/arch/parisc/math-emu/sfadd.c
index 838758279d5b..9b98c874dfac 100644
--- a/arch/parisc/math-emu/sfadd.c
+++ b/arch/parisc/math-emu/sfadd.c
@@ -249,7 +249,7 @@ sgl_fadd(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
- * with denomalized numbers. */
+ * with denormalized numbers. */
}
else
{
diff --git a/arch/parisc/math-emu/sfsub.c b/arch/parisc/math-emu/sfsub.c
index 583d3ace4634..29d9eed09d12 100644
--- a/arch/parisc/math-emu/sfsub.c
+++ b/arch/parisc/math-emu/sfsub.c
@@ -252,7 +252,7 @@ sgl_fsub(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
- * with denomalized numbers. */
+ * with denormalized numbers. */
}
else
{
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
index eb9c81e1c218..0c4ecc8fec5a 100644
--- a/arch/powerpc/kernel/vdso/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -22,12 +22,15 @@
.macro cvdso_call funct call_time=0
.cfi_startproc
PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+ .cfi_adjust_cfa_offset PPC_MIN_STKFRM
mflr r0
- .cfi_register lr, r0
PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+ .cfi_adjust_cfa_offset PPC_MIN_STKFRM
PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+ .cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF
#ifdef __powerpc64__
PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+ .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
#endif
get_datapage r5
.ifeq \call_time
@@ -39,13 +42,15 @@
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+ .cfi_restore r2
#endif
.ifeq \call_time
cmpwi r3, 0
.endif
mtlr r0
- .cfi_restore lr
addi r1, r1, 2 * PPC_MIN_STKFRM
+ .cfi_restore lr
+ .cfi_def_cfa_offset 0
crclr so
.ifeq \call_time
beqlr+
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f58728d5f10d..39962c905542 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -462,7 +462,6 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
- char *statid;
int index, rc, count;
u32 available_events;
@@ -493,14 +492,12 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
for (index = 0, stat = stats->scm_statistic, count = 0;
index < available_events; index++, ++stat) {
- statid = kzalloc(strlen(stat->stat_id) + 1, GFP_KERNEL);
- if (!statid) {
+ p->nvdimm_events_map[count] = kmemdup_nul(stat->stat_id, 8, GFP_KERNEL);
+ if (!p->nvdimm_events_map[count]) {
rc = -ENOMEM;
goto out_nvdimm_events_map;
}
- strcpy(statid, stat->stat_id);
- p->nvdimm_events_map[count] = statid;
count++;
}
p->nvdimm_events_map[count] = NULL;
diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c
index 909535ca513a..ec65586cbeb3 100644
--- a/arch/powerpc/platforms/pseries/vas-sysfs.c
+++ b/arch/powerpc/platforms/pseries/vas-sysfs.c
@@ -27,22 +27,31 @@ struct vas_caps_entry {
/*
* This function is used to get the notification from the drmgr when
- * QoS credits are changed. Though receiving the target total QoS
- * credits here, get the official QoS capabilities from the hypervisor.
+ * QoS credits are changed.
*/
-static ssize_t update_total_credits_trigger(struct vas_cop_feat_caps *caps,
+static ssize_t update_total_credits_store(struct vas_cop_feat_caps *caps,
const char *buf, size_t count)
{
int err;
u16 creds;
err = kstrtou16(buf, 0, &creds);
+ /*
+ * The user space interface from the management console
+ * notifies OS with the new QoS credits and then the
+ * hypervisor. So OS has to use this new credits value
+ * and reconfigure VAS windows (close or reopen depends
+ * on the credits available) instead of depending on VAS
+ * QoS capabilities from the hypervisor.
+ */
if (!err)
- err = vas_reconfig_capabilties(caps->win_type);
+ err = vas_reconfig_capabilties(caps->win_type, creds);
if (err)
return -EINVAL;
+ pr_info("Set QoS total credits %u\n", creds);
+
return count;
}
@@ -92,7 +101,7 @@ VAS_ATTR_RO(nr_total_credits);
VAS_ATTR_RO(nr_used_credits);
static struct vas_sysfs_entry update_total_credits_attribute =
- __ATTR(update_total_credits, 0200, NULL, update_total_credits_trigger);
+ __ATTR(update_total_credits, 0200, NULL, update_total_credits_store);
static struct attribute *vas_def_capab_attrs[] = {
&nr_total_credits_attribute.attr,
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index 1f59d78c77a1..ec643bbdb67f 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -779,10 +779,10 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
* changes. Reconfig window configurations based on the credits
* availability from this new capabilities.
*/
-int vas_reconfig_capabilties(u8 type)
+int vas_reconfig_capabilties(u8 type, int new_nr_creds)
{
struct vas_cop_feat_caps *caps;
- int old_nr_creds, new_nr_creds;
+ int old_nr_creds;
struct vas_caps *vcaps;
int rc = 0, nr_active_wins;
@@ -795,12 +795,6 @@ int vas_reconfig_capabilties(u8 type)
caps = &vcaps->caps;
mutex_lock(&vas_pseries_mutex);
- rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, vcaps->feat,
- (u64)virt_to_phys(&hv_cop_caps));
- if (rc)
- goto out;
-
- new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
old_nr_creds = atomic_read(&caps->nr_total_credits);
@@ -832,7 +826,6 @@ int vas_reconfig_capabilties(u8 type)
false);
}
-out:
mutex_unlock(&vas_pseries_mutex);
return rc;
}
@@ -850,7 +843,7 @@ static int pseries_vas_notifier(struct notifier_block *nb,
struct of_reconfig_data *rd = data;
struct device_node *dn = rd->dn;
const __be32 *intserv = NULL;
- int len, rc = 0;
+ int new_nr_creds, len, rc = 0;
if ((action == OF_RECONFIG_ATTACH_NODE) ||
(action == OF_RECONFIG_DETACH_NODE))
@@ -862,7 +855,15 @@ static int pseries_vas_notifier(struct notifier_block *nb,
if (!intserv)
return NOTIFY_OK;
- rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE);
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+ vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+ (u64)virt_to_phys(&hv_cop_caps));
+ if (!rc) {
+ new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
+ rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE,
+ new_nr_creds);
+ }
+
if (rc)
pr_err("Failed reconfig VAS capabilities with DLPAR\n");
diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
index 34177881e998..333ffa2f9f42 100644
--- a/arch/powerpc/platforms/pseries/vas.h
+++ b/arch/powerpc/platforms/pseries/vas.h
@@ -135,7 +135,7 @@ struct pseries_vas_window {
};
int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps);
-int vas_reconfig_capabilties(u8 type);
+int vas_reconfig_capabilties(u8 type, int new_nr_creds);
int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps);
#ifdef CONFIG_PPC_VAS
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b0793dc0c291..05ed641a1134 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -208,8 +208,25 @@ static void __init setup_bootmem(void)
* early_init_fdt_reserve_self() since __pa() does
* not work for DTB pointers that are fixmap addresses
*/
- if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
- memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+ if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
+ /*
+ * In case the DTB is not located in a memory region we won't
+ * be able to locate it later on via the linear mapping and
+ * get a segfault when accessing it via __va(dtb_early_pa).
+ * To avoid this situation copy DTB to a memory region.
+ * Note that memblock_phys_alloc will also reserve DTB region.
+ */
+ if (!memblock_is_memory(dtb_early_pa)) {
+ size_t fdt_size = fdt_totalsize(dtb_early_va);
+ phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
+ void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
+
+ memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
+ early_memunmap(new_dtb_early_va, fdt_size);
+ _dtb_early_pa = new_dtb_early_pa;
+ } else
+ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+ }
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e441b60b1812..df325eacf62d 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -30,6 +30,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+
+ifdef CONFIG_CC_IS_GCC
+ ifeq ($(call cc-ifversion, -ge, 1200, y), y)
+ ifeq ($(call cc-ifversion, -lt, 1300, y), y)
+ KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
+ endif
+ endif
+endif
+
UTS_MACHINE := s390x
STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
CHECKFLAGS += -D__s390__ -D__s390x__
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index da3dabda1a12..76ad6408cb2c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2384,7 +2384,16 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
return -EINVAL;
if (mop->size > MEM_OP_MAX_SIZE)
return -E2BIG;
- if (kvm_s390_pv_is_protected(kvm))
+ /*
+ * This is technically a heuristic only, if the kvm->lock is not
+ * taken, it is not guaranteed that the vm is/remains non-protected.
+ * This is ok from a kernel perspective, wrongdoing is detected
+ * on the access, -EFAULT is returned and the vm may crash the
+ * next time it accesses the memory in question.
+ * There is no sane usecase to do switching and a memop on two
+ * different CPUs at the same time.
+ */
+ if (kvm_s390_pv_get_handle(kvm))
return -EINVAL;
if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
if (access_key_invalid(mop->key))
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index af03cacf34ec..1ac73917a8d3 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1183,6 +1183,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table);
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
struct gmap_rmap *rmap)
{
+ struct gmap_rmap *temp;
void __rcu **slot;
BUG_ON(!gmap_is_shadow(sg));
@@ -1190,6 +1191,12 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
if (slot) {
rmap->next = radix_tree_deref_slot_protected(slot,
&sg->guest_table_lock);
+ for (temp = rmap->next; temp; temp = temp->next) {
+ if (temp->raddr == rmap->raddr) {
+ kfree(rmap);
+ return;
+ }
+ }
radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
} else {
rmap->next = NULL;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index c049561f373a..e28ab0ecc537 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -41,17 +41,7 @@ struct fpu_state_config fpu_user_cfg __ro_after_init;
*/
struct fpstate init_fpstate __ro_after_init;
-/*
- * Track whether the kernel is using the FPU state
- * currently.
- *
- * This flag is used:
- *
- * - by IRQ context code to potentially use the FPU
- * if it's unused.
- *
- * - to debug kernel_fpu_begin()/end() correctness
- */
+/* Track in-kernel FPU usage */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
@@ -59,42 +49,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
-static bool kernel_fpu_disabled(void)
-{
- return this_cpu_read(in_kernel_fpu);
-}
-
-static bool interrupted_kernel_fpu_idle(void)
-{
- return !kernel_fpu_disabled();
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static bool interrupted_user_mode(void)
-{
- struct pt_regs *regs = get_irq_regs();
- return regs && user_mode(regs);
-}
-
/*
* Can we use the FPU in kernel mode with the
* whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
*/
bool irq_fpu_usable(void)
{
- return !in_interrupt() ||
- interrupted_user_mode() ||
- interrupted_kernel_fpu_idle();
+ if (WARN_ON_ONCE(in_nmi()))
+ return false;
+
+ /* In kernel FPU usage already active? */
+ if (this_cpu_read(in_kernel_fpu))
+ return false;
+
+ /*
+ * When not in NMI or hard interrupt context, FPU can be used in:
+ *
+ * - Task context except from within fpregs_lock()'ed critical
+ * regions.
+ *
+ * - Soft interrupt processing context which cannot happen
+ * while in a fpregs_lock()'ed critical region.
+ */
+ if (!in_hardirq())
+ return true;
+
+ /*
+ * In hard interrupt context it's safe when soft interrupts
+ * are enabled, which means the interrupt did not hit in
+ * a fpregs_lock()'ed critical region.
+ */
+ return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 598334ed5fbc..0c1ba6aa0765 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -887,6 +887,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid10_eax eax;
union cpuid10_edx edx;
+ if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+
perf_get_x86_pmu_capability(&cap);
/*
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 64a2a7e2be90..311e4e1d7870 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -473,30 +473,6 @@ retry:
}
#endif
-static bool spte_has_volatile_bits(u64 spte)
-{
- if (!is_shadow_present_pte(spte))
- return false;
-
- /*
- * Always atomically update spte if it can be updated
- * out of mmu-lock, it can ensure dirty bit is not lost,
- * also, it can help us to get a stable is_writable_pte()
- * to ensure tlb flush is not missed.
- */
- if (spte_can_locklessly_be_made_writable(spte) ||
- is_access_track_spte(spte))
- return true;
-
- if (spte_ad_enabled(spte)) {
- if ((spte & shadow_accessed_mask) == 0 ||
- (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
- return true;
- }
-
- return false;
-}
-
/* Rules for using mmu_spte_set:
* Set the sptep from nonpresent to present.
* Note: the sptep being assigned *must* be either not present
@@ -557,7 +533,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
* we always atomically update it, see the comments in
* spte_has_volatile_bits().
*/
- if (spte_can_locklessly_be_made_writable(old_spte) &&
+ if (is_mmu_writable_spte(old_spte) &&
!is_writable_pte(new_spte))
flush = true;
@@ -591,7 +567,8 @@ static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
u64 old_spte = *sptep;
int level = sptep_to_sp(sptep)->role.level;
- if (!spte_has_volatile_bits(old_spte))
+ if (!is_shadow_present_pte(old_spte) ||
+ !spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
else
old_spte = __update_clear_spte_slow(sptep, 0ull);
@@ -1187,7 +1164,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
u64 spte = *sptep;
if (!is_writable_pte(spte) &&
- !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
+ !(pt_protect && is_mmu_writable_spte(spte)))
return false;
rmap_printk("spte %p %llx\n", sptep, *sptep);
@@ -3196,8 +3173,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* be removed in the fast path only if the SPTE was
* write-protected for dirty-logging or access tracking.
*/
- if (fault->write &&
- spte_can_locklessly_be_made_writable(spte)) {
+ if (fault->write && is_mmu_writable_spte(spte)) {
new_spte |= PT_WRITABLE_MASK;
/*
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4739b53c9734..e5c0b6db6f2c 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -90,6 +90,34 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
E820_TYPE_RAM);
}
+/*
+ * Returns true if the SPTE has bits that may be set without holding mmu_lock.
+ * The caller is responsible for checking if the SPTE is shadow-present, and
+ * for determining whether or not the caller cares about non-leaf SPTEs.
+ */
+bool spte_has_volatile_bits(u64 spte)
+{
+ /*
+ * Always atomically update spte if it can be updated
+ * out of mmu-lock, it can ensure dirty bit is not lost,
+ * also, it can help us to get a stable is_writable_pte()
+ * to ensure tlb flush is not missed.
+ */
+ if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
+ return true;
+
+ if (is_access_track_spte(spte))
+ return true;
+
+ if (spte_ad_enabled(spte)) {
+ if (!(spte & shadow_accessed_mask) ||
+ (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
+ return true;
+ }
+
+ return false;
+}
+
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index e4abeb5df1b1..80ab0f5cff01 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -390,7 +390,7 @@ static inline void check_spte_writable_invariants(u64 spte)
"kvm: Writable SPTE is not MMU-writable: %llx", spte);
}
-static inline bool spte_can_locklessly_be_made_writable(u64 spte)
+static inline bool is_mmu_writable_spte(u64 spte)
{
return spte & shadow_mmu_writable_mask;
}
@@ -404,6 +404,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
return gen;
}
+bool spte_has_volatile_bits(u64 spte);
+
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index b1eaf6ec0e0b..f0af385c56e0 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include "mmu.h"
+#include "spte.h"
/*
* TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
@@ -17,9 +18,38 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
return READ_ONCE(*rcu_dereference(sptep));
}
-static inline void kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 val)
+
+static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
+{
+ return xchg(rcu_dereference(sptep), new_spte);
+}
+
+static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
+{
+ WRITE_ONCE(*rcu_dereference(sptep), new_spte);
+}
+
+static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
+ u64 new_spte, int level)
{
- WRITE_ONCE(*rcu_dereference(sptep), val);
+ /*
+ * Atomically write the SPTE if it is a shadow-present, leaf SPTE with
+ * volatile bits, i.e. has bits that can be set outside of mmu_lock.
+ * The Writable bit can be set by KVM's fast page fault handler, and
+ * Accessed and Dirty bits can be set by the CPU.
+ *
+ * Note, non-leaf SPTEs do have Accessed bits and those bits are
+ * technically volatile, but KVM doesn't consume the Accessed bit of
+ * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit. This
+ * logic needs to be reassessed if KVM were to use non-leaf Accessed
+ * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
+ */
+ if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) &&
+ spte_has_volatile_bits(old_spte))
+ return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);
+
+ __kvm_tdp_mmu_write_spte(sptep, new_spte);
+ return old_spte;
}
/*
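For readers following the locking argument, here is a small userspace model (C11 atomics; SPTE_DIRTY is a made-up bit, none of this is kernel code) of why kvm_tdp_mmu_write_spte() escalates to an atomic exchange for shadow-present leaf SPTEs with volatile bits: the exchange hands any concurrently-set bit back to the writer, whereas a plain store would silently drop it.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_DIRTY (1ull << 9)          /* stand-in bit, not the real SPTE layout */

static _Atomic uint64_t spte = 0x1234;  /* pretend shadow-present leaf SPTE */

int main(void)
{
        /* Another agent (CPU A/D assist, other vCPU) sets a volatile bit... */
        atomic_fetch_or(&spte, SPTE_DIRTY);

        /* ...while this thread replaces the SPTE.  The exchange returns the
         * old value, so the Dirty bit can still be accounted for; a plain
         * store would have discarded it.
         */
        uint64_t old = atomic_exchange(&spte, 0);

        printf("old spte %#llx dirty=%d\n",
               (unsigned long long)old, !!(old & SPTE_DIRTY));
        return 0;
}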
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index edc68538819b..922b06bf4b94 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -426,9 +426,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
tdp_mmu_unlink_sp(kvm, sp, shared);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
- u64 *sptep = rcu_dereference(pt) + i;
+ tdp_ptep_t sptep = pt + i;
gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
- u64 old_child_spte;
+ u64 old_spte;
if (shared) {
/*
@@ -440,8 +440,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
* value to the removed SPTE value.
*/
for (;;) {
- old_child_spte = xchg(sptep, REMOVED_SPTE);
- if (!is_removed_spte(old_child_spte))
+ old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
+ if (!is_removed_spte(old_spte))
break;
cpu_relax();
}
@@ -455,23 +455,43 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
* are guarded by the memslots generation, not by being
* unreachable.
*/
- old_child_spte = READ_ONCE(*sptep);
- if (!is_shadow_present_pte(old_child_spte))
+ old_spte = kvm_tdp_mmu_read_spte(sptep);
+ if (!is_shadow_present_pte(old_spte))
continue;
/*
- * Marking the SPTE as a removed SPTE is not
- * strictly necessary here as the MMU lock will
- * stop other threads from concurrently modifying
- * this SPTE. Using the removed SPTE value keeps
- * the two branches consistent and simplifies
- * the function.
+ * Use the common helper instead of a raw WRITE_ONCE as
+ * the SPTE needs to be updated atomically if it can be
+ * modified by a different vCPU outside of mmu_lock.
+ * Even though the parent SPTE is !PRESENT, the TLB
+ * hasn't yet been flushed, and both Intel and AMD
+ * document that A/D assists can use upper-level PxE
+ * entries that are cached in the TLB, i.e. the CPU can
+ * still access the page and mark it dirty.
+ *
+ * No retry is needed in the atomic update path as the
+ * sole concern is dropping a Dirty bit, i.e. no other
+ * task can zap/remove the SPTE as mmu_lock is held for
+ * write. Marking the SPTE as a removed SPTE is not
+ * strictly necessary for the same reason, but using
+ * the removed SPTE value keeps the shared/exclusive
+ * paths consistent and allows the handle_changed_spte()
+ * call below to hardcode the new value to REMOVED_SPTE.
+ *
+ * Note, even though dropping a Dirty bit is the only
+ * scenario where a non-atomic update could result in a
+ * functional bug, simply checking the Dirty bit isn't
+ * sufficient as a fast page fault could read the upper
+ * level SPTE before it is zapped, and then make this
+ * target SPTE writable, resume the guest, and set the
+ * Dirty bit between reading the SPTE above and writing
+ * it here.
*/
- WRITE_ONCE(*sptep, REMOVED_SPTE);
+ old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
+ REMOVED_SPTE, level);
}
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
- old_child_spte, REMOVED_SPTE, level,
- shared);
+ old_spte, REMOVED_SPTE, level, shared);
}
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -667,14 +687,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
KVM_PAGES_PER_HPAGE(iter->level));
/*
- * No other thread can overwrite the removed SPTE as they
- * must either wait on the MMU lock or use
- * tdp_mmu_set_spte_atomic which will not overwrite the
- * special removed SPTE value. No bookkeeping is needed
- * here since the SPTE is going from non-present
- * to non-present.
+ * No other thread can overwrite the removed SPTE as they must either
+ * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
+ * overwrite the special removed SPTE value. No bookkeeping is needed
+ * here since the SPTE is going from non-present to non-present. Use
+ * the raw write helper to avoid an unnecessary check on volatile bits.
*/
- kvm_tdp_mmu_write_spte(iter->sptep, 0);
+ __kvm_tdp_mmu_write_spte(iter->sptep, 0);
return 0;
}
@@ -699,10 +718,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* unless performing certain dirty logging operations.
* Leaving record_dirty_log unset in that case prevents page
* writes from being double counted.
+ *
+ * Returns the old SPTE value, which _may_ be different from @old_spte if the
+ * SPTE had volatile bits.
*/
-static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
- u64 old_spte, u64 new_spte, gfn_t gfn, int level,
- bool record_acc_track, bool record_dirty_log)
+static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
+ u64 old_spte, u64 new_spte, gfn_t gfn, int level,
+ bool record_acc_track, bool record_dirty_log)
{
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -715,7 +737,7 @@ static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
*/
WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
- kvm_tdp_mmu_write_spte(sptep, new_spte);
+ old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
@@ -724,6 +746,7 @@ static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
if (record_dirty_log)
handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
new_spte, level);
+ return old_spte;
}
static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
@@ -732,9 +755,10 @@ static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
{
WARN_ON_ONCE(iter->yielded);
- __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte,
- new_spte, iter->gfn, iter->level,
- record_acc_track, record_dirty_log);
+ iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
+ iter->old_spte, new_spte,
+ iter->gfn, iter->level,
+ record_acc_track, record_dirty_log);
}
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index b14860863c39..16a5ebb420cf 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
+/* duplicated from amd_f17h_perfmon_event_map. */
+static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
+ [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+ [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
+ [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
+ [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+ [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+ [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+};
+
+/* amd_pmc_perf_hw_id depends on these being the same size */
+static_assert(ARRAY_SIZE(amd_event_mapping) ==
+ ARRAY_SIZE(amd_f17h_event_mapping));
+
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
+ struct kvm_event_hw_type_mapping *event_mapping;
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
if (WARN_ON(pmc_is_fixed(pmc)))
return PERF_COUNT_HW_MAX;
+ if (guest_cpuid_family(pmc->vcpu) >= 0x17)
+ event_mapping = amd_f17h_event_mapping;
+ else
+ event_mapping = amd_event_mapping;
+
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
- if (amd_event_mapping[i].eventsel == event_select
- && amd_event_mapping[i].unit_mask == unit_mask)
+ if (event_mapping[i].eventsel == event_select
+ && event_mapping[i].unit_mask == unit_mask)
break;
if (i == ARRAY_SIZE(amd_event_mapping))
return PERF_COUNT_HW_MAX;
- return amd_event_mapping[i].event_type;
+ return event_mapping[i].event_type;
}
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
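As a side note, the shape of the table-selection change above can be modelled in a few lines of plain C; the family threshold is the one used above, but the table contents, hw_id values and names below are placeholders, not the real perfmon encodings.

#include <assert.h>
#include <stdio.h>

struct evt_map { unsigned char eventsel, unit_mask; int hw_id; };

static const struct evt_map legacy_map[] = {
        { 0x76, 0x00, 0 },      /* placeholder: cycles */
        { 0xc0, 0x00, 1 },      /* placeholder: instructions */
};

static const struct evt_map f17h_map[] = {
        { 0x76, 0x00, 0 },
        { 0xc0, 0x00, 1 },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* The lookup below walks a single length for both tables. */
static_assert(ARRAY_SIZE(legacy_map) == ARRAY_SIZE(f17h_map),
              "event tables must stay the same size");

static int perf_hw_id(unsigned int family, unsigned char evt, unsigned char umask)
{
        const struct evt_map *map = family >= 0x17 ? f17h_map : legacy_map;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(legacy_map); i++)
                if (map[i].eventsel == evt && map[i].unit_mask == umask)
                        return map[i].hw_id;
        return -1;      /* no match */
}

int main(void)
{
        printf("family 0x17, event 0xc0/0x00 -> %d\n",
               perf_hw_id(0x17, 0xc0, 0x00));
        return 0;
}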
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0ad70c12c7c3..7c392873626f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
atomic_set_release(&src_sev->migration_in_progress, 0);
}
+/* vCPU mutex subclasses. */
+enum sev_migration_role {
+ SEV_MIGRATION_SOURCE = 0,
+ SEV_MIGRATION_TARGET,
+ SEV_NR_MIGRATION_ROLES,
+};
-static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+static int sev_lock_vcpus_for_migration(struct kvm *kvm,
+ enum sev_migration_role role)
{
struct kvm_vcpu *vcpu;
unsigned long i, j;
+ bool first = true;
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (mutex_lock_killable(&vcpu->mutex))
+ if (mutex_lock_killable_nested(&vcpu->mutex, role))
goto out_unlock;
+
+ if (first) {
+ /*
+ * Reset the role to one that avoids colliding with
+ * the role used for the first vcpu mutex.
+ */
+ role = SEV_NR_MIGRATION_ROLES;
+ first = false;
+ } else {
+ mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+ }
}
return 0;
out_unlock:
+
+ first = true;
kvm_for_each_vcpu(j, vcpu, kvm) {
if (i == j)
break;
+ if (first)
+ first = false;
+ else
+ mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+
mutex_unlock(&vcpu->mutex);
}
return -EINTR;
@@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
+ bool first = true;
kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (first)
+ first = false;
+ else
+ mutex_acquire(&vcpu->mutex.dep_map,
+ SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
+
mutex_unlock(&vcpu->mutex);
}
}
@@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
charged = true;
}
- ret = sev_lock_vcpus_for_migration(kvm);
+ ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
if (ret)
goto out_dst_cgroup;
- ret = sev_lock_vcpus_for_migration(source_kvm);
+ ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
if (ret)
goto out_dst_vcpu;
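The lockdep subclass machinery used above (mutex_lock_killable_nested(), the mutex_release()/mutex_acquire() bookkeeping) has no userspace counterpart, but the surrounding take-all-or-roll-back structure does. A minimal pthread sketch of that shape only, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

#define NR_VCPUS 4

static pthread_mutex_t vcpu_lock[NR_VCPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Take every vCPU lock, or take none: on failure, release exactly the
 * locks acquired so far and report the error to the caller.
 */
static int lock_all_vcpus(void)
{
        int i, j;

        for (i = 0; i < NR_VCPUS; i++) {
                if (pthread_mutex_trylock(&vcpu_lock[i])) {
                        for (j = 0; j < i; j++)
                                pthread_mutex_unlock(&vcpu_lock[j]);
                        return -1;
                }
        }
        return 0;
}

static void unlock_all_vcpus(void)
{
        int i;

        for (i = 0; i < NR_VCPUS; i++)
                pthread_mutex_unlock(&vcpu_lock[i]);
}

int main(void)
{
        if (!lock_all_vcpus()) {
                puts("all vCPU locks taken");
                unlock_all_vcpus();
        }
        return 0;
}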
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d58b763df855..610355b9ccce 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5472,7 +5472,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
return vmx->emulation_required && !vmx->rmode.vm86_active &&
- vcpu->arch.exception.pending;
+ (vcpu->arch.exception.pending || vcpu->arch.exception.injected);
}
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
diff --git a/block/blk-core.c b/block/blk-core.c
index 937bb6b86331..bc0506772152 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -50,7 +50,6 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
-#include "blk-rq-qos.h"
struct dentry *blk_debugfs_root;
@@ -315,9 +314,6 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_freeze_queue(q);
- /* cleanup rq qos structures for queue without disk */
- rq_qos_exit(q);
-
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
blk_sync_queue(q);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 5d819a466e2f..e232cc4fd444 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -303,6 +303,7 @@ static struct atari_floppy_struct {
int ref;
int type;
struct blk_mq_tag_set tag_set;
+ int error_count;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@@ -705,14 +706,14 @@ static void fd_error( void )
if (!fd_request)
return;
- fd_request->error_count++;
- if (fd_request->error_count >= MAX_ERRORS) {
+ unit[SelectedDrive].error_count++;
+ if (unit[SelectedDrive].error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
finish_fdc();
return;
}
- else if (fd_request->error_count == RECALIBRATE_ERRORS) {
+ else if (unit[SelectedDrive].error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@@ -1491,7 +1492,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
- read_track = (ReqCmd == READ && fd_request->error_count == 0);
+ read_track = (ReqCmd == READ && unit[drive].error_count == 0);
else
read_track = 0;
@@ -1520,6 +1521,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_RESOURCE;
}
fd_request = bd->rq;
+ unit[drive].error_count = 0;
blk_mq_start_request(fd_request);
atari_disable_irq( IRQ_MFP_FDC );
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index d5b9ff9bcbb2..015841f50f4e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@@ -530,7 +530,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
-static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
- } else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+ } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
if (!next_valid_format(current_drive))
return;
}
- err_count = ++(*errors);
+ err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
- format_errors = 0;
cont = &format_cont;
- errors = &format_errors;
+ floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@@ -2759,10 +2757,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
- current_req->error_count = 0;
+ floppy_errors = 0;
list_del_init(&current_req->queuelist);
+ return 1;
}
- return current_req != NULL;
+ return 0;
}
/* Starts or continues processing request. Will automatically unlock the
@@ -2821,7 +2820,6 @@ do_request:
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
- errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 54be88167c60..f3b3953cac83 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
+ unsigned long flags;
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
+ spin_lock_irqsave(&card->lock, flags);
fw_destroy_nodes(card);
+ spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9f89c17730b1..708e417200f4 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1500,6 +1500,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
+ struct client *e_client;
switch (status) {
/* expected: */
@@ -1516,9 +1517,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
}
e->phy_packet.data[0] = packet->timestamp;
+ e_client = e->client;
queue_event(e->client, &e->event, &e->phy_packet,
sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
- client_put(e->client);
+ client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index b63d55f5ebd3..f40c81534381 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
+/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
card->local_node = NULL;
- spin_unlock_irqrestore(&card->lock, flags);
}
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
+ spin_lock_irqsave(&card->lock, flags);
+
/*
* If the selfID buffer is not the immediate successor of the
* previously processed one, we cannot reliably compare the
@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_retries = 0;
}
- spin_lock_irqsave(&card->lock, flags);
-
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ac487c96bb71..6c20815cc8d1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
- struct fw_transaction *t;
+ struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(t, &card->transaction_list, link) {
- if (t == transaction) {
- if (!try_cancel_split_timeout(t)) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter == transaction) {
+ if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
- list_del_init(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&t->link != &card->transaction_list) {
+ if (t) {
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
- struct fw_transaction *t;
+ struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(t, &card->transaction_list, link) {
- if (t->node_id == source && t->tlabel == tlabel) {
- if (!try_cancel_split_timeout(t)) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter->node_id == source && iter->tlabel == tlabel) {
+ if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
- list_del_init(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&t->link == &card->transaction_list) {
+ if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
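The firewire changes above all address the same pitfall: after a full list_for_each_entry() traversal the iterator does not point at a valid element (it is a bogus container_of() of the list head), so "found" must be tracked in a separate pointer. A userspace model of the safe shape, with hypothetical types:

#include <stdio.h>

struct transaction {
        int tlabel;
        struct transaction *next;
};

static struct transaction *find_transaction(struct transaction *head, int tlabel)
{
        struct transaction *found = NULL, *iter;

        for (iter = head; iter; iter = iter->next) {
                if (iter->tlabel == tlabel) {
                        found = iter;   /* remember the match... */
                        break;
                }
        }
        return found;                   /* ...and test this, not 'iter' */
}

int main(void)
{
        struct transaction c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct transaction *t = find_transaction(&a, 2);

        if (t)
                printf("tlabel %d found\n", t->tlabel);
        else
                puts("not found");
        return 0;
}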
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 85cd379fd383..60051c0cabea 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
- struct sbp2_orb *orb;
+ struct sbp2_orb *orb = NULL, *iter;
struct sbp2_status status;
unsigned long flags;
@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
/* Lookup the orb corresponding to this status write. */
spin_lock_irqsave(&lu->tgt->lock, flags);
- list_for_each_entry(orb, &lu->orb_list, link) {
+ list_for_each_entry(iter, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
- STATUS_GET_ORB_LOW(status) == orb->request_bus) {
- orb->rcode = RCODE_COMPLETE;
- list_del(&orb->link);
+ STATUS_GET_ORB_LOW(status) == iter->request_bus) {
+ iter->rcode = RCODE_COMPLETE;
+ list_del(&iter->link);
+ orb = iter;
break;
}
}
spin_unlock_irqrestore(&lu->tgt->lock, flags);
- if (&orb->link != &lu->orb_list) {
+ if (orb) {
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 4c1f9e1091b7..a2c8dd329b31 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -871,13 +871,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvpwm->chip.dev = dev;
mvpwm->chip.ops = &mvebu_pwm_ops;
mvpwm->chip.npwm = mvchip->chip.ngpio;
- /*
- * There may already be some PWM allocated, so we can't force
- * mvpwm->chip.base to a fixed point like mvchip->chip.base.
- * So, we let pwmchip_add() do the numbering and take the next free
- * region.
- */
- mvpwm->chip.base = -1;
spin_lock_init(&mvpwm->lock);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d2fe76f3f34f..8726921a1129 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -762,11 +762,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
+
if (bitmap_empty(trigger, gc->ngpio))
return false;
- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
-
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 47455810bdb9..e6534ea1eaa7 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
struct irq_domain *parent;
struct device_node *irq_parent;
- struct fwnode_handle *fwnode;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev)
}
parent = irq_find_host(irq_parent);
+ of_node_put(irq_parent);
if (!parent) {
dev_err(dev, "No IRQ parent domain\n");
return -ENODEV;
}
- fwnode = of_node_to_fwnode(irq_parent);
- of_node_put(irq_parent);
-
ret = bgpio_init(&priv->gpio_chip, dev, 4,
priv->base + GPIO_IDATA,
priv->base + GPIO_OSET,
@@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq = &priv->gpio_chip.irq;
girq->chip = irq_chip;
- girq->fwnode = fwnode;
+ girq->fwnode = of_node_to_fwnode(dev->of_node);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ae1ce319cd78..7e5e51d49d09 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -910,7 +910,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
i, &start);
of_property_read_u32_index(np, "gpio-reserved-ranges",
i + 1, &count);
- if (start >= chip->ngpio || start + count >= chip->ngpio)
+ if (start >= chip->ngpio || start + count > chip->ngpio)
continue;
bitmap_clear(chip->valid_mask, start, count);
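A quick worked example of the boundary fix above: with ngpio = 32, a reserved range start = 28, count = 4 ends exactly at the chip boundary and is valid, but the old "start + count >= chip->ngpio" test rejected it. A tiny standalone check (not the kernel helper) makes the arithmetic explicit:

#include <stdbool.h>
#include <stdio.h>

/* A reserved range [start, start + count) fits when start + count <= ngpio. */
static bool range_fits(unsigned int start, unsigned int count, unsigned int ngpio)
{
        return start < ngpio && start + count <= ngpio;
}

int main(void)
{
        printf("start=28 count=4 ngpio=32 -> %s\n",
               range_fits(28, 4, 32) ? "fits" : "rejected");
        printf("start=30 count=4 ngpio=32 -> %s\n",
               range_fits(30, 4, 32) ? "fits" : "rejected");
        return 0;
}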
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index a025f080aa6a..5e3756643da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
+#include <xen/xen.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@@ -710,7 +711,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
+ /* passthrough mode excludes sriov mode */
+ if (is_virtual_machine() && !xen_initial_domain())
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 22dabe596dfc..95b5b5bfa1ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4440,7 +4440,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
&dpcd_pattern_type.value,
sizeof(dpcd_pattern_type));
- channel_count = dpcd_test_mode.bits.channel_count + 1;
+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
// read pattern periods for requested channels when sawTooth pattern is requested
if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 007e5a282f67..2145b08f9534 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -78,6 +78,7 @@ config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
depends on OF
select DRM_KMS_HELPER
+ select DRM_DP_HELPER
select EXTCON
help
ITE IT6505 DisplayPort bridge chip driver.
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a42732b67349..178b774a5fbd 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -580,12 +580,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
- /*
- * add fail safe mode outside event_mutex scope
- * to avoid potiential circular lock with drm thread
- */
- dp_panel_add_fail_safe_mode(dp->dp_display.connector);
-
/* uevent will complete connection part */
return 0;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 26c3653c99ec..26f4b6959c31 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -151,15 +151,6 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
-void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
-{
- /* fail safe edid */
- mutex_lock(&connector->dev->mode_config.mutex);
- if (drm_add_modes_noedid(connector, 640, 480))
- drm_set_preferred_mode(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@@ -215,8 +206,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
rc = -ETIMEDOUT;
goto end;
}
-
- dp_panel_add_fail_safe_mode(connector);
}
if (panel->aux_cfg_update_done) {
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 99739ea679a7..9023e5bb4b8b 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -59,7 +59,6 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
-void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index dedb3b7edd8d..638bf4a1ed94 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -2308,10 +2308,8 @@ err:
return NULL;
}
-static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
+static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
{
- struct irdma_cm_node *cm_node =
- container_of(rcu_head, struct irdma_cm_node, rcu_head);
struct irdma_cm_core *cm_core = cm_node->cm_core;
struct irdma_qp *iwqp;
struct irdma_cm_info nfo;
@@ -2359,7 +2357,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
}
cm_core->cm_free_ah(cm_node);
- kfree(cm_node);
}
/**
@@ -2387,8 +2384,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- /* wait for all list walkers to exit their grace period */
- call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
+ irdma_destroy_connection(cm_node);
+
+ kfree_rcu(cm_node, rcu_head);
}
/**
@@ -3246,15 +3244,10 @@ int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
*/
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
{
- unsigned long flags;
-
if (!cm_core)
return;
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- if (timer_pending(&cm_core->tcp_timer))
- del_timer_sync(&cm_core->tcp_timer);
- spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ del_timer_sync(&cm_core->tcp_timer);
destroy_workqueue(cm_core->event_wq);
cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
@@ -3467,12 +3460,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
}
cm_id = iwqp->cm_id;
- /* make sure we havent already closed this connection */
- if (!cm_id) {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- return;
- }
-
original_hw_tcp_state = iwqp->hw_tcp_state;
original_ibqp_state = iwqp->ibqp_state;
last_ae = iwqp->last_aeq;
@@ -3494,11 +3481,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
disconn_status = -ECONNRESET;
}
- if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
- original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
- last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
- last_ae == IRDMA_AE_BAD_CLOSE ||
- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
+ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 346c2c5dabdf..81760415d66c 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
u32 local_ipaddr[4] = {};
bool ipv4 = true;
- real_dev = rdma_vlan_dev_real_dev(netdev);
- if (!real_dev)
- real_dev = netdev;
-
- ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
- if (!ibdev)
- return NOTIFY_DONE;
-
- iwdev = to_iwdev(ibdev);
-
switch (event) {
case NETEVENT_NEIGH_UPDATE:
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
p = (__be32 *)neigh->primary_key;
if (neigh->tbl->family == AF_INET6) {
ipv4 = false;
@@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
irdma_manage_arp_cache(iwdev->rf, neigh->ha,
local_ipaddr, ipv4,
IRDMA_ARP_DELETE);
+ ib_device_put(ibdev);
break;
default:
break;
}
- ib_device_put(ibdev);
-
return NOTIFY_DONE;
}
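A minimal model of the narrowed reference scope above: the device lookup and its matching put are confined to the one event path that actually uses the device, instead of bracketing the whole handler. All names below are placeholders, not the RDMA core API.

#include <stdio.h>

struct device { int refs; };

static struct device dev;

static struct device *device_get(void) { dev.refs++; return &dev; }
static void device_put(struct device *d) { d->refs--; }

static void handle_event(int event)
{
        struct device *d;

        switch (event) {
        case 1:                 /* the one case that needs the device */
                d = device_get();
                printf("handling event with device, refs=%d\n", d->refs);
                device_put(d);  /* released on the same path */
                break;
        default:                /* other events never touch it */
                break;
        }
}

int main(void)
{
        handle_event(1);
        handle_event(2);
        printf("final refs=%d\n", dev.refs);
        return 0;
}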
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 46f475394af5..52f3e88f8569 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1618,13 +1618,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
if (dont_wait) {
- if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ if (iwqp->hw_tcp_state) {
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
iwqp->last_aeq = IRDMA_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
- irdma_cm_disconn(iwqp);
}
+ irdma_cm_disconn(iwqp);
} else {
int close_timer_started;
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index ae8f11cb704a..873a9b10307c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -38,13 +38,13 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
}
/**
- * rxe_mcast_delete - delete multicast address from rxe device
+ * rxe_mcast_del - delete multicast address from rxe device
* @rxe: rxe device object
* @mgid: multicast address as a gid
*
* Returns 0 on success else an error
*/
-static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
@@ -143,11 +143,10 @@ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
struct rxe_mcg *mcg;
- unsigned long flags;
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
mcg = __rxe_lookup_mcg(rxe, mgid);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return mcg;
}
@@ -159,17 +158,10 @@ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
* @mcg: new mcg object
*
* Context: caller should hold rxe->mcg lock
- * Returns: 0 on success else an error
*/
-static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
- struct rxe_mcg *mcg)
+static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mcg *mcg)
{
- int err;
-
- err = rxe_mcast_add(rxe, mgid);
- if (unlikely(err))
- return err;
-
kref_init(&mcg->ref_cnt);
memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
INIT_LIST_HEAD(&mcg->qp_list);
@@ -184,8 +176,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
*/
kref_get(&mcg->ref_cnt);
__rxe_insert_mcg(mcg);
-
- return 0;
}
/**
@@ -198,7 +188,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
struct rxe_mcg *mcg, *tmp;
- unsigned long flags;
int err;
if (rxe->attr.max_mcast_grp == 0)
@@ -209,36 +198,38 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
if (mcg)
return mcg;
+ /* check to see if we have reached limit */
+ if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
+
/* speculative alloc of new mcg */
mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
return ERR_PTR(-ENOMEM);
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
+ spin_unlock_bh(&rxe->mcg_lock);
+ atomic_dec(&rxe->mcg_num);
kfree(mcg);
- mcg = tmp;
- goto out;
+ return tmp;
}
- if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
- err = -ENOMEM;
- goto err_dec;
- }
+ __rxe_init_mcg(rxe, mgid, mcg);
+ spin_unlock_bh(&rxe->mcg_lock);
- err = __rxe_init_mcg(rxe, mgid, mcg);
- if (err)
- goto err_dec;
-out:
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- return mcg;
+ /* add mcast address outside of lock */
+ err = rxe_mcast_add(rxe, mgid);
+ if (!err)
+ return mcg;
+ kfree(mcg);
err_dec:
atomic_dec(&rxe->mcg_num);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- kfree(mcg);
return ERR_PTR(err);
}
@@ -268,7 +259,6 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
__rxe_remove_mcg(mcg);
kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
- rxe_mcast_delete(mcg->rxe, &mcg->mgid);
atomic_dec(&rxe->mcg_num);
}
@@ -280,11 +270,12 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
*/
static void rxe_destroy_mcg(struct rxe_mcg *mcg)
{
- unsigned long flags;
+ /* delete mcast address outside of lock */
+ rxe_mcast_del(mcg->rxe, &mcg->mgid);
- spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
+ spin_lock_bh(&mcg->rxe->mcg_lock);
__rxe_destroy_mcg(mcg);
- spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
+ spin_unlock_bh(&mcg->rxe->mcg_lock);
}
/**
@@ -339,25 +330,24 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
struct rxe_dev *rxe = mcg->rxe;
struct rxe_mca *mca, *tmp;
- unsigned long flags;
int err;
/* check to see if the qp is already a member of the group */
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
list_for_each_entry(mca, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return 0;
}
}
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
/* speculative alloc new mca without using GFP_ATOMIC */
mca = kzalloc(sizeof(*mca), GFP_KERNEL);
if (!mca)
return -ENOMEM;
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
/* re-check to see if someone else just attached qp */
list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
if (tmp->qp == qp) {
@@ -371,7 +361,7 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
if (err)
kfree(mca);
out:
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return err;
}
@@ -405,9 +395,8 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
struct rxe_dev *rxe = mcg->rxe;
struct rxe_mca *mca, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&rxe->mcg_lock, flags);
+ spin_lock_bh(&rxe->mcg_lock);
list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
__rxe_cleanup_mca(mca, mcg);
@@ -421,13 +410,13 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
if (atomic_read(&mcg->qp_num) <= 0)
__rxe_destroy_mcg(mcg);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return 0;
}
}
/* we didn't find the qp on the list */
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ spin_unlock_bh(&rxe->mcg_lock);
return -EINVAL;
}
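The rxe_get_mcg() rework above keeps the usual speculative-allocation shape: allocate while the lock is dropped, re-check for a racing insertion under the lock, and discard the speculative object if someone else won. A userspace sketch of that shape (pthread, simplified types, not the rxe code; the limit check and the out-of-lock rxe_mcast_add() call are omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mcg { char mgid[16]; struct mcg *next; };

static pthread_mutex_t mcg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mcg *mcg_list;

static struct mcg *lookup_locked(const char *mgid)
{
        struct mcg *m;

        for (m = mcg_list; m; m = m->next)
                if (!strcmp(m->mgid, mgid))
                        return m;
        return NULL;
}

static struct mcg *get_mcg(const char *mgid)
{
        struct mcg *m, *tmp;

        /* Allocation may sleep, so it happens with the lock dropped. */
        m = calloc(1, sizeof(*m));
        if (!m)
                return NULL;

        pthread_mutex_lock(&mcg_lock);
        tmp = lookup_locked(mgid);      /* did someone race us? */
        if (tmp) {
                pthread_mutex_unlock(&mcg_lock);
                free(m);                /* drop the speculative copy */
                return tmp;
        }
        snprintf(m->mgid, sizeof(m->mgid), "%s", mgid);
        m->next = mcg_list;
        mcg_list = m;
        pthread_mutex_unlock(&mcg_lock);
        return m;
}

int main(void)
{
        printf("mcg %p\n", (void *)get_mcg("ff0e::1"));
        return 0;
}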
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 16fc7ea1298d..9cd0eaff98de 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -680,6 +680,11 @@ static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
* It is assumed that the access permissions if originally good
* are OK and the mappings to be unchanged.
*
+ * TODO: If someone reregisters an MR to change its size or
+ * access permissions during the processing of an RDMA read
+ * we should kill the responder resource and complete the
+ * operation with an error.
+ *
* Return: mr on success else NULL
*/
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
@@ -690,23 +695,27 @@ static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
if (rkey_is_mw(rkey)) {
mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
- if (!mw || mw->rkey != rkey)
+ if (!mw)
return NULL;
- if (mw->state != RXE_MW_STATE_VALID) {
+ mr = mw->mr;
+ if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
+ !mr || mr->state != RXE_MR_STATE_VALID) {
rxe_put(mw);
return NULL;
}
- mr = mw->mr;
+ rxe_get(mr);
rxe_put(mw);
- } else {
- mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
- if (!mr || mr->rkey != rkey)
- return NULL;
+
+ return mr;
}
- if (mr->state != RXE_MR_STATE_VALID) {
+ mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ if (!mr)
+ return NULL;
+
+ if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
rxe_put(mr);
return NULL;
}
@@ -736,8 +745,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
}
if (res->state == rdatm_res_state_new) {
- mr = qp->resp.mr;
- qp->resp.mr = NULL;
+ if (!res->replay) {
+ mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+ } else {
+ mr = rxe_recheck_mr(qp, res->read.rkey);
+ if (!mr)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
if (res->read.resid <= mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 7acdd3c3a599..17f34d584cd9 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_set_inuse(new_cep);
rv = siw_proc_mpareq(new_cep);
- siw_cep_set_free(new_cep);
-
if (rv != -EAGAIN) {
siw_cep_put(cep);
new_cep->listen_cep = NULL;
- if (rv)
+ if (rv) {
+ siw_cep_set_free(new_cep);
goto error;
+ }
}
+ siw_cep_set_free(new_cep);
}
return;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index e7ea45386c22..efa95dc4fc4e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1384,13 +1384,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ */
mmc_set_timing(host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
err = mmc_switch_status(card, true);
if (err)
goto out_err;
- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1448,7 +1452,7 @@ out_err:
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned int old_timing, old_signal_voltage;
+ unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
@@ -1479,8 +1483,17 @@ static int mmc_select_hs200(struct mmc_card *card)
false, true, MMC_CMD_RETRIES);
if (err)
goto err;
+
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ * NB: We can't move to full (HS200) speeds until after we've
+ * successfully switched over.
+ */
old_timing = host->ios.timing;
+ old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
@@ -1493,8 +1506,10 @@ static int mmc_select_hs200(struct mmc_card *card)
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
- if (err == -EBADMSG)
+ if (err == -EBADMSG) {
+ mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
+ }
}
err:
if (err) {
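A userspace model of the rollback the HS200 hunk adds: the old timing and clock are saved as a pair and restored as a pair when the switch is refused. Types and values below are placeholders, not the MMC core API.

#include <stdbool.h>
#include <stdio.h>

struct host_ios { int timing; unsigned int clock; };

static bool switch_card_mode(void) { return false; /* pretend the switch failed */ }

static void select_hs200(struct host_ios *ios)
{
        struct host_ios old = *ios;     /* save timing *and* clock */

        ios->timing = 9;                /* hypothetical HS200 timing id */
        ios->clock = 200000000;

        if (!switch_card_mode()) {
                *ios = old;             /* roll both back as a pair */
                puts("switch failed, timing and clock restored");
        }
}

int main(void)
{
        struct host_ios ios = { .timing = 1, .clock = 52000000 };

        select_hs200(&ios);
        printf("timing=%d clock=%u\n", ios.timing, ios.clock);
        return 0;
}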
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 50c71e0ba5e4..ff9f5b63c337 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -2482,6 +2483,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
+static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
+{
+ struct reset_control *reset;
+ int ret = 0;
+
+ reset = reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "unable to acquire core_reset\n");
+
+ if (!reset)
+ return ret;
+
+ ret = reset_control_assert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset assert failed\n");
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.768 kHz) cycles, which comes to
+ * ~122us (4/32768 s). To be on the safe side, add a 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset deassert failed\n");
+ }
+
+ usleep_range(200, 210);
+ reset_control_put(reset);
+
+ return ret;
+}
static int sdhci_msm_probe(struct platform_device *pdev)
{
@@ -2529,6 +2567,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+ ret = sdhci_msm_gcc_reset(&pdev->dev, host);
+ if (ret)
+ goto pltfm_free;
+
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index c62afd212692..46f9e2923d86 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
pdes[i].buf_addr_ptr1 =
cpu_to_le32(sg_dma_address(&data->sg[i]) >>
host->cfg->idma_des_shift);
- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
- host->cfg->idma_des_shift);
+ pdes[i].buf_addr_ptr2 =
+ cpu_to_le32(next_desc >>
+ host->cfg->idma_des_shift);
}
pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cf82b1fa9725..87e81c636339 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -809,6 +809,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
+ if (priv->wol_ports_mask & BIT(port))
+ return;
+
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index f5c9d695408a..a23781d9a15c 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -322,6 +322,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
@@ -341,9 +342,13 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
/* Make sure all traps are set up for that destination */
- list_for_each_entry(trap, &ocelot->traps, trap_list) {
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
+
/* Figure out the current trapping destination */
if (using_tag_8021q) {
/* Redirect to the tag_8021q CPU port. If timestamps
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 3a529ee8c834..831833911a52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -449,7 +449,7 @@ static int aq_pm_freeze(struct device *dev)
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev, true);
}
static int aq_pm_thaw(struct device *dev)
@@ -459,7 +459,7 @@ static int aq_pm_thaw(struct device *dev)
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev, true);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index ea740210803f..25129e723b57 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -544,7 +544,6 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -562,12 +561,17 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -575,18 +579,17 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@ -644,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
self->frame_max);
@@ -653,7 +656,6 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 878a53abec33..54e70f07b573 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 65606351634e..8309fb993cdb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3998,6 +3998,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e7b4e3ed056c..8d719f82854a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
goto out;
na = ret;
- memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+ memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
strim(p->sn);
- memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
- strim((char *)p->na);
+ memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+ strim(p->na);
out:
vfree(vpd);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 434d8bf0e8f9..825e81f5fd22 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
- if (!dev)
+ if (!dev) {
+ pci_disable_device(pdev);
return -ENOMEM;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1785,6 +1787,7 @@ err_out_free_res:
err_out_free_netdev:
free_netdev (dev);
+ pci_disable_device(pdev);
return -ENODEV;
}
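The tulip fix above is a reminder of the usual probe() unwind discipline: every resource acquired before a failure point gets released on that failure path. A generic sketch of the goto-ladder shape with made-up resource names:

#include <stdio.h>
#include <stdlib.h>

static int enable_device(void)   { return 0; }
static void disable_device(void) { puts("device disabled"); }

static int probe(void)
{
        void *priv;
        int err;

        err = enable_device();
        if (err)
                return err;

        priv = malloc(64);
        if (!priv) {
                err = -1;               /* stand-in for -ENOMEM */
                goto err_disable;       /* undo enable_device() */
        }

        /* On success the resources stay claimed until a remove() step. */
        return 0;

err_disable:
        disable_device();
        return err;
}

int main(void)
{
        printf("probe %s\n", probe() ? "failed" : "succeeded");
        return 0;
}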
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 358c2edc118d..332a608dbaa6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7554,42 +7554,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
- struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list))
- return -EINVAL;
-
/* Go through the list and find an available channel */
- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
- if (!i40e_is_channel_macvlan(ch)) {
- ch->fwd = fwd;
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
- ch->num_queue_pairs,
- ch->base_queue);
- for (i = 0; i < ch->num_queue_pairs; i++) {
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
- pf_q = ch->base_queue + i;
+ pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
- tx_ring->ch = ch;
+ tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
- rx_ring->ch = ch;
+ rx_ring->ch = iter;
}
+ ch = iter;
break;
}
}
+ if (!ch)
+ return -EINVAL;
+
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2fbb3d737dfd..60453b3b8d23 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -540,6 +540,7 @@ struct ice_pf {
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
+ struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
struct tty_driver *ice_gnss_tty_driver;
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 7e941264ae21..895c32bcc8b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
if (WARN_ON_ONCE(!in_task()))
return;
+ mutex_lock(&pf->adev_mutex);
if (!pf->adev)
- return;
+ goto finish;
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
+finish:
+ mutex_unlock(&pf->adev_mutex);
}
/**
@@ -275,7 +278,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
return -ENOMEM;
adev = &iadev->adev;
- pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
@@ -285,18 +287,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
ret = auxiliary_device_init(adev);
if (ret) {
- pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
- pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
+ mutex_lock(&pf->adev_mutex);
+ pf->adev = adev;
+ mutex_unlock(&pf->adev_mutex);
+
return 0;
}
@@ -305,12 +309,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
- if (!pf->adev)
- return;
+ struct auxiliary_device *adev;
- auxiliary_device_delete(pf->adev);
- auxiliary_device_uninit(pf->adev);
+ mutex_lock(&pf->adev_mutex);
+ adev = pf->adev;
pf->adev = NULL;
+ mutex_unlock(&pf->adev_mutex);
+
+ if (adev) {
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+ }
}
/**
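
The ice_unplug_aux_dev() change above takes the pointer out of the shared field under adev_mutex, clears the field, and only then tears the device down outside the lock. A minimal userspace sketch of this "detach under the lock, destroy outside it" pattern using pthreads (the struct and function names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct aux_dev { int id; };

    struct pf {
        pthread_mutex_t adev_mutex;
        struct aux_dev *adev;      /* shared pointer protected by adev_mutex */
    };

    static void unplug_aux_dev(struct pf *pf)
    {
        struct aux_dev *adev;

        /* Swap the shared pointer to NULL under the lock ... */
        pthread_mutex_lock(&pf->adev_mutex);
        adev = pf->adev;
        pf->adev = NULL;
        pthread_mutex_unlock(&pf->adev_mutex);

        /* ... and do the slow teardown outside of it. */
        if (adev) {
            printf("destroying aux dev %d\n", adev->id);
            free(adev);
        }
    }

    int main(void)
    {
        struct pf pf = { .adev = malloc(sizeof(struct aux_dev)) };

        if (!pf.adev)
            return 1;
        pthread_mutex_init(&pf.adev_mutex, NULL);
        pf.adev->id = 1;
        unplug_aux_dev(&pf);   /* frees the device */
        unplug_aux_dev(&pf);   /* safe no-op: pointer already NULL */
        return 0;
    }
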
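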
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 867908f94661..4a5d4d971161 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3785,6 +3785,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
@@ -3863,6 +3864,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a1cd33273ca4..da025c204577 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2287,6 +2287,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
/**
* ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @hw: pointer to the hw struct
* @tx: PTP Tx tracker to clean up
*
* Loop through the Tx timestamp requests and see if any of them have been
@@ -2295,7 +2296,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* timestamp will never be captured. This might happen if the packet gets
* discarded before it reaches the PHY timestamping block.
*/
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
{
u8 idx;
@@ -2304,11 +2305,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
for_each_set_bit(idx, tx->in_use, tx->len) {
struct sk_buff *skb;
+ u64 raw_tstamp;
/* Check if this SKB has been waiting for too long */
if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
continue;
+ /* Read tstamp to be able to use this register again */
+ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+ &raw_tstamp);
+
spin_lock(&tx->lock);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@@ -2330,7 +2336,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
ice_ptp_update_cached_phctime(pf);
- ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+ ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
/* Run twice a second */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index b47577a2841a..1d9b84c3937a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1290,12 +1290,51 @@ error_param:
}
/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
* ice_vc_dis_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * called from the VF to disable all or specific
- * queue(s)
+ * called from the VF to disable all or specific queue(s)
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@ -1332,30 +1371,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
- struct ice_txq_meta txq_meta = { 0 };
-
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(vf_q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- vf_q_id, vsi->vsi_num);
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
- ring, &txq_meta)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->txq_ena);
}
}
@@ -1604,6 +1628,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qpi->txq.ring_len > 0) {
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 683f89f8e3b2..dab8f3f771f8 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -715,7 +715,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
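
The mtk_ppe one-liner replaces sizeof(ppe->foe_table), which is the size of the pointer itself, with sizeof(*ppe->foe_table), the size of one table entry, so the memset clears the whole array. A tiny standalone illustration of the difference (the entry type and sizes here are made up):

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    struct foe_entry { unsigned int data[16]; };   /* 64 bytes per entry */

    #define N_ENTRIES 128

    int main(void)
    {
        struct foe_entry *table = malloc(N_ENTRIES * sizeof(*table));

        if (!table)
            return 1;

        /* Buggy form: sizeof(table) is the pointer size (8 bytes on 64-bit),
         * so only N_ENTRIES * 8 bytes would be cleared. */
        printf("sizeof(table)  = %zu bytes\n", sizeof(table));

        /* Fixed form: sizeof(*table) is the element size, covering everything. */
        printf("sizeof(*table) = %zu bytes\n", sizeof(*table));
        memset(table, 0, N_ENTRIES * sizeof(*table));

        free(table);
        return 0;
    }
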
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 01cf5a6a26bd..a2ee695a3f17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -568,10 +568,8 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- struct __ip6_tnl_parm parms6;
-
- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+ &ipip_entry->parms.daddr.addr6,
&ipip_entry->dip_kvdl_index);
}
@@ -579,10 +577,7 @@ static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
- struct __ip6_tnl_parm parms6;
-
- parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 9336f3b00c6e..5f81938c58a9 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1622,7 +1622,7 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
trap->take_ts = take_ts;
- list_add_tail(&trap->trap_list, &ocelot->traps);
+ trap->is_trap = true;
new = true;
}
@@ -1634,10 +1634,8 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask) {
- list_del(&trap->trap_list);
+ if (!trap->ingress_port_mask)
kfree(trap);
- }
return err;
}
@@ -1657,11 +1655,8 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
return 0;
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask) {
- list_del(&trap->trap_list);
-
+ if (!trap->ingress_port_mask)
return ocelot_vcap_filter_del(ocelot, trap);
- }
return ocelot_vcap_filter_replace(ocelot, trap);
}
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 03b5e59d033e..51cf241ff7d0 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -280,9 +280,10 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_TRAP:
- if (filter->block_id != VCAP_IS2) {
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Trap action can only be offloaded to VCAP IS2");
+ "Trap action can only be offloaded to VCAP IS2 lookup 0");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
@@ -295,7 +296,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- list_add_tail(&filter->trap_list, &ocelot->traps);
+ filter->is_trap = true;
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
@@ -878,8 +879,6 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
- if (!list_empty(&filter->trap_list))
- list_del(&filter->trap_list);
kfree(filter);
return ret;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index cdbe29f2ddc7..73cdec5ca6a3 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -374,7 +374,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
OCELOT_VCAP_BIT_0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@ -1209,6 +1208,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i - 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@ -1242,7 +1243,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter del_filter;
int i, index;
+ /* Need to inherit the block_id so that vcap_entry_set()
+ * does not get confused and knows where to install it.
+ */
memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1257,6 +1262,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 6ffc62c41165..0a7a757494bc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -256,7 +256,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = ionic_map_bars(ionic);
if (err)
- goto err_out_pci_disable_device;
+ goto err_out_pci_release_regions;
/* Configure the device */
err = ionic_setup(ionic);
@@ -360,6 +360,7 @@ err_out_teardown:
err_out_unmap_bars:
ionic_unmap_bars(ionic);
+err_out_pci_release_regions:
pci_release_regions(pdev);
err_out_pci_disable_device:
pci_disable_device(pdev);
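
The ionic_probe() fix adds a dedicated err_out_pci_release_regions label so that a failure in ionic_map_bars() still releases the regions acquired before it, instead of jumping past that step. A compact sketch of the goto-unwind idiom the driver relies on, with placeholder acquire/release steps standing in for the PCI calls:

    #include <stdio.h>

    /* Each step_* stands in for an acquire call such as pci_enable_device()
     * or pci_request_regions(); fail_at lets us simulate an error. */
    static int step_a(int fail_at) { return fail_at == 1 ? -1 : 0; }
    static int step_b(int fail_at) { return fail_at == 2 ? -1 : 0; }
    static int step_c(int fail_at) { return fail_at == 3 ? -1 : 0; }

    static int probe(int fail_at)
    {
        int err;

        err = step_a(fail_at);
        if (err)
            return err;              /* nothing acquired yet */

        err = step_b(fail_at);
        if (err)
            goto err_undo_a;

        err = step_c(fail_at);
        if (err)
            goto err_undo_b;         /* the extra label added by the fix above */

        return 0;

    err_undo_b:
        printf("undo step b\n");
    err_undo_a:
        printf("undo step a\n");
        return err;
    }

    int main(void)
    {
        printf("fail at step c -> %d\n", probe(3));
        return 0;
    }
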
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c9ee5011803f..186cb28c03bd 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3579,6 +3579,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
n_parts++;
}
+ if (!n_parts) {
+ kfree(parts);
+ return 0;
+ }
+
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 3f28f9861dfa..79df636d6df8 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -598,7 +598,6 @@ void efx_fini_channels(struct efx_nic *efx)
/* Allocate and initialise a channel structure, copying parameters
* (but not resources) from an old channel structure.
*/
-static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
struct efx_rx_queue *rx_queue;
@@ -848,7 +847,9 @@ static void efx_set_xdp_channels(struct efx_nic *efx)
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+ *ptp_channel = efx_ptp_channel(efx);
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
unsigned int i, next_buffer_table = 0;
u32 old_rxq_entries, old_txq_entries;
int rc, rc2;
@@ -919,6 +920,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_set_xdp_channels(efx);
out:
+ efx->ptp_data = NULL;
/* Destroy unused channel structures */
for (i = 0; i < efx->n_channels; i++) {
channel = other_channel[i];
@@ -929,6 +931,7 @@ out:
}
}
+ efx->ptp_data = ptp_data;
rc2 = efx_soft_enable_interrupts(efx);
if (rc2) {
rc = rc ? rc : rc2;
@@ -947,6 +950,7 @@ rollback:
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++)
swap(efx->channel[i], other_channel[i]);
+ efx_ptp_update_channel(efx, ptp_channel);
goto out;
}
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index 64abb99a56b8..46b702648721 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -39,6 +39,7 @@ int efx_set_channels(struct efx_nic *efx);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
+struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index f0ef515e2ade..4625f85acab2 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -45,6 +45,7 @@
#include "farch_regs.h"
#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
+#include "efx_channels.h"
/* Maximum number of events expected to make up a PTP event */
#define MAX_EVENT_FRAGS 3
@@ -541,6 +542,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
return efx->ptp_data ? efx->ptp_data->channel : NULL;
}
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
+{
+ if (efx->ptp_data)
+ efx->ptp_data->channel = channel;
+}
+
static u32 last_sync_timestamp_major(struct efx_nic *efx)
{
struct efx_channel *channel = efx_ptp_channel(efx);
@@ -1443,6 +1450,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
int rc = 0;
unsigned int pos;
+ if (efx->ptp_data) {
+ efx->ptp_data->channel = channel;
+ return 0;
+ }
+
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
@@ -2176,7 +2188,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.pre_probe = efx_ptp_probe_channel,
.post_remove = efx_ptp_remove_channel,
.get_name = efx_ptp_get_channel_name,
- /* no copy operation; there is no need to reallocate this channel */
+ .copy = efx_copy_channel,
.receive_skb = efx_ptp_rx,
.want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 9855e8c9e544..7b1ef7002b3f 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -16,6 +16,7 @@ struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fcf17d8a0494..644bb54f5f02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}
-
- pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 685a0ab5453c..c34a93403d1e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1959,7 +1959,7 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
{
- u32 data;
+ int data;
phy_lock_mdio_bus(phydev);
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
@@ -2660,8 +2660,7 @@ static int lan8804_config_init(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
- u16 tsu_irq_status;
- int irq_status;
+ int irq_status, tsu_irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
@@ -2896,6 +2895,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
+ .probe = kszphy_probe,
.config_init = kszphy_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
@@ -3021,8 +3021,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 9034c6a8e18f..ef62f357b76d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -974,8 +974,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
struct phy_driver *drv = phydev->drv;
+ irqreturn_t ret;
- return drv->handle_interrupt(phydev);
+ mutex_lock(&phydev->lock);
+ ret = drv->handle_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 866a33f49915..3237d4b528b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -371,7 +371,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
struct iwl_dbg_tlv_timer_node *node, *tmp;
list_for_each_entry_safe(node, tmp, timer_list, list) {
- del_timer(&node->timer);
+ del_timer_sync(&node->timer);
list_del(&node->list);
kfree(node);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index afdf48550588..2f746eb64507 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2202,11 +2202,14 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
if (!data->use_chanctx) {
confbw = data->bw;
} else {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference(vif->chanctx_conf);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (!WARN_ON(!chanctx_conf))
confbw = chanctx_conf->def.width;
+ rcu_read_unlock();
}
WARN(bw > hwsim_get_chanwidth(confbw),
@@ -2475,11 +2478,13 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ rcu_read_lock();
if (!ieee80211_tx_prepare_skb(hwsim->hw,
hwsim->hw_scan_vif,
probe,
hwsim->tmp_chan->band,
NULL)) {
+ rcu_read_unlock();
kfree_skb(probe);
continue;
}
@@ -2487,6 +2492,7 @@ static void hw_scan_work(struct work_struct *work)
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
+ rcu_read_unlock();
local_bh_enable();
}
}
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index d384d36098c2..a62c5dfe42d6 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -817,7 +817,7 @@ err_cpkg:
err_bus:
return status;
}
-module_init(ssam_core_init);
+subsys_initcall(ssam_core_init);
static void __exit ssam_core_exit(void)
{
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index c1775db29efb..ec66fde28e75 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -100,6 +100,14 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
.driver_data = (void *)lid_device_props_l4D,
},
{
+ .ident = "Surface Pro 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
.ident = "Surface Book 1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 6b6f3e2a617a..f73ecfd4a309 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -103,7 +103,7 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
auxiliary_set_drvdata(auxdev, priv);
for (i = 0; i < intel_vsec_dev->num_resources; i++) {
- struct intel_pmt_entry *entry = &priv->entry[i];
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
if (ret < 0)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c568fae56db2..e6cb4a14cdd4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -309,6 +309,20 @@ struct ibm_init_struct {
struct ibm_struct *data;
};
+/* DMI Quirks */
+struct quirk_entry {
+ bool btusb_bug;
+ u32 s2idle_bug_mmio;
+};
+
+static struct quirk_entry quirk_btusb_bug = {
+ .btusb_bug = true,
+};
+
+static struct quirk_entry quirk_s2idle_bug = {
+ .s2idle_bug_mmio = 0xfed80380,
+};
+
static struct {
u32 bluetooth:1;
u32 hotkey:1;
@@ -338,6 +352,7 @@ static struct {
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
u32 kbd_lang:1;
+ struct quirk_entry *quirks;
} tp_features;
static struct {
@@ -4359,9 +4374,10 @@ static void bluetooth_exit(void)
bluetooth_shutdown();
}
-static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+static const struct dmi_system_id fwbug_list[] __initconst = {
{
.ident = "ThinkPad E485",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20KU"),
@@ -4369,6 +4385,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad E585",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20KV"),
@@ -4376,6 +4393,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A285 - 20MW",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MW"),
@@ -4383,6 +4401,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A285 - 20MX",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MX"),
@@ -4390,6 +4409,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A485 - 20MU",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MU"),
@@ -4397,14 +4417,125 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
},
{
.ident = "ThinkPad A485 - 20MV",
+ .driver_data = &quirk_btusb_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MV"),
},
},
+ {
+ .ident = "L14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+ }
+ },
+ {
+ .ident = "T14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+ }
+ },
+ {
+ .ident = "X13 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+ }
+ },
+ {
+ .ident = "T14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+ }
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+ }
+ },
+ {
+ .ident = "P14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+ }
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+ }
+ },
{}
};
+#ifdef CONFIG_SUSPEND
+/*
+ * Lenovo laptops from a variety of generations run an SMI handler during the D3->D0
+ * transition that occurs specifically when exiting suspend-to-idle, which can cause
+ * large delays during resume when the IOMMU translation layer is enabled (the default
+ * behavior) for NVMe devices.
+ *
+ * To avoid this firmware problem, skip the SMI handler on these machines before the
+ * D0 transition occurs.
+ */
+static void thinkpad_acpi_amd_s2idle_restore(void)
+{
+ struct resource *res;
+ void __iomem *addr;
+ u8 val;
+
+ res = request_mem_region_muxed(tp_features.quirks->s2idle_bug_mmio, 1,
+ "thinkpad_acpi_pm80");
+ if (!res)
+ return;
+
+ addr = ioremap(tp_features.quirks->s2idle_bug_mmio, 1);
+ if (!addr)
+ goto cleanup_resource;
+
+ val = ioread8(addr);
+ iowrite8(val & ~BIT(0), addr);
+
+ iounmap(addr);
+cleanup_resource:
+ release_resource(res);
+}
+
+static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
+ .restore = thinkpad_acpi_amd_s2idle_restore,
+};
+#endif
+
static const struct pci_device_id fwbug_cards_ids[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
@@ -4419,7 +4550,8 @@ static int __init have_bt_fwbug(void)
* Some AMD based ThinkPads have a firmware bug where calling
* "GBDC" causes bluetooth on Intel wireless cards to be blocked
*/
- if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
+ if (tp_features.quirks && tp_features.quirks->btusb_bug &&
+ pci_dev_present(fwbug_cards_ids)) {
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
FW_BUG "disable bluetooth subdriver for Intel cards\n");
return 1;
@@ -8748,24 +8880,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
fan_status_access_mode = TPACPI_FAN_RD_TPEC;
if (quirks & TPACPI_FAN_Q1)
fan_quirk1_setup();
- if (quirks & TPACPI_FAN_2FAN) {
- tp_features.second_fan = 1;
- pr_info("secondary fan support enabled\n");
- }
- if (quirks & TPACPI_FAN_2CTL) {
- tp_features.second_fan = 1;
- tp_features.second_fan_ctl = 1;
- pr_info("secondary fan control enabled\n");
- }
/* Try and probe the 2nd fan */
+ tp_features.second_fan = 1; /* needed for get_speed to work */
res = fan2_get_speed(&speed);
if (res >= 0) {
/* It responded - so let's assume it's there */
tp_features.second_fan = 1;
tp_features.second_fan_ctl = 1;
pr_info("secondary fan control detected & enabled\n");
+ } else {
+ /* Fan not auto-detected */
+ tp_features.second_fan = 0;
+ if (quirks & TPACPI_FAN_2FAN) {
+ tp_features.second_fan = 1;
+ pr_info("secondary fan support enabled\n");
+ }
+ if (quirks & TPACPI_FAN_2CTL) {
+ tp_features.second_fan = 1;
+ tp_features.second_fan_ctl = 1;
+ pr_info("secondary fan control enabled\n");
+ }
}
-
} else {
pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
return -ENODEV;
@@ -11455,6 +11590,10 @@ static void thinkpad_acpi_module_exit(void)
tpacpi_lifecycle = TPACPI_LIFE_EXITING;
+#ifdef CONFIG_SUSPEND
+ if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio)
+ acpi_unregister_lps0_dev(&thinkpad_acpi_s2idle_dev_ops);
+#endif
if (tpacpi_hwmon)
hwmon_device_unregister(tpacpi_hwmon);
if (tp_features.sensors_pdrv_registered)
@@ -11496,6 +11635,7 @@ static void thinkpad_acpi_module_exit(void)
static int __init thinkpad_acpi_module_init(void)
{
+ const struct dmi_system_id *dmi_id;
int ret, i;
tpacpi_lifecycle = TPACPI_LIFE_INIT;
@@ -11535,6 +11675,10 @@ static int __init thinkpad_acpi_module_init(void)
return -ENODEV;
}
+ dmi_id = dmi_first_match(fwbug_list);
+ if (dmi_id)
+ tp_features.quirks = dmi_id->driver_data;
+
/* Device initialization */
tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
NULL, 0);
@@ -11623,6 +11767,13 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.input_device_registered = 1;
}
+#ifdef CONFIG_SUSPEND
+ if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio) {
+ if (!acpi_register_lps0_dev(&thinkpad_acpi_s2idle_dev_ops))
+ pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+ (dmi_id && dmi_id->ident) ? dmi_id->ident : "");
+ }
+#endif
return 0;
}
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0feaa4b45317..dd45471f6780 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1557,7 +1557,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
if (!s->start) {
/* roundup() does not work on 32-bit systems */
- s->start = DIV_ROUND_UP_ULL(start_ns, s->period);
+ s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
s->start = ktime_add(s->start, s->phase);
}
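
The ptp_ocp change switches to DIV64_U64_ROUND_UP(), which divides a 64-bit value by a 64-bit period and rounds up, avoiding the 32-bit-divisor limitation noted in the comment. The round-up itself is just (a + b - 1) / b on 64-bit operands; a small plain-C sketch of that arithmetic (not the kernel macro, and the sample values are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Round-up division on 64-bit operands: (a + b - 1) / b.
     * Assumes a + b - 1 does not overflow 64 bits. */
    static uint64_t div64_u64_round_up(uint64_t a, uint64_t b)
    {
        return (a + b - 1) / b;
    }

    int main(void)
    {
        uint64_t start_ns = 1652000000123456789ULL;  /* example timestamp */
        uint64_t period   = 500000000ULL;            /* 0.5 s period */
        uint64_t periods  = div64_u64_round_up(start_ns, period);

        /* First whole-period boundary at or after start_ns. */
        printf("%llu periods, boundary at %llu ns\n",
               (unsigned long long)periods,
               (unsigned long long)(periods * period));
        return 0;
    }
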
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8e87a31e329d..ba6d78789660 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1422,6 +1422,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
if (!cqr->lpm)
cqr->lpm = dasd_path_get_opm(device);
}
+ /*
+ * remember the amount of formatted tracks to prevent double format on
+ * ESE devices
+ */
+ if (cqr->block)
+ cqr->trkcount = atomic_read(&cqr->block->trkcount);
+
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm);
@@ -1639,6 +1646,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
unsigned long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
+ struct request *req;
u8 *sense = NULL;
int expires;
@@ -1739,7 +1747,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
if (dasd_ese_needs_format(cqr->block, irb)) {
- if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
+ req = dasd_get_callback_data(cqr);
+ if (!req) {
+ cqr->status = DASD_CQR_ERROR;
+ return;
+ }
+ if (rq_data_dir(req) == READ) {
device->discipline->ese_read(cqr, irb);
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
@@ -2765,8 +2778,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
* complete a request partially.
*/
if (proc_bytes) {
- blk_update_request(req, BLK_STS_OK,
- blk_rq_bytes(req) - proc_bytes);
+ blk_update_request(req, BLK_STS_OK, proc_bytes);
blk_mq_requeue_request(req, true);
} else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8410a25a65c1..836838f7d686 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1480,7 +1480,7 @@ static int dasd_eckd_pe_handler(struct dasd_device *device,
{
struct pe_handler_work_data *data;
- data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+ data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
if (!data) {
if (mutex_trylock(&dasd_pe_handler_mutex)) {
data = pe_handler_worker;
@@ -1488,9 +1488,6 @@ static int dasd_eckd_pe_handler(struct dasd_device *device,
} else {
return -ENOMEM;
}
- } else {
- memset(data, 0, sizeof(*data));
- data->isglobal = 0;
}
INIT_WORK(&data->worker, do_pe_handler_work);
dasd_get_device(device);
@@ -3083,13 +3080,24 @@ static int dasd_eckd_format_device(struct dasd_device *base,
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
- struct dasd_block *block)
+ struct dasd_ccw_req *cqr)
{
+ struct dasd_block *block = cqr->block;
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
+ if (cqr->trkcount != atomic_read(&block->trkcount)) {
+ /*
+ * The number of formatted tracks has changed after request
+ * start and we cannot tell if the current track was involved.
+ * To avoid data corruption, treat it as if the current track is
+ * involved.
+ */
+ rc = true;
+ goto out;
+ }
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
@@ -3109,6 +3117,7 @@ static void clear_format_track(struct dasd_format_entry *format,
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
+ atomic_inc(&block->trkcount);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
@@ -3145,7 +3154,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
sector_t curr_trk;
int rc;
- req = cqr->callback_data;
+ req = dasd_get_callback_data(cqr);
block = cqr->block;
base = block->base;
private = base->private;
@@ -3170,8 +3179,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
}
format->track = curr_trk;
/* test if track is already in formatting by another thread */
- if (test_and_set_format_track(format, block))
+ if (test_and_set_format_track(format, cqr)) {
+ /* this is no real error so do not count down retries */
+ cqr->retries++;
return ERR_PTR(-EEXIST);
+ }
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
@@ -3270,12 +3282,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
cqr->proc_bytes = blk_count * blksize;
return 0;
}
- if (dst && !skip_block) {
- dst += off;
+ if (dst && !skip_block)
memset(dst, 0, blksize);
- } else {
+ else
skip_block--;
- }
+ dst += blksize;
blk_count++;
}
}
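
The dasd changes above snapshot block->trkcount into the request when it is started and compare the value again under format_lock; if the counter moved, another track was formatted in between and the request conservatively treats its own track as affected. A minimal userspace sketch of that generation-counter check, assuming a single lock protects the counter (in the kernel it is an atomic_t; names here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t format_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int trkcount;          /* bumped whenever a track finishes formatting */

    struct request {
        unsigned int trkcount_snapshot;    /* taken when the request was started */
    };

    static void start_request(struct request *req)
    {
        pthread_mutex_lock(&format_lock);
        req->trkcount_snapshot = trkcount; /* remember the generation at start time */
        pthread_mutex_unlock(&format_lock);
    }

    static void track_format_done(void)
    {
        pthread_mutex_lock(&format_lock);
        trkcount++;                        /* generation changes: formatting happened */
        pthread_mutex_unlock(&format_lock);
    }

    /* Returns true if formatting may have raced with this request, in which
     * case the caller must assume its own track was involved. */
    static bool format_raced(const struct request *req)
    {
        bool raced;

        pthread_mutex_lock(&format_lock);
        raced = (req->trkcount_snapshot != trkcount);
        pthread_mutex_unlock(&format_lock);
        return raced;
    }

    int main(void)
    {
        struct request req;

        start_request(&req);
        track_format_done();               /* simulate a concurrent format */
        printf("raced: %s\n", format_raced(&req) ? "yes" : "no");
        return 0;
    }
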
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3b7af00a7825..83b918b84b4a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -187,6 +187,7 @@ struct dasd_ccw_req {
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
unsigned int proc_bytes; /* bytes for partial completion */
+ unsigned int trkcount; /* count formatted tracks */
};
/*
@@ -610,6 +611,7 @@ struct dasd_block {
struct list_head format_list;
spinlock_t format_lock;
+ atomic_t trkcount;
};
struct dasd_attention_data {
@@ -756,6 +758,18 @@ dasd_check_blocksize(int bsize)
return 0;
}
+/*
+ * return the callback data of the original request in case there are
+ * ERP requests built on top of it
+ */
+static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
+{
+ while (cqr->refers)
+ cqr = cqr->refers;
+
+ return cqr->callback_data;
+}
+
/* externals in dasd.c */
#define DASD_PROFILE_OFF 0
#define DASD_PROFILE_ON 1
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 88abfb5e8045..8ac213a55141 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -626,8 +626,6 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
ctcm_clear_busy_do(dev);
}
- kfree(mpcginfo);
-
return;
}
@@ -1192,10 +1190,10 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
/* mpcginfo only used for non-data transfers */
- kfree(mpcginfo);
if (do_debug_data)
ctcmpc_dump_skb(pskb, -8);
}
+ kfree(mpcginfo);
}
done:
@@ -1977,7 +1975,6 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
}
break;
}
- kfree(mpcginfo);
CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
@@ -2038,7 +2035,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
mpc_validate_xid(mpcginfo);
break;
}
- kfree(mpcginfo);
return;
}
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index ded1930a00b2..e3813a7aa5e6 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -39,11 +39,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
struct ctcm_priv *priv = dev_get_drvdata(dev);
int rc;
- ndev = priv->channel[CTCM_READ]->netdev;
- if (!(priv && priv->channel[CTCM_READ] && ndev)) {
+ if (!(priv && priv->channel[CTCM_READ] &&
+ priv->channel[CTCM_READ]->netdev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
+ ndev = priv->channel[CTCM_READ]->netdev;
rc = kstrtouint(buf, 0, &bs1);
if (rc)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index bab9b34926c6..84c8981317b4 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1736,10 +1736,11 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
lcs_schedule_recovery(card);
break;
case LCS_CMD_STOPLAN:
- pr_warn("Stoplan for %s initiated by LGW\n",
- card->dev->name);
- if (card->dev)
+ if (card->dev) {
+ pr_warn("Stoplan for %s initiated by LGW\n",
+ card->dev->name);
netif_carrier_off(card->dev);
+ }
break;
default:
LCS_DBF_TEXT(5, trace, "noLGWcmd");
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index a6bb0e438216..97eb0dee411c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1434,7 +1434,10 @@ fb_release(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
- struct fb_info * const info = file->private_data;
+ struct fb_info * const info = file_fb_info(file);
+
+ if (!info)
+ return -ENODEV;
lock_fb_info(info);
if (info->fbops->fb_release)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index ed8e288cc369..31c3f592e587 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3658,6 +3658,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (sectorsize < PAGE_SIZE) {
struct btrfs_subpage_info *subpage_info;
+ /*
+ * V1 space cache has some hardcoded PAGE_SIZE usage, and is
+ * going to be deprecated.
+ *
+ * Force to use v2 cache for subpage case.
+ */
+ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
+ btrfs_set_and_info(fs_info, FREE_SPACE_TREE,
+ "forcing free space tree for sector size %u with page size %lu",
+ sectorsize, PAGE_SIZE);
+
btrfs_warn(fs_info,
"read-write for sector size %u with page size %lu is experimental",
sectorsize, PAGE_SIZE);
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 17389a42a3ab..ba78ca5aabbb 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -922,6 +922,9 @@ static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj,
case BTRFS_EXCLOP_BALANCE:
str = "balance\n";
break;
+ case BTRFS_EXCLOP_BALANCE_PAUSED:
+ str = "balance paused\n";
+ break;
case BTRFS_EXCLOP_DEV_ADD:
str = "device add\n";
break;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 11399c8eed87..e65633686378 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3721,11 +3721,29 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
key.offset = first_offset;
key.type = BTRFS_DIR_LOG_INDEX_KEY;
ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
- if (ret)
+ /*
+ * -EEXIST is fine and can happen sporadically when we are logging a
+ * directory and have concurrent insertions in the subvolume's tree for
+ * items from other inodes and that result in pushing off some dir items
+ * from one leaf to another in order to accommodate the new items.
+ * This results in logging the same dir index range key.
+ */
+ if (ret && ret != -EEXIST)
return ret;
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_dir_log_item);
+ if (ret == -EEXIST) {
+ const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item);
+
+ /*
+ * btrfs_del_dir_entries_in_log() might have been called during
+ * an unlink between the initial insertion of this key and the
+ * current update, or we might be logging a single entry deletion
+ * during a rename, so set the new last_offset to the max value.
+ */
+ last_offset = max(last_offset, curr_end);
+ }
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_release_path(path);
@@ -3849,13 +3867,6 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
ret = insert_dir_log_key(trans, log, dst_path,
ino, *last_old_dentry_offset + 1,
key.offset - 1);
- /*
- * -EEXIST should never happen because when we
- * log a directory in full mode (LOG_INODE_ALL)
- * we drop all BTRFS_DIR_LOG_INDEX_KEY keys from
- * the log tree.
- */
- ASSERT(ret != -EEXIST);
if (ret < 0)
return ret;
}
@@ -7031,12 +7042,12 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
/*
* Other concurrent task might be logging the old directory,
* as it can be triggered when logging other inode that had or
- * still has a dentry in the old directory. So take the old
- * directory's log_mutex to prevent getting an -EEXIST when
- * logging a key to record the deletion, or having that other
- * task logging the old directory get an -EEXIST if it attempts
- * to log the same key after we just did it. In both cases that
- * would result in falling back to a transaction commit.
+ * still has a dentry in the old directory. We lock the old
+ * directory's log_mutex to ensure the deletion of the old
+ * name is persisted, because during directory logging we
+ * delete all BTRFS_DIR_LOG_INDEX_KEY keys and the deletion of
+ * the old name's dir index item is in the delayed items, so
+ * it could be missed by an in progress directory logging.
*/
mutex_lock(&old_dir->log_mutex);
ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir),
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 1b1b310c3c51..d31b0eda210f 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1835,6 +1835,12 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
goto out_unlock;
}
+ /* No space left */
+ if (block_group->alloc_offset == block_group->zone_capacity) {
+ ret = false;
+ goto out_unlock;
+ }
+
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
physical = map->stripes[i].physical;
@@ -1842,35 +1848,23 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
if (device->zone_info->max_active_zones == 0)
continue;
- /* No space left */
- if (block_group->alloc_offset == block_group->zone_capacity) {
- ret = false;
- goto out_unlock;
- }
-
if (!btrfs_dev_set_active_zone(device, physical)) {
/* Cannot activate the zone */
ret = false;
goto out_unlock;
}
-
- /* Successfully activated all the zones */
- if (i == map->num_stripes - 1)
- block_group->zone_is_active = 1;
-
-
}
+
+ /* Successfully activated all the zones */
+ block_group->zone_is_active = 1;
spin_unlock(&block_group->lock);
- if (block_group->zone_is_active) {
- /* For the active block group list */
- btrfs_get_block_group(block_group);
+ /* For the active block group list */
+ btrfs_get_block_group(block_group);
- spin_lock(&fs_info->zone_active_bgs_lock);
- list_add_tail(&block_group->active_bg_list,
- &fs_info->zone_active_bgs);
- spin_unlock(&fs_info->zone_active_bgs_lock);
- }
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
return true;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 591fe9cf1659..1fae0196292a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1712,6 +1712,10 @@ static int writeback_single_inode(struct inode *inode,
*/
if (!(inode->i_state & I_DIRTY_ALL))
inode_cgwb_move_to_attached(inode, wb);
+ else if (!(inode->i_state & I_SYNC_QUEUED) &&
+ (inode->i_state & I_DIRTY))
+ redirty_tail_locked(inode, wb);
+
spin_unlock(&wb->list_lock);
inode_sync_complete(inode);
out:
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e01f595f5b7d..91de361ea9ab 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6947,7 +6947,12 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_req_prep_async(struct io_kiocb *req)
{
- if (!io_op_defs[req->opcode].needs_async_setup)
+ const struct io_op_def *def = &io_op_defs[req->opcode];
+
+ /* assign early for deferred execution for non-fixed file */
+ if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
+ req->file = io_file_get_normal(req, req->fd);
+ if (!def->needs_async_setup)
return 0;
if (WARN_ON_ONCE(req_has_async_data(req)))
return -EFAULT;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 16106f805ffa..a79f66432bd3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -363,6 +363,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
kunmap_atomic(start);
}
+static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
+{
+ if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
+ fattr->pre_change_attr = version;
+ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
+ }
+}
+
static void nfs4_test_and_free_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
const struct cred *cred)
@@ -6553,7 +6561,9 @@ static void nfs4_delegreturn_release(void *calldata)
pnfs_roc_release(&data->lr.arg, &data->lr.res,
data->res.lr_ret);
if (inode) {
- nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+ nfs4_fattr_set_prechange(&data->fattr,
+ inode_peek_iversion_raw(inode));
+ nfs_refresh_inode(inode, &data->fattr);
nfs_iput_and_deactive(inode);
}
kfree(calldata);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9b32b76a9c30..a792e21c5309 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1657,6 +1657,19 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
else
mnt = path.mnt;
+ /*
+ * FAN_RENAME is not allowed on non-dir (for now).
+ * We shouldn't have allowed setting any dirent events in mask of
+ * non-dir, but because we always allowed it, error only if group
+ * was initialized with the new flag FAN_REPORT_TARGET_FID.
+ */
+ ret = -ENOTDIR;
+ if (inode && !S_ISDIR(inode->i_mode) &&
+ ((mask & FAN_RENAME) ||
+ ((mask & FANOTIFY_DIRENT_EVENTS) &&
+ FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID))))
+ goto path_put_and_out;
+
/* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
if (mnt || !S_ISDIR(inode->i_mode)) {
mask &= ~FAN_EVENT_ON_CHILD;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 0ed4861b038f..b3d5f97f16cd 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -75,11 +75,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
if (fileident) {
if (adinicb || (offset + lfi < 0)) {
- memcpy(udf_get_fi_ident(sfi), fileident, lfi);
+ memcpy(sfi->impUse + liu, fileident, lfi);
} else if (offset >= 0) {
memcpy(fibh->ebh->b_data + offset, fileident, lfi);
} else {
- memcpy(udf_get_fi_ident(sfi), fileident, -offset);
+ memcpy(sfi->impUse + liu, fileident, -offset);
memcpy(fibh->ebh->b_data, fileident - offset,
lfi + offset);
}
@@ -88,11 +88,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
offset += lfi;
if (adinicb || (offset + padlen < 0)) {
- memset(udf_get_fi_ident(sfi) + lfi, 0x00, padlen);
+ memset(sfi->impUse + liu + lfi, 0x00, padlen);
} else if (offset >= 0) {
memset(fibh->ebh->b_data + offset, 0x00, padlen);
} else {
- memset(udf_get_fi_ident(sfi) + lfi, 0x00, -offset);
+ memset(sfi->impUse + liu + lfi, 0x00, -offset);
memset(fibh->ebh->b_data, 0x00, padlen + offset);
}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 278cc81cc1e7..00450fd86bb4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -269,6 +269,7 @@ struct folio_iter {
size_t offset;
size_t length;
/* private: for use by the iterator */
+ struct folio *_next;
size_t _seg_count;
int _i;
};
@@ -283,6 +284,7 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
fi->_seg_count = bvec->bv_len;
fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
fi->_i = i;
}
@@ -290,9 +292,10 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
fi->_seg_count -= fi->length;
if (fi->_seg_count) {
- fi->folio = folio_next(fi->folio);
+ fi->folio = fi->_next;
fi->offset = 0;
fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
} else if (fi->_i + 1 < bio->bi_vcnt) {
bio_first_folio(fi, bio, fi->_i + 1);
} else {
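
The folio_iter change caches the next folio in fi->_next while the current one is still known to be valid, because the caller may free the current folio before the iterator advances. The same idea underlies list_for_each_entry_safe(): fetch the successor before handing out the current element. A small sketch with a plain linked list (not the bio/folio API):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int value;
        struct node *next;
    };

    int main(void)
    {
        /* Build a three-element list 1 -> 2 -> 3. */
        struct node *head = NULL;
        for (int v = 3; v >= 1; v--) {
            struct node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->value = v;
            n->next = head;
            head = n;
        }

        /* Safe traversal: remember 'next' before the body runs, so the body
         * may free the current node without breaking the iteration. */
        for (struct node *cur = head, *next; cur; cur = next) {
            next = cur->next;        /* precompute, like fi->_next above */
            printf("freeing %d\n", cur->value);
            free(cur);
        }
        return 0;
    }
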
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 7aa5c54901a9..9f07061418db 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -163,7 +163,6 @@ struct request {
struct rb_node rb_node; /* sort/lookup */
struct bio_vec special_vec;
void *completion_data;
- int error_count; /* for legacy drivers, don't use */
};
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2c6b9e416225..7c2d77d75a88 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -169,7 +169,7 @@ enum {
#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
-/* Finds the next feature with the highest number of the range of start till 0.
+/* Finds the next feature with the highest number in the range start-1 down to 0.
*/
static inline int find_next_netdev_feature(u64 feature, unsigned long start)
{
@@ -188,7 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
for ((bit) = find_next_netdev_feature((mask_addr), \
NETDEV_FEATURE_COUNT); \
(bit) >= 0; \
- (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+ (bit) = find_next_netdev_feature((mask_addr), (bit)))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
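
The for_each_netdev_feature change moves the "minus one" into find_next_netdev_feature() itself: the helper now scans the range start-1 down to 0, so the loop update passes the current bit unchanged. A small standalone sketch of iterating set bits from the highest down with the same exclusive-upper-bound convention (this is a generic bit loop, not the netdev helper):

    #include <stdio.h>
    #include <stdint.h>

    /* Return the highest set bit strictly below 'start', or -1 if none.
     * 'start' is an exclusive upper bound, mirroring the start-1 semantics. */
    static int find_next_bit_below(uint64_t mask, int start)
    {
        for (int bit = start - 1; bit >= 0; bit--)
            if (mask & (1ULL << bit))
                return bit;
        return -1;
    }

    #define for_each_set_bit_down(bit, mask, nbits)              \
        for ((bit) = find_next_bit_below((mask), (nbits));       \
             (bit) >= 0;                                         \
             (bit) = find_next_bit_below((mask), (bit)))

    int main(void)
    {
        uint64_t features = (1ULL << 40) | (1ULL << 7) | (1ULL << 0);
        int bit;

        for_each_set_bit_down(bit, features, 64)
            printf("feature bit %d set\n", bit);   /* prints 40, 7, 0 */
        return 0;
    }
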
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 267b7aeaf1a6..db5149567305 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -160,6 +160,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
+#define RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL (1UL << 12)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8abd08245326..62d7b81b1cb7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -36,6 +36,9 @@
/* HCI priority */
#define HCI_PRIO_MAX 7
+/* HCI maximum id value */
+#define HCI_MAX_ID 10000
+
/* HCI Core structures */
struct inquiry_data {
bdaddr_t bdaddr;
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 748cf87a4d7e..3e02709a1df6 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -14,6 +14,7 @@ struct tcf_pedit {
struct tc_action common;
unsigned char tcfp_nkeys;
unsigned char tcfp_flags;
+ u32 tcfp_off_max_hint;
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
};
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 7b2bf9b1fe69..de26c992f821 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -681,7 +681,6 @@ struct ocelot_vcap_id {
struct ocelot_vcap_filter {
struct list_head list;
- struct list_head trap_list;
enum ocelot_vcap_filter_type type;
int block_id;
@@ -695,6 +694,7 @@ struct ocelot_vcap_filter {
struct ocelot_vcap_stats stats;
/* For VCAP IS1 and IS2 */
bool take_ts;
+ bool is_trap;
unsigned long ingress_port_mask;
/* For VCAP ES0 */
struct ocelot_vcap_port ingress_port;
diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
index 283c5a7b3f2c..db6c8588c1d0 100644
--- a/include/uapi/linux/rfkill.h
+++ b/include/uapi/linux/rfkill.h
@@ -184,7 +184,7 @@ struct rfkill_event_ext {
#define RFKILL_IOC_NOINPUT 1
#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
#define RFKILL_IOC_MAX_SIZE 2
-#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_EXT_SIZE, __u32)
+#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32)
/* and that's all userspace gets */
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 80d76b75bccd..7aa2eb766205 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -73,12 +73,12 @@
* Virtio Transitional IDs
*/
-#define VIRTIO_TRANS_ID_NET 1000 /* transitional virtio net */
-#define VIRTIO_TRANS_ID_BLOCK 1001 /* transitional virtio block */
-#define VIRTIO_TRANS_ID_BALLOON 1002 /* transitional virtio balloon */
-#define VIRTIO_TRANS_ID_CONSOLE 1003 /* transitional virtio console */
-#define VIRTIO_TRANS_ID_SCSI 1004 /* transitional virtio SCSI */
-#define VIRTIO_TRANS_ID_RNG 1005 /* transitional virtio rng */
-#define VIRTIO_TRANS_ID_9P 1009 /* transitional virtio 9p console */
+#define VIRTIO_TRANS_ID_NET 0x1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK 0x1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON 0x1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE 0x1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI 0x1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG 0x1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P 0x1009 /* transitional virtio 9p console */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 9390bfd9f1cd..71a418858a5e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3390,8 +3390,11 @@ static struct notifier_block cpuset_track_online_nodes_nb = {
*/
void __init cpuset_init_smp(void)
{
- cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
- top_cpuset.mems_allowed = node_states[N_MEMORY];
+ /*
+ * cpus_allowed/mems_allowed are set to v2 values in the initial
+ * cpuset_bind() call and will be reset to v1 values in a later
+ * cpuset_bind() call when a v1 cpuset hierarchy is mounted.
+ */
top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
diff --git a/kernel/fork.c b/kernel/fork.c
index 9796897560ab..35a3beff140b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -792,6 +792,7 @@ void __mmdrop(struct mm_struct *mm)
mmu_notifier_subscriptions_destroy(mm);
check_mm(mm);
put_user_ns(mm->user_ns);
+ mm_pasid_drop(mm);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -1190,7 +1191,6 @@ static inline void __mmput(struct mm_struct *mm)
}
if (mm->binfmt)
module_put(mm->binfmt->module);
- mm_pasid_drop(mm);
mmdrop(mm);
}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 99cbdf55a8bd..f09c60393e55 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -29,12 +29,14 @@ extern struct irqaction chained_action;
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
* IRQTF_FORCED_THREAD - irq action is force threaded
+ * IRQTF_READY - signals that irq thread is ready
*/
enum {
IRQTF_RUNTHREAD,
IRQTF_WARNED,
IRQTF_AFFINITY,
IRQTF_FORCED_THREAD,
+ IRQTF_READY,
};
/*
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 939d21cd55c3..0099b87dd853 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -407,6 +407,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
mutex_init(&desc->request_mutex);
init_rcu_head(&desc->rcu);
+ init_waitqueue_head(&desc->wait_for_threads);
desc_set_defaults(irq, desc, node, affinity, owner);
irqd_set(&desc->irq_data, flags);
@@ -575,6 +576,7 @@ int __init early_irq_init(void)
raw_spin_lock_init(&desc[i].lock);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
mutex_init(&desc[i].request_mutex);
+ init_waitqueue_head(&desc[i].wait_for_threads);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
}
return arch_early_irq_init();
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c03f71d5ec10..e3e245a4fd70 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1249,6 +1249,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
}
/*
+ * Internal function to notify that an interrupt thread is ready.
+ */
+static void irq_thread_set_ready(struct irq_desc *desc,
+ struct irqaction *action)
+{
+ set_bit(IRQTF_READY, &action->thread_flags);
+ wake_up(&desc->wait_for_threads);
+}
+
+/*
+ * Internal function to wake up an interrupt thread and wait until it is
+ * ready.
+ */
+static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
+ struct irqaction *action)
+{
+ if (!action || !action->thread)
+ return;
+
+ wake_up_process(action->thread);
+ wait_event(desc->wait_for_threads,
+ test_bit(IRQTF_READY, &action->thread_flags));
+}
+
+/*
* Interrupt handler thread
*/
static int irq_thread(void *data)
@@ -1259,6 +1284,8 @@ static int irq_thread(void *data)
irqreturn_t (*handler_fn)(struct irq_desc *desc,
struct irqaction *action);
+ irq_thread_set_ready(desc, action);
+
sched_set_fifo(current);
if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
@@ -1683,8 +1710,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
if (!shared) {
- init_waitqueue_head(&desc->wait_for_threads);
-
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc,
@@ -1780,14 +1805,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irq_setup_timings(desc, new);
- /*
- * Strictly no need to wake it up, but hung_task complains
- * when no hard interrupt wakes the thread up.
- */
- if (new->thread)
- wake_up_process(new->thread);
- if (new->secondary)
- wake_up_process(new->secondary->thread);
+ wake_up_and_wait_for_irq_thread_ready(desc, new);
+ wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
register_irq_proc(irq, desc);
new->dir = NULL;
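The wake-and-wait handshake added here can be sketched outside the kernel. The following is a rough userspace analogue in C (pthreads, compile with -pthread; all names are invented), not kernel code: the worker flags readiness and wakes the registrant, which blocks until the flag is set, mirroring irq_thread_set_ready() and wake_up_and_wait_for_irq_thread_ready().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready_cond = PTHREAD_COND_INITIALIZER;
static bool thread_ready;

static void *irq_worker(void *arg)
{
	(void)arg;

	/* analogue of irq_thread_set_ready(): flag readiness, wake the waiter */
	pthread_mutex_lock(&lock);
	thread_ready = true;
	pthread_cond_signal(&ready_cond);
	pthread_mutex_unlock(&lock);

	/* ... from here on the thread would service interrupts ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_worker, NULL);

	/* analogue of wake_up_and_wait_for_irq_thread_ready(): block until ready */
	pthread_mutex_lock(&lock);
	while (!thread_ready)
		pthread_cond_wait(&ready_cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("worker is ready; registration can safely continue\n");
	pthread_join(t, NULL);
	return 0;
}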
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index dcdcb85121e4..3b1398fbddaf 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -482,7 +482,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
-u64 ktime_get_mono_fast_ns(void)
+u64 notrace ktime_get_mono_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_mono);
}
@@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
* Contrary to ktime_get_mono_fast_ns() this is always correct because the
* conversion factor is not affected by NTP/PTP correction.
*/
-u64 ktime_get_raw_fast_ns(void)
+u64 notrace ktime_get_raw_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_raw);
}
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 06811d866775..53f6b9c6e936 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -12,41 +12,41 @@
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
*/
#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
#define NET_DIM_RX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_RX_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
+ {.usec = 2, .pkts = 256,}, \
+ {.usec = 8, .pkts = 128,}, \
+ {.usec = 16, .pkts = 64,}, \
+ {.usec = 32, .pkts = 64,}, \
+ {.usec = 64, .pkts = 64,} \
}
#define NET_DIM_TX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_TX_CQE_PROFILES { \
- {5, 128}, \
- {8, 64}, \
- {16, 32}, \
- {32, 32}, \
- {64, 32} \
+ {.usec = 5, .pkts = 128,}, \
+ {.usec = 8, .pkts = 64,}, \
+ {.usec = 16, .pkts = 32,}, \
+ {.usec = 32, .pkts = 32,}, \
+ {.usec = 64, .pkts = 32,} \
}
static const struct dim_cq_moder
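The hunk above is purely a conversion to designated initializers; a toy comparison (struct trimmed to the two fields used here, values made up) shows why that guards against future field reordering:

#include <stdio.h>

struct dim_cq_moder {              /* trimmed stand-in for the real struct */
	unsigned short usec;
	unsigned short pkts;
};

int main(void)
{
	/* positional init silently breaks if fields are ever reordered;
	 * designated init keeps each value tied to its field name */
	struct dim_cq_moder positional = { 8, 128 };
	struct dim_cq_moder designated = { .usec = 8, .pkts = 128 };

	printf("positional: %u usec / %u pkts\n", positional.usec, positional.pkts);
	printf("designated: %u usec / %u pkts\n", designated.usec, designated.pkts);
	return 0;
}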
diff --git a/mm/readahead.c b/mm/readahead.c
index 8e3775829513..4a60cdb64262 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -474,7 +474,8 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
if (!folio)
return -ENOMEM;
- if (mark - index < (1UL << order))
+ mark = round_up(mark, 1UL << order);
+ if (index == mark)
folio_set_readahead(folio);
err = filemap_add_folio(ractl->mapping, folio, index, gfp);
if (err)
@@ -555,8 +556,9 @@ static void ondemand_readahead(struct readahead_control *ractl,
struct file_ra_state *ra = ractl->ra;
unsigned long max_pages = ra->ra_pages;
unsigned long add_pages;
- unsigned long index = readahead_index(ractl);
- pgoff_t prev_index;
+ pgoff_t index = readahead_index(ractl);
+ pgoff_t expected, prev_index;
+ unsigned int order = folio ? folio_order(folio) : 0;
/*
* If the request exceeds the readahead window, allow the read to
@@ -575,8 +577,9 @@ static void ondemand_readahead(struct readahead_control *ractl,
* It's the expected callback index, assume sequential access.
* Ramp up sizes, and push forward the readahead window.
*/
- if ((index == (ra->start + ra->size - ra->async_size) ||
- index == (ra->start + ra->size))) {
+ expected = round_up(ra->start + ra->size - ra->async_size,
+ 1UL << order);
+ if (index == expected || index == (ra->start + ra->size)) {
ra->start += ra->size;
ra->size = get_next_ra_size(ra, max_pages);
ra->async_size = ra->size;
@@ -662,7 +665,7 @@ readit:
}
ractl->_index = ra->start;
- page_cache_ra_order(ractl, ra, folio ? folio_order(folio) : 0);
+ page_cache_ra_order(ractl, ra, order);
}
void page_cache_sync_ra(struct readahead_control *ractl,
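A small worked example of the new alignment rule (page numbers are made up; round_up() is copied from the kernel definition for powers of two) shows how exactly one folio ends up with the readahead flag:

#include <stdio.h>

/* same rounding helper the kernel uses (y must be a power of two) */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long order = 2;                 /* each folio spans 1 << 2 = 4 pages */
	unsigned long mark = 130;                /* page where readahead should fire  */
	unsigned long aligned = round_up(mark, 1UL << order);

	/* walk the folios the allocation loop would create */
	for (unsigned long index = 128; index < 140; index += 1UL << order)
		printf("folio at %3lu: readahead flag %s\n", index,
		       index == aligned ? "set" : "clear");
	return 0;
}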
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0899a729a23f..c120c7c6d25f 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -475,6 +475,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
goto free_skb;
}
+ /* GRO might have added fragments to the fragment list instead of
+ * frags[]. skb_split() does not handle the fragment list, so the skb
+ * must be linearized to avoid incorrect length information after all
+ * batman-adv fragments have been created and submitted to the
+ * hard-interface.
+ */
+ if (skb_has_frag_list(skb) && __skb_linearize(skb)) {
+ ret = -ENOMEM;
+ goto free_skb;
+ }
+
/* Create one header to be copied to all fragments */
frag_header.packet_type = BATADV_UNICAST_FRAG;
frag_header.version = BATADV_COMPAT_VERSION;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b4782a6c1025..45c2dd2e1590 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2555,10 +2555,10 @@ int hci_register_dev(struct hci_dev *hdev)
*/
switch (hdev->dev_type) {
case HCI_PRIMARY:
- id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
+ id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
break;
case HCI_AMP:
- id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
+ id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
break;
default:
return -EINVAL;
@@ -2567,7 +2567,7 @@ int hci_register_dev(struct hci_dev *hdev)
if (id < 0)
return id;
- sprintf(hdev->name, "hci%d", id);
+ snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
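To see what the switch to snprintf() buys, here is a hypothetical out-of-range id against an 8-byte name buffer (the buffer size mirrors hdev->name; the id value is invented and would be rejected by the HCI_MAX_ID cap anyway):

#include <stdio.h>

int main(void)
{
	char name[8];        /* hdev->name is 8 bytes; ids are capped below 10000 */
	int id = 99999;      /* hypothetical oversized id, for illustration only  */

	/* sprintf() would write "hci99999" plus NUL past the buffer;
	 * snprintf() truncates and reports how much space it wanted */
	int want = snprintf(name, sizeof(name), "hci%d", id);

	printf("stored \"%s\", needed %d bytes\n", name, want + 1);
	return 0;
}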
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 15f7b6f99a8f..bd16e158b366 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3889,7 +3889,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
unsigned int delta_len = 0;
struct sk_buff *tail = NULL;
struct sk_buff *nskb, *tmp;
- int err;
+ int len_diff, err;
skb_push(skb, -skb_network_offset(skb) + offset);
@@ -3929,9 +3929,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
skb_push(nskb, -skb_network_offset(nskb) + offset);
skb_release_head_state(nskb);
+ len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
__copy_skb_header(nskb, skb);
skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+ nskb->transport_header += len_diff;
skb_copy_from_linear_data_offset(skb, -tnl_hlen,
nskb->data - tnl_hlen,
offset + tnl_hlen);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ee7d4c0c955..a09ba642b5e7 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -854,7 +854,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
memcpy(msg->neighbor, dn_hiord, ETH_ALEN);
if (dn_db->router) {
- struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
+ struct dn_neigh *dn = container_of(dn_db->router, struct dn_neigh, n);
dn_dn2eth(msg->neighbor, dn->addr);
}
@@ -902,7 +902,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
int n;
struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
- struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
+ struct dn_neigh *dn = container_of(dn_db->router, struct dn_neigh, n);
struct sk_buff *skb;
size_t size;
unsigned char *ptr;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 94b306f6d551..fbd98ac853ea 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -426,7 +426,8 @@ int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
if (!dn_db->router) {
dn_db->router = neigh_clone(neigh);
} else {
- if (msg->priority > ((struct dn_neigh *)dn_db->router)->priority)
+ if (msg->priority > container_of(dn_db->router,
+ struct dn_neigh, n)->priority)
neigh_release(xchg(&dn_db->router, neigh_clone(neigh)));
}
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 7e85f2a1ae25..d1d78a463a06 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1120,7 +1120,7 @@ source_ok:
/* Ok then, we assume its directly connected and move on */
select_source:
if (neigh)
- gateway = ((struct dn_neigh *)neigh)->addr;
+ gateway = container_of(neigh, struct dn_neigh, n)->addr;
if (gateway == 0)
gateway = fld.daddr;
if (fld.saddr == 0) {
@@ -1429,7 +1429,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
/* Use the default router if there is one */
neigh = neigh_clone(dn_db->router);
if (neigh) {
- gateway = ((struct dn_neigh *)neigh)->addr;
+ gateway = container_of(neigh, struct dn_neigh, n)->addr;
goto make_route;
}
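The container_of() conversions above can be illustrated with a standalone sketch; the field layout of the toy dn_neigh below is invented precisely so that a plain cast would point at the wrong address, which is what container_of() protects against:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct neighbour { int refcnt; };

/* layout loosely modelled on struct dn_neigh; the generic part is embedded,
 * but deliberately not first (field order is made up for illustration) */
struct dn_neigh {
	unsigned short addr;
	struct neighbour n;
	unsigned char priority;
};

int main(void)
{
	struct dn_neigh dn = { .addr = 0x1234, .priority = 7 };
	struct neighbour *generic = &dn.n;

	/* a plain cast of 'generic' would land before the struct start;
	 * container_of() subtracts the exact member offset */
	struct dn_neigh *back = container_of(generic, struct dn_neigh, n);

	printf("addr=%#x priority=%u\n", back->addr, back->priority);
	return 0;
}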
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 48e5a309ca5c..ecf0395cbddd 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -503,6 +503,7 @@ out_rollback_unoffload:
switchdev_bridge_port_unoffload(brport_dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier);
+ dsa_flush_workqueue();
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5f8cad2978b3..1a43ca73f94d 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -305,6 +305,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
struct net *net = sock_net(sk);
if (sk->sk_family == AF_INET) {
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+ u32 tb_id = RT_TABLE_LOCAL;
int chk_addr_ret;
if (addr_len < sizeof(*addr))
@@ -318,7 +319,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+ tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
+ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
addr->sin_addr.s_addr,
@@ -355,6 +357,14 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
return -ENODEV;
}
}
+
+ if (!dev && sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ }
has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
scoped);
rcu_read_unlock();
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ffbe2e4f8c89..444d4a2a422d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1754,6 +1754,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
RT_CACHE_STAT_INC(in_slow_mc);
+ skb_dst_drop(skb);
skb_dst_set(skb, &rth->dst);
return 0;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b857915881e0..07a96f7c5dc3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3657,6 +3657,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
cbss->transmitted_bss->bssid);
bss_conf->bssid_indicator = cbss->max_bssid_indicator;
bss_conf->bssid_index = cbss->bssid_index;
+ } else {
+ bss_conf->nontransmitted = false;
+ memset(bss_conf->transmitter_bssid, 0,
+ sizeof(bss_conf->transmitter_bssid));
+ bss_conf->bssid_indicator = 0;
+ bss_conf->bssid_index = 0;
}
/*
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 959a36fd658b..3c08ae04ddbc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1405,8 +1405,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
goto dont_reorder;
/* not part of a BA session */
- if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
- ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1b5a9c2e1c29..0cd91f813a3b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1974,7 +1974,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
copied = len;
}
- skb_reset_transport_header(data_skb);
err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
if (msg->msg_name) {
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2f638f8b7b1e..73ee2771093d 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -487,11 +487,11 @@ struct rds_tcp_net {
/* All module specific customizations to the RDS-TCP socket should be done in
* rds_tcp_tune() and applied after socket creation.
*/
-void rds_tcp_tune(struct socket *sock)
+bool rds_tcp_tune(struct socket *sock)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
- struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+ struct rds_tcp_net *rtn;
tcp_sock_set_nodelay(sock->sk);
lock_sock(sk);
@@ -499,10 +499,15 @@ void rds_tcp_tune(struct socket *sock)
* a process which created this net namespace terminated.
*/
if (!sk->sk_net_refcnt) {
+ if (!maybe_get_net(net)) {
+ release_sock(sk);
+ return false;
+ }
sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
sock_inuse_add(net, 1);
}
+ rtn = net_generic(net, rds_tcp_netid);
if (rtn->sndbuf_size > 0) {
sk->sk_sndbuf = rtn->sndbuf_size;
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
@@ -512,6 +517,7 @@ void rds_tcp_tune(struct socket *sock)
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);
+ return true;
}
static void rds_tcp_accept_worker(struct work_struct *work)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index dc8d745d6857..f8b5930d7b34 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -49,7 +49,7 @@ struct rds_tcp_statistics {
};
/* tcp.c */
-void rds_tcp_tune(struct socket *sock);
+bool rds_tcp_tune(struct socket *sock);
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_restore_callbacks(struct socket *sock,
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 5461d77fff4f..f0c477c5d1db 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -124,7 +124,10 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
if (ret < 0)
goto out;
- rds_tcp_tune(sock);
+ if (!rds_tcp_tune(sock)) {
+ ret = -EINVAL;
+ goto out;
+ }
if (isv6) {
sin6.sin6_family = AF_INET6;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 09cadd556d1e..7edf2e69d3fe 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -133,7 +133,10 @@ int rds_tcp_accept_one(struct socket *sock)
__module_get(new_sock->ops->owner);
rds_tcp_keepalive(new_sock);
- rds_tcp_tune(new_sock);
+ if (!rds_tcp_tune(new_sock)) {
+ ret = -EINVAL;
+ goto out;
+ }
inet = inet_sk(new_sock->sk);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index e01ef7f109f4..d1221daa0952 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -149,7 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct nlattr *pattr;
struct tcf_pedit *p;
int ret = 0, err;
- int ksize;
+ int i, ksize;
u32 index;
if (!nla) {
@@ -228,6 +228,18 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
p->tcfp_nkeys = parm->nkeys;
}
memcpy(p->tcfp_keys, parm->keys, ksize);
+ p->tcfp_off_max_hint = 0;
+ for (i = 0; i < p->tcfp_nkeys; ++i) {
+ u32 cur = p->tcfp_keys[i].off;
+
+ /* The AT option can read a single byte, so we can bound the actual
+ * value with the uchar max.
+ */
+ cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
+
+ /* Each key touches 4 bytes starting from the computed offset */
+ p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
+ }
p->tcfp_flags = parm->flags;
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -308,13 +320,18 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_pedit *p = to_pedit(a);
+ u32 max_offset;
int i;
- if (skb_unclone(skb, GFP_ATOMIC))
- return p->tcf_action;
-
spin_lock(&p->tcf_lock);
+ max_offset = (skb_transport_header_was_set(skb) ?
+ skb_transport_offset(skb) :
+ skb_network_offset(skb)) +
+ p->tcfp_off_max_hint;
+ if (skb_ensure_writable(skb, min(skb->len, max_offset)))
+ goto unlock;
+
tcf_lastuse_update(&p->tcf_tm);
if (p->tcfp_nkeys > 0) {
@@ -403,6 +420,7 @@ bad:
p->tcf_qstats.overlimits++;
done:
bstats_update(&p->tcf_bstats, skb);
+unlock:
spin_unlock(&p->tcf_lock);
return p->tcf_action;
}
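The hint computed in tcf_pedit_init() can be traced with a couple of hypothetical keys (the offsets, masks and shifts below are made up for illustration):

#include <stdio.h>

struct tc_pedit_key {           /* trimmed to the fields used by the hint */
	unsigned int off;
	unsigned int offmask;
	unsigned int shift;
};

int main(void)
{
	struct tc_pedit_key keys[] = {
		{ .off = 24, .offmask = 0,    .shift = 0 },   /* fixed offset    */
		{ .off = 12, .offmask = 0xff, .shift = 2 },   /* AT-style offset */
	};
	unsigned int hint = 0;

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int cur = keys[i].off;

		/* AT can add at most one byte's worth, bounded by 0xff */
		cur += (0xff & keys[i].offmask) >> keys[i].shift;
		/* each key rewrites 4 bytes starting at the offset */
		if (cur + 4 > hint)
			hint = cur + 4;
	}
	printf("tcfp_off_max_hint = %u\n", hint);   /* 0xff >> 2 = 63, 12 + 63 + 4 = 79 */
	return 0;
}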
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 51e8eb2933ff..338b9ef806e8 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -355,12 +355,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
}
break;
}
+ if (!timeo)
+ return -EAGAIN;
if (signal_pending(current)) {
read_done = sock_intr_errno(timeo);
break;
}
- if (!timeo)
- return -EAGAIN;
}
if (!smc_rx_data_available(conn)) {
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index 61c276bddaf2..8ca1d809b78d 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -97,7 +97,7 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
* timeout, which would result in reconnections being
* done without the correct namespace:
*/
- .flags = RPC_CLNT_CREATE_NOPING |
+ .flags = RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL |
RPC_CLNT_CREATE_NO_IDLE_TIMEOUT
};
struct rpc_clnt *clnt;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index af0174d7ce5a..22c28cf43eba 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -479,6 +479,9 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
int err = rpc_ping(clnt);
+ if ((args->flags & RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL) &&
+ err == -EOPNOTSUPP)
+ err = 0;
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
@@ -1065,10 +1068,13 @@ rpc_task_get_next_xprt(struct rpc_clnt *clnt)
static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
- if (task->tk_xprt &&
- !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
- (task->tk_flags & RPC_TASK_MOVEABLE)))
- return;
+ if (task->tk_xprt) {
+ if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
+ (task->tk_flags & RPC_TASK_MOVEABLE)))
+ return;
+ xprt_release(task);
+ xprt_put(task->tk_xprt);
+ }
if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
task->tk_xprt = rpc_task_get_first_xprt(clnt);
else
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 5c91c5457197..fcdd0fca408e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1419,6 +1419,26 @@ static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
+ * xs_local_state_change - callback to handle AF_LOCAL socket state changes
+ * @sk: socket whose state has changed
+ *
+ */
+static void xs_local_state_change(struct sock *sk)
+{
+ struct rpc_xprt *xprt;
+ struct sock_xprt *transport;
+
+ if (!(xprt = xprt_from_sock(sk)))
+ return;
+ transport = container_of(xprt, struct sock_xprt, xprt);
+ if (sk->sk_shutdown & SHUTDOWN_MASK) {
+ clear_bit(XPRT_CONNECTED, &xprt->state);
+ /* Trigger the socket release */
+ xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
+ }
+}
+
+/**
* xs_tcp_state_change - callback to handle TCP socket state changes
* @sk: socket whose state has changed
*
@@ -1866,6 +1886,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_data_ready;
sk->sk_write_space = xs_udp_write_space;
+ sk->sk_state_change = xs_local_state_change;
sk->sk_error_report = xs_error_report;
xprt_clear_connected(xprt);
@@ -1950,6 +1971,9 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
int ret;
+ if (transport->file)
+ goto force_disconnect;
+
if (RPC_IS_ASYNC(task)) {
/*
* We want the AF_LOCAL connect to be resolved in the
@@ -1962,11 +1986,17 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
*/
task->tk_rpc_status = -ENOTCONN;
rpc_exit(task, -ENOTCONN);
- return;
+ goto out_wake;
}
ret = xs_local_setup_socket(transport);
if (ret && !RPC_IS_SOFTCONN(task))
msleep_interruptible(15000);
+ return;
+force_disconnect:
+ xprt_force_disconnect(xprt);
+out_wake:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, -ENOTCONN);
}
#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
@@ -2845,9 +2875,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
}
xprt_set_bound(xprt);
xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
- ret = ERR_PTR(xs_local_setup_socket(transport));
- if (ret)
- goto out_err;
break;
default:
ret = ERR_PTR(-EAFNOSUPPORT);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b12f81a2b44c..bca00521ebc1 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -1345,7 +1345,10 @@ static int tls_device_down(struct net_device *netdev)
/* Device contexts for RX and TX will be freed in on sk_destruct
* by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+ * Now release the ref taken above.
*/
+ if (refcount_dec_and_test(&ctx->refcount))
+ tls_device_free_ctx(ctx);
}
up_write(&device_offload_lock);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 945ed87d12e0..02a29052e41d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3173,6 +3173,15 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
} else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
chandef->width =
nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
+ if (chandef->chan->band == NL80211_BAND_S1GHZ) {
+ /* User input error: the requested channel width doesn't match the channel */
+ if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_CHANNEL_WIDTH],
+ "bad channel width");
+ return -EINVAL;
+ }
+ }
if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
chandef->center_freq1 =
nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
@@ -11658,18 +11667,23 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
struct cfg80211_bitrate_mask mask;
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
+ wdev_lock(wdev);
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES, &mask,
dev, true);
if (err)
- return err;
+ goto out;
- return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+ err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+out:
+ wdev_unlock(wdev);
+ return err;
}
static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 4a6d86432910..6d82bd9eaf8c 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1829,7 +1829,7 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) {
struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data;
- return s1gop->primary_ch;
+ return s1gop->oper_ch;
}
} else {
tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen);
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 626c0c34b0b6..3a53914277d3 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
if (copy_to_user(buf, &type, sizeof(type)))
return -EFAULT;
+ count += sizeof(type);
remained -= sizeof(type);
buf += sizeof(type);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4c0c593f3c0a..cf531c1efa13 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -8769,6 +8769,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC287_FIXUP_CS35L41_I2C_2] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35l41_fixup_i2c_two,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED] = {
.type = HDA_FIXUP_FUNC,
@@ -9023,12 +9025,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x103c, 0x8973, "HP EliteBook 860 G9", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x103c, 0x8974, "HP EliteBook 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8973, "HP EliteBook 860 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8974, "HP EliteBook 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
@@ -9245,6 +9247,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
diff --git a/sound/soc/atmel/mchp-pdmc.c b/sound/soc/atmel/mchp-pdmc.c
index 1a7802fbf23c..a3856c73e221 100644
--- a/sound/soc/atmel/mchp-pdmc.c
+++ b/sound/soc/atmel/mchp-pdmc.c
@@ -966,6 +966,7 @@ static int mchp_pdmc_process(struct snd_pcm_substream *substream,
static struct snd_dmaengine_pcm_config mchp_pdmc_config = {
.process = mchp_pdmc_process,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
static int mchp_pdmc_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 13009d08b09a..c7493549a9a5 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mixer_ctrl =
(struct soc_mixer_control *) kcontrol->private_value;
unsigned int reg = mixer_ctrl->reg;
- __le16 val;
+ __le16 val_new, val_old;
int ret;
/*
@@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
* Therefore we need to convert to little endian here to align with
* HW registers.
*/
- val = cpu_to_le16(ucontrol->value.integer.value[0]);
+ val_new = cpu_to_le16(ucontrol->value.integer.value[0]);
mutex_lock(&da7219->ctrl_lock);
- ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val));
+ ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old));
+ if (ret == 0 && (val_old != val_new))
+ ret = regmap_raw_write(da7219->regmap, reg,
+ &val_new, sizeof(val_new));
mutex_unlock(&da7219->ctrl_lock);
- return ret;
+ if (ret < 0)
+ return ret;
+
+ return val_old != val_new;
}
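Several of the codec fixes that follow adjust the same thing: an ALSA put handler should return 1 when the control value actually changed, 0 when it did not, and a negative errno on failure. A stripped-down userspace model of that convention (names are invented, regmap replaced by a cached int):

#include <errno.h>
#include <stdio.h>

static int cached_freq;                 /* stands in for the register value */

/* <0 on error, 0 if nothing changed, 1 if the stored value changed */
static int tonegen_freq_put(int new_freq)
{
	if (new_freq < 0)
		return -EINVAL;
	if (cached_freq == new_freq)
		return 0;               /* skip the write, no event needed */
	cached_freq = new_freq;         /* stands in for regmap_raw_write() */
	return 1;                       /* changed: caller emits a notification */
}

int main(void)
{
	printf("%d\n", tonegen_freq_put(440));   /* 1: first write changes it */
	printf("%d\n", tonegen_freq_put(440));   /* 0: same value again       */
	printf("%d\n", tonegen_freq_put(-1));    /* negative: rejected        */
	return 0;
}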
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index b45ec35cd63c..62b41ca050a2 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -413,6 +413,9 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol,
val = (val >> mc->shift) & mask;
+ if (sel < 0 || sel > mc->max)
+ return -EINVAL;
+
*select = sel;
/* Setting a volume is only valid if it is already On */
@@ -427,7 +430,7 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol,
mask << mc->shift,
sel << mc->shift);
- return 0;
+ return *select != val;
}
static const char *max98090_perf_pwr_text[] =
diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c
index 7aa1772a915f..6e0d7cf0c8c9 100644
--- a/sound/soc/codecs/rt9120.c
+++ b/sound/soc/codecs/rt9120.c
@@ -341,7 +341,6 @@ static int rt9120_get_reg_size(unsigned int reg)
{
switch (reg) {
case 0x00:
- case 0x09:
case 0x20 ... 0x27:
return 2;
case 0x30 ... 0x3D:
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index e4018ba3b19a..7878c7a58ff1 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -530,7 +530,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]);
- return 0;
+ return 1;
}
#define WM8958_MBC_SWITCH(xname, xval) {\
@@ -656,7 +656,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol,
wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]);
- return 0;
+ return 1;
}
@@ -730,7 +730,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol,
wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]);
- return 0;
+ return 1;
}
#define WM8958_HPF_SWITCH(xname, xval) {\
@@ -824,7 +824,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol,
wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]);
- return 0;
+ return 1;
}
#define WM8958_ENH_EQ_SWITCH(xname, xval) {\
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index f2157944247f..da0c27828ce6 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -322,7 +322,7 @@ void asoc_simple_shutdown(struct snd_pcm_substream *substream)
if (props->mclk_fs && !dai->clk_fixed && !snd_soc_dai_active(cpu_dai))
snd_soc_dai_set_sysclk(cpu_dai,
- 0, 0, SND_SOC_CLOCK_IN);
+ 0, 0, SND_SOC_CLOCK_OUT);
asoc_simple_clk_disable(dai);
}
diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c
index 22e181646bc3..3776b073a3db 100644
--- a/sound/soc/meson/aiu-acodec-ctrl.c
+++ b/sound/soc/meson/aiu-acodec-ctrl.c
@@ -58,7 +58,7 @@ static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
}
static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL,
diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c
index 59ee66fc2bcd..286ac4983d40 100644
--- a/sound/soc/meson/aiu-codec-ctrl.c
+++ b/sound/soc/meson/aiu-codec-ctrl.c
@@ -57,7 +57,7 @@ static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
}
static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL,
diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
index cbbaa55d92a6..2b77010c2c5c 100644
--- a/sound/soc/meson/axg-card.c
+++ b/sound/soc/meson/axg-card.c
@@ -320,7 +320,6 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
dai_link->cpus = cpu;
dai_link->num_cpus = 1;
- dai_link->nonatomic = true;
ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node,
&dai_link->cpus->dai_name);
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index 0c31934a9630..e076ced30025 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -351,29 +351,13 @@ static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
return 0;
}
-static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream,
- int cmd,
+static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct axg_tdm_stream *ts =
- snd_soc_dai_get_dma_data(dai, substream);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- axg_tdm_stream_start(ts);
- break;
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- case SNDRV_PCM_TRIGGER_STOP:
- axg_tdm_stream_stop(ts);
- break;
- default:
- return -EINVAL;
- }
+ struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
- return 0;
+ /* Force all attached formatters to update */
+ return axg_tdm_stream_reset(ts);
}
static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
@@ -413,8 +397,8 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
.set_fmt = axg_tdm_iface_set_fmt,
.startup = axg_tdm_iface_startup,
.hw_params = axg_tdm_iface_hw_params,
+ .prepare = axg_tdm_iface_prepare,
.hw_free = axg_tdm_iface_hw_free,
- .trigger = axg_tdm_iface_trigger,
};
/* TDM Backend DAIs */
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index 9b2b59536ced..6c99052feafd 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -67,7 +67,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
}
static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 2ab2ddc1294d..285441d6aeed 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -86,10 +86,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
memset(&slave_config, 0, sizeof(slave_config));
- if (pcm->config && pcm->config->prepare_slave_config)
- prepare_slave_config = pcm->config->prepare_slave_config;
- else
+ if (!pcm->config)
prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
+ else
+ prepare_slave_config = pcm->config->prepare_slave_config;
if (prepare_slave_config) {
int ret = prepare_slave_config(substream, params, &slave_config);
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index a0ca58ba1627..e693070f51fe 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -461,7 +461,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
ret = err;
}
}
- return err;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
@@ -519,7 +519,15 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val, val_mask;
- int err, ret;
+ int err, ret, tmp;
+
+ tmp = ucontrol->value.integer.value[0];
+ if (tmp < 0)
+ return -EINVAL;
+ if (mc->platform_max && tmp > mc->platform_max)
+ return -EINVAL;
+ if (tmp > mc->max - mc->min + 1)
+ return -EINVAL;
if (invert)
val = (max - ucontrol->value.integer.value[0]) & mask;
@@ -534,6 +542,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
ret = err;
if (snd_soc_volsw_is_stereo(mc)) {
+ tmp = ucontrol->value.integer.value[1];
+ if (tmp < 0)
+ return -EINVAL;
+ if (mc->platform_max && tmp > mc->platform_max)
+ return -EINVAL;
+ if (tmp > mc->max - mc->min + 1)
+ return -EINVAL;
+
if (invert)
val = (max - ucontrol->value.integer.value[1]) & mask;
else
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 12f5cff22448..7fa2649e56e5 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -153,6 +153,11 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
dev_dbg(&pci->dev, "PCI DSP detected");
+ if (!desc) {
+ dev_err(dev, "error: no matching PCI descriptor\n");
+ return -ENODEV;
+ }
+
if (!desc->ops) {
dev_err(dev, "error: no matching PCI descriptor ops\n");
return -ENODEV;
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 0fbdacfdcd6a..811e9b712bea 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -25,6 +25,7 @@ TEST_PROGS += bareudp.sh
TEST_PROGS += amt.sh
TEST_PROGS += unicast_extensions.sh
TEST_PROGS += udpgro_fwd.sh
+TEST_PROGS += udpgro_frglist.sh
TEST_PROGS += veth.sh
TEST_PROGS += ioam6.sh
TEST_PROGS += gro.sh
@@ -62,6 +63,8 @@ TEST_FILES := settings
KSFT_KHDR_INSTALL := 1
include ../lib.mk
+include bpf/Makefile
+
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
$(OUTPUT)/tcp_inq: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
new file mode 100644
index 000000000000..f91bf14bbee7
--- /dev/null
+++ b/tools/testing/selftests/net/bpf/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CLANG ?= clang
+CCINCLUDE += -I../../bpf
+CCINCLUDE += -I../../../../../usr/include/
+
+TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
+all: $(TEST_CUSTOM_PROGS)
+
+$(OUTPUT)/%.o: %.c
+ $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
+
+clean:
+ rm -f $(TEST_CUSTOM_PROGS)
diff --git a/tools/testing/selftests/net/bpf/nat6to4.c b/tools/testing/selftests/net/bpf/nat6to4.c
new file mode 100644
index 000000000000..ac54c36b25fc
--- /dev/null
+++ b/tools/testing/selftests/net/bpf/nat6to4.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This code is taken from the Android Open Source Project and the author
+ * (Maciej Żenczykowski) has given permission to relicense it under the
+ * GPLv2. Therefore this program is free software; you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * The original headers, including the original license headers, are
+ * included below for completeness.
+ *
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <linux/bpf.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/pkt_cls.h>
+#include <linux/swab.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+
+#include <linux/udp.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define IP_DF 0x4000 // Flag: "Don't Fragment"
+
+SEC("schedcls/ingress6/nat_6")
+int sched_cls_ingress6_nat_6_prog(struct __sk_buff *skb)
+{
+ const int l2_header_size = sizeof(struct ethhdr);
+ void *data = (void *)(long)skb->data;
+ const void *data_end = (void *)(long)skb->data_end;
+ const struct ethhdr * const eth = data; // used iff is_ethernet
+ const struct ipv6hdr * const ip6 = (void *)(eth + 1);
+
+ // Require ethernet dst mac address to be our unicast address.
+ if (skb->pkt_type != PACKET_HOST)
+ return TC_ACT_OK;
+
+ // Must be meta-ethernet IPv6 frame
+ if (skb->protocol != bpf_htons(ETH_P_IPV6))
+ return TC_ACT_OK;
+
+ // Must have (ethernet and) ipv6 header
+ if (data + l2_header_size + sizeof(*ip6) > data_end)
+ return TC_ACT_OK;
+
+ // Ethertype - if present - must be IPv6
+ if (eth->h_proto != bpf_htons(ETH_P_IPV6))
+ return TC_ACT_OK;
+
+ // IP version must be 6
+ if (ip6->version != 6)
+ return TC_ACT_OK;
+ // Maximum IPv6 payload length that can be translated to IPv4
+ if (bpf_ntohs(ip6->payload_len) > 0xFFFF - sizeof(struct iphdr))
+ return TC_ACT_OK;
+ switch (ip6->nexthdr) {
+ case IPPROTO_TCP: // For TCP & UDP the checksum neutrality of the chosen IPv6
+ case IPPROTO_UDP: // address means there is no need to update their checksums.
+ case IPPROTO_GRE: // We do not need to bother looking at GRE/ESP headers,
+ case IPPROTO_ESP: // since there is never a checksum to update.
+ break;
+ default: // do not know how to handle anything else
+ return TC_ACT_OK;
+ }
+
+ struct ethhdr eth2; // used iff is_ethernet
+
+ eth2 = *eth; // Copy over the ethernet header (src/dst mac)
+ eth2.h_proto = bpf_htons(ETH_P_IP); // But replace the ethertype
+
+ struct iphdr ip = {
+ .version = 4, // u4
+ .ihl = sizeof(struct iphdr) / sizeof(__u32), // u4
+ .tos = (ip6->priority << 4) + (ip6->flow_lbl[0] >> 4), // u8
+ .tot_len = bpf_htons(bpf_ntohs(ip6->payload_len) + sizeof(struct iphdr)), // u16
+ .id = 0, // u16
+ .frag_off = bpf_htons(IP_DF), // u16
+ .ttl = ip6->hop_limit, // u8
+ .protocol = ip6->nexthdr, // u8
+ .check = 0, // u16
+ .saddr = 0x0201a8c0, // u32
+ .daddr = 0x0101a8c0, // u32
+ };
+
+ // Calculate the IPv4 one's complement checksum of the IPv4 header.
+ __wsum sum4 = 0;
+
+ for (int i = 0; i < sizeof(ip) / sizeof(__u16); ++i)
+ sum4 += ((__u16 *)&ip)[i];
+
+ // Note that sum4 is guaranteed to be non-zero by virtue of ip.version == 4
+ sum4 = (sum4 & 0xFFFF) + (sum4 >> 16); // collapse u32 into range 1 .. 0x1FFFE
+ sum4 = (sum4 & 0xFFFF) + (sum4 >> 16); // collapse any potential carry into u16
+ ip.check = (__u16)~sum4; // sum4 cannot be zero, so this is never 0xFFFF
+
+ // Calculate the *negative* IPv6 16-bit one's complement checksum of the IPv6 header.
+ __wsum sum6 = 0;
+ // We'll end up with a non-zero sum due to ip6->version == 6 (which has '0' bits)
+ for (int i = 0; i < sizeof(*ip6) / sizeof(__u16); ++i)
+ sum6 += ~((__u16 *)ip6)[i]; // note the bitwise negation
+
+ // Note that there is no L4 checksum update: we are relying on the checksum neutrality
+ // of the ipv6 address chosen by netd's ClatdController.
+
+ // Packet mutations begin - point of no return, but if this first modification fails
+ // the packet is probably still pristine, so let clatd handle it.
+ if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IP), 0))
+ return TC_ACT_OK;
+ bpf_csum_update(skb, sum6);
+
+ data = (void *)(long)skb->data;
+ data_end = (void *)(long)skb->data_end;
+ if (data + l2_header_size + sizeof(struct iphdr) > data_end)
+ return TC_ACT_SHOT;
+
+ struct ethhdr *new_eth = data;
+
+ // Copy over the updated ethernet header
+ *new_eth = eth2;
+
+ // Copy over the new ipv4 header.
+ *(struct iphdr *)(new_eth + 1) = ip;
+ return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
+}
+
+SEC("schedcls/egress4/snat4")
+int sched_cls_egress4_snat4_prog(struct __sk_buff *skb)
+{
+ const int l2_header_size = sizeof(struct ethhdr);
+ void *data = (void *)(long)skb->data;
+ const void *data_end = (void *)(long)skb->data_end;
+ const struct ethhdr *const eth = data; // used iff is_ethernet
+ const struct iphdr *const ip4 = (void *)(eth + 1);
+
+ // Must be meta-ethernet IPv4 frame
+ if (skb->protocol != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ // Must have ipv4 header
+ if (data + l2_header_size + sizeof(struct ipv6hdr) > data_end)
+ return TC_ACT_OK;
+
+ // Ethertype - if present - must be IPv4
+ if (eth->h_proto != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ // IP version must be 4
+ if (ip4->version != 4)
+ return TC_ACT_OK;
+
+ // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
+ if (ip4->ihl != 5)
+ return TC_ACT_OK;
+
+ // Maximum IPv4 total length that can be translated to IPv6
+ if (bpf_htons(ip4->tot_len) > 0xFFFF - sizeof(struct ipv6hdr))
+ return TC_ACT_OK;
+
+ // Calculate the IPv4 one's complement checksum of the IPv4 header.
+ __wsum sum4 = 0;
+
+ for (int i = 0; i < sizeof(*ip4) / sizeof(__u16); ++i)
+ sum4 += ((__u16 *)ip4)[i];
+
+ // Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4
+ sum4 = (sum4 & 0xFFFF) + (sum4 >> 16); // collapse u32 into range 1 .. 0x1FFFE
+ sum4 = (sum4 & 0xFFFF) + (sum4 >> 16); // collapse any potential carry into u16
+ // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
+ if (sum4 != 0xFFFF)
+ return TC_ACT_OK;
+
+ // Minimum IPv4 total length is the size of the header
+ if (bpf_ntohs(ip4->tot_len) < sizeof(*ip4))
+ return TC_ACT_OK;
+
+ // We are incapable of dealing with IPv4 fragments
+ if (ip4->frag_off & ~bpf_htons(IP_DF))
+ return TC_ACT_OK;
+
+ switch (ip4->protocol) {
+ case IPPROTO_TCP: // For TCP & UDP the checksum neutrality of the chosen IPv6
+ case IPPROTO_GRE: // address means there is no need to update their checksums.
+ case IPPROTO_ESP: // We do not need to bother looking at GRE/ESP headers,
+ break; // since there is never a checksum to update.
+
+ case IPPROTO_UDP: // See above comment, but must also have UDP header...
+ if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end)
+ return TC_ACT_OK;
+ const struct udphdr *uh = (const struct udphdr *)(ip4 + 1);
+ // If the IPv4/UDP checksum is 0 then fall back to clatd so it can calculate the
+ // checksum. Otherwise the network or more likely the NAT64 gateway might
+ // drop the packet because in most cases IPv6/UDP packets with a zero checksum
+ // are invalid. See RFC 6935. TODO: calculate checksum via bpf_csum_diff()
+ if (!uh->check)
+ return TC_ACT_OK;
+ break;
+
+ default: // do not know how to handle anything else
+ return TC_ACT_OK;
+ }
+ struct ethhdr eth2; // used iff is_ethernet
+
+ eth2 = *eth; // Copy over the ethernet header (src/dst mac)
+ eth2.h_proto = bpf_htons(ETH_P_IPV6); // But replace the ethertype
+
+ struct ipv6hdr ip6 = {
+ .version = 6, // __u8:4
+ .priority = ip4->tos >> 4, // __u8:4
+ .flow_lbl = {(ip4->tos & 0xF) << 4, 0, 0}, // __u8[3]
+ .payload_len = bpf_htons(bpf_ntohs(ip4->tot_len) - 20), // __be16
+ .nexthdr = ip4->protocol, // __u8
+ .hop_limit = ip4->ttl, // __u8
+ };
+ ip6.saddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
+ ip6.saddr.in6_u.u6_addr32[1] = 0;
+ ip6.saddr.in6_u.u6_addr32[2] = 0;
+ ip6.saddr.in6_u.u6_addr32[3] = bpf_htonl(1);
+ ip6.daddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
+ ip6.daddr.in6_u.u6_addr32[1] = 0;
+ ip6.daddr.in6_u.u6_addr32[2] = 0;
+ ip6.daddr.in6_u.u6_addr32[3] = bpf_htonl(2);
+
+ // Calculate the IPv6 16-bit one's complement checksum of the IPv6 header.
+ __wsum sum6 = 0;
+ // We'll end up with a non-zero sum due to ip6.version == 6
+ for (int i = 0; i < sizeof(ip6) / sizeof(__u16); ++i)
+ sum6 += ((__u16 *)&ip6)[i];
+
+ // Packet mutations begin - point of no return, but if this first modification fails
+ // the packet is probably still pristine, so let clatd handle it.
+ if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
+ return TC_ACT_OK;
+
+ // This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
+ // In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
+ // thus we need to subtract out the ipv4 header's sum, and add in the ipv6 header's sum.
+ // However, we've already verified the ipv4 checksum is correct and thus 0.
+ // Thus we only need to add the ipv6 header's sum.
+ //
+ // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
+ // (-ENOTSUPP) if it isn't. So we just ignore the return code (see above for more details).
+ bpf_csum_update(skb, sum6);
+
+ // bpf_skb_change_proto() invalidates all pointers - reload them.
+ data = (void *)(long)skb->data;
+ data_end = (void *)(long)skb->data_end;
+
+ // I cannot think of any valid way for this error condition to trigger; however, the
+ // explicit check is required to keep the in-kernel eBPF verifier happy.
+ if (data + l2_header_size + sizeof(ip6) > data_end)
+ return TC_ACT_SHOT;
+
+ struct ethhdr *new_eth = data;
+
+ // Copy over the updated ethernet header
+ *new_eth = eth2;
+ // Copy over the new ipv6 header.
+ *(struct ipv6hdr *)(new_eth + 1) = ip6;
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = ("GPL");
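The two checksum-folding passes used throughout nat6to4.c follow the standard one's-complement scheme; a standalone version with hypothetical header words (the values below are invented and only serve to exercise the folding arithmetic):

#include <stdio.h>

/* fold a 32-bit one's-complement accumulator into 16 bits, as the
 * nat6to4 program does for the IPv4 header checksum */
static unsigned short csum_fold(unsigned int sum)
{
	sum = (sum & 0xFFFF) + (sum >> 16);   /* collapse into 1..0x1FFFE */
	sum = (sum & 0xFFFF) + (sum >> 16);   /* absorb a possible carry  */
	return (unsigned short)~sum;
}

int main(void)
{
	/* hypothetical 16-bit words of a 20-byte IPv4 header (byte order is
	 * ignored here; only the folding arithmetic matters) */
	unsigned short hdr[10] = { 0x4500, 0x0054, 0x0000, 0x4000,
				   0x4001, 0x0000, 0xc0a8, 0x0102,
				   0xc0a8, 0x0101 };
	unsigned int sum = 0;

	for (int i = 0; i < 10; i++)
		sum += hdr[i];
	printf("checksum field = 0x%04x\n", csum_fold(sum));
	return 0;
}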
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 47c4d4b4a44a..54701c8b0cd7 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -810,10 +810,16 @@ ipv4_ping()
setup
set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
ipv4_ping_novrf
+ setup
+ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ ipv4_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv4_ping_vrf
+ setup "yes"
+ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ ipv4_ping_vrf
}
################################################################################
@@ -2348,10 +2354,16 @@ ipv6_ping()
log_subsection "No VRF"
setup
ipv6_ping_novrf
+ setup
+ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ ipv6_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv6_ping_vrf
+ setup "yes"
+ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ ipv6_ping_vrf
}
################################################################################
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
new file mode 100755
index 000000000000..807b74c8fd80
--- /dev/null
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a series of udpgro benchmarks
+
+readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+cleanup() {
+ local -r jobs="$(jobs -p)"
+ local -r ns="$(ip netns list|grep $PEER_NS)"
+
+ [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
+ [ -n "$ns" ] && ip netns del $ns 2>/dev/null
+}
+trap cleanup EXIT
+
+run_one() {
+ # use 'rx' as separator between sender args and receiver args
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local rx_args=${all#*rx}
+
+
+
+ ip netns add "${PEER_NS}"
+ ip -netns "${PEER_NS}" link set lo up
+ ip link add type veth
+ ip link set dev veth0 up
+ ip addr add dev veth0 192.168.1.2/24
+ ip addr add dev veth0 2001:db8::2/64 nodad
+
+ ip link set dev veth1 netns "${PEER_NS}"
+ ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
+ ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
+ ip -netns "${PEER_NS}" link set dev veth1 up
+ ip netns exec "${PEER_NS}" ethtool -K veth1 rx-gro-list on
+
+
+ ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+ tc -n "${PEER_NS}" qdisc add dev veth1 clsact
+ tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6 direct-action
+ tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
+ echo ${rx_args}
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+
+ # Hack: let bg programs complete the startup
+ sleep 0.1
+ ./udpgso_bench_tx ${tx_args}
+}
+
+run_in_netns() {
+ local -r args=$@
+ echo ${args}
+ ./in_netns.sh $0 __subprocess ${args}
+}
+
+run_udp() {
+ local -r args=$@
+
+ echo "udp gso - over veth touching data"
+ run_in_netns ${args} -u -S 0 rx -4 -v
+
+ echo "udp gso and gro - over veth touching data"
+ run_in_netns ${args} -S 0 rx -4 -G
+}
+
+run_tcp() {
+ local -r args=$@
+
+ echo "tcp - over veth touching data"
+ run_in_netns ${args} -t rx -4 -t
+}
+
+run_all() {
+ local -r core_args="-l 4"
+ local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
+ local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+
+ echo "ipv6"
+ run_tcp "${ipv6_args}"
+ run_udp "${ipv6_args}"
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+ echo "Missing xdp_dummy helper. Build bpf selftest first"
+ exit -1
+fi
+
+if [ ! -f bpf/nat6to4.o ]; then
+ echo "Missing nat6to4 helper. Build bpfnat6to4.o selftest first"
+ exit -1
+fi
+
+if [[ $# -eq 0 ]]; then
+ run_all
+elif [[ $1 == "__subprocess" ]]; then
+ shift
+ run_one $@
+else
+ run_in_netns $@
+fi