Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/caif/caif_serial.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c5
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c15
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c23
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c74
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c138
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h10
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c27
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c80
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c24
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c21
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/korina.c12
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c14
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c77
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c18
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h2
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/ethernet/sun/niu.c32
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ipa/ipa.h2
-rw-r--r--drivers/net/ipa/ipa_mem.c3
-rw-r--r--drivers/net/mdio/mdio-octeon.c2
-rw-r--r--drivers/net/mdio/mdio-thunder.c1
-rw-r--r--drivers/net/phy/mdio_bus.c3
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/hso.c45
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/r8152.c42
-rw-r--r--drivers/net/usb/smsc75xx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c201
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c34
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c42
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c8
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c18
88 files changed, 1270 insertions, 523 deletions
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index ba8e70a8e312..6b12ce822e51 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
break;
}
+ dev->base_addr = ioaddr;
+
/* Reserve any actual interrupt. */
if (dev->irq) {
retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
goto err_out;
}
- dev->base_addr = ioaddr;
-
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 20bbda1b36e1..c5a646d06102 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1526,6 +1526,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
slave->bond = bond;
slave->dev = slave_dev;
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
if (bond_kobj_init(slave))
return NULL;
@@ -1538,7 +1539,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
return NULL;
}
}
- INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
return slave;
}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index d0c24dba4a86..205219d28aff 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -269,9 +269,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- if (WARN_ON(!dev))
- return -EINVAL;
-
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9150038b60cb..3b018fcf4412 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -821,11 +821,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl;
+ u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
-
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -836,6 +834,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 55e5d479acce..854e25f43fa7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
},
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 96f7c9eede35..9b90f3d3a8f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1262,14 +1262,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- /* The real fabric path would be decided on the membership in the
- * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
- * means potential VLAN can be consisting of certain subset of all
- * ports.
- */
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
-
/* Trapped into security mode allows packet forwarding through VLAN
* table lookup. CPU port is set to fallback mode to let untagged
* frames pass through.
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2473bebe48e6..f966a253d1c7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1227,12 +1227,17 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
return -ERANGE;
- /* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set
- * guard band to be implemented for nonschedule queues to schedule
- * queues transition.
+ /* Enable guard band. The switch will schedule frames without taking
+ * their length into account. Thus we'll always need to enable the
+ * guard band which reserves the time of a maximum sized frame at the
+ * end of the time window.
+ *
+ * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
+ * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
+ * operate on the port number.
*/
- ocelot_rmw(ocelot,
- QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
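
The guard band enabled here reserves the transmission time of one maximum-sized frame at the end of each gate window, so a frame that starts late cannot spill into the next scheduled window. A rough standalone sketch of that reservation, assuming an illustrative 1518-byte frame, a 1 Gbps link and a 100 us window (none of these values come from the driver):

#include <stdint.h>
#include <stdio.h>

/* Nanoseconds needed to put one frame of 'bytes' octets on the wire
 * (including preamble/SFD and interframe gap) at 'mbps' megabits/s. */
static uint64_t frame_time_ns(uint64_t bytes, uint64_t mbps)
{
	uint64_t bits = (bytes + 8 + 12) * 8;	/* +preamble/SFD +IFG */

	return bits * 1000 / mbps;
}

int main(void)
{
	uint64_t window_ns = 100000;		/* 100 us gate interval */
	uint64_t guard_ns = frame_time_ns(1518, 1000);

	/* Time left for scheduled traffic once the guard band is reserved. */
	printf("guard band: %llu ns, usable: %llu ns\n",
	       (unsigned long long)guard_ns,
	       (unsigned long long)(window_ns - guard_ns));
	return 0;
}
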
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index b777d3f37573..12cd04b56803 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -167,9 +167,10 @@ enum sja1105_hostcmd {
SJA1105_HOSTCMD_INVALIDATE = 4,
};
+/* Command and entry overlap */
static void
-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
- enum packing_op op)
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
@@ -179,6 +180,20 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -641,7 +656,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
@@ -725,7 +740,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
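
The two packing helpers differ only in where the command word lives: on the E/T switches the command bits overlap the entry buffer, while on P/Q/R/S the command word sits after an entry-sized area, hence the buf + SJA1105_SIZE_VL_LOOKUP_ENTRY offset. A minimal sketch of the two layouts, using illustrative sizes rather than the real SJA1105 ones:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE	12	/* illustrative, not the real entry size */
#define CMD_SIZE	4

/* Pack a 32-bit command word either over the start of the entry area
 * (overlapping layout) or right after it (separate layout). */
static void pack_cmd(uint8_t *buf, uint32_t cmd, int overlap)
{
	size_t off = overlap ? 0 : ENTRY_SIZE;

	memcpy(buf + off, &cmd, CMD_SIZE);
}

int main(void)
{
	uint8_t overlapped[ENTRY_SIZE];		 /* E/T-style buffer */
	uint8_t separate[ENTRY_SIZE + CMD_SIZE]; /* P/Q/R/S-style buffer */

	pack_cmd(overlapped, 0x80000000, 1);
	pack_cmd(separate, 0x80000000, 0);
	printf("overlapped buffer: %zu bytes, separate buffer: %zu bytes\n",
	       sizeof(overlapped), sizeof(separate));
	return 0;
}
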
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 405024b637d6..b88d9ef45a1f 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -26,6 +26,7 @@
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
static const struct dsa_switch_ops sja1105_switch_ops;
@@ -207,6 +208,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
default:
dev_err(dev, "Unsupported PHY mode %s!\n",
phy_modes(ports[i].phy_mode));
+ return -EINVAL;
}
/* Even though the SerDes port is able to drive SGMII autoneg
@@ -321,6 +323,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
return 0;
}
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
@@ -330,17 +339,13 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
- .vlanid = 1,
+ .vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- /* The static VLAN table will only contain the initial pvid of 1.
- * All other VLANs are to be configured through dynamic entries,
- * and kept in the static configuration table as backing memory.
- */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
@@ -353,9 +358,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
- /* VLAN 1: all DT-defined ports are members; no restrictions on
- * forwarding; always transmit as untagged.
- */
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_bridge_vlan *v;
@@ -366,15 +368,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
- /* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
- * transmitted as untagged.
- */
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
v->port = port;
- v->vid = 1;
+ v->vid = SJA1105_DEFAULT_VLAN;
v->untagged = true;
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
@@ -2817,11 +2816,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
struct sja1105_bridge_vlan *v;
- list_for_each_entry(v, vlan_list, list)
- if (v->port == port && v->vid == vid &&
- v->untagged == untagged && v->pvid == pvid)
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
/* Already added */
- return 0;
+ if (v->untagged == untagged && v->pvid == pvid)
+ /* Nothing changed */
+ return 0;
+
+ /* It's the same VLAN, but some of the flags changed
+ * and the user did not bother to delete it first.
+ * Update it and trigger sja1105_build_vlan_table.
+ */
+ v->untagged = untagged;
+ v->pvid = pvid;
+ return 1;
+ }
+ }
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
@@ -2976,13 +2986,13 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_static_config_load(priv, ports);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
- return rc;
+ goto out_ptp_clock_unregister;
}
/* Configure the CGU (PHY link modes and speeds) */
rc = sja1105_clocking_setup(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
- return rc;
+ goto out_static_config_free;
}
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
@@ -3003,7 +3013,7 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_devlink_setup(ds);
if (rc < 0)
- return rc;
+ goto out_static_config_free;
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
@@ -3012,6 +3022,17 @@ static int sja1105_setup(struct dsa_switch *ds)
rtnl_lock();
rc = sja1105_setup_8021q_tagging(ds, true);
rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
return rc;
}
@@ -3646,8 +3667,10 @@ static int sja1105_probe(struct spi_device *spi)
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
- if (!priv->cbs)
- return -ENOMEM;
+ if (!priv->cbs) {
+ rc = -ENOMEM;
+ goto out_unregister_switch;
+ }
}
/* Connections between dsa_port and sja1105_port */
@@ -3672,7 +3695,7 @@ static int sja1105_probe(struct spi_device *spi)
dev_err(ds->dev,
"failed to create deferred xmit thread: %d\n",
rc);
- goto out;
+ goto out_destroy_workers;
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
@@ -3682,7 +3705,8 @@ static int sja1105_probe(struct spi_device *spi)
}
return 0;
-out:
+
+out_destroy_workers:
while (port-- > 0) {
struct sja1105_port *sp = &priv->ports[port];
@@ -3691,6 +3715,10 @@ out:
kthread_destroy_worker(sp->xmit_worker);
}
+
+out_unregister_switch:
+ dsa_unregister_switch(ds);
+
return rc;
}
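
The sja1105_setup() and sja1105_probe() changes above turn early returns into a goto ladder so that every resource acquired before a failure is released, in reverse order, on the error path. A generic sketch of that pattern, with placeholder step/undo functions standing in for the real setup stages (static config load, clocking, devlink, 8021q tagging):

#include <stdio.h>
#include <stdlib.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* simulate a late failure */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int setup(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;
	rc = step_b();
	if (rc)
		goto out_undo_a;
	rc = step_c();
	if (rc)
		goto out_undo_b;
	return 0;

out_undo_b:			/* unwind strictly in reverse order */
	undo_b();
out_undo_a:
	undo_a();
	return rc;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}
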
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index c0986096c701..5bace8a93d73 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
-
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+ rc = -EPERM;
goto err_out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index d21f085044cd..27943b0446c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
/* SR-IOV capability was enabled but there are no VFs*/
- if (iov->total == 0)
+ if (iov->total == 0) {
+ err = -EINVAL;
goto failed;
+ }
iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2985844634c8..fcc729d52b17 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
- idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+ idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -6932,17 +6933,10 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
- u8 pg_size = 0;
-
if (!rmem->nr_pages)
return;
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
-
- *pg_attr = pg_size;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
@@ -10785,37 +10779,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+ u8 **nextp)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ int hdr_count = 0;
+ u8 *nexthdr;
+ int start;
+
+ /* Check that there are at most 2 IPv6 extension headers, no
+ * fragment header, and each is <= 64 bytes.
+ */
+ start = nw_off + sizeof(*ip6h);
+ nexthdr = &ip6h->nexthdr;
+ while (ipv6_ext_hdr(*nexthdr)) {
+ struct ipv6_opt_hdr *hp;
+ int hdrlen;
+
+ if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+ *nexthdr == NEXTHDR_FRAGMENT)
+ return false;
+ hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+ skb_headlen(skb), NULL);
+ if (!hp)
+ return false;
+ if (*nexthdr == NEXTHDR_AUTH)
+ hdrlen = ipv6_authlen(hp);
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ if (hdrlen > 64)
+ return false;
+ nexthdr = &hp->nexthdr;
+ start += hdrlen;
+ hdr_count++;
+ }
+ if (nextp) {
+ /* Caller will check inner protocol */
+ if (skb->encapsulation) {
+ *nextp = nexthdr;
+ return true;
+ }
+ *nextp = NULL;
+ }
+ /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+ return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ __be16 udp_port = uh->dest;
+
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ return false;
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ struct ethhdr *eh = inner_eth_hdr(skb);
+
+ switch (eh->h_proto) {
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ return bnxt_exthdr_check(bp, skb,
+ skb_inner_network_offset(skb),
+ NULL);
+ }
+ }
+ return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ return bnxt_udp_tunl_check(bp, skb);
+ case IPPROTO_IPIP:
+ return true;
+ case IPPROTO_GRE: {
+ switch (skb->inner_protocol) {
+ default:
+ return false;
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ fallthrough;
+ }
+ }
+ case IPPROTO_IPV6:
+ /* Check ext headers of inner ipv6 */
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
+ }
+ return false;
+}
+
static netdev_features_t bnxt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- struct bnxt *bp;
- __be16 udp_port;
- u8 l4_proto = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u8 *l4_proto;
features = vlan_features_check(skb, features);
- if (!skb->encapsulation)
- return features;
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
- l4_proto = ip_hdr(skb)->protocol;
+ if (!skb->encapsulation)
+ return features;
+ l4_proto = &ip_hdr(skb)->protocol;
+ if (bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
case htons(ETH_P_IPV6):
- l4_proto = ipv6_hdr(skb)->nexthdr;
+ if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+ &l4_proto))
+ break;
+ if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
- default:
- return features;
}
-
- if (l4_proto != IPPROTO_UDP)
- return features;
-
- bp = netdev_priv(dev);
- /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
- udp_port = udp_hdr(skb)->dest;
- if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
- return features;
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
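
bnxt_exthdr_check() walks the IPv6 next-header chain and gives up on fragment headers, NEXTHDR_NONE, overly long chains, or any extension header longer than 64 bytes, so the hardware is never asked to offload a packet it cannot parse. A self-contained sketch of the same walk over a raw buffer; the header-count limit, protocol numbers and length encodings below follow the standard IPv6 option-header format but are simplified relative to the driver:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Protocol numbers for a few IPv6 (extension) headers. */
#define NH_HOP		0
#define NH_TCP		6
#define NH_UDP		17
#define NH_ROUTING	43
#define NH_FRAGMENT	44
#define NH_AUTH		51
#define NH_NONE		59
#define NH_DEST		60

#define MAX_EXT_HDRS	2	/* illustrative limit */

static bool is_ext_hdr(uint8_t nh)
{
	return nh == NH_HOP || nh == NH_ROUTING || nh == NH_FRAGMENT ||
	       nh == NH_AUTH || nh == NH_NONE || nh == NH_DEST;
}

/* Walk the chain starting at byte offset 'off' of the first extension
 * header; 'first_nh' is the next-header value from the IPv6 header.
 * Reject fragments, NEXTHDR_NONE, too many or too long extension
 * headers, and require the upper protocol to be TCP or UDP. */
static bool exthdr_chain_ok(const uint8_t *pkt, size_t len,
			    size_t off, uint8_t first_nh)
{
	uint8_t nh = first_nh;
	int count = 0;

	while (is_ext_hdr(nh)) {
		size_t hdrlen;

		if (count >= MAX_EXT_HDRS || nh == NH_NONE ||
		    nh == NH_FRAGMENT)
			return false;
		if (off + 2 > len)
			return false;
		if (nh == NH_AUTH)		/* AH length is in 4-byte units */
			hdrlen = ((size_t)pkt[off + 1] + 2) * 4;
		else				/* others are in 8-byte units */
			hdrlen = ((size_t)pkt[off + 1] + 1) * 8;
		if (hdrlen > 64 || off + hdrlen > len)
			return false;
		nh = pkt[off];			/* chained next-header field */
		off += hdrlen;
		count++;
	}
	return nh == NH_TCP || nh == NH_UDP;
}

int main(void)
{
	/* One 8-byte destination-options header followed by TCP. */
	uint8_t pkt[8] = { NH_TCP, 0 };

	printf("chain ok: %d\n", exthdr_chain_ok(pkt, sizeof(pkt), 0, NH_DEST));
	return 0;
}
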
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98e0cef4532c..30e47ea343f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,16 @@ struct bnxt_ctx_pg_info {
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6bc7d41d519b..a0c7b1167dbb 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2867,6 +2867,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->dev->stats;
+ if (!netif_running(bp->dev))
+ return nstat;
+
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7c5af4beedc6..591229b96257 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1153,7 +1153,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
@@ -1161,15 +1161,15 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
if (!sc) {
netif_info(lio, rx_err, lio->netdev,
- "Failed to allocate octeon_soft_command\n");
- return;
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
}
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1192,18 +1192,19 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc);
- return;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -1778,6 +1779,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (oct->props[lio->ifidx].napi_enabled == 0) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -1813,7 +1815,9 @@ static int liquidio_open(struct net_device *netdev)
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,7 +1828,7 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
- return 0;
+ return ret;
}
/**
@@ -1838,6 +1842,7 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -1854,7 +1859,9 @@ static int liquidio_stop(struct net_device *netdev)
lio->link_changes++;
/* Tell Octeon that nic interface is down. */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
if (OCTEON_CN23XX_PF(oct)) {
if (!oct->msix_on)
@@ -1889,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 516f166ceff8..ffddb3126a32 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -595,7 +595,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
struct octeon_soft_command *sc;
@@ -603,11 +603,16 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -635,11 +640,13 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -906,6 +913,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (!oct->props[lio->ifidx].napi_enabled) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -932,11 +940,13 @@ static int liquidio_open(struct net_device *netdev)
(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
- return 0;
+ return ret;
}
/**
@@ -950,9 +960,12 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
/* tell Octeon to stop forwarding packets to host */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
/* Inform that netif carrier is down */
@@ -986,7 +999,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index bc581b149b11..22c9ac922eba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
cxgb4_del_filter(dev, f->tid, &f->fs);
}
- sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ sb = adapter->tids.stid_base;
for (i = 0; i < sb; i++) {
f = (struct filter_entry *)adapter->tids.tid_tab[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6264bc66a4fc..421bd9b88028 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6480,9 +6480,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
direction);
- cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
mutex_unlock(&uld_mutex);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index ef3f1e92632f..59683f79959c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
}
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+static void clear_conn_resources(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
* @tx_info - driver specific tls info.
@@ -364,10 +365,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_uld_ctx *u_ctx;
if (!tx_info)
return;
+ u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx && u_ctx->detach)
+ return;
/* clear l2t entry */
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
@@ -384,6 +389,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->tid != -1) {
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
}
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -411,6 +418,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
struct adapter *adap;
@@ -425,6 +433,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_open);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
@@ -434,6 +443,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (tx_ctx->chcr_info)
goto out;
+ if (u_ctx && u_ctx->detach)
+ goto out;
+
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
if (!tx_info)
goto out;
@@ -569,6 +581,8 @@ free_tid:
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
+
put_module:
/* release module refcount */
module_put(THIS_MODULE);
@@ -633,8 +647,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
+ struct tls_context *tls_ctx;
struct tid_info *t;
+ int ret = 0;
tid = GET_TID(p);
status = AOPEN_STATUS_G(ntohl(p->atid_status));
@@ -666,14 +684,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+ /* Adding tid */
+ tls_ctx = tls_get_ctx(tx_info->sk);
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx) {
+ ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
+ GFP_NOWAIT);
+ if (ret < 0) {
+ pr_err("%s: Failed to allocate tid XA entry = %d\n",
+ __func__, tx_info->tid);
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+ goto out;
+ }
+ }
tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
} else {
tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+out:
spin_unlock(&tx_info->lock);
complete(&tx_info->completion);
- return 0;
+ return ret;
}
/*
@@ -2090,6 +2123,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
goto out;
}
u_ctx->lldi = *lldi;
+ u_ctx->detach = false;
+ xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
out:
return u_ctx;
}
@@ -2123,6 +2158,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+static void clear_conn_resources(struct chcr_ktls_info *tx_info)
+{
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+
+ /* clear tid */
+ if (tx_info->tid != -1)
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+}
+
+static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
+{
+ struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ unsigned long index;
+
+ xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
+ tx_info = tx_ctx->chcr_info;
+ clear_conn_resources(tx_info);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
+ }
+}
+
static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct chcr_ktls_uld_ctx *u_ctx = handle;
@@ -2139,7 +2213,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_DETACH:
pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
mutex_lock(&dev_mutex);
+ u_ctx->detach = true;
list_del(&u_ctx->entry);
+ ch_ktls_reset_all_conn(u_ctx);
+ xa_destroy(&u_ctx->tid_list);
mutex_unlock(&dev_mutex);
break;
default:
@@ -2178,6 +2255,7 @@ static void __exit chcr_ktls_exit(void)
adap = pci_get_drvdata(u_ctx->lldi.pdev);
memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
list_del(&u_ctx->entry);
+ xa_destroy(&u_ctx->tid_list);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 18b3b1f02415..10572dc55365 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
+ struct xarray tid_list;
+ bool detach;
};
static inline struct chcr_ktls_ofld_ctx_tx *
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 188d871f6b8c..c320cc8ca68d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1564,8 +1564,10 @@ found_ok_skb:
cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
sizeof(thdr->type), &thdr->type);
- if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
- return -EIO;
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
+ copied = -EIO;
+ break;
+ }
/* don't send tls header, skip copy */
goto skip_copy;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f2065f9d02e6..ad82cffc6f3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1662,7 +1662,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
}
/* ------------------------------------------------------------------------- */
-static void fec_get_mac(struct net_device *ndev)
+static int fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -1685,6 +1685,8 @@ static void fec_get_mac(struct net_device *ndev)
ret = of_get_mac_address(np, tmpaddr);
if (!ret)
iap = tmpaddr;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
}
}
@@ -1723,7 +1725,7 @@ static void fec_get_mac(struct net_device *ndev)
eth_hw_addr_random(ndev);
dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
ndev->dev_addr);
- return;
+ return 0;
}
memcpy(ndev->dev_addr, iap, ETH_ALEN);
@@ -1731,6 +1733,8 @@ static void fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+
+ return 0;
}
/* ------------------------------------------------------------------------- */
@@ -3290,7 +3294,9 @@ static int fec_enet_init(struct net_device *ndev)
return ret;
}
- fec_enet_alloc_queue(ndev);
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
@@ -3298,11 +3304,15 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_queue_mem;
}
/* Get the Ethernet address */
- fec_get_mac(ndev);
+ ret = fec_get_mac(ndev);
+ if (ret)
+ goto free_queue_mem;
+
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
@@ -3376,6 +3386,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7b7a4aace79..b0c0504950d8 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -548,8 +548,8 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
if (!base) {
- pcmcia_release_window(link, link->resource[2]);
- return -ENOMEM;
+ pcmcia_release_window(link, link->resource[2]);
+ return -1;
}
pcmcia_map_mem_page(link, link->resource[2], 0);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7302498c6df3..bbc423e93122 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
/* Double check we have no extra work.
* Ensure unmask synchronizes with checking for work.
*/
- dma_rmb();
+ mb();
if (block->tx)
reschedule |= gve_tx_poll(block, -1);
if (block->rx)
@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int vecs_left = new_num_ntfy_blks % 2;
priv->num_ntfy_blks = new_num_ntfy_blks;
+ priv->mgmt_msix_idx = priv->num_ntfy_blks;
priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
vecs_per_type);
priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
@@ -300,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (priv->msix_vectors) {
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
}
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
priv->ntfy_blocks = NULL;
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 6938f3a939d6..3e04a3973d68 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -212,10 +212,11 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
-
+ if (!tx->tx_fifo.qpl)
+ goto abort_with_desc;
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
- goto abort_with_desc;
+ goto abort_with_qpl;
}
tx->q_resources =
@@ -236,6 +237,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_qpl:
+ if (!tx->raw_addressing)
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -589,7 +593,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
struct gve_tx_ring *tx;
int nsegs;
- WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
+ WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
if (unlikely(gve_maybe_stop_tx(tx, skb))) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index da48c05435ea..7e62dcff2426 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -192,7 +192,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
}
/**
- *hns_nic_set_link_settings - implement ethtool set link ksettings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
*@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
@@ -827,7 +827,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
}
/**
- * get_ethtool_stats - get detail statistics.
+ * hns_get_ethtool_stats - get detail statistics.
* @netdev: net device
* @stats: statistics info.
* @data: statistics data.
@@ -885,7 +885,7 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
}
/**
- * get_strings: Return a set of strings that describe the requested objects
+ * hns_get_strings: Return a set of strings that describe the requested objects
* @netdev: net device
* @stringset: string set ID.
* @data: objects data.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 783fdaf8f8d6..026558f8e04b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -264,22 +264,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
- /* initialize the configuration for interrupt coalescing.
- * 1. GL (Interrupt Gap Limiter)
- * 2. RL (Interrupt Rate Limiter)
- * 3. QL (Interrupt Quantity Limiter)
- *
- * Default: enable interrupt coalescing self-adaptive and GL
- */
- tx_coal->adapt_enable = 1;
- rx_coal->adapt_enable = 1;
+ tx_coal->adapt_enable = ptx_coal->adapt_enable;
+ rx_coal->adapt_enable = prx_coal->adapt_enable;
- tx_coal->int_gl = HNS3_INT_GL_50K;
- rx_coal->int_gl = HNS3_INT_GL_50K;
+ tx_coal->int_gl = ptx_coal->int_gl;
+ rx_coal->int_gl = prx_coal->int_gl;
- rx_coal->flow_level = HNS3_FLOW_LOW;
- tx_coal->flow_level = HNS3_FLOW_LOW;
+ rx_coal->flow_level = prx_coal->flow_level;
+ tx_coal->flow_level = ptx_coal->flow_level;
/* device version above V3(include V3), GL can configure 1us
* unit, so uses 1us unit.
@@ -294,8 +289,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
rx_coal->ql_enable = 1;
tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
- tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
- rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ tx_coal->int_ql = ptx_coal->int_ql;
+ rx_coal->int_ql = prx_coal->int_ql;
}
}
@@ -846,8 +841,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
l4.udp->dest == htons(4790))))
return false;
- skb_checksum_help(skb);
-
return true;
}
@@ -924,8 +917,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -970,7 +962,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
- break;
+ return skb_checksum_help(skb);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -995,8 +987,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
return 0;
@@ -3844,6 +3835,34 @@ map_ring_fail:
return ret;
}
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
+ */
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
+
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
+}
+
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -4295,6 +4314,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_get_ring_cfg;
}
+ hns3_nic_init_coal_cfg(priv);
+
ret = hns3_nic_alloc_vector_data(priv);
if (ret) {
ret = -ENOMEM;
@@ -4317,12 +4338,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ret)
goto out_init_phy;
- ret = register_netdev(netdev);
- if (ret) {
- dev_err(priv->dev, "probe register netdev fail!\n");
- goto out_reg_netdev_fail;
- }
-
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
@@ -4355,17 +4370,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
if (netif_msg_drv(handle))
hns3_info_show(priv);
return ret;
+out_reg_netdev_fail:
+ hns3_dbg_uninit(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
out_init_irq_fail:
- unregister_netdev(netdev);
-out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
@@ -4571,31 +4592,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
return 0;
}
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
- /* ethtool only support setting and querying one coal
- * configuration for now, so save the vector 0' coal
- * configuration here in order to restore it.
- */
- memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
- sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
- u16 vector_num = priv->vector_num;
- int i;
-
- for (i = 0; i < vector_num; i++) {
- memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
- sizeof(struct hns3_enet_coalesce));
- }
-}
-
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4654,8 +4650,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_put_ring;
- hns3_restore_coal(priv);
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_dealloc_vector;
@@ -4721,8 +4715,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_uninit_vector_data(priv);
- hns3_store_coal(priv);
-
hns3_nic_dealloc_vector_data(priv);
hns3_uninit_all_ring(priv);
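
The hns3 rework keeps a single device-level copy of the coalescing configuration (priv->tx_coal/rx_coal), initialized once in hns3_nic_init_coal_cfg() and copied into every vector when vectors are (re)built, which removes the save/restore dance around resets. A generic sketch of that pattern with placeholder structure and field names:

#include <stdio.h>

struct coal_cfg {
	unsigned int usecs;	/* interrupt gap limit */
	unsigned int frames;	/* interrupt quantity limit */
	int adaptive;
};

struct vector {
	struct coal_cfg coal;	/* per-vector working copy */
};

struct device_priv {
	struct coal_cfg coal_default;	/* single source of truth */
	struct vector vec[4];
};

/* Set the device-level defaults once, at probe time. */
static void init_coal_defaults(struct device_priv *p)
{
	p->coal_default.usecs = 20;
	p->coal_default.frames = 16;
	p->coal_default.adaptive = 1;
}

/* Copy the device-level config into each vector when (re)initializing
 * vectors, e.g. after a reset - no per-vector save/restore needed. */
static void init_vectors(struct device_priv *p)
{
	for (int i = 0; i < 4; i++)
		p->vec[i].coal = p->coal_default;
}

int main(void)
{
	struct device_priv p;

	init_coal_defaults(&p);
	/* An ethtool-style set updates the device copy... */
	p.coal_default.usecs = 50;
	/* ...and every vector picks it up on the next init. */
	init_vectors(&p);
	printf("vector 0 usecs: %u\n", p.vec[0].coal.usecs);
	return 0;
}
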
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b48faf769b1c..c1ea403d2b56 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1134,50 +1134,32 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
- struct ethtool_coalesce *cmd)
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
{
- struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
struct hnae3_handle *h = priv->ae_handle;
- u16 queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev))
return -EBUSY;
- if (queue >= queue_num) {
- netdev_err(netdev,
- "Invalid queue value %u! Queue max id=%u\n",
- queue, queue_num - 1);
- return -EINVAL;
- }
-
- tx_vector = priv->ring[queue].tqp_vector;
- rx_vector = priv->ring[queue_num + queue].tqp_vector;
+ cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
- cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.adapt_enable;
- cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.adapt_enable;
-
- cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
- cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+ cmd->tx_coalesce_usecs = tx_coal->int_gl;
+ cmd->rx_coalesce_usecs = rx_coal->int_gl;
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
- cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
- cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+ cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+ cmd->rx_max_coalesced_frames = rx_coal->int_ql;
return 0;
}
-static int hns3_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
-{
- return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1292,19 +1274,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
- ret = hns3_check_ql_coalesce_param(netdev, cmd);
- if (ret)
- return ret;
-
- if (cmd->use_adaptive_tx_coalesce == 1 ||
- cmd->use_adaptive_rx_coalesce == 1) {
- netdev_info(netdev,
- "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
- cmd->use_adaptive_tx_coalesce,
- cmd->use_adaptive_rx_coalesce);
- }
-
- return 0;
+ return hns3_check_ql_coalesce_param(netdev, cmd);
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1320,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
u16 queue_num = h->kinfo.num_tqps;
int ret;
int i;
@@ -1364,6 +1337,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
h->kinfo.int_rl_setting =
hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_coal->int_gl = cmd->tx_coalesce_usecs;
+ rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+ tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+ rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 8e5f9dc8791d..f1c9f4ada348 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -710,7 +710,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
unsigned int flag;
int ret = 0;
- memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +737,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
trace_hclge_pf_mbx_get(hdev, req);
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 988db46bff0e..214a38de3f41 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
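Note: the ixgbe hunk above moves the range check on the VF-supplied frame size to the top of ixgbe_set_vf_lpe() and extends it with an ETH_MIN_MTU lower bound, so an out-of-range request is rejected before any hardware register is touched. A minimal stand-alone sketch of that check follows; check_vf_max_frame() is a hypothetical name and the two constants mirror the values used in the hunk.

    #include <stdio.h>

    #define ETH_MIN_MTU                68    /* minimum IPv4 MTU */
    #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728  /* largest supported frame */

    /* Reject bad VF input before any register such as MAXFRS is written. */
    static int check_vf_max_frame(unsigned int max_frame)
    {
    	if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)
    		return -1;
    	return 0;
    }

    int main(void)
    {
    	printf("%d\n", check_vf_max_frame(40));    /* -1: below ETH_MIN_MTU */
    	printf("%d\n", check_vf_max_frame(1518));  /*  0: normal Ethernet frame */
    	printf("%d\n", check_vf_max_frame(65536)); /* -1: above the jumbo limit */
    	return 0;
    }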
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6f987a7ffcb3..b30a45725374 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1315,23 +1315,23 @@ static int korina_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq_byname(pdev, "tx");
p = devm_platform_ioremap_resource_byname(pdev, "emac");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->eth_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->rx_dma_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->tx_dma_regs = p;
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41c2ad210bc9..36dc3e5f6218 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
@@ -163,16 +164,17 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
- ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev,
- ch->dma.desc_base[ch->dma.desc].addr))) {
+ mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+ XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ret = -ENOMEM;
goto skip;
}
+ ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ /* Make sure the address is written before we give it to HW */
+ wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
@@ -196,6 +198,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
+ ch->skb[ch->dma.desc] = skb;
+ net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
}
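Note: the xrx200 change maps the new receive buffer into a local dma_addr_t first and only writes the descriptor, followed by a wmb(), once the mapping is known to be good; on failure the ring slot keeps a valid skb pointer and rx_dropped is incremented. The sketch below models that "publish only after success" ordering in plain user-space C; fake_map(), refill() and the rx_desc layout are illustrative stand-ins, and the real barrier placement is only indicated by a comment.

    #include <stdint.h>
    #include <stdio.h>

    struct rx_desc { uint32_t addr; uint32_t ctl; };

    /* stand-in for dma_map_single(): returns 0 to model a mapping failure */
    static uint32_t fake_map(const void *buf)
    {
    	return buf ? 0x1000u : 0;   /* pretend the buffer mapped at 0x1000 */
    }

    static int refill(struct rx_desc *desc, void *new_buf)
    {
    	uint32_t mapping = fake_map(new_buf);

    	if (!mapping)
    		return -1;           /* keep the old buffer, count a drop */

    	desc->addr = mapping;        /* publish the address first ...      */
    	/* in the driver a wmb() sits here, so the address is visible to   */
    	/* the DMA engine before the ownership bit below hands it over     */
    	desc->ctl = 1u << 31;        /* ... then give ownership to the HW  */
    	return 0;
    }

    int main(void)
    {
    	struct rx_desc d = {0};
    	char buf[64];

    	printf("refill: %d\n", refill(&d, buf));   /* 0: descriptor updated */
    	printf("refill: %d\n", refill(&d, NULL));  /* -1: descriptor untouched */
    	return 0;
    }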
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 8edba5ea90f0..4a61c90003b5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
struct mvpp2_tai;
/* Definitions */
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
+struct mvpp2_buff_hdr {
+ __le32 next_phys_addr;
+ __le32 next_dma_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_phys_addr_high;
+ u8 next_dma_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ec706d614cac..d39c7639cdba 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3839,6 +3839,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
return ret;
}
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ phys_addr_t phys_addr, phys_addr_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+
+ phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22) {
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+ }
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ phys_addr = phys_addr_next;
+ dma_addr = dma_addr_next;
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
int rx_todo, struct mvpp2_rx_queue *rxq)
@@ -3885,14 +3914,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
MVPP2_RXD_BM_POOL_ID_OFFS;
bm_pool = &port->priv->bm_pools[pool];
- /* In case of an error, release the requested buffer pointer
- * to the Buffer Manager. This request process is controlled
- * by the hardware, and the information about the buffer is
- * comprised by the RX descriptor.
- */
- if (rx_status & MVPP2_RXD_ERR_SUMMARY)
- goto err_drop_frame;
-
if (port->priv->percpu_pools) {
pp = port->priv->page_pool[pool];
dma_dir = page_pool_get_dma_dir(pp);
@@ -3904,6 +3925,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes + MVPP2_MH_SIZE,
dma_dir);
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ /* In case of an error, release the requested buffer pointer
+ * to the Buffer Manager. This request process is controlled
+ * by the hardware, and the information about the buffer is
+ * comprised by the RX descriptor.
+ */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
/* Prefetch header */
prefetch(data);
@@ -3985,7 +4018,10 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+ else
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
rcu_read_unlock();
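Note: mvpp2_buff_hdr_pool_put() above walks a hardware-built chain of buffers: each buffer begins with a mvpp2_buff_hdr whose info field carries a 12-bit multicast ID and a "last" flag in bit 12, and whose next_* fields point to the next buffer in the chain. The sketch below reuses the same bit layout but models the chain with array indices instead of physical addresses; put_chain() and this struct buff_hdr are simplified, hypothetical analogues of the kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define B_HDR_INFO_MC_ID(info)   ((info) & 0xfff)        /* bits 0-11 */
    #define B_HDR_INFO_IS_LAST(info) (((info) >> 12) & 0x1)  /* bit 12    */

    struct buff_hdr {
    	uint16_t info;
    	int next;        /* index of the next buffer, models next_phys_addr */
    };

    /* Return every buffer of the chain starting at 'first' to the pool. */
    static int put_chain(const struct buff_hdr *bufs, int first)
    {
    	int idx = first, released = 0;
    	uint16_t info;

    	do {
    		info = bufs[idx].info;
    		idx = bufs[idx].next;  /* read the link before releasing     */
    		released++;            /* the driver calls mvpp2_bm_pool_put() here */
    	} while (!B_HDR_INFO_IS_LAST(info));

    	return released;
    }

    int main(void)
    {
    	/* three chained buffers; only the last one has bit 12 set in info */
    	struct buff_hdr chain[3] = {
    		{ .info = 0x0005, .next = 1 },
    		{ .info = 0x0005, .next = 2 },
    		{ .info = 0x1005, .next = -1 },
    	};

    	printf("released %d buffers\n", put_chain(chain, 0));  /* 3 */
    	return 0;
    }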
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index f4962a97a075..9d9a2e438acf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -786,6 +786,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ed4eacef17ce..64adfd24e134 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -681,32 +681,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
void mtk_stats_update_mac(struct mtk_mac *mac)
{
struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = MTK_GDM1_TX_GBCNT;
- u64 stats;
-
- base += hw_stats->reg_offset;
+ struct mtk_eth *eth = mac->hw;
u64_stats_update_begin(&hw_stats->syncp);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+ hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+ } else {
+ unsigned int offs = hw_stats->reg_offset;
+ u64 stats;
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw,
+ MTK_GDM1_RX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ hw_stats->rx_overflow +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ hw_stats->rx_fcs_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ hw_stats->rx_short_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ hw_stats->rx_long_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ }
+
u64_stats_update_end(&hw_stats->syncp);
}
@@ -2423,7 +2444,8 @@ static void mtk_dim_rx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2452,7 +2474,8 @@ static void mtk_dim_tx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2480,6 +2503,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
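Note: in the rewritten mtk_stats_update_mac() the GDM byte counters are read as a 32-bit low/high register pair and the high word is folded in only when it is non-zero. A small sketch of that accumulation; accumulate_bytes() is a hypothetical helper and the register reads are replaced by plain arguments.

    #include <stdint.h>
    #include <stdio.h>

    /* Combine a 32-bit low/high counter pair into the running 64-bit total,
     * mirroring the GBCNT_L/GBCNT_H handling in mtk_stats_update_mac().
     */
    static void accumulate_bytes(uint64_t *total, uint32_t lo, uint32_t hi)
    {
    	*total += lo;
    	if (hi)                          /* high word only set on overflow */
    		*total += (uint64_t)hi << 32;
    }

    int main(void)
    {
    	uint64_t rx_bytes = 0;

    	accumulate_bytes(&rx_bytes, 0xffffffffu, 0);  /* just below 4 GiB   */
    	accumulate_bytes(&rx_bytes, 0x10u, 1);        /* wrapped: high = 1  */
    	printf("rx_bytes = %llu\n", (unsigned long long)rx_bytes);
    	return 0;
    }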
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 11331b44ba07..5ef70dd8b49c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -278,8 +278,21 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN 0x1B2C
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT 0x2400
+/* GMA1 counter / statistics register */
+#define MTK_GDM1_RX_GBCNT_L 0x2400
+#define MTK_GDM1_RX_GBCNT_H 0x2404
+#define MTK_GDM1_RX_GPCNT 0x2408
+#define MTK_GDM1_RX_OERCNT 0x2410
+#define MTK_GDM1_RX_FERCNT 0x2414
+#define MTK_GDM1_RX_SERCNT 0x2418
+#define MTK_GDM1_RX_LENCNT 0x241c
+#define MTK_GDM1_RX_CERCNT 0x2420
+#define MTK_GDM1_RX_FCCNT 0x2424
+#define MTK_GDM1_TX_SKIPCNT 0x2428
+#define MTK_GDM1_TX_COLCNT 0x242c
+#define MTK_GDM1_TX_GBCNT_L 0x2430
+#define MTK_GDM1_TX_GBCNT_H 0x2434
+#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
/* QDMA descriptor txd4 */
@@ -502,6 +515,13 @@
#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 1434df66fcf2..3616b77caa0a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
-#define MLX4_EEPROM_PAGE_LEN 256
-
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ba6ac31a339d..256a06b3c096 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
return "Unknown Error";
}
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ int ret;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = 0;
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = I2C_ADDR_LOW;
+ cable_info->size = cpu_to_be16(1);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* Mad returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+ cable_info_mad_err_str(ret));
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ *module_id = cable_info->data[0];
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ *i2c_addr = I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < I2C_PAGE_SIZE)
+ return;
+
+ *i2c_addr = I2C_ADDR_HIGH;
+ *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ /* Offsets 0-255 belong to page 0.
+ * Offsets 256-639 belong to pages 01, 02, 03.
+ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+ */
+ if (*offset < I2C_PAGE_SIZE)
+ *page_num = 0;
+ else
+ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+ *i2c_addr = I2C_ADDR_LOW;
+ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
- u16 i2c_addr;
+ u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
+ ret = mlx4_get_module_id(dev, port, &module_id);
+ if (ret)
+ return ret;
+
+ switch (module_id) {
+ case MLX4_MODULE_ID_SFP:
+ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX4_MODULE_ID_QSFP:
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ case MLX4_MODULE_ID_QSFP28:
+ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+ return -EINVAL;
+ }
+
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
*/
size -= offset + size - I2C_PAGE_SIZE;
- i2c_addr = I2C_ADDR_LOW;
-
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
- cable_info->page_num = 0;
+ cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
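Note: the two helpers added above translate a flat EEPROM offset into the I2C address, page number and in-page offset the MAD expects: SFP modules switch from I2C address 0x50 to 0x51 past byte 255, while QSFP modules stay on 0x50 and select 128-byte upper pages. The stand-alone sketch below repeats that arithmetic and the worked example from the comment (offset 400 -> page 2); sfp_params() and qsfp_params() are hypothetical user-space copies of the kernel helpers.

    #include <stdio.h>

    #define I2C_ADDR_LOW       0x50
    #define I2C_ADDR_HIGH      0x51
    #define I2C_PAGE_SIZE      256
    #define I2C_HIGH_PAGE_SIZE 128

    static void sfp_params(unsigned int *i2c_addr, unsigned int *page, unsigned int *offset)
    {
    	*i2c_addr = I2C_ADDR_LOW;
    	*page = 0;
    	if (*offset < I2C_PAGE_SIZE)
    		return;
    	*i2c_addr = I2C_ADDR_HIGH;   /* second 256-byte space on SFP */
    	*offset -= I2C_PAGE_SIZE;
    }

    static void qsfp_params(unsigned int *i2c_addr, unsigned int *page, unsigned int *offset)
    {
    	if (*offset < I2C_PAGE_SIZE)
    		*page = 0;
    	else
    		*page = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
    	*i2c_addr = I2C_ADDR_LOW;
    	*offset -= *page * I2C_HIGH_PAGE_SIZE;
    }

    int main(void)
    {
    	unsigned int addr, page, off = 400;

    	qsfp_params(&addr, &page, &off);
    	printf("QSFP offset 400 -> addr 0x%x page %u offset %u\n", addr, page, off);

    	off = 300;
    	sfp_params(&addr, &page, &off);
    	printf("SFP  offset 300 -> addr 0x%x page %u offset %u\n", addr, page, off);
    	return 0;
    }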
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 95f2b26a3ee3..9c076aa20306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
rpriv = priv->ppriv;
fwd_vport_num = rpriv->rep->vport;
lag_dev = netdev_master_upper_dev_get(netdev);
+ if (!lag_dev)
+ return;
netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 6cdc52d50a48..311382261840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -626,7 +626,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5_eswitch *esw;
u32 zone_restore_id;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (!tc_skb_ext) {
WARN_ON(1);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 593503bc4d07..f1fb11680d20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1505,7 +1505,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0d571a0c76d9..0b75fab41ae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bca832cdc4cb..ad0f69480b9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -889,10 +889,13 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq)
+ if (rq->icosq) {
mlx5e_trigger_irq(rq->icosq);
- else
+ } else {
+ local_bh_disable();
napi_schedule(rq->cq.napi);
+ local_bh_enable();
+ }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2697,7 +2700,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
int err;
old_num_txqs = netdev->real_num_tx_queues;
- old_ntc = netdev->num_tc;
+ old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
@@ -5229,6 +5232,11 @@ static void mlx5e_update_features(struct net_device *netdev)
rtnl_unlock();
}
+static void mlx5e_reset_channels(struct net_device *netdev)
+{
+ netdev_reset_tc(netdev);
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
@@ -5283,6 +5291,7 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
@@ -5300,6 +5309,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
+ mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 47a9c49b25fd..2c776e7a7692 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1322,10 +1322,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool vf_tun = false, encap_valid = true;
+ struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
@@ -1371,16 +1371,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = __dev_get_by_index(dev_net(priv->netdev),
- mirred_ifindex);
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto err_out;
+ }
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
+ dev_put(out_dev);
if (err)
goto err_out;
@@ -1393,6 +1399,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
+ if (vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
@@ -3526,8 +3538,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
if (err)
return err;
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
- dev_get_iflink(vlan_dev));
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, action);
@@ -5074,7 +5090,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
chain = mapped_obj.chain;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (WARN_ON(!tc_skb_ext))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 570f2280823c..b88705a3a1a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index a81ece94f599..b45954905845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int err;
+ int err, err2;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action then the termination table is the
* same prio as the slow path
*/
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.prio = FDB_TC_OFFLOAD;
ft_attr.max_fte = 1;
+ ft_attr.level = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table (error %d)\n",
- IS_ERR(tt->termtbl));
- return -EOPNOTSUPP;
+ err = PTR_ERR(tt->termtbl);
+ esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
+ return err;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule (error %d)\n",
- IS_ERR(tt->rule));
+ err = PTR_ERR(tt->rule);
+ esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
goto add_flow_err;
}
return 0;
add_flow_err:
- err = mlx5_destroy_flow_table(tt->termtbl);
- if (err)
- esw_warn(dev, "Failed to destroy termination table\n");
+ err2 = mlx5_destroy_flow_table(tt->termtbl);
+ if (err2)
+ esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
- return -EOPNOTSUPP;
+ return err;
}
static struct mlx5_termtbl_handle *
@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
-{
- switch (rt->reformat_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
- return true;
- default:
- return false;
- }
-}
-
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
}
-
- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->pkt_reformat = src->pkt_reformat;
- src->pkt_reformat = NULL;
- }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+ } else {
+ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = NULL;
+ }
+
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
- IS_ERR(tt));
+ esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
goto revert_changes;
/* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 2c41a6920264..fd6196b5e163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
struct lag_mp *mp = &ldev->lag_mp;
int err;
+ /* always clear mfi, as it might become stale when a route delete event
+ * has been missed
+ */
+ mp->mfi = NULL;
+
if (mp->fib_nb.notifier_call)
return 0;
@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ mp->mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index fd8449ff9e17..839a01da110f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/mpfs.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -175,6 +176,7 @@ out:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_add_mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
{
@@ -206,3 +208,4 @@ unlock:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_del_mac);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a7b2c3203a7..4a293542a7aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -84,12 +84,9 @@ struct l2addr_node {
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
-int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
-static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
-static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c114365eb126..a1d67bd7fb43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- struct mlx5_profile *prof = dev->profile;
+ struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int err;
@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- profile[prof_sel].log_max_qp,
+ prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
}
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -1381,8 +1381,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
struct mlx5_priv *priv = &dev->priv;
int err;
- dev->profile = &profile[profile_idx];
-
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->intf_state_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 1f907df5b3a2..c3373fb1cd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -95,9 +95,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
- int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
- void *hca_cap, *cap;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -116,11 +117,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- hca_cap = kzalloc(sz, GFP_KERNEL);
- if (!hca_cap)
- return -ENOMEM;
+ query_cap = kzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ if (ret)
+ goto out;
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
@@ -130,7 +140,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+out:
kfree(hca_cap);
+ kfree(query_cap);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a8e73c9ed1ea..1be048769309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
switch (hw_state) {
case MLX5_VHCA_STATE_ACTIVE:
case MLX5_VHCA_STATE_IN_USE:
- case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
return DEVLINK_PORT_FN_STATE_ACTIVE;
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
default:
return DEVLINK_PORT_FN_STATE_INACTIVE;
}
@@ -192,14 +192,17 @@ sf_err:
return err;
}
-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+ struct netlink_ext_ack *extack)
{
int err;
if (mlx5_sf_is_active(sf))
return 0;
- if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
- return -EINVAL;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+ NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
+ return -EBUSY;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf,
- enum devlink_port_fn_state state)
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
if (state == mlx5_sf_to_devlink_state(sf->hw_state))
goto out;
if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
- err = mlx5_sf_activate(dev, sf);
+ err = mlx5_sf_activate(dev, sf, extack);
else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
err = mlx5_sf_deactivate(dev, sf);
else
@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
goto out;
}
- err = mlx5_sf_state_set(dev, table, sf, state);
+ err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 3658c4ae3c37..ee921a99e439 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* Microchip ENCX24J600 ethernet driver
*
* Copyright (C) 2015 Gridpoint
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index f604a260ede7..fac61a8fbd02 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* encx24j600_hw.h: Register definitions
*
*/
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 5f8b0bb3af6e..202973a82712 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
+ depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
select NET_DEVLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 214e347097a7..2376b2729633 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
value = readl(&port_regs->CommonRegs.semaphoreReg);
if ((value & (sem_mask >> 16)) == sem_bits)
return 0;
- ssleep(1);
+ mdelay(1000);
} while (--seconds);
return -1;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index d8a3ecaed3fc..d8f0863b3934 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
- break;
+ goto error;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
cnt++;
}
if (cnt != i) {
+error:
dev_err(&adapter->pdev->dev,
"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
if (mode != QLCNIC_ILB_MODE)
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index d1e908846f5d..22fbb0ae77fb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx->pci_dev->irq);
goto fail1;
}
+ efx->irqs_hooked = true;
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 527077c98ebc..fc3b0acc8f99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -30,7 +30,7 @@ struct sunxi_priv_data {
static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- int ret;
+ int ret = 0;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
} else {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
ret = clk_prepare(gmac->tx_clk);
- if (ret)
- return ret;
+ if (ret && gmac->regulator)
+ regulator_disable(gmac->regulator);
}
- return 0;
+ return ret;
}
static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 345b4c6d1fd4..5d956a553434 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1196,7 +1196,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
*/
static int stmmac_init_phy(struct net_device *dev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct stmmac_priv *priv = netdev_priv(dev);
struct device_node *node;
int ret;
@@ -1222,8 +1221,12 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_connect_phy(priv->phylink, phydev);
}
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
return ret;
}
@@ -5888,12 +5891,21 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
ret = eth_mac_addr(ndev, addr);
if (ret)
- return ret;
+ goto set_mac_error;
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+set_mac_error:
+ pm_runtime_put(priv->device);
+
return ret;
}
@@ -6188,12 +6200,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6219,6 +6225,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 707ccdd03b19..74e748662ec0 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
for (i = 0; i < prop_len; i++) {
- err = niu_pci_eeprom_read(np, off + i);
- if (err >= 0)
- *prop_buf = err;
- ++prop_buf;
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err < 0)
+ return err;
+ *prop_buf++ = err;
}
}
@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+static int niu_pci_vpd_fetch(struct niu *np, u32 start)
{
u32 offset;
int err;
err = niu_pci_eeprom_read16_swp(np, start + 1);
if (err < 0)
- return;
+ return err;
offset = err + 3;
@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
u32 end;
err = niu_pci_eeprom_read(np, here);
+ if (err < 0)
+ return err;
if (err != 0x90)
- return;
+ return -EINVAL;
err = niu_pci_eeprom_read16_swp(np, here + 1);
if (err < 0)
- return;
+ return err;
here = start + offset + 3;
end = start + offset + err;
@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
offset += err;
err = niu_pci_vpd_scan_props(np, here, end);
- if (err < 0 || err == 1)
- return;
+ if (err < 0)
+ return err;
+ if (err == 1)
+ return -EINVAL;
}
+ return 0;
}
/* ESPC_PIO_EN_ENABLE must be set */
@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
offset = niu_pci_vpd_offset(np);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"%s() VPD offset [%08x]\n", __func__, offset);
- if (offset)
- niu_pci_vpd_fetch(np, offset);
+ if (offset) {
+ err = niu_pci_vpd_fetch(np, offset);
+ if (err < 0)
+ return err;
+ }
nw64(ESPC_PIO_EN, 0);
if (np->flags & NIU_FLAGS_VPD_VALID) {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9030e619e543..97942b0e3897 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
KNAV_QUEUE_SHARED);
if (IS_ERR(tx_pipe->dma_queue)) {
- dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
- name, ret);
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+ name, tx_pipe->dma_queue);
ret = PTR_ERR(tx_pipe->dma_queue);
goto err;
}
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index e7ff376cb5b7..744406832a77 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -58,6 +58,7 @@ enum ipa_flag {
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
* @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem_count: Number of entries in the mem array
* @mem: Array of IPA-local memory region descriptors
* @imem_iova: I/O virtual address of IPA region in IMEM
* @imem_size: Size of IMEM region
@@ -103,6 +104,7 @@ struct ipa {
void *mem_virt;
u32 mem_offset;
u32 mem_size;
+ u32 mem_count;
const struct ipa_mem *mem;
unsigned long imem_iova;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index c5c3b1b7e67d..1624125e7459 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -180,7 +180,7 @@ int ipa_mem_config(struct ipa *ipa)
* for the region, write "canary" values in the space prior to
* the region's base address.
*/
- for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
+ for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
const struct ipa_mem *mem = &ipa->mem[mem_id];
u16 canary_count;
__le32 *canary;
@@ -487,6 +487,7 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_size = resource_size(res);
/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
+ ipa->mem_count = mem_data->local_count;
ipa->mem = mem_data->local;
ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 8ce99c4888e1..e096e68ac667 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return 0;
fail_register:
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index cb1761693b69..822d2cdd2f35 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
continue;
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
pci_release_regions(pdev);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index dadf75ff3ab9..6045ad3def12 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -607,7 +607,8 @@ void mdiobus_unregister(struct mii_bus *bus)
struct mdio_device *mdiodev;
int i;
- BUG_ON(bus->state != MDIOBUS_REGISTERED);
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
bus->state = MDIOBUS_UNREGISTERED;
for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 0eeec80bec31..2e60bc1b9a6b 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -26,7 +26,7 @@
* for transport over USB using a simpler USB device model than the
* previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
*
- * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
+ * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
*
* This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
* 2.6.27 and 2.6.30rc2 kernel.
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index c7563ed3ac31..f59e4973b0bd 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1689,7 +1689,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
spin_unlock_irqrestore(&serial->serial_lock, flags);
return usb_control_msg(serial->parent->usb,
- usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
+ usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
0x21, val, if_num, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -2436,7 +2436,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
if (hso_dev->usb_gone)
rv = 0;
else
- rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
+ rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
mutex_unlock(&hso_dev->mutex);
@@ -2618,32 +2618,31 @@ static struct hso_device *hso_create_bulk_serial_device(
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+ if (!serial->tiocmget)
+ goto exit;
serial->tiocmget->serial_state_notification
= kzalloc(sizeof(struct hso_serial_state_notification),
GFP_KERNEL);
- /* it isn't going to break our heart if serial->tiocmget
- * allocation fails don't bother checking this.
- */
- if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
- tiocmget = serial->tiocmget;
- tiocmget->endp = hso_get_ep(interface,
- USB_ENDPOINT_XFER_INT,
- USB_DIR_IN);
- if (!tiocmget->endp) {
- dev_err(&interface->dev, "Failed to find INT IN ep\n");
- goto exit;
- }
-
- tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (tiocmget->urb) {
- mutex_init(&tiocmget->mutex);
- init_waitqueue_head(&tiocmget->waitq);
- } else
- hso_free_tiomget(serial);
+ if (!serial->tiocmget->serial_state_notification)
+ goto exit;
+ tiocmget = serial->tiocmget;
+ tiocmget->endp = hso_get_ep(interface,
+ USB_ENDPOINT_XFER_INT,
+ USB_DIR_IN);
+ if (!tiocmget->endp) {
+ dev_err(&interface->dev, "Failed to find INT IN ep\n");
+ goto exit;
}
- }
- else
+
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tiocmget->urb)
+ goto exit;
+
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+ } else {
num_urbs = 1;
+ }
if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
BULK_URB_TX_SIZE))
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6acc5e904518..02bce40a67e5 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1645,6 +1645,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = lan78xx_get_eee,
.set_eee = lan78xx_set_eee,
.get_pauseparam = lan78xx_get_pause,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 136ea06540ff..f6abb2fbf972 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8107,6 +8107,37 @@ static void r8156b_init(struct r8152 *tp)
tp->coalesce = 15000; /* 15 us */
}
+static bool rtl_check_vendor_ok(struct usb_interface *intf)
+{
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *in, *out, *intr;
+
+ if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
+ dev_err(&intf->dev, "Expected endpoints are not found\n");
+ return false;
+ }
+
+ /* Check Rx endpoint address */
+ if (usb_endpoint_num(in) != 1) {
+ dev_err(&intf->dev, "Invalid Rx endpoint address\n");
+ return false;
+ }
+
+ /* Check Tx endpoint address */
+ if (usb_endpoint_num(out) != 2) {
+ dev_err(&intf->dev, "Invalid Tx endpoint address\n");
+ return false;
+ }
+
+ /* Check interrupt endpoint address */
+ if (usb_endpoint_num(intr) != 3) {
+ dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
+ return false;
+ }
+
+ return true;
+}
+
static bool rtl_vendor_mode(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
@@ -8115,12 +8146,15 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
int i, num_configs;
if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
- return true;
+ return rtl_check_vendor_ok(intf);
/* The vendor mode is not always config #1, so to find it out. */
udev = interface_to_usbdev(intf);
c = udev->config;
num_configs = udev->descriptor.bNumConfigurations;
+ if (num_configs < 2)
+ return false;
+
for (i = 0; i < num_configs; (i++, c++)) {
struct usb_interface_descriptor *desc = NULL;
@@ -8135,7 +8169,8 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
}
}
- WARN_ON_ONCE(i == num_configs);
+ if (i == num_configs)
+ dev_err(&intf->dev, "Unexpected Device\n");
return false;
}
@@ -9381,9 +9416,6 @@ static int rtl8152_probe(struct usb_interface *intf,
if (!rtl_vendor_mode(intf))
return -ENODEV;
- if (intf->cur_altsetting->desc.bNumEndpoints < 3)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f8cdabb9ef5a..b286993da67c 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
- return ret;
+ goto err;
}
smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_reset(dev);
if (ret < 0) {
netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
- return ret;
+ goto err;
}
dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1502,6 +1502,10 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
+
+err:
+ kfree(pdata);
+ return ret;
}
static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
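
The smsc75xx_bind() hunks are a leak fix: once pdata has been allocated, every later failure has to free it before returning, which the new err label centralizes. A standalone sketch of that goto-based cleanup pattern, with hypothetical names:

/* Illustrative only: the goto-based cleanup pattern used by smsc75xx_bind(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct priv_data { int dummy; };

static struct priv_data *bound;

static int wait_ready(int fail) { return fail ? -EIO : 0; }

static int bind_device(int fail_step)
{
	struct priv_data *pdata;
	int ret;

	pdata = calloc(1, sizeof(*pdata));
	if (!pdata)
		return -ENOMEM;

	ret = wait_ready(fail_step == 1);
	if (ret < 0)
		goto err;		/* free pdata instead of leaking it */

	ret = wait_ready(fail_step == 2);
	if (ret < 0)
		goto err;

	bound = pdata;			/* success: ownership moves to the device */
	return 0;

err:
	free(pdata);
	return ret;
}

int main(void)
{
	printf("fail path: %d\n", bind_device(1));
	printf("ok path:   %d\n", bind_device(0));
	free(bound);
	return 0;
}
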
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 956157946106..dbc8aef82a65 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -845,6 +845,7 @@ enum htt_security_types {
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
+#define ATH10K_TXRX_NON_QOS_TID 16
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1a08156d5011..7ffb5d5b2a70 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1746,16 +1746,97 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+ }
+ return pn;
+}
+
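
ath10k_htt_rx_h_get_pn() above rebuilds the 48-bit CCMP packet number from the 8-byte CCMP header, where PN0/PN1 occupy bytes 0-1 and PN2-PN5 occupy bytes 4-7 (bytes 2-3 carry the reserved and key-ID octets). A self-contained sketch of that byte layout, fed with a made-up header:

/* Illustrative only: assembling a 48-bit CCMP PN from the 8-byte CCMP header
 * (PN0, PN1, rsvd, key-id, PN2, PN3, PN4, PN5), as the new helper does. */
#include <stdint.h>
#include <stdio.h>

static uint64_t ccmp_pn_from_hdr(const uint8_t ehdr[8])
{
	uint64_t pn;

	pn  = ehdr[0];
	pn |= (uint64_t)ehdr[1] << 8;
	pn |= (uint64_t)ehdr[4] << 16;
	pn |= (uint64_t)ehdr[5] << 24;
	pn |= (uint64_t)ehdr[6] << 32;
	pn |= (uint64_t)ehdr[7] << 40;
	return pn;
}

int main(void)
{
	/* PN = 1, key-ID/ext-IV byte set to 0x20 */
	const uint8_t hdr[8] = { 0x01, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00 };

	printf("pn = %llu\n", (unsigned long long)ccmp_pn_from_hdr(hdr));
	return 0;
}
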
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 offset)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ return !is_multicast_ether_addr(hdr->addr1);
+}
+
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 peer_id,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_peer *peer;
+ union htt_rx_pn_t *last_pn, new_pn = {0};
+ struct ieee80211_hdr *hdr;
+ bool more_frags;
+ u8 tid, frag_number;
+ u32 seq;
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
+ return false;
+ }
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = ATH10K_TXRX_NON_QOS_TID;
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
+ more_frags = ieee80211_has_morefrags(hdr->frame_control);
+ frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ if (frag_number == 0) {
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else {
+ if (seq != peer->frag_tids_seq[tid])
+ return false;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ return false;
+
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ return true;
+}
+
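
The fragment PN check keeps, per TID, the sequence number and PN of the last accepted fragment: fragment 0 (re)seeds that state, and every later fragment must carry the same sequence number and a PN exactly one above the previous fragment's. A compact sketch of the same state machine, detached from the driver structures:

/* Illustrative only: per-TID fragment PN/sequence continuity check in the
 * style of ath10k_htt_rx_h_frag_pn_check(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct frag_state {
	uint64_t last_pn;	/* PN of the last accepted fragment */
	uint16_t last_seq;	/* sequence number of the current fragment train */
};

static bool frag_ok(struct frag_state *st, uint16_t seq, uint8_t frag_no,
		    uint64_t pn)
{
	if (frag_no == 0) {
		st->last_pn = pn;	/* first fragment seeds the state */
		st->last_seq = seq;
		return true;
	}
	if (seq != st->last_seq)	/* must belong to the same MPDU */
		return false;
	if (pn != st->last_pn + 1)	/* PNs must be consecutive */
		return false;
	st->last_pn = pn;
	return true;
}

int main(void)
{
	struct frag_state st = { 0, 0 };

	printf("%d\n", frag_ok(&st, 100, 0, 5000));	/* 1: seeds the state */
	printf("%d\n", frag_ok(&st, 100, 1, 5001));	/* 1: consecutive PN */
	printf("%d\n", frag_ok(&st, 100, 2, 5003));	/* 0: PN gap, dropped */
	return 0;
}
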
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
bool fill_crypt_header,
u8 *rx_hdr,
- enum ath10k_pkt_rx_err *err)
+ enum ath10k_pkt_rx_err *err,
+ u16 peer_id,
+ bool frag)
{
struct sk_buff *first;
struct sk_buff *last;
- struct sk_buff *msdu;
+ struct sk_buff *msdu, *temp;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -1768,6 +1849,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool is_decrypted;
bool is_mgmt;
u32 attention;
+ bool frag_pn_check = true, multicast_check = true;
if (skb_queue_empty(amsdu))
return;
@@ -1866,7 +1948,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
skb_queue_walk(amsdu, msdu) {
+ if (frag && !fill_crypt_header && is_decrypted &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
+ msdu,
+ peer_id,
+ 0,
+ enctype);
+
+ if (frag)
+ multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
+ msdu,
+ 0);
+
+ if (!frag_pn_check || !multicast_check) {
+			/* Discard the fragment with invalid PN or multicast DA */
+ temp = msdu->prev;
+ __skb_unlink(msdu, amsdu);
+ dev_kfree_skb_any(msdu);
+ msdu = temp;
+ frag_pn_check = true;
+ multicast_check = true;
+ continue;
+ }
+
ath10k_htt_rx_h_csum_offload(msdu);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_MMIC_STRIPPED;
+
ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
is_decrypted);
@@ -1884,6 +1996,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
}
}
@@ -1991,14 +2108,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = (void *)first->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+ /* First msdu flag is not set for the first msdu of the list */
+ if (!is_first)
+ return false;
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+	/* Validate that the A-MSDU has a proper first subframe.
+	 * A single MSDU can be received as an A-MSDU when the unauthenticated
+	 * A-MSDU flag of a QoS header gets flipped in non-SPP A-MSDUs; in such
+	 * cases the first subframe carries an LLC/SNAP header in place of a
+	 * valid DA. Return false if the DA matches the RFC 1042 pattern.
+	 */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
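
ath10k_htt_rx_validate_amsdu() above defends against A-MSDU flag flipping: when a plain MSDU is reparsed as an A-MSDU, the first six bytes after the 802.11 header are an LLC/SNAP header rather than a real destination address, so a "DA" equal to the RFC 1042 pattern (AA:AA:03:00:00:00) marks the frame as forged. A minimal version of that check over a raw byte buffer:

/* Illustrative only: rejecting an A-MSDU whose first subframe "DA" is really
 * an RFC 1042 LLC/SNAP header, as ath10k_htt_rx_validate_amsdu() does. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

static bool first_subframe_ok(const uint8_t *subframe_hdr)
{
	/* A valid subframe starts with a destination MAC address; a forged
	 * one starts with the LLC/SNAP header of the original payload. */
	return memcmp(subframe_hdr, rfc1042_header, sizeof(rfc1042_header)) != 0;
}

int main(void)
{
	const uint8_t forged[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	const uint8_t normal[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("forged subframe: %s\n", first_subframe_ok(forged) ? "ok" : "dropped");
	printf("normal subframe: %s\n", first_subframe_ok(normal) ? "ok" : "dropped");
	return 0;
}
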
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- /* FIXME: It might be a good idea to do some fuzzy-testing to drop
- * invalid/dangerous frames.
- */
-
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
@@ -2009,6 +2174,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
return true;
}
@@ -2071,7 +2241,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
+ false);
msdus_to_queue = skb_queue_len(&amsdu);
ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
@@ -2204,6 +2375,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
+ if (fw_desc->u.bits.discard) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+ goto err;
+ }
+
/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce the
	 * same limitation here as well.
@@ -2509,6 +2685,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
rx_desc_info = __le32_to_cpu(rx_desc->info);
+ hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ /* Discard the fragment with multicast DA */
+ goto err;
+ }
+
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
@@ -2516,8 +2699,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
HTT_RX_NON_TKIP_MIC);
}
- hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
-
if (ieee80211_has_retry(hdr->frame_control))
goto err;
@@ -3027,7 +3208,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
- NULL);
+ NULL, peer_id, frag);
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index f2b6bf8f0d60..705b6295e466 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
- u8 info0;
+ union {
+ struct {
+ u8 discard:1,
+ forward:1,
+ any_err:1,
+ dup_err:1,
+ reserved:1,
+ inspect:1,
+ extension:2;
+ } bits;
+ u8 info0;
+ } u;
+
u8 version;
u8 len;
u8 flags;
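
The rx_desc.h change turns the opaque info0 byte of fw_rx_desc_hl into a bitfield union so the HL RX path can test the firmware's discard flag directly. Because C bitfield layout is implementation-defined, the sketch below reads the same bits with explicit masks instead; the mask names are invented and the bit positions assume the little-endian layout the driver targets:

/* Illustrative only: reading the fw_rx_desc_hl flag bits with explicit masks
 * instead of the bitfield union. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HL_DESC_DISCARD	(1u << 0)	/* assumed bit positions, for the sketch */
#define HL_DESC_FORWARD	(1u << 1)
#define HL_DESC_ANY_ERR	(1u << 2)

static bool fw_desc_discard(uint8_t info0)
{
	return info0 & HL_DESC_DISCARD;
}

int main(void)
{
	uint8_t keep = HL_DESC_FORWARD;
	uint8_t drop = HL_DESC_DISCARD | HL_DESC_ANY_ERR;

	printf("keep: discard=%d\n", fw_desc_discard(keep));
	printf("drop: discard=%d\n", fw_desc_discard(drop));
	return 0;
}
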
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 1d9aa1bb6b6e..603d2f93ac18 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -260,6 +260,16 @@ static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+ return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+ (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+ __le32_to_cpu(attn->info1)));
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -852,6 +862,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
__skb_queue_purge(&rx_tid->rx_frags);
}
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ }
+}
+
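
ath11k_peer_frags_flush() has to call del_timer_sync() for every TID while the caller holds ar->ab->base_lock, and the fragment timer callback takes that same lock, so the helper drops the lock around the synchronous wait and retakes it afterwards. A userspace analogue of that pattern, using a pthread mutex and a blocking join (all names invented):

/* Illustrative only: dropping a lock around a blocking wait, as
 * ath11k_peer_frags_flush() does around del_timer_sync(). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_frags = 3;

static void *worker(void *arg)
{
	(void)arg;
	/* The worker needs state_lock, like a timer callback that takes the
	 * same lock the flusher holds. */
	pthread_mutex_lock(&state_lock);
	pending_frags = 0;
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&state_lock);
	pthread_create(&t, NULL, worker, NULL);

	/* Waiting for the worker while holding state_lock would deadlock,
	 * so release it around the blocking join and retake it after. */
	pthread_mutex_unlock(&state_lock);
	pthread_join(t, NULL);
	pthread_mutex_lock(&state_lock);

	printf("pending_frags = %d\n", pending_frags);
	pthread_mutex_unlock(&state_lock);
	return 0;
}
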
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
@@ -3450,6 +3478,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
u8 tid;
int ret = 0;
bool more_frags;
+ bool is_mcbc;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
@@ -3457,6 +3486,11 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+ is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+ /* Multicast/Broadcast fragments are not expected */
+ if (is_mcbc)
+ return -EINVAL;
if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index bf399312b5ff..623da3bf9dc8 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 4df425dd31a2..9d0ff150ec30 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2779,6 +2779,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+	/* Flush the fragment cache during key (re)install to ensure that all
+	 * fragments in the new fragment list belong to the same key.
+	 */
+ if (peer && cmd == SET_KEY)
+ ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7506cea46f58..433a047f3747 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1027,14 +1027,17 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
{
struct ath6kl *ar = file->private_data;
unsigned long lrssi_roam_threshold;
+ int ret;
if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
return -EINVAL;
ar->lrssi_roam_threshold = lrssi_roam_threshold;
- ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ce8c102df7b3..633d0ab19031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1217,13 +1217,9 @@ static struct sdio_driver brcmf_sdmmc_driver = {
},
};
-void brcmf_sdio_register(void)
+int brcmf_sdio_register(void)
{
- int ret;
-
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- if (ret)
- brcmf_err("sdio_register_driver failed: %d\n", ret);
+ return sdio_register_driver(&brcmf_sdmmc_driver);
}
void brcmf_sdio_exit(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 08f9d47f2e5c..3f5da3bb6aa5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -275,11 +275,26 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
-void brcmf_sdio_register(void);
+int brcmf_sdio_register(void);
+#else
+static inline void brcmf_sdio_exit(void) { }
+static inline int brcmf_sdio_register(void) { return 0; }
#endif
+
#ifdef CONFIG_BRCMFMAC_USB
void brcmf_usb_exit(void);
-void brcmf_usb_register(void);
+int brcmf_usb_register(void);
+#else
+static inline void brcmf_usb_exit(void) { }
+static inline int brcmf_usb_register(void) { return 0; }
+#endif
+
+#ifdef CONFIG_BRCMFMAC_PCIE
+void brcmf_pcie_exit(void);
+int brcmf_pcie_register(void);
+#else
+static inline void brcmf_pcie_exit(void) { }
+static inline int brcmf_pcie_register(void) { return 0; }
#endif
#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 838b09b23abf..cee1682d2333 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1518,40 +1518,34 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
}
}
-static void brcmf_driver_register(struct work_struct *work)
-{
-#ifdef CONFIG_BRCMFMAC_SDIO
- brcmf_sdio_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
- brcmf_usb_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
- brcmf_pcie_register();
-#endif
-}
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
-
int __init brcmf_core_init(void)
{
- if (!schedule_work(&brcmf_driver_work))
- return -EBUSY;
+ int err;
+ err = brcmf_sdio_register();
+ if (err)
+ return err;
+
+ err = brcmf_usb_register();
+ if (err)
+ goto error_usb_register;
+
+ err = brcmf_pcie_register();
+ if (err)
+ goto error_pcie_register;
return 0;
+
+error_pcie_register:
+ brcmf_usb_exit();
+error_usb_register:
+ brcmf_sdio_exit();
+ return err;
}
void __exit brcmf_core_exit(void)
{
- cancel_work_sync(&brcmf_driver_work);
-
-#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
brcmf_pcie_exit();
-#endif
}
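
brcmf_core_init() now registers the SDIO, USB and PCIe drivers inline and unwinds in reverse order when a later registration fails, instead of kicking the registrations off from a workqueue and discarding their return values. A stub sketch of that register-then-unwind shape:

/* Illustrative only: the register-then-unwind error handling used by the
 * reworked brcmf_core_init(); the three buses are simulated with stubs. */
#include <errno.h>
#include <stdio.h>

static int register_sdio(void)    { puts("sdio registered");  return 0; }
static void unregister_sdio(void) { puts("sdio unregistered"); }
static int register_usb(void)     { puts("usb registered");   return 0; }
static void unregister_usb(void)  { puts("usb unregistered"); }
static int register_pcie(void)    { puts("pcie registration failed"); return -ENODEV; }

static int core_init(void)
{
	int err;

	err = register_sdio();
	if (err)
		return err;

	err = register_usb();
	if (err)
		goto error_usb_register;

	err = register_pcie();
	if (err)
		goto error_pcie_register;

	return 0;

error_pcie_register:
	unregister_usb();	/* undo in reverse order of registration */
error_usb_register:
	unregister_sdio();
	return err;
}

int main(void)
{
	printf("core_init() = %d\n", core_init());
	return 0;
}
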
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index ad79e3b7e74a..143a705b5cb3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2140,15 +2140,10 @@ static struct pci_driver brcmf_pciedrvr = {
};
-void brcmf_pcie_register(void)
+int brcmf_pcie_register(void)
{
- int err;
-
brcmf_dbg(PCIE, "Enter\n");
- err = pci_register_driver(&brcmf_pciedrvr);
- if (err)
- brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
- err);
+ return pci_register_driver(&brcmf_pciedrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
index d026401d2001..8e6c227e8315 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
@@ -11,9 +11,4 @@ struct brcmf_pciedev {
struct brcmf_pciedev_info *devinfo;
};
-
-void brcmf_pcie_exit(void);
-void brcmf_pcie_register(void);
-
-
#endif /* BRCMFMAC_PCIE_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 586f4dfc638b..9fb68c2dc7e3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1584,12 +1584,8 @@ void brcmf_usb_exit(void)
usb_deregister(&brcmf_usbdrvr);
}
-void brcmf_usb_register(void)
+int brcmf_usb_register(void)
{
- int ret;
-
brcmf_dbg(USB, "Enter\n");
- ret = usb_register(&brcmf_usbdrvr);
- if (ret)
- brcmf_err("usb_register failed %d\n", ret);
+ return usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index f5b78257d551..c68814841583 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -801,24 +801,6 @@ static const struct attribute_group mesh_ie_group = {
.attrs = mesh_ie_attrs,
};
-static void lbs_persist_config_init(struct net_device *dev)
-{
- int ret;
- ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
- if (ret)
- pr_err("failed to create boot_opts_group.\n");
-
- ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
- if (ret)
- pr_err("failed to create mesh_ie_group.\n");
-}
-
-static void lbs_persist_config_remove(struct net_device *dev)
-{
- sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
- sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
/***************************************************************************
* Initializing and starting, stopping mesh
@@ -1014,6 +996,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
+ mesh_dev->sysfs_groups[1] = &boot_opts_group;
+ mesh_dev->sysfs_groups[2] = &mesh_ie_group;
+
/* Register virtual mesh interface */
ret = register_netdev(mesh_dev);
if (ret) {
@@ -1021,19 +1007,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
goto err_free_netdev;
}
- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- if (ret)
- goto err_unregister;
-
- lbs_persist_config_init(mesh_dev);
-
/* Everything successful */
ret = 0;
goto done;
-err_unregister:
- unregister_netdev(mesh_dev);
-
err_free_netdev:
free_netdev(mesh_dev);
@@ -1054,8 +1031,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
netif_stop_queue(mesh_dev);
netif_carrier_off(mesh_dev);
- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- lbs_persist_config_remove(mesh_dev);
unregister_netdev(mesh_dev);
priv->mesh_dev = NULL;
kfree(mesh_dev->ieee80211_ptr);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 2a7ee90a3f54..ffd150ec181f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
static void rtl_fwevt_wq_callback(struct work_struct *work);
static void rtl_c2hcmd_wq_callback(struct work_struct *work);
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (!wq)
+ return -ENOMEM;
/* <1> timer */
timer_setup(&rtlpriv->works.watchdog_timer,
@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
- if (unlikely(!rtlpriv->works.rtl_wq)) {
- pr_err("Failed to allocate work queue\n");
- return;
- }
+ rtlpriv->works.rtl_wq = wq;
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
rtl_watchdog_wq_callback);
@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_swlps_rfon_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
+ return 0;
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -564,9 +566,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
rtlmac->link_state = MAC80211_NOLINK;
/* <6> init deferred work */
- _rtl_init_deferred_work(hw);
-
- return 0;
+ return _rtl_init_deferred_work(hw);
}
EXPORT_SYMBOL_GPL(rtl_init_core);
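
_rtl_init_deferred_work() now allocates its workqueue before touching any driver state and lets rtl_init_core() propagate -ENOMEM, so a failed allocation no longer leaves the timers and delayed work half set up. A small sketch of that allocate-first, commit-on-success shape, with invented types:

/* Illustrative only: allocate the fallible resource first and commit it to the
 * long-lived object only on success, as the reworked _rtl_init_deferred_work()
 * does with its workqueue. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct workqueue { int dummy; };

struct deferred_works {
	struct workqueue *wq;	/* committed only after allocation succeeds */
	int timers_armed;
};

static int init_deferred_work(struct deferred_works *works, int simulate_oom)
{
	struct workqueue *wq;

	wq = simulate_oom ? NULL : calloc(1, sizeof(*wq));
	if (!wq)
		return -ENOMEM;		/* nothing else has been touched yet */

	works->timers_armed = 1;	/* the rest of the setup cannot fail */
	works->wq = wq;
	return 0;
}

int main(void)
{
	struct deferred_works works = { NULL, 0 };
	int err;

	err = init_deferred_work(&works, 1);
	printf("oom path: %d (wq=%p)\n", err, (void *)works.wq);

	err = init_deferred_work(&works, 0);
	printf("ok path:  %d (wq=%p)\n", err, (void *)works.wq);

	free(works.wq);
	return 0;
}
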