author     Linus Torvalds  2020-06-13 16:27:13 -0700
committer  Linus Torvalds  2020-06-13 16:27:13 -0700
commit     96144c58abe7ff767e754b5b80995f7b8846d49b (patch)
tree       7fcc47090ced9be71fa35cbf5e00d0160b04a2d1 /drivers
parent     f82e7b57b5fc48199e2f26ffafe2f96f7338ad3d (diff)
parent     bc139119a1708ae3db1ebb379630f286e28d06e8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
1) Fix cfg80211 deadlock, from Johannes Berg.
2) RXRPC fails to send notifications, from David Howells.
3) MPTCP RM_ADDR parsing has an off-by-one pointer error, fix from
Geliang Tang. An illustrative sketch of this bug class follows the
list.
4) Fix crash when using MSG_PEEK with sockmap, from Anny Hu.
5) The ucc_geth driver needs __netdev_watchdog_up exported, from
Valentin Longchamp.
6) Fix hashtable memory leak in dccp, from Wang Hai.
7) Fix how nexthops are marked as FDB nexthops, from David Ahern.
8) Fix mptcp races between shutdown and recvmsg, from Paolo Abeni.
9) Fix crashes in tipc_disc_rcv(), from Tuong Lien.
10) Fix link speed reporting in iavf driver, from Brett Creeley.
11) When a channel is used for XSK and then later reused for XSK, mlx5
forgets to clear out the relevant data structures, which causes all
kinds of problems. Fix from Maxim Mikityanskiy; see the clear-on-close
sketch after this list.
12) Fix memory leak in genetlink, from Cong Wang.
13) Disallow sockmap attachments to UDP sockets; it simply won't work.
From Lorenz Bauer.
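
The following is a minimal, self-contained sketch of the off-by-one
pointer bug class from item 3. It is not the MPTCP RM_ADDR code; the
TLV layout and every name in it are hypothetical. The parser walks
type/length/value options in a buffer, and advancing the cursor one
byte too far misreads every option after the first; the fix is to
advance by exactly the header size plus the value length.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical wire format, chosen only to illustrate the bug class. */
struct tlv {
	uint8_t type;
	uint8_t len;		/* length of value[] only */
	uint8_t value[];
};

static void parse(const uint8_t *buf, size_t size)
{
	const uint8_t *p = buf;

	while (p + sizeof(struct tlv) <= buf + size) {
		const struct tlv *opt = (const struct tlv *)p;

		if (p + sizeof(*opt) + opt->len > buf + size)
			break;
		printf("type=%u len=%u\n", opt->type, opt->len);

		/* BUG:  p += sizeof(*opt) + opt->len + 1;
		 * skips one byte too many and lands in the middle of the
		 * next option's header.
		 */
		p += sizeof(*opt) + opt->len;	/* fixed advance */
	}
}

int main(void)
{
	/* two back-to-back options: (type 1, 2 bytes), (type 2, 1 byte) */
	const uint8_t buf[] = { 1, 2, 0xaa, 0xbb, 2, 1, 0xcc };

	parse(buf, sizeof(buf));
	return 0;
}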
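
Likewise, a minimal sketch of the clear-on-close pattern behind item
11, using hypothetical channel and queue types rather than the mlx5
ones (the actual fix in the diff below zeroes c->xskrq, c->xsksq and
c->xskicosq in mlx5e_close_xsk()). If per-channel XSK state is not
zeroed when the XSK side is closed, a later reuse of the same channel
for XSK inherits stale pointers and ring indices.

#include <string.h>

struct xsk_queue_state {		/* hypothetical per-queue state */
	void *ring;
	unsigned int head, tail;
	int enabled;
};

struct channel {			/* hypothetical channel */
	struct xsk_queue_state xsk_rq;
	struct xsk_queue_state xsk_sq;
};

void channel_close_xsk(struct channel *c)
{
	/* ... tear down rings, completion queues, interrupts ... */

	/* Without this, reopening XSK on the same channel starts from
	 * whatever state the previous XSK socket left behind.
	 */
	memset(&c->xsk_rq, 0, sizeof(c->xsk_rq));
	memset(&c->xsk_sq, 0, sizeof(c->xsk_sq));
}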
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (83 commits)
net: ethernet: ti: ale: fix allmulti for nu type ale
net: ethernet: ti: am65-cpsw-nuss: fix ale parameters init
net: atm: Remove the error message according to the atomic context
bpf: Undo internal BPF_PROBE_MEM in BPF insns dump
libbpf: Support pre-initializing .bss global variables
tools/bpftool: Fix skeleton codegen
bpf: Fix memlock accounting for sock_hash
bpf: sockmap: Don't attach programs to UDP sockets
bpf: tcp: Recv() should return 0 when the peer socket is closed
ibmvnic: Flush existing work items before device removal
genetlink: clean up family attributes allocations
net: ipa: header pad field only valid for AP->modem endpoint
net: ipa: program upper nibbles of sequencer type
net: ipa: fix modem LAN RX endpoint id
net: ipa: program metadata mask differently
ionic: add pcie_print_link_status
rxrpc: Fix race between incoming ACK parser and retransmitter
net/mlx5: E-Switch, Fix some error pointer dereferences
net/mlx5: Don't fail driver on failure to create debugfs
net/mlx5e: CT: Fix ipv6 nat header rewrite actions
...
Diffstat (limited to 'drivers')
34 files changed, 459 insertions, 234 deletions
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index f26a7a15551a..4c2553672b6f 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2590,11 +2590,22 @@ int chcr_aead_dma_map(struct device *dev, struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(tfm); - int dst_size; + int src_len, dst_len; - dst_size = req->assoclen + req->cryptlen + (op_type ? - 0 : authsize); - if (!req->cryptlen || !dst_size) + /* calculate and handle src and dst sg length separately + * for inplace and out-of place operations + */ + if (req->src == req->dst) { + src_len = req->assoclen + req->cryptlen + (op_type ? + 0 : authsize); + dst_len = src_len; + } else { + src_len = req->assoclen + req->cryptlen; + dst_len = req->assoclen + req->cryptlen + (op_type ? + -authsize : authsize); + } + + if (!req->cryptlen || !src_len || !dst_len) return 0; reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); @@ -2606,20 +2617,23 @@ int chcr_aead_dma_map(struct device *dev, reqctx->b0_dma = 0; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, - sg_nents_for_len(req->src, dst_size), + sg_nents_for_len(req->src, src_len), DMA_BIDIRECTIONAL); if (!error) goto err; } else { - error = dma_map_sg(dev, req->src, sg_nents(req->src), + error = dma_map_sg(dev, req->src, + sg_nents_for_len(req->src, src_len), DMA_TO_DEVICE); if (!error) goto err; - error = dma_map_sg(dev, req->dst, sg_nents(req->dst), + error = dma_map_sg(dev, req->dst, + sg_nents_for_len(req->dst, dst_len), DMA_FROM_DEVICE); if (!error) { - dma_unmap_sg(dev, req->src, sg_nents(req->src), - DMA_TO_DEVICE); + dma_unmap_sg(dev, req->src, + sg_nents_for_len(req->src, src_len), + DMA_TO_DEVICE); goto err; } } @@ -2637,24 +2651,37 @@ void chcr_aead_dma_unmap(struct device *dev, struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(tfm); - int dst_size; + int src_len, dst_len; - dst_size = req->assoclen + req->cryptlen + (op_type ? - 0 : authsize); - if (!req->cryptlen || !dst_size) + /* calculate and handle src and dst sg length separately + * for inplace and out-of place operations + */ + if (req->src == req->dst) { + src_len = req->assoclen + req->cryptlen + (op_type ? + 0 : authsize); + dst_len = src_len; + } else { + src_len = req->assoclen + req->cryptlen; + dst_len = req->assoclen + req->cryptlen + (op_type ? 
+ -authsize : authsize); + } + + if (!req->cryptlen || !src_len || !dst_len) return; dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, - sg_nents_for_len(req->src, dst_size), + sg_nents_for_len(req->src, src_len), DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(dev, req->src, sg_nents(req->src), - DMA_TO_DEVICE); - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), - DMA_FROM_DEVICE); + dma_unmap_sg(dev, req->src, + sg_nents_for_len(req->src, src_len), + DMA_TO_DEVICE); + dma_unmap_sg(dev, req->dst, + sg_nents_for_len(req->dst, dst_len), + DMA_FROM_DEVICE); } } @@ -4364,22 +4391,32 @@ static int chcr_unregister_alg(void) for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - if (driver_algs[i].is_registered) + if (driver_algs[i].is_registered && refcount_read( + &driver_algs[i].alg.skcipher.base.cra_refcnt) + == 1) { crypto_unregister_skcipher( &driver_algs[i].alg.skcipher); + driver_algs[i].is_registered = 0; + } break; case CRYPTO_ALG_TYPE_AEAD: - if (driver_algs[i].is_registered) + if (driver_algs[i].is_registered && refcount_read( + &driver_algs[i].alg.aead.base.cra_refcnt) == 1) { crypto_unregister_aead( &driver_algs[i].alg.aead); + driver_algs[i].is_registered = 0; + } break; case CRYPTO_ALG_TYPE_AHASH: - if (driver_algs[i].is_registered) + if (driver_algs[i].is_registered && refcount_read( + &driver_algs[i].alg.hash.halg.base.cra_refcnt) + == 1) { crypto_unregister_ahash( &driver_algs[i].alg.hash); + driver_algs[i].is_registered = 0; + } break; } - driver_algs[i].is_registered = 0; } return 0; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a25c65d4af71..004919aea5fb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3687,8 +3687,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd case BOND_RELEASE_OLD: case SIOCBONDRELEASE: res = bond_release(bond_dev, slave_dev); - if (!res) - netdev_update_lockdep_key(slave_dev); break; case BOND_SETHWADDR_OLD: case SIOCBONDSETHWADDR: diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 215c10923289..ddb3916d3506 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1398,8 +1398,6 @@ static int bond_option_slaves_set(struct bonding *bond, case '-': slave_dbg(bond->dev, dev, "Releasing interface\n"); ret = bond_release(bond->dev, dev); - if (!ret) - netdev_update_lockdep_key(dev); break; default: diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 36290a8e2a84..5b9d7c60eebc 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -2558,13 +2558,16 @@ static int macb_open(struct net_device *dev) err = macb_phylink_connect(bp); if (err) - goto pm_exit; + goto napi_exit; netif_tx_start_all_queues(dev); if (bp->ptp_info) bp->ptp_info->ptp_init(dev); +napi_exit: + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + napi_disable(&queue->napi); pm_exit: if (err) { pm_runtime_put_sync(&bp->pdev->dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 8fb48de5d18c..f150cd454fa4 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2907,8 +2907,9 @@ static int 
setup_dpni(struct fsl_mc_device *ls_dev) if (err && err != -EOPNOTSUPP) goto close; - priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * - dpaa2_eth_fs_count(priv), GFP_KERNEL); + priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), + sizeof(struct dpaa2_eth_cls_rule), + GFP_KERNEL); if (!priv->cls_rules) { err = -ENOMEM; goto close; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 197dc5b2c090..1b4d04e4474b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -5184,6 +5184,9 @@ static int ibmvnic_remove(struct vio_dev *dev) adapter->state = VNIC_REMOVING; spin_unlock_irqrestore(&adapter->state_lock, flags); + flush_work(&adapter->ibmvnic_reset); + flush_delayed_work(&adapter->ibmvnic_delayed_reset); + rtnl_lock(); unregister_netdevice(netdev); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index bcd11b4b29df..10b805ba03ee 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -87,6 +87,10 @@ struct iavf_vsi { #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) #define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ +#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \ + (IAVF_MAX_VF_VSI * \ + sizeof(struct virtchnl_vsi_resource))) + /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. */ @@ -215,6 +219,10 @@ struct iavf_cloud_filter { bool add; /* filter needs to be added */ }; +#define IAVF_RESET_WAIT_MS 10 +#define IAVF_RESET_WAIT_DETECTED_COUNT 500 +#define IAVF_RESET_WAIT_COMPLETE_COUNT 2000 + /* board specific private data structure */ struct iavf_adapter { struct work_struct reset_task; @@ -306,6 +314,14 @@ struct iavf_adapter { bool netdev_registered; bool link_up; enum virtchnl_link_speed link_speed; + /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set + * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if + * this field is valid. This field should be used going forward and the + * enum virtchnl_link_speed above should be considered the legacy way of + * storing/communicating link speeds. + */ + u32 link_speed_mbps; + enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ (_a)->vf_res->vf_cap_flags & \ @@ -322,6 +338,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN) +#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_CAP_ADV_LINK_SPEED) struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 2c39d46b6138..181573822942 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -278,35 +278,46 @@ static int iavf_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(cmd, supported); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; - /* Set speed and duplex */ + cmd->base.duplex = DUPLEX_FULL; + + if (ADV_LINK_SUPPORT(adapter)) { + if (adapter->link_speed_mbps && + adapter->link_speed_mbps < U32_MAX) + cmd->base.speed = adapter->link_speed_mbps; + else + cmd->base.speed = SPEED_UNKNOWN; + + return 0; + } + switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: + case VIRTCHNL_LINK_SPEED_40GB: cmd->base.speed = SPEED_40000; break; - case IAVF_LINK_SPEED_25GB: -#ifdef SPEED_25000 + case VIRTCHNL_LINK_SPEED_25GB: cmd->base.speed = SPEED_25000; -#else - netdev_info(netdev, - "Speed is 25G, display not supported by this version of ethtool.\n"); -#endif break; - case IAVF_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_20GB: cmd->base.speed = SPEED_20000; break; - case IAVF_LINK_SPEED_10GB: + case VIRTCHNL_LINK_SPEED_10GB: cmd->base.speed = SPEED_10000; break; - case IAVF_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_5GB: + cmd->base.speed = SPEED_5000; + break; + case VIRTCHNL_LINK_SPEED_2_5GB: + cmd->base.speed = SPEED_2500; + break; + case VIRTCHNL_LINK_SPEED_1GB: cmd->base.speed = SPEED_1000; break; - case IAVF_LINK_SPEED_100MB: + case VIRTCHNL_LINK_SPEED_100MB: cmd->base.speed = SPEED_100; break; default: break; } - cmd->base.duplex = DUPLEX_FULL; return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 2050649848ba..fa82768e5eda 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1756,17 +1756,17 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; - int err = 0, bufsz; + int err; WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (IAVF_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource)); - adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); - if (!adapter->vf_res) + adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, + GFP_KERNEL); + if (!adapter->vf_res) { + err = -ENOMEM; goto err; + } } err = iavf_get_vf_config(adapter); if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { @@ -2036,7 +2036,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_reset_interrupt_capability(adapter); iavf_free_queues(adapter); iavf_free_q_vectors(adapter); - kfree(adapter->vf_res); + memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -2046,8 +2046,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } -#define IAVF_RESET_WAIT_MS 10 -#define IAVF_RESET_WAIT_COUNT 500 /** * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct @@ -2101,20 +2099,20 @@ 
static void iavf_reset_task(struct work_struct *work) adapter->flags |= IAVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ - for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) break; usleep_range(5000, 10000); } - if (i == IAVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } /* wait until the reset is complete and the PF is responding to us */ - for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { /* sleep first to make sure a minimum wait time is met */ msleep(IAVF_RESET_WAIT_MS); @@ -2126,7 +2124,7 @@ static void iavf_reset_task(struct work_struct *work) pci_set_master(adapter->pdev); - if (i == IAVF_RESET_WAIT_COUNT) { + if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); @@ -2487,29 +2485,46 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, { int speed = 0, ret = 0; + if (ADV_LINK_SUPPORT(adapter)) { + if (adapter->link_speed_mbps < U32_MAX) { + speed = adapter->link_speed_mbps; + goto validate_bw; + } else { + dev_err(&adapter->pdev->dev, "Unknown link speed\n"); + return -EINVAL; + } + } + switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: - speed = 40000; + case VIRTCHNL_LINK_SPEED_40GB: + speed = SPEED_40000; + break; + case VIRTCHNL_LINK_SPEED_25GB: + speed = SPEED_25000; + break; + case VIRTCHNL_LINK_SPEED_20GB: + speed = SPEED_20000; break; - case IAVF_LINK_SPEED_25GB: - speed = 25000; + case VIRTCHNL_LINK_SPEED_10GB: + speed = SPEED_10000; break; - case IAVF_LINK_SPEED_20GB: - speed = 20000; + case VIRTCHNL_LINK_SPEED_5GB: + speed = SPEED_5000; break; - case IAVF_LINK_SPEED_10GB: - speed = 10000; + case VIRTCHNL_LINK_SPEED_2_5GB: + speed = SPEED_2500; break; - case IAVF_LINK_SPEED_1GB: - speed = 1000; + case VIRTCHNL_LINK_SPEED_1GB: + speed = SPEED_1000; break; - case IAVF_LINK_SPEED_100MB: - speed = 100; + case VIRTCHNL_LINK_SPEED_100MB: + speed = SPEED_100; break; default: break; } +validate_bw: if (max_tx_rate > speed) { dev_err(&adapter->pdev->dev, "Invalid tx rate specified\n"); @@ -3412,7 +3427,7 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) u32 rstat; int i; - for (i = 0; i < 100; i++) { + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { rstat = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 7a30d5d5ef53..e091bab7e770 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -379,19 +379,19 @@ static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) unsigned int divisor; switch (q_vector->adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: + case VIRTCHNL_LINK_SPEED_40GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; break; - case IAVF_LINK_SPEED_25GB: - case IAVF_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_25GB: + case VIRTCHNL_LINK_SPEED_20GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; break; default: - case IAVF_LINK_SPEED_10GB: + case VIRTCHNL_LINK_SPEED_10GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; break; - case IAVF_LINK_SPEED_1GB: - case IAVF_LINK_SPEED_100MB: + case 
VIRTCHNL_LINK_SPEED_1GB: + case VIRTCHNL_LINK_SPEED_100MB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index d58374c2c33d..ed08ace4f05a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -139,7 +139,8 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | - VIRTCHNL_VF_OFFLOAD_ADQ; + VIRTCHNL_VF_OFFLOAD_ADQ | + VIRTCHNL_VF_CAP_ADV_LINK_SPEED; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; @@ -891,6 +892,8 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } +#define IAVF_MAX_SPEED_STRLEN 13 + /** * iavf_print_link_message - print link up or down * @adapter: adapter structure @@ -900,37 +903,105 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - char *speed = "Unknown "; + int link_speed_mbps; + char *speed; if (!adapter->link_up) { netdev_info(netdev, "NIC Link is Down\n"); return; } + speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + + if (ADV_LINK_SUPPORT(adapter)) { + link_speed_mbps = adapter->link_speed_mbps; + goto print_link_msg; + } + switch (adapter->link_speed) { - case IAVF_LINK_SPEED_40GB: - speed = "40 G"; + case VIRTCHNL_LINK_SPEED_40GB: + link_speed_mbps = SPEED_40000; + break; + case VIRTCHNL_LINK_SPEED_25GB: + link_speed_mbps = SPEED_25000; + break; + case VIRTCHNL_LINK_SPEED_20GB: + link_speed_mbps = SPEED_20000; break; - case IAVF_LINK_SPEED_25GB: - speed = "25 G"; + case VIRTCHNL_LINK_SPEED_10GB: + link_speed_mbps = SPEED_10000; break; - case IAVF_LINK_SPEED_20GB: - speed = "20 G"; + case VIRTCHNL_LINK_SPEED_5GB: + link_speed_mbps = SPEED_5000; break; - case IAVF_LINK_SPEED_10GB: - speed = "10 G"; + case VIRTCHNL_LINK_SPEED_2_5GB: + link_speed_mbps = SPEED_2500; break; - case IAVF_LINK_SPEED_1GB: - speed = "1000 M"; + case VIRTCHNL_LINK_SPEED_1GB: + link_speed_mbps = SPEED_1000; break; - case IAVF_LINK_SPEED_100MB: - speed = "100 M"; + case VIRTCHNL_LINK_SPEED_100MB: + link_speed_mbps = SPEED_100; break; default: + link_speed_mbps = SPEED_UNKNOWN; break; } - netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); +print_link_msg: + if (link_speed_mbps > SPEED_1000) { + if (link_speed_mbps == SPEED_2500) + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); + else + /* convert to Gbps inline */ + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", + link_speed_mbps / 1000, "Gbps"); + } else if (link_speed_mbps == SPEED_UNKNOWN) { + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); + } else { + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s", + link_speed_mbps, "Mbps"); + } + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); + kfree(speed); +} + +/** + * iavf_get_vpe_link_status + * @adapter: adapter structure + * @vpe: virtchnl_pf_event structure + * + * Helper function for determining the link status + **/ +static bool +iavf_get_vpe_link_status(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + return vpe->event_data.link_event_adv.link_status; + else + return vpe->event_data.link_event.link_status; +} + 
+/** + * iavf_set_adapter_link_speed_from_vpe + * @adapter: adapter structure for which we are setting the link speed + * @vpe: virtchnl_pf_event structure that contains the link speed we are setting + * + * Helper function for setting iavf_adapter link speed + **/ +static void +iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + adapter->link_speed_mbps = + vpe->event_data.link_event_adv.link_speed; + else + adapter->link_speed = vpe->event_data.link_event.link_speed; } /** @@ -1160,12 +1231,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; - bool link_up = vpe->event_data.link_event.link_status; + bool link_up = iavf_get_vpe_link_status(adapter, vpe); switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: - adapter->link_speed = - vpe->event_data.link_event.link_speed; + iavf_set_adapter_link_speed_from_vpe(adapter, vpe); /* we've already got the right link status, bail */ if (adapter->link_up == link_up) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 4cc9abd61c43..946925bbcb2d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -452,11 +452,17 @@ struct mvneta_pcpu_port { u32 cause_rx_tx; }; +enum { + __MVNETA_DOWN, +}; + struct mvneta_port { u8 id; struct mvneta_pcpu_port __percpu *ports; struct mvneta_pcpu_stats __percpu *stats; + unsigned long state; + int pkt_size; void __iomem *base; struct mvneta_rx_queue *rxqs; @@ -2113,6 +2119,9 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame, struct netdev_queue *nq; u32 ret; + if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) + return -ENETDOWN; + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -3568,12 +3577,16 @@ static void mvneta_start_dev(struct mvneta_port *pp) phylink_start(pp->phylink); netif_tx_start_all_queues(pp->dev); + + clear_bit(__MVNETA_DOWN, &pp->state); } static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; + set_bit(__MVNETA_DOWN, &pp->state); + phylink_stop(pp->phylink); if (!pp->neta_armada3700) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e94f0c4d74a7..a99fe4b02b9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -283,7 +283,6 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev) goto params_reg_err; mlx5_devlink_set_params_init_values(devlink); devlink_params_publish(devlink); - devlink_reload_enable(devlink); return 0; params_reg_err: @@ -293,7 +292,6 @@ params_reg_err: void mlx5_devlink_unregister(struct devlink *devlink) { - devlink_reload_disable(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); devlink_unregister(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index afc19dca1f5f..430025550fad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -328,21 +328,21 @@ mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act, case FLOW_ACT_MANGLE_HDR_TYPE_IP6: MLX5_SET(set_action_in, modact, length, 0); - if (offset == offsetof(struct ipv6hdr, saddr)) + if (offset == offsetof(struct ipv6hdr, saddr) + 12) field = 
MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0; - else if (offset == offsetof(struct ipv6hdr, saddr) + 4) - field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32; else if (offset == offsetof(struct ipv6hdr, saddr) + 8) + field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32; + else if (offset == offsetof(struct ipv6hdr, saddr) + 4) field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64; - else if (offset == offsetof(struct ipv6hdr, saddr) + 12) + else if (offset == offsetof(struct ipv6hdr, saddr)) field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96; - else if (offset == offsetof(struct ipv6hdr, daddr)) + else if (offset == offsetof(struct ipv6hdr, daddr) + 12) field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0; - else if (offset == offsetof(struct ipv6hdr, daddr) + 4) - field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32; else if (offset == offsetof(struct ipv6hdr, daddr) + 8) + field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32; + else if (offset == offsetof(struct ipv6hdr, daddr) + 4) field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64; - else if (offset == offsetof(struct ipv6hdr, daddr) + 12) + else if (offset == offsetof(struct ipv6hdr, daddr)) field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96; else return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index c28cbae42331..2c80205dc939 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) mlx5e_close_cq(&c->xskicosq.cq); mlx5e_close_xdpsq(&c->xsksq); mlx5e_close_cq(&c->xsksq.cq); + + memset(&c->xskrq, 0, sizeof(c->xskrq)); + memset(&c->xsksq, 0, sizeof(c->xsksq)); + memset(&c->xskicosq, 0, sizeof(c->xskicosq)); } void mlx5e_activate_xsk(struct mlx5e_channel *c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3ef2525e8de9..ec5658bbe3c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1173,7 +1173,8 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rss_params *rss = &priv->rss_params; int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - bool hash_changed = false; + bool refresh_tirs = false; + bool refresh_rqt = false; void *in; if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && @@ -1189,36 +1190,38 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { rss->hfunc = hfunc; - hash_changed = true; + refresh_rqt = true; + refresh_tirs = true; } if (indir) { memcpy(rss->indirection_rqt, indir, sizeof(rss->indirection_rqt)); - - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - u32 rqtn = priv->indir_rqt.rqtn; - struct mlx5e_redirect_rqt_param rrp = { - .is_rss = true, - { - .rss = { - .hfunc = rss->hfunc, - .channels = &priv->channels, - }, - }, - }; - - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); - } + refresh_rqt = true; } if (key) { memcpy(rss->toeplitz_hash_key, key, sizeof(rss->toeplitz_hash_key)); - hash_changed = hash_changed || rss->hfunc == ETH_RSS_HASH_TOP; + refresh_tirs = refresh_tirs || rss->hfunc == ETH_RSS_HASH_TOP; + } + + if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) { + struct mlx5e_redirect_rqt_param rrp = { + .is_rss = true, + { + .rss = { + .hfunc = rss->hfunc, + .channels = &priv->channels, + }, + }, + }; + u32 rqtn = priv->indir_rqt.rqtn; + + 
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); } - if (hash_changed) + if (refresh_tirs) mlx5e_modify_tirs_hash(priv, in); mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 9bda4fe2eafa..5dc335e621c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -162,10 +162,12 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { counter = mlx5_fc_create(esw->dev, false); - if (IS_ERR(counter)) + if (IS_ERR(counter)) { esw_warn(esw->dev, "vport[%d] configure ingress drop rule counter failed\n", vport->vport); + counter = NULL; + } vport->ingress.legacy.drop_counter = counter; } @@ -272,7 +274,7 @@ void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, esw_acl_ingress_table_destroy(vport); clean_drop_counter: - if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) { + if (vport->ingress.legacy.drop_counter) { mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); vport->ingress.legacy.drop_counter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index c0cfbab15fe9..b31f769d2df9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -192,15 +192,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { + bool err_detected = false; + + /* Mark the device as fatal in order to abort FW commands */ + if ((check_fatal_sensors(dev) || force) && + dev->state == MLX5_DEVICE_STATE_UP) { + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + err_detected = true; + } mutex_lock(&dev->intf_state_mutex); - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) - goto unlock; + if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto unlock;/* a previous error is still being handled */ if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; goto unlock; } - if (check_fatal_sensors(dev) || force) { + if (check_fatal_sensors(dev) || force) { /* protected state setting */ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mlx5_cmd_flush(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index df46b1fce3a7..8b658908f044 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -785,6 +785,11 @@ err_disable: static void mlx5_pci_close(struct mlx5_core_dev *dev) { + /* health work might still be active, and it needs pci bar in + * order to know the NIC state. 
Therefore, drain the health WQ + * before removing the pci bars + */ + mlx5_drain_health_wq(dev); iounmap(dev->iseg); pci_clear_master(dev->pdev); release_bar(dev->pdev); @@ -1194,23 +1199,22 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) if (err) goto err_load; + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + if (boot) { err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); if (err) goto err_devlink_reg; - } - - if (mlx5_device_registered(dev)) - mlx5_attach_device(dev); - else mlx5_register_device(dev); - - set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + } else { + mlx5_attach_device(dev); + } mutex_unlock(&dev->intf_state_mutex); return 0; err_devlink_reg: + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); err_load: if (boot) @@ -1226,10 +1230,15 @@ out: void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) { - if (cleanup) + mutex_lock(&dev->intf_state_mutex); + + if (cleanup) { mlx5_unregister_device(dev); + mlx5_devlink_unregister(priv_to_devlink(dev)); + } else { + mlx5_detach_device(dev); + } - mutex_lock(&dev->intf_state_mutex); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); @@ -1240,9 +1249,6 @@ void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); - if (mlx5_device_registered(dev)) - mlx5_detach_device(dev); - mlx5_unload(dev); if (cleanup) @@ -1275,11 +1281,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) priv->dbg_root = debugfs_create_dir(dev_name(dev->device), mlx5_debugfs_root); - if (!priv->dbg_root) { - dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n"); - goto err_dbg_root; - } - err = mlx5_health_init(dev); if (err) goto err_health_init; @@ -1294,7 +1295,6 @@ err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: debugfs_remove(dev->priv.dbg_root); -err_dbg_root: mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); @@ -1362,6 +1362,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); pci_save_state(pdev); + devlink_reload_enable(devlink); return 0; err_load_one: @@ -1379,9 +1380,8 @@ static void remove_one(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); + devlink_reload_disable(devlink); mlx5_crdump_disable(dev); - mlx5_devlink_unregister(devlink); - mlx5_drain_health_wq(dev); mlx5_unload_one(dev, true); mlx5_pci_close(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index f421013b0b54..2ca79b9bde1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -179,7 +179,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn); - kfree(in); + kvfree(in); if (err) goto err_in; dr_qp->uar = attr->uar; diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 23ccc0da2341..f5a910c458ba 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ 
b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -17,7 +17,6 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 -#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT 0x1004 #define DEVCMD_TIMEOUT 10 @@ -42,7 +41,6 @@ struct ionic { struct dentry *dentry; struct ionic_dev_bar bars[IONIC_BARS_MAX]; unsigned int num_bars; - bool is_mgmt_nic; struct ionic_identity ident; struct list_head lifs; struct ionic_lif *master_lif; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 60fc191a35e5..2924cde440aa 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -15,7 +15,6 @@ static const struct pci_device_id ionic_id_table[] = { { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) }, { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) }, - { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT) }, { 0, } /* end of table */ }; MODULE_DEVICE_TABLE(pci, ionic_id_table); @@ -225,9 +224,6 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, ionic); mutex_init(&ionic->dev_cmd_lock); - ionic->is_mgmt_nic = - ent->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT; - /* Query system for DMA addressing limitation for the device. */ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN)); if (err) { @@ -252,8 +248,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - if (!ionic->is_mgmt_nic) - pcie_print_link_status(pdev); + pcie_print_link_status(pdev); err = ionic_map_bars(ionic); if (err) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index 273c889faaad..2d590e571133 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -77,10 +77,6 @@ int ionic_devlink_register(struct ionic *ionic) return err; } - /* don't register the mgmt_nic as a port */ - if (ionic->is_mgmt_nic) - return 0; - devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, 0, false, 0, NULL, 0); err = devlink_port_register(dl, &ionic->dl_port, 0); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 7321a92f8395..9d8c969f21cb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -99,9 +99,6 @@ static void ionic_link_status_check(struct ionic_lif *lif) if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return; - if (lif->ionic->is_mgmt_nic) - return; - link_status = le16_to_cpu(lif->info->status.link_status); link_up = link_status == IONIC_PORT_OPER_STATUS_UP; @@ -116,7 +113,7 @@ static void ionic_link_status_check(struct ionic_lif *lif) netif_carrier_on(netdev); } - if (netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) ionic_start_queues(lif); } else { if (netif_carrier_ok(netdev)) { @@ -124,7 +121,7 @@ static void ionic_link_status_check(struct ionic_lif *lif) netif_carrier_off(netdev); } - if (netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) ionic_stop_queues(lif); } @@ -1193,10 +1190,6 @@ static int ionic_init_nic_features(struct ionic_lif *lif) netdev_features_t features; int err; - /* no netdev features on the management 
device */ - if (lif->ionic->is_mgmt_nic) - return 0; - /* set up what we expect to support by default */ features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | @@ -2594,12 +2587,6 @@ int ionic_lifs_register(struct ionic *ionic) { int err; - /* the netdev is not registered on the management device, it is - * only used as a vehicle for napi operations on the adminq - */ - if (ionic->is_mgmt_nic) - return 0; - INIT_WORK(&ionic->nb_work, ionic_lif_notify_work); ionic->nb.notifier_call = ionic_lif_notify; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 87a4775ed53a..1492648247d9 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1981,7 +1981,7 @@ MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); static int am65_cpsw_nuss_probe(struct platform_device *pdev) { - struct cpsw_ale_params ale_params; + struct cpsw_ale_params ale_params = { 0 }; const struct of_device_id *of_id; struct device *dev = &pdev->dev; struct am65_cpsw_common *common; diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 8dc6be11b2ff..9ad872bfae3a 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -604,10 +604,44 @@ void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, } } +static void cpsw_ale_vlan_set_unreg_mcast(struct cpsw_ale *ale, u32 *ale_entry, + int allmulti) +{ + int unreg_mcast; + + unreg_mcast = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + if (allmulti) + unreg_mcast |= ALE_PORT_HOST; + else + unreg_mcast &= ~ALE_PORT_HOST; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); +} + +static void +cpsw_ale_vlan_set_unreg_mcast_idx(struct cpsw_ale *ale, u32 *ale_entry, + int allmulti) +{ + int unreg_mcast; + int idx; + + idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry); + + unreg_mcast = readl(ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); + + if (allmulti) + unreg_mcast |= ALE_PORT_HOST; + else + unreg_mcast &= ~ALE_PORT_HOST; + + writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); +} + void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port) { u32 ale_entry[ALE_ENTRY_WORDS]; - int unreg_mcast = 0; int type, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { @@ -624,15 +658,12 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port) if (port != -1 && !(vlan_members & BIT(port))) continue; - unreg_mcast = - cpsw_ale_get_vlan_unreg_mcast(ale_entry, - ale->vlan_field_bits); - if (allmulti) - unreg_mcast |= ALE_PORT_HOST; + if (!ale->params.nu_switch_ale) + cpsw_ale_vlan_set_unreg_mcast(ale, ale_entry, allmulti); else - unreg_mcast &= ~ALE_PORT_HOST; - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, - ale->vlan_field_bits); + cpsw_ale_vlan_set_unreg_mcast_idx(ale, ale_entry, + allmulti); + cpsw_ale_write(ale, idx, ale_entry); } } diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 60dcaf2a04a9..1ad6085994b1 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -113,6 +113,7 @@ static LIST_HEAD(bpq_devices); * off into a separate class since they always nest. 
*/ static struct lock_class_key bpq_netdev_xmit_lock_key; +static struct lock_class_key bpq_netdev_addr_lock_key; static void bpq_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, @@ -123,6 +124,7 @@ static void bpq_set_lockdep_class_one(struct net_device *dev, static void bpq_set_lockdep_class(struct net_device *dev) { + lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); } diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c index 43faa35ae726..d4c2bc7ad24b 100644 --- a/drivers/net/ipa/ipa_data-sc7180.c +++ b/drivers/net/ipa/ipa_data-sc7180.c @@ -106,7 +106,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { [IPA_ENDPOINT_MODEM_LAN_RX] = { .ee_id = GSI_EE_MODEM, .channel_id = 3, - .endpoint_id = 13, + .endpoint_id = 11, .toward_ipa = false, }, [IPA_ENDPOINT_MODEM_AP_TX] = { diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 66649a806dd1..9f50d0d11704 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -32,6 +32,9 @@ /* The amount of RX buffer space consumed by standard skb overhead */ #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0)) +/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */ +#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ + #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 #define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */ @@ -433,6 +436,24 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } +/** + * We program QMAP endpoints so each packet received is preceded by a QMAP + * header structure. The QMAP header contains a 1-byte mux_id and 2-byte + * packet size field, and we have the IPA hardware populate both for each + * received packet. The header is configured (in the HDR_EXT register) + * to use big endian format. + * + * The packet size is written into the QMAP header's pkt_len field. That + * location is defined here using the HDR_OFST_PKT_SIZE field. + * + * The mux_id comes from a 4-byte metadata value supplied with each packet + * by the modem. It is *not* a QMAP header, but it does contain the mux_id + * value that we want, in its low-order byte. A bitmask defined in the + * endpoint's METADATA_MASK register defines which byte within the modem + * metadata contains the mux_id. And the OFST_METADATA field programmed + * here indicates where the extracted byte should be placed within the QMAP + * header. 
+ */ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); @@ -441,25 +462,31 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) if (endpoint->data->qmap) { size_t header_size = sizeof(struct rmnet_map_header); + /* We might supply a checksum header after the QMAP header */ if (endpoint->toward_ipa && endpoint->data->checksum) header_size += sizeof(struct rmnet_map_ul_csum_header); - val |= u32_encode_bits(header_size, HDR_LEN_FMASK); - /* metadata is the 4 byte rmnet_map header itself */ - val |= HDR_OFST_METADATA_VALID_FMASK; - val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK); - /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */ + + /* Define how to fill fields in a received QMAP header */ if (!endpoint->toward_ipa) { - u32 size_offset = offsetof(struct rmnet_map_header, - pkt_len); + u32 off; /* Field offset within header */ + + /* Where IPA will write the metadata value */ + off = offsetof(struct rmnet_map_header, mux_id); + val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK); + /* Where IPA will write the length */ + off = offsetof(struct rmnet_map_header, pkt_len); val |= HDR_OFST_PKT_SIZE_VALID_FMASK; - val |= u32_encode_bits(size_offset, - HDR_OFST_PKT_SIZE_FMASK); + val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK); } + /* For QMAP TX, metadata offset is 0 (modem assumes this) */ + val |= HDR_OFST_METADATA_VALID_FMASK; + + /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */ /* HDR_A5_MUX is 0 */ /* HDR_LEN_INC_DEAGG_HDR is 0 */ - /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */ + /* HDR_METADATA_REG_VALID is 0 (TX only) */ } iowrite32(val, endpoint->ipa->reg_virt + offset); @@ -472,38 +499,27 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) u32 val = 0; val |= HDR_ENDIANNESS_FMASK; /* big endian */ - val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; - /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ + + /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet + * driver assumes this field is meaningful in packets it receives, + * and assumes the header's payload length includes that padding. + * The RMNet driver does *not* pad packets it sends, however, so + * the pad field (although 0) should be ignored. + */ + if (endpoint->data->qmap && !endpoint->toward_ipa) { + val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; + /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ + val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; + /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ + } + /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */ - /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ if (!endpoint->toward_ipa) val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } -/** - * Generate a metadata mask value that will select only the mux_id - * field in an rmnet_map header structure. The mux_id is at offset - * 1 byte from the beginning of the structure, but the metadata - * value is treated as a 4-byte unit. So this mask must be computed - * with endianness in mind. Note that ipa_endpoint_init_hdr_metadata_mask() - * will convert this value to the proper byte order. - * - * Marked __always_inline because this is really computing a - * constant value. 
- */ -static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void) -{ - size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id); - u32 mux_id_mask = 0; - u8 *bytes; - - bytes = (u8 *)&mux_id_mask; - bytes[mux_id_offset] = 0xff; /* mux_id is 1 byte */ - - return cpu_to_be32(mux_id_mask); -} static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) { @@ -513,8 +529,9 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); + /* Note that HDR_ENDIANNESS indicates big endian header fields */ if (!endpoint->toward_ipa && endpoint->data->qmap) - val = ipa_rmnet_mux_id_metadata_mask(); + val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -693,10 +710,12 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) u32 seq_type = endpoint->seq_type; u32 val = 0; + /* Sequencer type is made up of four nibbles */ val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK); val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK); - /* HPS_REP_SEQ_TYPE is 0 */ - /* DPS_REP_SEQ_TYPE is 0 */ + /* The second two apply to replicated packets */ + val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK); + val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 3b8106aa277a..0a688d8c1d7c 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -455,6 +455,8 @@ enum ipa_mode { * second packet processing pass + no decipher + microcontroller * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression + * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP: + * packet processing + no decipher + no uCP + HPS REP DMA parser * @IPA_SEQ_INVALID: invalid sequencer type * * The values defined here are broken into 4-bit nibbles that are written diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 20b53e255f68..e56547bfdac9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3999,6 +3999,8 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) return 0; } +static struct lock_class_key macsec_netdev_addr_lock_key; + static int macsec_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -4050,6 +4052,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev, return err; netdev_lockdep_set_classes(dev); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key, + dev->lower_level); err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 563aed5b3d9f..6a6cc9f75307 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -860,6 +860,8 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) * "super class" of normal network devices; split their locks off into a * separate class since they always nest. 
*/ +static struct lock_class_key macvlan_netdev_addr_lock_key; + #define ALWAYS_ON_OFFLOADS \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) @@ -875,6 +877,14 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) +static void macvlan_set_lockdep_class(struct net_device *dev) +{ + netdev_lockdep_set_classes(dev); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key, + dev->lower_level); +} + static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -892,8 +902,7 @@ static int macvlan_init(struct net_device *dev) dev->gso_max_size = lowerdev->gso_max_size; dev->gso_max_segs = lowerdev->gso_max_segs; dev->hard_header_len = lowerdev->hard_header_len; - - netdev_lockdep_set_classes(dev); + macvlan_set_lockdep_class(dev); vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->pcpu_stats) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5bb448ae6c9c..e8085ab6d484 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -857,7 +857,6 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, u32 nhid, struct netlink_ext_ack *extack) { struct nexthop *old_nh = rtnl_dereference(fdb->nh); - struct nh_group *nhg; struct nexthop *nh; int err = -EINVAL; @@ -876,13 +875,12 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, nh = NULL; goto err_inval; } - if (!nh->is_fdb_nh) { + if (!nexthop_is_fdb(nh)) { NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop"); goto err_inval; } - nhg = rtnl_dereference(nh->nh_grp); - if (!nh->is_group || !nhg->mpath) { + if (!nexthop_is_multipath(nh)) { NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group"); goto err_inval; } @@ -890,14 +888,14 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, /* check nexthop group family */ switch (vxlan->default_dst.remote_ip.sa.sa_family) { case AF_INET: - if (!nhg->has_v4) { + if (!nexthop_has_v4(nh)) { err = -EAFNOSUPPORT; NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); goto err_inval; } break; case AF_INET6: - if (nhg->has_v4) { + if (nexthop_has_v4(nh)) { err = -EAFNOSUPPORT; NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); goto err_inval; @@ -4245,10 +4243,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], mod_timer(&vxlan->age_timer, jiffies); netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev); - if (lowerdev && lowerdev != dst->remote_dev) { + if (lowerdev && lowerdev != dst->remote_dev) dst->remote_dev = lowerdev; - netdev_update_lockdep_key(lowerdev); - } vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); return 0; } diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index aadf3dec5bf3..2ab34cf74ecc 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -3048,6 +3048,7 @@ static void prism2_clear_set_tim_queue(local_info_t *local) * This is a natural nesting, which needs a split lock type. 
*/ static struct lock_class_key hostap_netdev_xmit_lock_key; +static struct lock_class_key hostap_netdev_addr_lock_key; static void prism2_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, @@ -3059,6 +3060,8 @@ static void prism2_set_lockdep_class_one(struct net_device *dev, static void prism2_set_lockdep_class(struct net_device *dev) { + lockdep_set_class(&dev->addr_list_lock, + &hostap_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); } |
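
Several hunks above (bpqether, hostap, macsec, macvlan) apply the same
pattern: the device's addr_list_lock is keyed to a driver-local lockdep
class, and in the macsec/macvlan case additionally to a subclass taken
from dev->lower_level, so lockdep can tell the nested instances of this
lock apart. The sketch below outlines that pattern for a hypothetical
driver "foo"; it is only a summary of those hunks, not code from this
merge.

#include <linux/netdevice.h>
#include <linux/lockdep.h>

/* One key per driver: lockdep then treats foo's addr_list_lock as a
 * class of its own, so its natural nesting against the lower device's
 * lock is not reported as a self-deadlock.
 */
static struct lock_class_key foo_netdev_addr_lock_key;

static void foo_set_lockdep_class(struct net_device *dev)
{
	/* Flat devices (bpqether/hostap style): */
	lockdep_set_class(&dev->addr_list_lock, &foo_netdev_addr_lock_key);

	/* Stacked devices (macvlan/macsec style) also key the subclass
	 * to the device's nesting depth:
	 *
	 *	lockdep_set_class_and_subclass(&dev->addr_list_lock,
	 *				       &foo_netdev_addr_lock_key,
	 *				       dev->lower_level);
	 */
}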