author     Jakub Kicinski <kuba@kernel.org>   2021-06-29 15:45:27 -0700
committer  Jakub Kicinski <kuba@kernel.org>   2021-06-29 15:45:27 -0700
commit     b6df00789e2831fff7a2c65aa7164b2a4dcbe599 (patch)
tree       a94cbeeca3f0ae2fffed008cb287c02dbee4dceb /drivers
parent     3f8ad50a9e43b6a59070e6c9c5eec79626f81095 (diff)
parent     a118ff661889ecee3ca90f8125bad8fb5bbc07d5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Trivial conflict in net/netfilter/nf_tables_api.c.
Duplicate fix in tools/testing/selftests/net/devlink_port_split.py
- take the net-next version.
skmsg, and L4 bpf - keep the bpf code but remove the flags
and err params.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers')
31 files changed, 259 insertions(+), 123 deletions(-)
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index b015c3e14336..bc5a6ab6fa4b 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -527,6 +527,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev) /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ writel(0x00000000, card->membase + VPM); + card->intcnt = 0; + if (request_irq + (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { + pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); + error = 9; + ns_init_card_error(card, error); + return error; + } + /* Initialize TSQ */ card->tsq.org = dma_alloc_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, @@ -753,15 +762,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev) card->efbie = 1; - card->intcnt = 0; - if (request_irq - (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { - printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); - error = 9; - ns_init_card_error(card, error); - return error; - } - /* Register device */ card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, -1, NULL); @@ -839,10 +839,12 @@ static void ns_init_card_error(ns_dev *card, int error) dev_kfree_skb_any(hb); } if (error >= 12) { - kfree(card->rsq.org); + dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, + card->rsq.org, card->rsq.dma); } if (error >= 11) { - kfree(card->tsq.org); + dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, + card->tsq.org, card->tsq.dma); } if (error >= 10) { free_irq(card->pcidev->irq, card); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1d9137e77dfc..0ff7567bd04f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1600,6 +1600,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, int link_reporting; int res = 0, i; + if (slave_dev->flags & IFF_MASTER && + !netif_is_bond_master(slave_dev)) { + NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved"); + netdev_err(bond_dev, + "Error: Device with IFF_MASTER cannot be enslaved\n"); + return -EPERM; + } + if (!bond->params.use_carrier && slave_dev->ethtool_ops->get_link == NULL && slave_ops->ndo_do_ioctl == NULL) { diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 00847cbaf7b6..d08718e98e11 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -351,8 +351,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return err; } - /* start network queue (echo_skb array is empty) */ - netif_start_queue(ndev); + /* wake network queue up (echo_skb array is empty) */ + netif_wake_queue(ndev); return 0; } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 5af69787d9d5..0a37af4a3fa4 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -1053,7 +1053,6 @@ static void ems_usb_disconnect(struct usb_interface *intf) if (dev) { unregister_netdev(dev->netdev); - free_candev(dev->netdev); unlink_all_urbs(dev); @@ -1061,6 +1060,8 @@ static void ems_usb_disconnect(struct usb_interface *intf) kfree(dev->intr_in_buffer); kfree(dev->tx_msg_buffer); + + free_candev(dev->netdev); } } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index eca285aaf72f..961fa6b75cad 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1618,9 +1618,6 @@ static int 
mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, struct mv88e6xxx_vtu_entry vlan; int i, err; - if (!vid) - return -EOPNOTSUPP; - /* DSA and CPU ports have to be members of multiple vlans */ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) return 0; @@ -2109,6 +2106,9 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u8 member; int err; + if (!vlan->vid) + return 0; + err = mv88e6xxx_port_vlan_prepare(ds, port, vlan); if (err) return err; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index a9777eb564c6..4f0545605f6b 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1818,6 +1818,12 @@ static int sja1105_reload_cbs(struct sja1105_private *priv) { int rc = 0, i; + /* The credit based shapers are only allocated if + * CONFIG_NET_SCH_CBS is enabled. + */ + if (!priv->cbs) + return 0; + for (i = 0; i < priv->info->num_cbs_shapers; i++) { struct sja1105_cbs_entry *cbs = &priv->cbs[i]; diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index d77fafbc1530..c560ad06f0be 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1539,10 +1539,11 @@ static int greth_of_remove(struct platform_device *of_dev) mdiobus_unregister(greth->mdio); unregister_netdev(ndev); - free_netdev(ndev); of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0])); + free_netdev(ndev); + return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h index f5fba8b8cdea..a47e2710487e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h @@ -91,7 +91,7 @@ struct aq_macsec_txsc { u32 hw_sc_idx; unsigned long tx_sa_idx_busy; const struct macsec_secy *sw_secy; - u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN]; + u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN]; struct aq_macsec_tx_sc_stats stats; struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN]; }; @@ -101,7 +101,7 @@ struct aq_macsec_rxsc { unsigned long rx_sa_idx_busy; const struct macsec_secy *sw_secy; const struct macsec_rx_sc *sw_rxsc; - u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN]; + u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN]; struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN]; }; diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 60d908507f51..02a569500234 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -174,9 +174,6 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet, if (!ring->slots) goto err_free_buf_descs; - ring->read_idx = 0; - ring->write_idx = 0; - return 0; err_free_buf_descs: @@ -304,6 +301,9 @@ static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet, enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, (uint32_t)ring->dma_addr); + + ring->read_idx = 0; + ring->write_idx = 0; } static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 701c12c9e033..649c5c429bd7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter) int num = 0, status = 0; struct be_mcc_obj *mcc_obj = 
&adapter->mcc_obj; - spin_lock_bh(&adapter->mcc_cq_lock); + spin_lock(&adapter->mcc_cq_lock); while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { @@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter) if (num) be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); - spin_unlock_bh(&adapter->mcc_cq_lock); + spin_unlock(&adapter->mcc_cq_lock); return status; } @@ -581,7 +581,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (be_check_error(adapter, BE_ERROR_ANY)) return -EIO; + local_bh_disable(); status = be_process_mcc(adapter); + local_bh_enable(); if (atomic_read(&mcc_obj->q.used) == 0) break; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7968568bbe21..361c1c87c183 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5501,7 +5501,9 @@ static void be_worker(struct work_struct *work) * mcc completions */ if (!netif_running(adapter->netdev)) { + local_bh_disable(); be_process_mcc(adapter); + local_bh_enable(); goto reschedule; } diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index e3954d8835e7..f9a288a6ec8c 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -607,7 +607,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) /* Get IRQ number */ priv->irq = platform_get_irq(pdev, 0); - if (!priv->irq) { + if (priv->irq < 0) { dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n"); err = -ENODEV; goto out_netdev; @@ -630,8 +630,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) out_netif_api: netif_napi_del(&priv->napi); out_netdev: - if (err) - free_netdev(ndev); + free_netdev(ndev); return err; } @@ -642,8 +641,8 @@ static s32 nps_enet_remove(struct platform_device *pdev) struct nps_enet_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); - free_netdev(ndev); netif_napi_del(&priv->napi); + free_netdev(ndev); return 0; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index ac4819c25aca..867e87af3432 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1506,8 +1506,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) gve_write_version(®_bar->driver_version); /* Get max queues to alloc etherdev */ - max_rx_queues = ioread32be(®_bar->max_tx_queues); - max_tx_queues = ioread32be(®_bar->max_rx_queues); + max_tx_queues = ioread32be(®_bar->max_tx_queues); + max_rx_queues = ioread32be(®_bar->max_rx_queues); /* Alloc and setup the netdev and priv */ dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); if (!dev) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index adb0d5ca9ff1..374a75d4faea 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *); static int __ibmvnic_set_mac(struct net_device *, u8 *); static int init_crq_queue(struct ibmvnic_adapter *adapter); static int send_query_phys_parms(struct ibmvnic_adapter *adapter); +static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *tx_scrq); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -232,12 +234,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, 
mutex_lock(&adapter->fw_lock); adapter->fw_done_rc = 0; reinit_completion(&adapter->fw_done); - rc = send_request_map(adapter, ltb->addr, - ltb->size, ltb->map_id); + + rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); if (rc) { - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - mutex_unlock(&adapter->fw_lock); - return rc; + dev_err(dev, "send_request_map failed, rc = %d\n", rc); + goto out; } rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); @@ -245,20 +246,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, dev_err(dev, "Long term map request aborted or timed out,rc = %d\n", rc); - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - mutex_unlock(&adapter->fw_lock); - return rc; + goto out; } if (adapter->fw_done_rc) { dev_err(dev, "Couldn't map long term buffer,rc = %d\n", adapter->fw_done_rc); + rc = -1; + goto out; + } + rc = 0; +out: + if (rc) { dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - mutex_unlock(&adapter->fw_lock); - return -1; + ltb->buff = NULL; } mutex_unlock(&adapter->fw_lock); - return 0; + return rc; } static void free_long_term_buff(struct ibmvnic_adapter *adapter, @@ -278,14 +282,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, adapter->reset_reason != VNIC_RESET_TIMEOUT) send_request_unmap(adapter, ltb->map_id); dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + ltb->buff = NULL; + ltb->map_id = 0; } -static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb) +static int reset_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb) { - if (!ltb->buff) - return -EINVAL; + struct device *dev = &adapter->vdev->dev; + int rc; memset(ltb->buff, 0, ltb->size); + + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + + reinit_completion(&adapter->fw_done); + rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); + if (rc) { + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_info(dev, + "Reset failed, long term map request timed out or aborted\n"); + mutex_unlock(&adapter->fw_lock); + return rc; + } + + if (adapter->fw_done_rc) { + dev_info(dev, + "Reset failed, attempting to free and reallocate buffer\n"); + free_long_term_buff(adapter, ltb); + mutex_unlock(&adapter->fw_lock); + return alloc_long_term_buff(adapter, ltb, ltb->size); + } + mutex_unlock(&adapter->fw_lock); return 0; } @@ -321,7 +355,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, rx_scrq = adapter->rx_scrq[pool->index]; ind_bufp = &rx_scrq->ind_buf; - for (i = 0; i < count; ++i) { + + /* netdev_skb_alloc() could have failed after we saved a few skbs + * in the indir_buf and we would not have sent them to VIOS yet. + * To account for them, start the loop at ind_bufp->index rather + * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will + * be 0. 
+ */ + for (i = ind_bufp->index; i < count; ++i) { skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); if (!skb) { dev_err(dev, "Couldn't replenish rx buff\n"); @@ -507,7 +548,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) rx_pool->size * rx_pool->buff_size); } else { - rc = reset_long_term_buff(&rx_pool->long_term_buff); + rc = reset_long_term_buff(adapter, + &rx_pool->long_term_buff); } if (rc) @@ -630,11 +672,12 @@ static int init_rx_pools(struct net_device *netdev) return 0; } -static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool) +static int reset_one_tx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_tx_pool *tx_pool) { int rc, i; - rc = reset_long_term_buff(&tx_pool->long_term_buff); + rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); if (rc) return rc; @@ -661,10 +704,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = adapter->num_active_tx_pools; for (i = 0; i < tx_scrqs; i++) { - rc = reset_one_tx_pool(&adapter->tso_pool[i]); + ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); + rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); if (rc) return rc; - rc = reset_one_tx_pool(&adapter->tx_pool[i]); + rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); if (rc) return rc; } @@ -757,8 +801,11 @@ static int init_tx_pools(struct net_device *netdev) adapter->tso_pool = kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); - if (!adapter->tso_pool) + if (!adapter->tso_pool) { + kfree(adapter->tx_pool); + adapter->tx_pool = NULL; return -1; + } adapter->num_active_tx_pools = tx_subcrqs; @@ -1204,6 +1251,11 @@ static int __ibmvnic_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); + if (prev_state == VNIC_CLOSED) { + for (i = 0; i < adapter->req_rx_queues; i++) + napi_schedule(&adapter->napi[i]); + } + adapter->state = VNIC_OPEN; return rc; } @@ -1608,7 +1660,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, ind_bufp->index = 0; if (atomic_sub_return(entries, &tx_scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && - __netif_subqueue_stopped(adapter->netdev, queue_num)) { + __netif_subqueue_stopped(adapter->netdev, queue_num) && + !test_bit(0, &adapter->resetting)) { netif_wake_subqueue(adapter->netdev, queue_num); netdev_dbg(adapter->netdev, "Started queue %d\n", queue_num); @@ -1701,7 +1754,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } @@ -3241,6 +3293,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", i); + ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); if (adapter->tx_scrq[i]->irq) { free_irq(adapter->tx_scrq[i]->irq, adapter->tx_scrq[i]); @@ -3314,7 +3367,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, /* H_EOI would fail with rc = H_FUNCTION when running * in XIVE mode which is expected, but not an error. */ - if (rc && rc != H_FUNCTION) + if (rc && (rc != H_FUNCTION)) dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", val, rc); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 5435606149b0..d150dade06cf 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5223,18 +5223,20 @@ static void e1000_watchdog_task(struct work_struct *work) pm_runtime_resume(netdev->dev.parent); /* Checking if MAC is in DMoff state*/ - pcim_state = er32(STATUS); - while (pcim_state & E1000_STATUS_PCIM_STATE) { - if (tries++ == dmoff_exit_timeout) { - e_dbg("Error in exiting dmoff\n"); - break; - } - usleep_range(10000, 20000); + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { pcim_state = er32(STATUS); - - /* Checking if MAC exited DMoff state */ - if (!(pcim_state & E1000_STATUS_PCIM_STATE)) - e1000_phy_hw_reset(&adapter->hw); + while (pcim_state & E1000_STATUS_PCIM_STATE) { + if (tries++ == dmoff_exit_timeout) { + e_dbg("Error in exiting dmoff\n"); + break; + } + usleep_range(10000, 20000); + pcim_state = er32(STATUS); + + /* Checking if MAC exited DMoff state */ + if (!(pcim_state & E1000_STATUS_PCIM_STATE)) + e1000_phy_hw_reset(&adapter->hw); + } } /* update snapshot of PHY registers on LSC */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index ccd5b9486ea9..3e822bad4851 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1262,8 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, if (ethtool_link_ksettings_test_link_mode(&safe_ks, supported, Autoneg) && - hw->phy.link_info.phy_type != - I40E_PHY_TYPE_10GBASE_T) { + hw->phy.media_type != I40E_MEDIA_TYPE_BASET) { netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); err = -EINVAL; goto done; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9db1968fc491..861e59a350bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -32,7 +32,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired); static int i40e_add_vsi(struct i40e_vsi *vsi); static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); -static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); @@ -8703,6 +8703,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi) dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); + if (err) + goto err_setup_rx; } else { err = -EINVAL; @@ -10569,7 +10571,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) #endif /* CONFIG_I40E_DCB */ if (!lock_acquired) rtnl_lock(); - ret = i40e_setup_pf_switch(pf, reinit); + ret = i40e_setup_pf_switch(pf, reinit, true); if (ret) goto end_unlock; @@ -14627,10 +14629,11 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) * i40e_setup_pf_switch - Setup the HW switch on startup or after reset * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. 
+ * @lock_acquired: indicates whether or not the lock has been acquired * * Returns 0 on success, negative value on failure **/ -static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) { u16 flags = 0; int ret; @@ -14732,9 +14735,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_ptp_init(pf); + if (!lock_acquired) + rtnl_lock(); + /* repopulate tunnel port filters */ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); + if (!lock_acquired) + rtnl_unlock(); + return ret; } @@ -15528,7 +15537,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } #endif - err = i40e_setup_pf_switch(pf, false); + err = i40e_setup_pf_switch(pf, false, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index f1f6fc3744e9..7b971b205d36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -11,13 +11,14 @@ * operate with the nanosecond field directly without fear of overflow. * * Much like the 82599, the update period is dependent upon the link speed: - * At 40Gb link or no link, the period is 1.6ns. - * At 10Gb link, the period is multiplied by 2. (3.2ns) + * At 40Gb, 25Gb, or no link, the period is 1.6ns. + * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns) * At 1Gb link, the period is multiplied by 20. (32ns) * 1588 functionality is not supported at 100Mbps. */ #define I40E_PTP_40GB_INCVAL 0x0199999999ULL #define I40E_PTP_10GB_INCVAL_MULT 2 +#define I40E_PTP_5GB_INCVAL_MULT 2 #define I40E_PTP_1GB_INCVAL_MULT 20 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) @@ -465,6 +466,9 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) case I40E_LINK_SPEED_10GB: mult = I40E_PTP_10GB_INCVAL_MULT; break; + case I40E_LINK_SPEED_5GB: + mult = I40E_PTP_5GB_INCVAL_MULT; + break; case I40E_LINK_SPEED_1GB: mult = I40E_PTP_1GB_INCVAL_MULT; break; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 46aee2c49f1b..fff78900fc8a 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1230,8 +1230,10 @@ static int mana_create_txq(struct mana_port_context *apc, cq->gdma_id = cq->gdma_cq->id; - if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) - return -EINVAL; + if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { + err = -EINVAL; + goto out; + } gc->cq_table[cq->gdma_id] = cq->gdma_cq; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d597c89f00ed..e7e2223aebbf 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1069,7 +1069,8 @@ static int efx_ef10_probe_vf(struct efx_nic *efx) /* If the parent PF has no VF data structure, it doesn't know about this * VF so fail probe. The VF needs to be re-created. This can happen - * if the PF driver is unloaded while the VF is assigned to a guest. + * if the PF driver was unloaded while any VF was assigned to a guest + * (using Xen, only). 
*/ pci_dev_pf = efx->pci_dev->physfn; if (pci_dev_pf) { diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 21fa6c0e8873..752d6406f07e 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -122,8 +122,7 @@ static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) struct ef10_vf *vf = nic_data->vf + i; /* If VF is assigned, do not free the vport */ - if (vf->pci_dev && - vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + if (vf->pci_dev && pci_is_dev_assigned(vf->pci_dev)) continue; if (vf->vport_assigned) { @@ -207,9 +206,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) return 0; fail: - efx_ef10_sriov_free_vf_vports(efx); - kfree(nic_data->vf); - nic_data->vf = NULL; + efx_ef10_sriov_free_vf_vswitching(efx); return rc; } @@ -402,12 +399,17 @@ fail1: return rc; } +/* Disable SRIOV and remove VFs + * If some VFs are attached to a guest (using Xen, only) nothing is + * done if force=false, and vports are freed if force=true (for the non + * attachedc ones, only) but SRIOV is not disabled and VFs are not + * removed in either case. + */ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) { struct pci_dev *dev = efx->pci_dev; - unsigned int vfs_assigned = 0; - - vfs_assigned = pci_vfs_assigned(dev); + unsigned int vfs_assigned = pci_vfs_assigned(dev); + int rc = 0; if (vfs_assigned && !force) { netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " @@ -417,10 +419,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) if (!vfs_assigned) pci_disable_sriov(dev); + else + rc = -EBUSY; efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; - return 0; + return rc; } int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) @@ -439,24 +443,18 @@ int efx_ef10_sriov_init(struct efx_nic *efx) void efx_ef10_sriov_fini(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - unsigned int i; int rc; if (!nic_data->vf) { - /* Remove any un-assigned orphaned VFs */ + /* Remove any un-assigned orphaned VFs. This can happen if the PF driver + * was unloaded while any VF was assigned to a guest (using Xen, only). 
+ */ if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev)) pci_disable_sriov(efx->pci_dev); return; } - /* Remove any VFs in the host */ - for (i = 0; i < efx->vf_count; ++i) { - struct efx_nic *vf_efx = nic_data->vf[i].efx; - - if (vf_efx) - vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); - } - + /* Disable SRIOV and remove any VFs in the host */ rc = efx_ef10_pci_sriov_disable(efx, true); if (rc) netif_dbg(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 6a67b026df0b..718539cdd2f2 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1506,12 +1506,12 @@ static void am65_cpsw_nuss_free_tx_chns(void *data) for (i = 0; i < common->tx_ch_num; i++) { struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) - k3_udma_glue_release_tx_chn(tx_chn->tx_chn); - if (!IS_ERR_OR_NULL(tx_chn->desc_pool)) k3_cppi_desc_pool_destroy(tx_chn->desc_pool); + if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) + k3_udma_glue_release_tx_chn(tx_chn->tx_chn); + memset(tx_chn, 0, sizeof(*tx_chn)); } } @@ -1531,12 +1531,12 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) netif_napi_del(&tx_chn->napi_tx); - if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) - k3_udma_glue_release_tx_chn(tx_chn->tx_chn); - if (!IS_ERR_OR_NULL(tx_chn->desc_pool)) k3_cppi_desc_pool_destroy(tx_chn->desc_pool); + if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) + k3_udma_glue_release_tx_chn(tx_chn->tx_chn); + memset(tx_chn, 0, sizeof(*tx_chn)); } } @@ -1624,11 +1624,11 @@ static void am65_cpsw_nuss_free_rx_chns(void *data) rx_chn = &common->rx_chns; - if (!IS_ERR_OR_NULL(rx_chn->rx_chn)) - k3_udma_glue_release_rx_chn(rx_chn->rx_chn); - if (!IS_ERR_OR_NULL(rx_chn->desc_pool)) k3_cppi_desc_pool_destroy(rx_chn->desc_pool); + + if (!IS_ERR_OR_NULL(rx_chn->rx_chn)) + k3_udma_glue_release_rx_chn(rx_chn->rx_chn); } static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f682a5572d84..382bebc2420d 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2384,6 +2384,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev) dev_hold(vf_netdev); rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); + if (ndev->needed_headroom < vf_netdev->needed_headroom) + ndev->needed_headroom = vf_netdev->needed_headroom; + vf_netdev->wanted_features = ndev->features; netdev_update_features(vf_netdev); @@ -2462,6 +2465,8 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); dev_put(vf_netdev); + ndev->needed_headroom = RNDIS_AND_PPI_SIZE; + return NOTIFY_OK; } diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index da9135231c07..ebc976b7fcc2 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -480,7 +480,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) struct hwsim_edge *e; u32 v0, v1; - if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; @@ -715,6 +715,8 @@ static int hwsim_subscribe_all_others(struct hwsim_phy *phy) return 0; +sub_fail: + hwsim_edge_unsubscribe_me(phy); me_fail: rcu_read_lock(); list_for_each_entry_rcu(e, &phy->edges, list) { @@ -722,8 +724,6 @@ 
me_fail: hwsim_free_edge(e); } rcu_read_unlock(); -sub_fail: - hwsim_edge_unsubscribe_me(phy); return -ENOMEM; } @@ -824,12 +824,17 @@ err_pib: static void hwsim_del(struct hwsim_phy *phy) { struct hwsim_pib *pib; + struct hwsim_edge *e; hwsim_edge_unsubscribe_me(phy); list_del(&phy->list); rcu_read_lock(); + list_for_each_entry_rcu(e, &phy->edges, list) { + list_del_rcu(&e->list); + hwsim_free_edge(e); + } pib = rcu_dereference(phy->pib); rcu_read_unlock(); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 92425e1fd70c..93dc48b9b4f2 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1819,7 +1819,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) ctx.sa.rx_sa = rx_sa; ctx.secy = secy; memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), - MACSEC_KEYID_LEN); + secy->key_len); err = macsec_offload(ops->mdo_add_rxsa, &ctx); if (err) @@ -2061,7 +2061,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) ctx.sa.tx_sa = tx_sa; ctx.secy = secy; memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), - MACSEC_KEYID_LEN); + secy->key_len); err = macsec_offload(ops->mdo_add_txsa, &ctx); if (err) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 6697c9368b40..5d62b85a4024 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -701,6 +701,34 @@ static void at803x_remove(struct phy_device *phydev) regulator_disable(priv->vddio); } +static int at803x_get_features(struct phy_device *phydev) +{ + int err; + + err = genphy_read_abilities(phydev); + if (err) + return err; + + if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID)) + return 0; + + /* AR8031/AR8033 have different status registers + * for copper and fiber operation. However, the + * extended status register is the same for both + * operation modes. + * + * As a result of that, ESTATUS_1000_XFULL is set + * to 1 even when operating in copper TP mode. + * + * Remove this mode from the supported link modes, + * as this driver currently only supports copper + * operation. 
+ */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + phydev->supported); + return 0; +} + static int at803x_smarteee_config(struct phy_device *phydev) { struct at803x_priv *priv = phydev->priv; @@ -1344,7 +1372,7 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .read_page = at803x_read_page, .write_page = at803x_write_page, - /* PHY_GBIT_FEATURES */ + .get_features = at803x_get_features, .read_status = at803x_read_status, .config_intr = &at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index 10be266e48e8..b7b2521c73fb 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -501,7 +501,7 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow) } /* Derive the AES key to get a key for the hash autentication */ -static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN], +static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN], u16 key_len, u8 hkey[16]) { const u8 input[AES_BLOCK_SIZE] = {0}; diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h index 9c6d25e36de2..453304bae778 100644 --- a/drivers/net/phy/mscc/mscc_macsec.h +++ b/drivers/net/phy/mscc/mscc_macsec.h @@ -81,7 +81,7 @@ struct macsec_flow { /* Highest takes precedence [0..15] */ u8 priority; - u8 key[MACSEC_KEYID_LEN]; + u8 key[MACSEC_MAX_KEY_LEN]; union { struct macsec_rx_sa *rx_sa; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 452822f88214..2b1b944d4b28 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1366,22 +1366,22 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, int orig_iif = skb->skb_iif; bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); bool is_ndisc = ipv6_ndisc_frame(skb); - bool is_ll_src; /* loopback, multicast & non-ND link-local traffic; do not push through * packet taps again. Reset pkt_type for upper layers to process skb. - * for packets with lladdr src, however, skip so that the dst can be - * determine at input using original ifindex in the case that daddr - * needs strict + * For strict packets with a source LLA, determine the dst using the + * original ifindex. 
*/ - is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL; - if (skb->pkt_type == PACKET_LOOPBACK || - (need_strict && !is_ndisc && !is_ll_src)) { + if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; + if (skb->pkt_type == PACKET_LOOPBACK) skb->pkt_type = PACKET_HOST; + else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) + vrf_ip6_input_dst(skb, vrf_dev, orig_iif); + goto out; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 02a14f1b938a..5a8df5a195cb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2164,6 +2164,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) struct neighbour *n; struct nd_msg *msg; + rcu_read_lock(); in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; @@ -2215,6 +2216,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) } out: + rcu_read_unlock(); consume_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 529dfd8b7ae8..17399d4aa129 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1445,11 +1445,18 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) if (!priv) continue; rtnl_lock(); - wiphy_lock(adapter->wiphy); if (priv->netdev && - priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) + priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) { + /* + * Close the netdev now, because if we do it later, the + * netdev notifiers will need to acquire the wiphy lock + * again --> deadlock. + */ + dev_close(priv->wdev.netdev); + wiphy_lock(adapter->wiphy); mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); - wiphy_unlock(adapter->wiphy); + wiphy_unlock(adapter->wiphy); + } rtnl_unlock(); } |
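
The nicstar error-path change above restores the basic pairing rule for coherent DMA memory: buffers obtained with dma_alloc_coherent() must be returned with dma_free_coherent() using the same size, CPU address, and DMA handle, never kfree(). A trimmed illustration of that pairing, not taken from the patch itself; EXAMPLE_SIZE and the function names are placeholders:

	/*
	 * Minimal sketch of the dma_alloc_coherent()/dma_free_coherent()
	 * pairing. "EXAMPLE_SIZE", "example_alloc" and "example_free" are
	 * illustrative placeholders, not symbols from the patch above.
	 */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_SIZE 4096

	static void *example_alloc(struct device *dev, dma_addr_t *dma)
	{
		/* CPU address is returned, bus/DMA address comes back via *dma */
		return dma_alloc_coherent(dev, EXAMPLE_SIZE, dma, GFP_KERNEL);
	}

	static void example_free(struct device *dev, void *cpu_addr, dma_addr_t dma)
	{
		/* must match the allocation: same dev, size, CPU addr, DMA handle */
		dma_free_coherent(dev, EXAMPLE_SIZE, cpu_addr, dma);
	}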
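The benet change above moves bottom-half protection out of be_process_mcc() and into its process-context callers: the completion-queue lock is now taken with plain spin_lock(), and callers that are not already in softirq context bracket the call with local_bh_disable()/local_bh_enable(). A sketch of that pattern under assumed placeholder names (example_lock, example_poll, example_worker):

	/*
	 * Sketch of the locking pattern: a lock also taken from softirq
	 * context is acquired with plain spin_lock() inside the helper, so
	 * process-context callers must mask bottom halves first.
	 */
	#include <linux/spinlock.h>
	#include <linux/bottom_half.h>

	static DEFINE_SPINLOCK(example_lock);

	static int example_poll(void)
	{
		int done = 0;

		spin_lock(&example_lock);	/* caller guarantees BHs are disabled */
		/* ... drain completion queue, count entries in "done" ... */
		spin_unlock(&example_lock);

		return done;
	}

	static void example_worker(void)
	{
		local_bh_disable();		/* process context: mask softirqs */
		example_poll();
		local_bh_enable();
	}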
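Several of the fixes above (greth, ems_usb, nps_enet, am65-cpsw) converge on the same teardown rule: free_netdev() must come last, because it frees the private area that unregistration, NAPI removal, and unmapping still dereference. A minimal sketch of that ordering, assuming a hypothetical platform driver; example_priv and example_remove stand in for any of the drivers touched here:

	/*
	 * Sketch of remove-path ordering only; "example_priv" and
	 * "example_remove" are placeholders, not code from this merge.
	 */
	#include <linux/io.h>
	#include <linux/netdevice.h>
	#include <linux/platform_device.h>

	struct example_priv {
		void __iomem *regs;
		struct napi_struct napi;
	};

	static int example_remove(struct platform_device *pdev)
	{
		struct net_device *ndev = platform_get_drvdata(pdev);
		struct example_priv *priv = netdev_priv(ndev);

		unregister_netdev(ndev);	/* stop new users of the device      */
		netif_napi_del(&priv->napi);	/* still uses priv, so before free   */
		iounmap(priv->regs);		/* ditto: priv lives inside ndev     */
		free_netdev(ndev);		/* last: releases priv with ndev     */

		return 0;
	}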