101 files changed, 783 insertions, 399 deletions
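A pattern that recurs in the hunks below (bnx2x, ibmvnic) is annotating fields that arrive from firmware or the hypervisor with sparse endian types (__le32, __be64) and converting them once with le32_to_cpu()/be64_to_cpu() at the point of use. The short userspace sketch that follows only illustrates that idea with the analogous glibc helpers from <endian.h>; the variable names and values are invented for the example and are not taken from the drivers.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend a NIC wrote a little-endian CID and a hypervisor
	 * returned a big-endian capability value (hypothetical values,
	 * standing in for bnx2x's __le32 echo/cid and ibmvnic's
	 * __be64 number fields in the patches below). */
	uint32_t cid_wire = htole32(0x1234);
	uint64_t cap_wire = htobe64(128);

	/* Convert exactly once, where the value is consumed —
	 * the userspace analogue of le32_to_cpu()/be64_to_cpu(). */
	uint32_t cid = le32toh(cid_wire);
	uint64_t cap = be64toh(cap_wire);

	printf("cid=0x%x cap=%llu\n", cid, (unsigned long long)cap);
	return 0;
}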
diff --git a/MAINTAINERS b/MAINTAINERS index f5e6a535bc34..4029c63d8a7d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4554,6 +4554,15 @@ S: Maintained F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h +FREESCALE IMX / MXC FEC DRIVER +M: Fugang Duan <fugang.duan@nxp.com> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/freescale/fec_main.c +F: drivers/net/ethernet/freescale/fec_ptp.c +F: drivers/net/ethernet/freescale/fec.h +F: Documentation/devicetree/bindings/net/fsl-fec.txt + FREESCALE QUICC ENGINE LIBRARY L: linuxppc-dev@lists.ozlabs.org S: Orphan @@ -6770,6 +6779,7 @@ S: Maintained F: Documentation/networking/mac80211-injection.txt F: include/net/mac80211.h F: net/mac80211/ +F: drivers/net/wireless/mac80211_hwsim.[ch] MACVLAN DRIVER M: Patrick McHardy <kaber@trash.net> diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 575790e8a75a..74a7dfecee27 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (clear_intf) mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00); - if (eflag) + if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) mcp251x_write_bits(spi, EFLG, eflag, 0x00); /* Update can state */ diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 5eee62badf45..cbc99d5649af 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface static void gs_destroy_candev(struct gs_can *dev) { unregister_candev(dev->netdev); - free_candev(dev->netdev); usb_kill_anchored_urbs(&dev->tx_submitted); - kfree(dev); + free_candev(dev->netdev); } static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) @@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * for (i = 0; i < icount; i++) { dev->canch[i] = gs_make_candev(i, intf); if (IS_ERR_OR_NULL(dev->canch[i])) { + /* save error code to return later */ + rc = PTR_ERR(dev->canch[i]); + /* on failure destroy previously created candevs */ icount = i; - for (i = 0; i < icount; i++) { + for (i = 0; i < icount; i++) gs_destroy_candev(dev->canch[i]); - dev->canch[i] = NULL; - } + + usb_kill_anchored_urbs(&dev->rx_submitted); kfree(dev); return rc; } @@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf) return; } - for (i = 0; i < GS_MAX_INTF; i++) { - struct gs_can *can = dev->canch[i]; - - if (!can) - continue; - - gs_destroy_candev(can); - } + for (i = 0; i < GS_MAX_INTF; i++) + if (dev->canch[i]) + gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); + kfree(dev); } static const struct usb_device_id gs_usb_table[] = { diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 79e1a0282163..17b2126075e0 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id) int i; pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[entry].frag[0].addr), - le32_to_cpu(vp->tx_ring[entry].frag[0].length), + le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, PCI_DMA_TODEVICE); for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 17472851674f..f749e4d389eb 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ 
b/drivers/net/ethernet/altera/altera_tse_main.c @@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev) priv->mdio->id); mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); priv->mdio = NULL; } diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index f71ab2647a3b..08a23e6b60e9 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev) goto err_disable_clk; } - priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (of_phy_is_fixed_link(pdev->dev.of_node)) { + ret = of_phy_register_fixed_link(pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "bad fixed-link spec\n"); + goto err_free_bus; + } + priv->phy_node = of_node_get(pdev->dev.of_node); + } + + if (!priv->phy_node) + priv->phy_node = of_parse_phandle(pdev->dev.of_node, + "phy-handle", 0); + if (!priv->phy_node) { dev_err(&pdev->dev, "no PHY specified\n"); ret = -ENODEV; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 27aa0802d87d..91874d24fd56 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -4896,9 +4896,9 @@ struct c2s_pri_trans_table_entry { * cfc delete event data */ struct cfc_del_event_data { - u32 cid; - u32 reserved0; - u32 reserved1; + __le32 cid; + __le32 reserved0; + __le32 reserved1; }; @@ -5114,15 +5114,9 @@ struct vf_pf_channel_zone_trigger { * zone that triggers the in-bound interrupt */ struct trigger_vf_zone { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 reserved0; - struct vf_pf_channel_zone_trigger vf_pf_channel; -#elif defined(__LITTLE_ENDIAN) struct vf_pf_channel_zone_trigger vf_pf_channel; u8 reserved0; u16 reserved1; -#endif u32 reserved2; }; @@ -5207,9 +5201,9 @@ struct e2_integ_data { * set mac event data */ struct eth_event_data { - u32 echo; - u32 reserved0; - u32 reserved1; + __le32 echo; + __le32 reserved0; + __le32 reserved1; }; @@ -5219,9 +5213,9 @@ struct eth_event_data { struct vf_pf_event_data { u8 vf_id; u8 reserved0; - u16 reserved1; - u32 msg_addr_lo; - u32 msg_addr_hi; + __le16 reserved1; + __le32 msg_addr_lo; + __le32 msg_addr_hi; }; /* @@ -5230,9 +5224,9 @@ struct vf_pf_event_data { struct vf_flr_event_data { u8 vf_id; u8 reserved0; - u16 reserved1; - u32 reserved2; - u32 reserved3; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; }; /* @@ -5241,9 +5235,9 @@ struct vf_flr_event_data { struct malicious_vf_event_data { u8 vf_id; u8 err_id; - u16 reserved1; - u32 reserved2; - u32 reserved3; + __le16 reserved1; + __le32 reserved2; + __le32 reserved3; }; /* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6c4e3a69976f..2bf9c871144f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -5280,14 +5280,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, { unsigned long ramrod_flags = 0; int rc = 0; - u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); + u32 cid = echo & BNX2X_SWCID_MASK; struct bnx2x_vlan_mac_obj *vlan_mac_obj; /* Always push next commands out, don't wait here */ __set_bit(RAMROD_CONT, &ramrod_flags); - switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) - >> 
BNX2X_SWCID_SHIFT) { + switch (echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) @@ -5308,8 +5308,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, bnx2x_handle_mcast_eqe(bp); return; default: - BNX2X_ERR("Unsupported classification command: %d\n", - elem->message.data.eth_event.echo); + BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); return; } @@ -5478,9 +5477,6 @@ static void bnx2x_eq_int(struct bnx2x *bp) goto next_spqe; } - /* elem CID originates from FW; actually LE */ - cid = SW_CID((__force __le32) - elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ @@ -5503,6 +5499,10 @@ static void bnx2x_eq_int(struct bnx2x *bp) * we may want to verify here that the bp state is * HALTING */ + + /* elem CID originates from FW; actually LE */ + cid = SW_CID(elem->message.data.cfc_del_event.cid); + DP(BNX2X_MSG_SP, "got delete ramrod for MULTI[%d]\n", cid); @@ -5596,10 +5596,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_OPENING_WAIT4_PORT): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - cid = elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK; DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", - cid); + SW_CID(elem->message.data.eth_event.echo)); rss_raw->clear_pending(rss_raw); break; @@ -5684,7 +5682,7 @@ static void bnx2x_sp_task(struct work_struct *work) if (status & BNX2X_DEF_SB_IDX) { struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - if (FCOE_INIT(bp) && + if (FCOE_INIT(bp) && (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* Prevent local bottom-halves from running as * we are going to change the local NAPI list. 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 9d027348cd09..632daff117d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, { unsigned long ramrod_flags = 0; int rc = 0; + u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); /* Always push next commands out, don't wait here */ set_bit(RAMROD_CONT, &ramrod_flags); - switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + switch (echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, &ramrod_flags); @@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, &ramrod_flags); break; default: - BNX2X_ERR("Unsupported classification command: %d\n", - elem->message.data.eth_event.echo); + BNX2X_ERR("Unsupported classification command: 0x%x\n", echo); return; } if (rc < 0) @@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) switch (opcode) { case EVENT_RING_OPCODE_CFC_DEL: - cid = SW_CID((__force __le32) - elem->message.data.cfc_del_event.cid); + cid = SW_CID(elem->message.data.cfc_del_event.cid); DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); break; case EVENT_RING_OPCODE_CLASSIFICATION_RULES: case EVENT_RING_OPCODE_MULTICAST_RULES: case EVENT_RING_OPCODE_FILTERS_RULES: case EVENT_RING_OPCODE_RSS_UPDATE_RULES: - cid = (elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK); + cid = SW_CID(elem->message.data.eth_event.echo); DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); break; case EVENT_RING_OPCODE_VF_FLR: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 1374e5394a79..bfae300cf25f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp, /* Update VFDB with current message and schedule its handling */ mutex_lock(&BP_VFDB(bp)->event_mutex); - BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; - BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = + le32_to_cpu(vfpf_event->msg_addr_hi); + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = + le32_to_cpu(vfpf_event->msg_addr_lo); BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); mutex_unlock(&BP_VFDB(bp)->event_mutex); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8ab000dd52d9..82f191382989 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -248,7 +248,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); - end = PTR_ALIGN(pdata + length + 1, 8) - 1; + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; *end = 0; skb_copy_from_linear_data(skb, pdata, len); diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 04b0d16b210e..95bc470ae441 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) if (!list_empty(&rxf->ucast_pending_add_q)) { mac = 
list_first_entry(&rxf->ucast_pending_add_q, struct bna_mac, qe); - list_add_tail(&mac->qe, &rxf->ucast_active_q); + list_move_tail(&mac->qe, &rxf->ucast_active_q); bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); return 1; } diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 688828865c48..34e9acea8747 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -116,6 +116,15 @@ #define NIC_PF_INTR_ID_MBOX0 8 #define NIC_PF_INTR_ID_MBOX1 9 +/* Minimum FIFO level before all packets for the CQ are dropped + * + * This value ensures that once a packet has been "accepted" + * for reception it will not get dropped due to non-availability + * of CQ descriptor. An errata in HW mandates this value to be + * atleast 0x100. + */ +#define NICPF_CQM_MIN_DROP_LEVEL 0x100 + /* Global timer for CQ timer thresh interrupts * Calculated for SCLK of 700Mhz * value written should be a 1/16th of what is expected diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 4dded90076c8..95f17f8cadac 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) static void nic_init_hw(struct nicpf *nic) { int i; + u64 cqm_cfg; /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); @@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic) /* Enable VLAN ethertype matching and stripping */ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); + + /* Check if HW expected value is higher (could be in future chips) */ + cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); + if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) + nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); } /* Channel parse index configuration */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index dd536be20193..afb10e326b4f 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -21,7 +21,7 @@ #define NIC_PF_TCP_TIMER (0x0060) #define NIC_PF_BP_CFG (0x0080) #define NIC_PF_RRM_CFG (0x0088) -#define NIC_PF_CQM_CF (0x00A0) +#define NIC_PF_CQM_CFG (0x00A0) #define NIC_PF_CNM_CF (0x00A8) #define NIC_PF_CNM_STATUS (0x00B0) #define NIC_PF_CQ_AVG_CFG (0x00C0) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index cf837831304b..f9751294ece7 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -531,6 +531,7 @@ struct be_adapter { struct delayed_work be_err_detection_work; u8 err_flags; + bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */ u32 flags; u32 cmd_privileges; /* Ethtool knobs and info */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 241819b36ca7..6d9a8d78e8ad 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -622,10 +622,13 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) -#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) +#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \ + BE_IF_FLAGS_PASS_L3L4_ERRORS | \ + BE_IF_FLAGS_UNTAGGED) -#define BE_IF_ALL_FILT_FLAGS 
(BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) +#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \ + BE_IF_FLAGS_MULTICAST | \ + BE_IF_FLAGS_ALL_PROMISCUOUS) /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f99de3657ce3..d1cf1274fc2f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = { "Unknown" }; +#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \ + BE_IF_FLAGS_BROADCAST | \ + BE_IF_FLAGS_MULTICAST | \ + BE_IF_FLAGS_PASS_L3L4_ERRORS) + static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; @@ -3537,7 +3542,7 @@ static int be_enable_if_filters(struct be_adapter *adapter) { int status; - status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); + status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON); if (status) return status; @@ -3857,8 +3862,7 @@ static int be_vfs_if_create(struct be_adapter *adapter) int status; /* If a FW profile exists, then cap_flags are updated */ - cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; + cap_flags = BE_VF_IF_EN_FLAGS; for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { @@ -3874,10 +3878,8 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | - BE_IF_FLAGS_PASS_L3L4_ERRORS); + /* PF should enable IF flags during proxy if_create call */ + en_flags = cap_flags & BE_VF_IF_EN_FLAGS; status = be_cmd_if_create(adapter, cap_flags, en_flags, &vf_cfg->if_handle, vf + 1); if (status) @@ -4968,6 +4970,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, adapter->db); + if (adapter->pcicfg && adapter->pcicfg_mapped) + pci_iounmap(adapter->pdev, adapter->pcicfg); } static int db_bar(struct be_adapter *adapter) @@ -5019,8 +5023,10 @@ static int be_map_pci_bars(struct be_adapter *adapter) if (!addr) goto pci_map_err; adapter->pcicfg = addr; + adapter->pcicfg_mapped = true; } else { adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + adapter->pcicfg_mapped = false; } } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 62fa136554ac..41b010645100 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev) if (priv->mdio) { mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } if (priv->clk) diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 623aa1c8ebc6..79a210aaf0bb 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) goto fman_free; } + fman->dev = &of_dev->dev; + return fman; fman_node_put: @@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev) dev_set_drvdata(dev, fman); - fman->dev = dev; - dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); return 0; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 
2aa7b401cc3b..b9ecf197ad11 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv) if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) priv->errata |= GFAR_ERRATA_12; + /* P2020/P1010 Rev 1; MPC8548 Rev 2 */ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || - ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) || + ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31))) priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ } #endif diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 74beb1867230..4ccc032633c4 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -25,6 +25,7 @@ config HIX5HD2_GMAC config HIP04_ETH tristate "HISILICON P04 Ethernet support" + depends on HAS_IOMEM # For MFD_SYSCON select MARVELL_PHY select MFD_SYSCON select HNS_MDIO diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a0070d0e740d..d4f92ed322d6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, { int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); switch (loop) { + case MAC_INTERNALLOOP_PHY: + ret = 0; + break; case MAC_INTERNALLOOP_SERDES: ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); break; @@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, default: ret = -EINVAL; } + + if (!ret) + hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en); + return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 9439f04962e1..38fc5be3870c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -230,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) } } +static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) +{ + u16 max_q_per_vf, max_vfn; + u32 q_id, q_num_per_port; + u32 mac_id; + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + return; + + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, + HNS_DSAF_COMM_SERVICE_NW_IDX, + &max_vfn, &max_q_per_vf); + q_num_per_port = max_vfn * max_q_per_vf; + + for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { + dsaf_set_dev_field(dsaf_dev, + DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_QID_M, + DSAFV2_SERDES_LBK_QID_S, + q_id); + q_id += q_num_per_port; + } +} + /** * hns_dsaf_sw_port_type_cfg - cfg sw type * @dsaf_id: dsa fabric id @@ -691,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); } +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG) + return; + + dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_EN_B, !!en); +} + /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id @@ -1022,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) /* set promisc def queue id */ 
hns_dsaf_mix_def_qid_cfg(dsaf_dev); + /* set inner loopback queue id */ + hns_dsaf_inner_qid_cfg(dsaf_dev); + /* in non switch mode, set all port to access mode */ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 40205b910f80..5fea226efaf3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -417,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index f0c4f9b09d5b..60d695daa471 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -134,6 +134,7 @@ #define DSAF_XGE_INT_STS_0_REG 0x1C0 #define DSAF_PPE_INT_STS_0_REG 0x1E0 #define DSAF_ROCEE_INT_STS_0_REG 0x200 +#define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -857,6 +858,10 @@ #define PPEV2_CFG_RSS_TBL_4N3_S 24 #define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) +#define DSAFV2_SERDES_LBK_EN_B 8 +#define DSAFV2_SERDES_LBK_QID_S 0 +#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S) + #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3df22840fcd1..3c4a3bc31a89 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev, switch (loop) { case MAC_INTERNALLOOP_PHY: - if ((phy_dev) && (!phy_dev->is_c45)) + if ((phy_dev) && (!phy_dev->is_c45)) { ret = hns_nic_config_phy_loopback(phy_dev, 0x1); + ret |= h->dev->ops->set_loopback(h, loop, 0x1); + } break; case MAC_INTERNALLOOP_MAC: if ((h->dev->ops->set_loopback) && @@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, struct sk_buff *skb) { struct net_device *ndev; + struct hns_nic_priv *priv; struct hnae_ring *ring; struct netdev_queue *dev_queue; struct sk_buff *new_skb; @@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, char buff[33]; /* 32B data and the last character '\0' */ if (!ring_data) { /* Just for doing create frame*/ + ndev = skb->dev; + priv = netdev_priv(ndev); + frame_size = skb->len; memset(skb->data, 0xFF, frame_size); + if ((!AE_IS_VER1(priv->enet_ver)) && + (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) { + memcpy(skb->data, ndev->dev_addr, 6); + skb->data[5] += 0x1f; + } + frame_size &= ~1ul; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); memset(&skb->data[frame_size / 2 + 10], 0xBE, @@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev, /* place data into test skb */ (void)skb_put(skb, size); + skb->dev = ndev; __lb_other_process(NULL, skb); skb->queue_mapping = NIC_LB_TEST_RING_ID; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 
335417b4756b..ebe60719e489 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1166,7 +1166,10 @@ map_failed: if (!firmware_has_feature(FW_FEATURE_CMO)) netdev_err(netdev, "tx: unable to map xmit buffer\n"); adapter->tx_map_failed++; - skb_linearize(skb); + if (skb_linearize(skb)) { + netdev->stats.tx_dropped++; + goto out; + } force_bounce = 1; goto retry_bounce; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 7d6570843723..6e9e16eee5d0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.cmd = REQUEST_CAPABILITY; crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); - crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); + crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = - cpu_to_be32(adapter->req_tx_entries_per_subcrq); + cpu_to_be64(adapter->req_tx_entries_per_subcrq); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = - cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); + cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); - crq.request_capability.number = cpu_to_be32(adapter->req_mtu); + crq.request_capability.number = cpu_to_be64(adapter->req_mtu); ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { if (adapter->promisc_supported) { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); - crq.request_capability.number = cpu_to_be32(1); + crq.request_capability.number = cpu_to_be64(1); ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); - crq.request_capability.number = cpu_to_be32(0); + crq.request_capability.number = cpu_to_be64(0); ibmvnic_send_crq(adapter, &crq); } @@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, switch (be16_to_cpu(crq->query_capability.capability)) { case MIN_TX_QUEUES: adapter->min_tx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_tx_queues = %lld\n", adapter->min_tx_queues); break; case MIN_RX_QUEUES: adapter->min_rx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_queues = %lld\n", adapter->min_rx_queues); break; case MIN_RX_ADD_QUEUES: adapter->min_rx_add_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_add_queues = %lld\n", adapter->min_rx_add_queues); break; case MAX_TX_QUEUES: adapter->max_tx_queues = - 
be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_queues = %lld\n", adapter->max_tx_queues); break; case MAX_RX_QUEUES: adapter->max_rx_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_queues = %lld\n", adapter->max_rx_queues); break; case MAX_RX_ADD_QUEUES: adapter->max_rx_add_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_add_queues = %lld\n", adapter->max_rx_add_queues); break; case MIN_TX_ENTRIES_PER_SUBCRQ: adapter->min_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", adapter->min_tx_entries_per_subcrq); break; case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: adapter->min_rx_add_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", adapter->min_rx_add_entries_per_subcrq); break; case MAX_TX_ENTRIES_PER_SUBCRQ: adapter->max_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", adapter->max_tx_entries_per_subcrq); break; case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: adapter->max_rx_add_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", adapter->max_rx_add_entries_per_subcrq); break; case TCP_IP_OFFLOAD: adapter->tcp_ip_offload = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "tcp_ip_offload = %lld\n", adapter->tcp_ip_offload); break; case PROMISC_SUPPORTED: adapter->promisc_supported = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "promisc_supported = %lld\n", adapter->promisc_supported); break; case MIN_MTU: - adapter->min_mtu = be32_to_cpu(crq->query_capability.number); + adapter->min_mtu = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); break; case MAX_MTU: - adapter->max_mtu = be32_to_cpu(crq->query_capability.number); + adapter->max_mtu = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); break; case MAX_MULTICAST_FILTERS: adapter->max_multicast_filters = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_multicast_filters = %lld\n", adapter->max_multicast_filters); break; case VLAN_HEADER_INSERTION: adapter->vlan_header_insertion = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); if (adapter->vlan_header_insertion) netdev->features |= NETIF_F_HW_VLAN_STAG_TX; netdev_dbg(netdev, "vlan_header_insertion = %lld\n", @@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, break; case MAX_TX_SG_ENTRIES: adapter->max_tx_sg_entries = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", adapter->max_tx_sg_entries); break; case RX_SG_SUPPORTED: adapter->rx_sg_supported = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "rx_sg_supported = 
%lld\n", adapter->rx_sg_supported); break; case OPT_TX_COMP_SUB_QUEUES: adapter->opt_tx_comp_sub_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", adapter->opt_tx_comp_sub_queues); break; case OPT_RX_COMP_QUEUES: adapter->opt_rx_comp_queues = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", adapter->opt_rx_comp_queues); break; case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: adapter->opt_rx_bufadd_q_per_rx_comp_q = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", adapter->opt_rx_bufadd_q_per_rx_comp_q); break; case OPT_TX_ENTRIES_PER_SUBCRQ: adapter->opt_tx_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", adapter->opt_tx_entries_per_subcrq); break; case OPT_RXBA_ENTRIES_PER_SUBCRQ: adapter->opt_rxba_entries_per_subcrq = - be32_to_cpu(crq->query_capability.number); + be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", adapter->opt_rxba_entries_per_subcrq); break; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 1242925ad34c..1a9993cc79b5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -319,10 +319,8 @@ struct ibmvnic_capability { u8 first; u8 cmd; __be16 capability; /* one of ibmvnic_capabilities */ + __be64 number; struct ibmvnic_rc rc; - __be32 number; /*FIX: should be __be64, but I'm getting the least - * significant word first - */ } __packed __aligned(8); struct ibmvnic_login { diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b1de7afd4116..3ddf657bc10b 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme) } static inline void -jme_clear_pm(struct jme_adapter *jme) +jme_clear_pm_enable_wol(struct jme_adapter *jme) { jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); } +static inline void +jme_clear_pm_disable_wol(struct jme_adapter *jme) +{ + jwrite32(jme, JME_PMCS, PMCS_STMASK); +} + static int jme_reload_eeprom(struct jme_adapter *jme) { @@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev) struct jme_adapter *jme = netdev_priv(netdev); int rc; - jme_clear_pm(jme); + jme_clear_pm_disable_wol(jme); JME_NAPI_ENABLE(jme); tasklet_init(&jme->linkch_task, jme_link_change_tasklet, @@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme) static void jme_powersave_phy(struct jme_adapter *jme) { - if (jme->reg_pmcs) { + if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { jme_set_100m_half(jme); if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) jme_wait_link(jme); - jme_clear_pm(jme); + jme_clear_pm_enable_wol(jme); } else { jme_phy_off(jme); } @@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev, if (wol->wolopts & WAKE_MAGIC) jme->reg_pmcs |= PMCS_MFEN; - jwrite32(jme, JME_PMCS, jme->reg_pmcs); - device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); - return 0; } @@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev, jme->mii_if.mdio_read = jme_mdio_read; jme->mii_if.mdio_write = jme_mdio_write; - jme_clear_pm(jme); - device_set_wakeup_enable(&pdev->dev, true); + jme_clear_pm_disable_wol(jme); + 
device_init_wakeup(&pdev->dev, true); jme_set_phyfifo_5level(jme); jme->pcirev = pdev->revision; @@ -3304,7 +3307,7 @@ jme_resume(struct device *dev) if (!netif_running(netdev)) return 0; - jme_clear_pm(jme); + jme_clear_pm_disable_wol(jme); jme_phy_on(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) jme_set_settings(netdev, &jme->old_ecmd); @@ -3312,13 +3315,14 @@ jme_resume(struct device *dev) jme_reset_phy_processor(jme); jme_phy_calibration(jme); jme_phy_setEA(jme); - jme_start_irq(jme); netif_device_attach(netdev); atomic_inc(&jme->link_changing); jme_reset_link(jme); + jme_start_irq(jme); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f191a1612589..21e2c0960271 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2245,7 +2245,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) struct mlx4_en_dev *mdev = en_priv->mdev; u64 mac_u64 = mlx4_mac_to_u64(mac); - if (!is_valid_ether_addr(mac)) + if (is_multicast_ether_addr(mac)) return -EINVAL; return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2cc3c626c3fe..f8674ae62752 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1256,6 +1256,7 @@ err_set_port: static int mlx4_mf_bond(struct mlx4_dev *dev) { int err = 0; + int nvfs; struct mlx4_slaves_pport slaves_port1; struct mlx4_slaves_pport slaves_port2; DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); @@ -1272,11 +1273,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev) return -EINVAL; } + /* number of virtual functions is number of total functions minus one + * physical function for each port. 
+ */ + nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + + bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2; + /* limit on maximum allowed VFs */ - if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + - bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > - MAX_MF_BOND_ALLOWED_SLAVES) + if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) { + mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n", + nvfs, MAX_MF_BOND_ALLOWED_SLAVES); return -EINVAL; + } if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 787b7bb54d52..211c65087997 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) if (need_mf_bond) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, if (need_mf_bond) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); @@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) if (dup) { if (port == 1) { mutex_lock(&table->mutex); - mutex_lock(&dup_table->mutex); + mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING); } else { mutex_lock(&dup_table->mutex); - mutex_lock(&table->mutex); + mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING); } } else { mutex_lock(&table->mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index aac071a7e830..5b1753233c5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -223,6 +223,7 @@ struct mlx5e_pport_stats { static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", + "bytes", "csum_none", "csum_sw", "lro_packets", @@ -232,16 +233,18 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_rq_stats { u64 packets; + u64 bytes; u64 csum_none; u64 csum_sw; u64 lro_packets; u64 lro_bytes; u64 
wqe_err; -#define NUM_RQ_STATS 6 +#define NUM_RQ_STATS 7 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { "packets", + "bytes", "tso_packets", "tso_bytes", "csum_offload_none", @@ -253,6 +256,7 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_sq_stats { u64 packets; + u64 bytes; u64 tso_packets; u64 tso_bytes; u64 csum_offload_none; @@ -260,7 +264,7 @@ struct mlx5e_sq_stats { u64 wake; u64 dropped; u64 nop; -#define NUM_SQ_STATS 8 +#define NUM_SQ_STATS 9 }; struct mlx5e_stats { @@ -304,14 +308,9 @@ enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, }; -enum cq_flags { - MLX5E_CQ_HAS_CQES = 1, -}; - struct mlx5e_cq { /* data path - accessed per cqe */ struct mlx5_cqwq wq; - unsigned long flags; /* data path - accessed per napi poll */ struct napi_struct *napi; @@ -452,6 +451,8 @@ enum mlx5e_traffic_types { MLX5E_NUM_TT, }; +#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY) + enum mlx5e_rqt_ix { MLX5E_INDIRECTION_RQT, MLX5E_SINGLE_RQ_RQT, @@ -618,9 +619,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); +void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5e_tx_wqe *wqe, int bf_sz) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index be6543570b2b..2018eebe1531 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, overflow_work); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); } @@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); u64 ns = timespec64_to_ns(ts); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_init(&tstamp->clock, &tstamp->cycles, ns); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } @@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); u64 ns; + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); ns = timecounter_read(&tstamp->clock); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); *ts = ns_to_timespec64(ns); @@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); + unsigned long flags; - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_adjtime(&tstamp->clock, delta); - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); 
return 0; } @@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) { u64 adj; u32 diff; + unsigned long flags; int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); @@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock(&tstamp->lock); + write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : tstamp->nominal_c_mult + diff; - write_unlock(&tstamp->lock); + write_unlock_irqrestore(&tstamp->lock, flags); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 65624ac65b4c..5abeb00fceb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev, mlx5e_close_locked(dev); priv->params.num_channels = count; + mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); if (was_opened) err = mlx5e_open_locked(dev); @@ -703,18 +705,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) +{ + struct mlx5_core_dev *mdev = priv->mdev; + void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + int i; + + MLX5_SET(modify_tir_in, in, bitmask.hash, 1); + mlx5e_build_tir_ctx_hash(tirc, priv); + + for (i = 0; i < MLX5E_NUM_TT; i++) + if (IS_HASHING_TT(i)) + mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen); +} + static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mlx5e_priv *priv = netdev_priv(dev); - bool close_open; - int err = 0; + int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + void *in; if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_XOR) && (hfunc != ETH_RSS_HASH_TOP)) return -EINVAL; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + mutex_lock(&priv->state_lock); if (indir) { @@ -723,11 +743,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); } - close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) && - test_bit(MLX5E_STATE_OPENED, &priv->state); - if (close_open) - mlx5e_close_locked(dev); - if (key) memcpy(priv->params.toeplitz_hash_key, key, sizeof(priv->params.toeplitz_hash_key)); @@ -735,12 +750,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, if (hfunc != ETH_RSS_HASH_NO_CHANGE) priv->params.rss_hfunc = hfunc; - if (close_open) - err = mlx5e_open_locked(priv->netdev); + mlx5e_modify_tirs_hash(priv, in, inlen); mutex_unlock(&priv->state_lock); - return err; + kvfree(in); + + return 0; } static int mlx5e_get_rxnfc(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d4e1c3045200..402994bf7e16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -141,6 +141,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) return; /* Collect firts the SW counters and then HW for consistency */ + s->rx_packets = 0; + s->rx_bytes = 0; + s->tx_packets = 0; + s->tx_bytes = 0; s->tso_packets = 0; s->tso_bytes = 0; s->tx_queue_stopped = 0; @@ -155,6 +159,8 @@ void 
mlx5e_update_stats(struct mlx5e_priv *priv) for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; s->lro_packets += rq_stats->lro_packets; s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; @@ -164,6 +170,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; s->tso_packets += sq_stats->tso_packets; s->tso_bytes += sq_stats->tso_bytes; s->tx_queue_stopped += sq_stats->stopped; @@ -225,23 +233,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->tx_broadcast_bytes = MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); - s->rx_packets = - s->rx_unicast_packets + - s->rx_multicast_packets + - s->rx_broadcast_packets; - s->rx_bytes = - s->rx_unicast_bytes + - s->rx_multicast_bytes + - s->rx_broadcast_bytes; - s->tx_packets = - s->tx_unicast_packets + - s->tx_multicast_packets + - s->tx_broadcast_packets; - s->tx_bytes = - s->tx_unicast_bytes + - s->tx_multicast_bytes + - s->tx_broadcast_bytes; - /* Update calculated offload counters */ s->tx_csum_offload = s->tx_packets - tx_offload_none; s->rx_csum_good = s->rx_packets - s->rx_csum_none - @@ -1199,7 +1190,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); ix = priv->params.indirection_rqt[ix]; - ix = ix % priv->params.num_channels; MLX5_SET(rqtc, rqtc, rq_num[i], test_bit(MLX5E_STATE_OPENED, &priv->state) ? priv->channel[ix]->rq.rqn : @@ -1317,7 +1307,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) lro_timer_supported_periods[2])); } -static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) +void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) +{ + MLX5_SET(tirc, tirc, rx_hash_fn, + mlx5e_rx_hash_fn(priv->params.rss_hfunc)); + if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { + void *rss_key = MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key); + size_t len = MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + memcpy(rss_key, priv->params.toeplitz_hash_key, len); + } +} + +static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1325,6 +1330,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) void *tirc; int inlen; int err; + int tt; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1336,7 +1342,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) mlx5e_build_tir_ctx_lro(tirc, priv); - err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + if (err) + break; + } kvfree(in); @@ -1672,17 +1682,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) default: MLX5_SET(tirc, tirc, indirect_table, priv->rqtn[MLX5E_INDIRECTION_RQT]); - MLX5_SET(tirc, tirc, rx_hash_fn, - mlx5e_rx_hash_fn(priv->params.rss_hfunc)); - if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { - void *rss_key = MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key); - size_t len = MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key); - - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, priv->params.toeplitz_hash_key, len); - } + mlx5e_build_tir_ctx_hash(tirc, priv); break; 
} @@ -1885,8 +1885,10 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_close_locked(priv->netdev); priv->params.lro_en = !!(features & NETIF_F_LRO); - mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); - mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); + err = mlx5e_modify_tirs_lro(priv); + if (err) + mlx5_core_warn(priv->mdev, "lro modify failed, %d\n", + err); if (was_opened) err = mlx5e_open_locked(priv->netdev); @@ -2089,12 +2091,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; } +void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); - int i; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2118,8 +2128,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, netdev_rss_key_fill(priv->params.toeplitz_hash_key, sizeof(priv->params.toeplitz_hash_key)); - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - priv->params.indirection_rqt[i] = i % num_channels; + mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, num_channels); priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index dd959d929aad..59658b9d05d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -230,10 +230,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done; - /* avoid accessing cq (dma coherent memory) if not needed */ - if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return 0; - for (work_done = 0; work_done < budget; work_done++) { struct mlx5e_rx_wqe *wqe; struct mlx5_cqe64 *cqe; @@ -267,6 +263,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) mlx5e_build_rx_skb(cqe, rq, skb); rq->stats.packets++; + rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); napi_gro_receive(cq->napi, skb); wq_ll_pop: @@ -279,8 +276,5 @@ wq_ll_pop: /* ensure cq space is freed before enabling more cqes */ wmb(); - if (work_done == budget) - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return work_done; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2c3fba0fff54..bb4eeeb007de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -179,6 +179,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) unsigned int skb_len = skb->len; u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; + unsigned int num_bytes; bool bf = false; u16 headlen; u16 ds_cnt; @@ -204,8 +205,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) opcode = MLX5_OPCODE_LSO; ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); payload_len = skb->len - ihs; - wi->num_bytes = skb->len + - (skb_shinfo(skb)->gso_segs - 1) * ihs; + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { @@ -213,9 +213,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, 
skb, bf); - wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } + wi->num_bytes = num_bytes; + if (skb_vlan_tag_present(skb)) { mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, &skb_len); @@ -307,6 +309,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->bf_budget = bf ? sq->bf_budget - 1 : 0; sq->stats.packets++; + sq->stats.bytes += num_bytes; return NETDEV_TX_OK; dma_unmap_wqe_err: @@ -335,10 +338,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) u16 sqcc; int i; - /* avoid accessing cq (dma coherent memory) if not needed */ - if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) - return false; - sq = container_of(cq, struct mlx5e_sq, cq); npkts = 0; @@ -422,10 +421,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) netif_tx_wake_queue(sq->txq); sq->stats.wake++; } - if (i == MLX5E_TX_CQ_POLL_BUDGET) { - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); - return true; - } - return false; + return (i == MLX5E_TX_CQ_POLL_BUDGET); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 4ac8d716dbdd..66d51a77609e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); barrier(); napi_schedule(cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c071077aafbd..7992c553c1f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) { int index = q->producer_counter & (q->count - 1); - if ((q->producer_counter - q->consumer_counter) == q->count) + if ((u16) (q->producer_counter - q->consumer_counter) == q->count) return NULL; return mlxsw_pci_queue_elem_info_get(q, index); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 09ce451c283b..a94daa8c346c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2358,9 +2358,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - - if (lag->ref_count == 1) - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); } if (lag->ref_count == 1) { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 00cfd95ca59d..3e67f451f2ab 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -474,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ndev->base_addr = res->start; priv->base = devm_ioremap_resource(p_dev, res); - ret = IS_ERR(priv->base); - if (ret) { + if (IS_ERR(priv->base)) { dev_err(p_dev, "devm_ioremap_resource failed\n"); + ret = PTR_ERR(priv->base); goto init_fail; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 689a4a5c8dcf..1ef03939d25f 100644 --- 
a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev) dev->netdev_ops = &qcaspi_netdev_ops; qcaspi_set_ethtool_ops(dev); dev->watchdog_timeo = QCASPI_TX_TIMEOUT; - dev->flags = IFF_MULTICAST; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->tx_queue_len = 100; qca = netdev_priv(dev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 537974cfd427..dd2cf3738b73 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: - RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: @@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); - break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: @@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; struct rtl8169_counters *counters = tp->counters; unsigned int start; - if (netif_running(dev)) + pm_runtime_get_noresume(&pdev->dev); + + if (netif_running(dev) && pm_runtime_active(&pdev->dev)) rtl8169_rx_missed(dev, ioaddr); do { @@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) * Fetch additonal counter values missing in stats collected by driver * from tally counters. */ - rtl8169_update_counters(dev); + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(dev); /* * Subtract values fetched during initalization. 
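Note: the rtl8169 hunks above guard every hardware-counter read in .ndo_get_stats64 with runtime-PM state checks and take one last counter snapshot in the runtime-suspend path. The following is a minimal sketch of that pattern, not the driver's actual code: only the pm_runtime_* and netdev helpers are real kernel APIs; the foo_* names, fields and foo_read_hw_counters() are hypothetical stand-ins (the real driver guards rtl8169_rx_missed() and rtl8169_update_counters() separately, which the sketch collapses into one helper).

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* Hypothetical private data; stands in for struct rtl8169_private. */
struct foo_priv {
	struct pci_dev *pci_dev;
	struct rtnl_link_stats64 sw_stats;	/* counters kept in software */
};

static void foo_read_hw_counters(struct net_device *dev)
{
	/* Touches MMIO; only safe while the device is powered. */
}

static struct rtnl_link_stats64 *
foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct device *d = &priv->pci_dev->dev;

	/* Pin the current runtime-PM state without waking the device. */
	pm_runtime_get_noresume(d);

	/* MMIO counters are only valid while the device is runtime-active. */
	if (netif_running(dev) && pm_runtime_active(d))
		foo_read_hw_counters(dev);

	/* Software-maintained counters are always safe to report. */
	*stats = priv->sw_stats;

	pm_runtime_put_noidle(d);
	return stats;
}

static int foo_runtime_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* Snapshot the hardware counters before power is cut, so later
	 * get_stats64 calls can report them without resuming the device.
	 */
	foo_read_hw_counters(dev);
	return 0;
}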
@@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - le16_to_cpu(tp->tc_offset.tx_aborted); + pm_runtime_put_noidle(&pdev->dev); + return stats; } @@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device) rtl8169_net_suspend(dev); + /* Update counters before going runtime suspend */ + rtl8169_rx_missed(dev, tp->mmio_addr); + rtl8169_update_counters(dev); + return 0; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 744d7806a9ee..86449c357168 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1722,7 +1722,6 @@ static int ravb_set_gti(struct net_device *ndev) static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct ravb_private *priv; enum ravb_chip_id chip_id; struct net_device *ndev; @@ -1754,8 +1753,7 @@ static int ravb_probe(struct platform_device *pdev) ndev->base_addr = res->start; ndev->dma = -1; - match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev); - chip_id = (enum ravb_chip_id)match->data; + chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); if (chip_id == RCAR_GEN3) irq = platform_get_irq_byname(pdev, "ch22"); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index dfa9e59c9442..738449992876 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3061,15 +3061,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->ether_link_active_low = pd->ether_link_active_low; /* set cpu data */ - if (id) { + if (id) mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; - } else { - const struct of_device_id *match; + else + mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); - match = of_match_device(of_match_ptr(sh_eth_match_table), - &pdev->dev); - mdp->cd = (struct sh_eth_cpu_data *)match->data; - } mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); if (!mdp->reg_offset) { dev_err(&pdev->dev, "Unknown register type (%d)\n", diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0faf16336035..efb54f356a67 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev) struct stmmac_priv *priv = netdev_priv(ndev); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; int addr, found; - struct device_node *mdio_node = NULL; - struct device_node *child_node = NULL; + struct device_node *mdio_node = priv->plat->mdio_node; if (!mdio_bus_data) return 0; if (IS_ENABLED(CONFIG_OF)) { - for_each_child_of_node(priv->device->of_node, child_node) { - if (of_device_is_compatible(child_node, - "snps,dwmac-mdio")) { - mdio_node = child_node; - break; - } - } - if (mdio_node) { netdev_dbg(ndev, "FOUND MDIO subnode\n"); } else { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6a52fa18cbf2..4514ba73d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -110,6 +110,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) struct device_node 
*np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + struct device_node *child_node = NULL; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) @@ -140,13 +141,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->phy_node = of_node_get(np); } + for_each_child_of_node(np, child_node) + if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) { + plat->mdio_node = child_node; + break; + } + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. */ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) + if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index fc8bbff2d7e3..af11ed1e0bcc 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -426,7 +426,7 @@ #define DWC_MMC_RXOCTETCOUNT_GB 0x0784 #define DWC_MMC_RXPACKETCOUNT_GB 0x0780 -static int debug = 3; +static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); @@ -650,6 +650,11 @@ struct net_local { u32 mmc_tx_counters_mask; struct dwceqos_flowcontrol flowcontrol; + + /* Tracks the intermediate state of phy started but hardware + * init not finished yet. + */ + bool phy_defer; }; static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, @@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev) struct phy_device *phydev = lp->phy_dev; int status_change = 0; + if (lp->phy_defer) + return; + if (phydev->link) { if ((lp->speed != phydev->speed) || (lp->duplex != phydev->duplex)) { @@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp) /* Allocate DMA descriptors */ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->rx_descs_addr, 0); + &lp->rx_descs_addr, GFP_KERNEL); if (!lp->rx_descs) goto err_out; lp->rx_descs_tail_addr = lp->rx_descs_addr + @@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp) size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, - &lp->tx_descs_addr, 0); + &lp->tx_descs_addr, GFP_KERNEL); if (!lp->tx_descs) goto err_out; lp->tx_descs_tail_addr = lp->tx_descs_addr + @@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp) regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); + + lp->phy_defer = false; + mutex_lock(&lp->phy_dev->lock); + phy_read_status(lp->phy_dev); + dwceqos_adjust_link(lp->ndev); + mutex_unlock(&lp->phy_dev->lock); } static void dwceqos_tx_reclaim(unsigned long data) @@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev) } netdev_reset_queue(ndev); + /* The dwceqos reset state machine requires all phy clocks to complete, + * hence the unusual init order with phy_start first. 
+ */ + lp->phy_defer = true; + phy_start(lp->phy_dev); dwceqos_init_hw(lp); napi_enable(&lp->napi); - phy_start(lp->phy_dev); netif_start_queue(ndev); tasklet_enable(&lp->tx_bdreclaim_tasklet); @@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); - phy_stop(lp->phy_dev); - tasklet_disable(&lp->tx_bdreclaim_tasklet); - netif_stop_queue(ndev); napi_disable(&lp->napi); - dwceqos_drain_dma(lp); + /* Stop all tx before we drain the tx dma. */ + netif_tx_lock_bh(lp->ndev); + netif_stop_queue(ndev); + netif_tx_unlock_bh(lp->ndev); - netif_tx_lock(lp->ndev); + dwceqos_drain_dma(lp); dwceqos_reset_hw(lp); + phy_stop(lp->phy_dev); + dwceqos_descriptor_free(lp); - netif_tx_unlock(lp->ndev); return 0; } @@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) ((trans.initial_descriptor + trans.nr_descriptors) % DWCEQOS_TX_DCNT)); - dwceqos_tx_finalize(skb, lp, &trans); - - netdev_sent_queue(ndev, skb->len); - spin_lock_bh(&lp->tx_lock); lp->tx_free -= trans.nr_descriptors; + dwceqos_tx_finalize(skb, lp, &trans); + netdev_sent_queue(ndev, skb->len); spin_unlock_bh(&lp->tx_lock); ndev->trans_start = jiffies; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 03833dbfca67..dc85f7095e51 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -297,6 +297,17 @@ static int kszphy_config_init(struct phy_device *phydev) if (priv->led_mode >= 0) kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); + if (phy_interrupt_is_valid(phydev)) { + int ctl = phy_read(phydev, MII_BMCR); + + if (ctl < 0) + return ctl; + + ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE); + if (ret < 0) + return ret; + } + return 0; } @@ -635,6 +646,21 @@ static void kszphy_get_stats(struct phy_device *phydev, data[i] = kszphy_get_stat(phydev, i); } +static int kszphy_resume(struct phy_device *phydev) +{ + int value; + + mutex_lock(&phydev->lock); + + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + + kszphy_config_intr(phydev); + mutex_unlock(&phydev->lock); + + return 0; +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; @@ -844,7 +870,7 @@ static struct phy_driver ksphy_driver[] = { .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, .suspend = genphy_suspend, - .resume = genphy_resume, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8061, .name = "Micrel KSZ8061", diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index fc8ad001bc94..d61da9ece3ba 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -443,9 +443,14 @@ static ssize_t ppp_read(struct file *file, char __user *buf, * network traffic (demand mode). 
*/ struct ppp *ppp = PF_TO_PPP(pf); + + ppp_recv_lock(ppp); if (ppp->n_channels == 0 && - (ppp->flags & SC_LOOP_TRAFFIC) == 0) + (ppp->flags & SC_LOOP_TRAFFIC) == 0) { + ppp_recv_unlock(ppp); break; + } + ppp_recv_unlock(ppp); } ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) @@ -532,9 +537,12 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait) else if (pf->kind == INTERFACE) { /* see comment in ppp_read */ struct ppp *ppp = PF_TO_PPP(pf); + + ppp_recv_lock(ppp); if (ppp->n_channels == 0 && (ppp->flags & SC_LOOP_TRAFFIC) == 0) mask |= POLLIN | POLLRDNORM; + ppp_recv_unlock(ppp); } return mask; @@ -2808,6 +2816,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, out2: mutex_unlock(&pn->all_ppp_mutex); + rtnl_unlock(); free_netdev(dev); out1: *retp = ret; diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 224e7d82de6d..cf77f2dffa69 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -134,7 +134,6 @@ static void ax88172a_remove_mdio(struct usbnet *dev) netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id); mdiobus_unregister(priv->mdio); - kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index dc0212c3cc28..86ba30ba35e8 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -837,7 +837,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; - /* reset data interface */ + /* Reset data interface. Some devices will not reset properly + * unless they are configured first. Toggle the altsetting to + * force a reset + */ + usb_set_interface(dev->udev, iface_no, data_altsetting); temp = usb_set_interface(dev->udev, iface_no, 0); if (temp) { dev_dbg(&intf->dev, "set interface failed\n"); @@ -984,8 +988,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) { - int ret; - /* MBIM backwards compatible function? */ if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) return -ENODEV; @@ -994,16 +996,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) * Additionally, generic NCM devices are assumed to accept arbitrarily * placed NDP. */ - ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); - - /* - * We should get an event when network connection is "connected" or - * "disconnected". Set network connection in "disconnected" state - * (carrier is OFF) during attach, so the IP network stack does not - * start IPv6 negotiation and more. 
- */ - usbnet_link_change(dev, 0, 0); - return ret; + return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); } static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max) @@ -1586,7 +1579,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) static const struct driver_info cdc_ncm_info = { .description = "CDC NCM", - .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET + | FLAG_LINK_INTR, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .manage_power = usbnet_manage_power, @@ -1599,7 +1593,7 @@ static const struct driver_info cdc_ncm_info = { static const struct driver_info wwan_info = { .description = "Mobile Broadband Network Device", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET - | FLAG_WWAN, + | FLAG_LINK_INTR | FLAG_WWAN, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .manage_power = usbnet_manage_power, @@ -1612,7 +1606,7 @@ static const struct driver_info wwan_info = { static const struct driver_info wwan_noarp_info = { .description = "Mobile Broadband Network Device (NO ARP)", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET - | FLAG_WWAN | FLAG_NOARP, + | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .manage_power = usbnet_manage_power, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 570deef53f74..a3a4ccf7cf52 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -861,8 +861,10 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ - {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ + {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ @@ -885,6 +887,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 0b0ba7ef14e4..10798128c03f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1769,6 +1769,13 @@ out3: if (info->unbind) info->unbind (dev, udev); out1: + /* subdrivers must undo all they did in bind() if they + * fail it, but we may fail later and a deferred kevent + * may trigger an error resubmitting itself and, worse, + * 
schedule a timer. So we kill it all just in case. + */ + cancel_work_sync(&dev->kevent); + del_timer_sync(&dev->delay); free_netdev(net); out: return status; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0cbf520cea77..fc895d0e85d9 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) /* - * parse and copy relevant protocol headers: + * parse relevant protocol headers: * For a tso pkt, relevant headers are L2/3/4 including options * For a pkt requesting csum offloading, they are L2/3 and may include L4 * if it's a TCP/UDP pkt @@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) * Other effects: * 1. related *ctx fields are updated. * 2. ctx->copy_size is # of bytes copied - * 3. the portion copied is guaranteed to be in the linear part + * 3. the portion to be copied is guaranteed to be in the linear part * */ static int -vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, - struct vmxnet3_tx_ctx *ctx, - struct vmxnet3_adapter *adapter) +vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, + struct vmxnet3_tx_ctx *ctx, + struct vmxnet3_adapter *adapter) { - struct Vmxnet3_TxDataDesc *tdd; u8 protocol = 0; if (ctx->mss) { /* TSO */ @@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, return 0; } + return 1; +err: + return -1; +} + +/* + * copy relevant protocol headers to the transmit ring: + * For a tso pkt, relevant headers are L2/3/4 including options + * For a pkt requesting csum offloading, they are L2/3 and may include L4 + * if it's a TCP/UDP pkt + * + * + * Note that this requires that vmxnet3_parse_hdr be called first to set the + * appropriate bits in ctx first + */ +static void +vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, + struct vmxnet3_tx_ctx *ctx, + struct vmxnet3_adapter *adapter) +{ + struct Vmxnet3_TxDataDesc *tdd; + tdd = tq->data_ring.base + tq->tx_ring.next2fill; memcpy(tdd->data, skb->data, ctx->copy_size); netdev_dbg(adapter->netdev, "copy %u bytes to dataRing[%u]\n", ctx->copy_size, tq->tx_ring.next2fill); - return 1; - -err: - return -1; } @@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, } } - spin_lock_irqsave(&tq->tx_lock, flags); - - if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { - tq->stats.tx_ring_full++; - netdev_dbg(adapter->netdev, - "tx queue stopped on %s, next2comp %u" - " next2fill %u\n", adapter->netdev->name, - tq->tx_ring.next2comp, tq->tx_ring.next2fill); - - vmxnet3_tq_stop(tq, adapter); - spin_unlock_irqrestore(&tq->tx_lock, flags); - return NETDEV_TX_BUSY; - } - - - ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); + ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); if (ret >= 0) { BUG_ON(ret <= 0 && ctx.copy_size != 0); /* hdrs parsed, check against other limits */ @@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, } } else { tq->stats.drop_hdr_inspect_err++; - goto unlock_drop_pkt; + goto drop_pkt; } + spin_lock_irqsave(&tq->tx_lock, flags); + + if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { + tq->stats.tx_ring_full++; + netdev_dbg(adapter->netdev, + "tx queue stopped on %s, next2comp %u" + " next2fill %u\n", adapter->netdev->name, + tq->tx_ring.next2comp, tq->tx_ring.next2fill); + + vmxnet3_tq_stop(tq, adapter); + spin_unlock_irqrestore(&tq->tx_lock, flags); + return 
NETDEV_TX_BUSY; + } + + + vmxnet3_copy_hdr(skb, tq, &ctx, adapter); + /* fill tx descs related to addr & len */ if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) goto unlock_drop_pkt; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 66addb7a7911..bdcf617a9d52 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -104,20 +104,23 @@ static struct dst_ops vrf_dst_ops = { #if IS_ENABLED(CONFIG_IPV6) static bool check_ipv6_frame(const struct sk_buff *skb) { - const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data; - size_t hlen = sizeof(*ipv6h); + const struct ipv6hdr *ipv6h; + struct ipv6hdr _ipv6h; bool rc = true; - if (skb->len < hlen) + ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h); + if (!ipv6h) goto out; if (ipv6h->nexthdr == NEXTHDR_ICMP) { const struct icmp6hdr *icmph; + struct icmp6hdr _icmph; - if (skb->len < hlen + sizeof(*icmph)) + icmph = skb_header_pointer(skb, sizeof(_ipv6h), + sizeof(_icmph), &_icmph); + if (!icmph) goto out; - icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h)); switch (icmph->icmp6_type) { case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e6944b29588e..1c32bd104797 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -931,8 +931,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + cb->args[1] = err; goto out; + } skip: ++idx; } @@ -1306,8 +1308,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) gbp = (struct vxlanhdr_gbp *)vxh; md->gbp = ntohs(gbp->policy_id); - if (tun_dst) + if (tun_dst) { tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; + tun_dst->u.tun_info.options_len = sizeof(*md); + } if (gbp->dont_learn) md->gbp |= VXLAN_GBP_DONT_LEARN; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 4ed5180c547b..0ccc697fef76 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -107,7 +107,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) sizeof(tx_ant_cmd), &tx_ant_cmd); } -static void iwl_free_fw_paging(struct iwl_mvm *mvm) +void iwl_free_fw_paging(struct iwl_mvm *mvm) { int i; @@ -127,6 +127,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm) get_order(mvm->fw_paging_db[i].fw_paging_size)); } kfree(mvm->trans->paging_download_buf); + mvm->trans->paging_download_buf = NULL; + memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5f3ac8cccf49..ff7c6df9f941 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1225,6 +1225,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +/* Paging */ +void iwl_free_fw_paging(struct iwl_mvm *mvm); + /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 89ea70deeb84..e80be9a59520 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -684,6 +684,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode 
*op_mode) for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + iwl_free_fw_paging(mvm); + iwl_mvm_tof_clean(mvm); ieee80211_free_hw(mvm->hw); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 0914ec2fd574..a040edc55057 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -423,6 +423,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return -1; } + /* + * Increase the pending frames counter, so that later when a reply comes + * in and the counter is decreased - we don't start getting negative + * values. + * Note that we don't need to make sure it isn't agg'd, since we're + * TXing non-sta + */ + atomic_inc(&mvm->pending_frames[sta_id]); + return 0; } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 39c4be41ef83..365dc7e83ab4 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -305,6 +305,7 @@ EXPORT_SYMBOL(of_phy_find_device); * @dev: pointer to net_device claiming the phy * @phy_np: Pointer to device tree node for the PHY * @hndlr: Link state callback for the network device + * @flags: flags to pass to the PHY * @iface: PHY data interface type * * If successful, returns a pointer to the phy_device with the embedded diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index 0c675861623f..d8e4219c2324 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -83,6 +83,7 @@ config SSB_SDIOHOST config SSB_HOST_SOC bool "Support for SSB bus on SoC" depends on SSB && BCM47XX_NVRAM + select SSB_SPROM help Host interface for a SSB directly mapped into memory. This is for some Broadcom SoCs from the BCM47xx and BCM53xx lines. diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 51f1e540fc2b..58eef02edc7e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4245,7 +4245,9 @@ struct mlx5_ifc_modify_tir_bitmask_bits { u8 reserved_at_20[0x1b]; u8 self_lb_en[0x1]; - u8 reserved_at_3c[0x3]; + u8 reserved_at_3c[0x1]; + u8 hash[0x1]; + u8 reserved_at_3e[0x1]; u8 lro[0x1]; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4ce9ff7086f4..d3fcd4591ce4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1985,6 +1985,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +/** + * skb_tailroom_reserve - adjust reserved_tailroom + * @skb: buffer to alter + * @mtu: maximum amount of headlen permitted + * @needed_tailroom: minimum amount of reserved_tailroom + * + * Set reserved_tailroom so that headlen can be as large as possible but + * not larger than mtu and tailroom cannot be smaller than + * needed_tailroom. + * The required headroom should already have been reserved before using + * this function. 
+ */ +static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, + unsigned int needed_tailroom) +{ + SKB_LINEAR_ASSERT(skb); + if (mtu < skb_tailroom(skb) - needed_tailroom) + /* use at most mtu */ + skb->reserved_tailroom = skb_tailroom(skb) - mtu; + else + /* use up to all available space */ + skb->reserved_tailroom = needed_tailroom; +} + #define ENCAP_TYPE_ETHER 0 #define ENCAP_TYPE_IPPROTO 1 diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index eead8ab93c0a..881a79d52467 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -100,6 +100,7 @@ struct plat_stmmacenet_data { int interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; + struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 8f81bbbc38fc..e0f4109e64c6 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); /* Send a single event to user space */ void wireless_send_event(struct net_device *dev, unsigned int cmd, union iwreq_data *wrqu, const char *extra); +#ifdef CONFIG_WEXT_CORE +/* flush all previous wext events - if work is done from netdev notifiers */ +void wireless_nlevent_flush(void); +#else +static inline void wireless_nlevent_flush(void) {} +#endif /* We may need a function to send a stream of events to user space. * More on that later... */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index aa6f8571de13..5df4881dea7b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -292,6 +292,9 @@ enum bpf_func_id { /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ #define BPF_F_TUNINFO_IPV6 (1ULL << 0) +/* BPF_FUNC_skb_set_tunnel_key flags. */ +#define BPF_F_ZERO_CSUM_TX (1ULL << 1) + /* user accessible mirror of in-kernel sk_buff. 
* new fields can only be added to the end of this structure */ diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 82e3e9705017..dcea4f4c62b3 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -723,6 +723,8 @@ int br_fdb_dump(struct sk_buff *skb, struct net_bridge_fdb_entry *f; hlist_for_each_entry_rcu(f, &br->hash[i], hlist) { + int err; + if (idx < cb->args[0]) goto skip; @@ -741,12 +743,15 @@ int br_fdb_dump(struct sk_buff *skb, if (!filter_dev && f->dst) goto skip; - if (fdb_fill_info(skb, br, f, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, - NLM_F_MULTI) < 0) + err = fdb_fill_info(skb, br, f, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI); + if (err < 0) { + cb->args[1] = err; break; + } skip: ++idx; } diff --git a/net/core/filter.c b/net/core/filter.c index 94d26201080d..bba502f7cd57 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1752,7 +1752,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) u8 compat[sizeof(struct bpf_tunnel_key)]; struct ip_tunnel_info *info; - if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6))) + if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX))) return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { @@ -1776,7 +1776,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) info = &md->u.tun_info; info->mode = IP_TUNNEL_INFO_TX; - info->key.tun_flags = TUNNEL_KEY; + info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM; info->key.tun_id = cpu_to_be64(from->tunnel_id); info->key.tos = from->tunnel_tos; info->key.ttl = from->tunnel_ttl; @@ -1787,6 +1787,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) sizeof(from->remote_ipv6)); } else { info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); + if (flags & BPF_F_ZERO_CSUM_TX) + info->key.tun_flags &= ~TUNNEL_CSUM; } return 0; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d735e854f916..8261d95dd846 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2911,6 +2911,7 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb, nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc); out: netif_addr_unlock_bh(dev); + cb->args[1] = err; return idx; } EXPORT_SYMBOL(ndo_dflt_fdb_dump); @@ -2944,6 +2945,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) ops = br_dev->netdev_ops; } + cb->args[1] = 0; for_each_netdev(net, dev) { if (brport_idx && (dev->ifindex != brport_idx)) continue; @@ -2971,12 +2973,16 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev, idx); } + if (cb->args[1] == -EMSGSIZE) + break; if (dev->netdev_ops->ndo_fdb_dump) idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL, idx); else idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx); + if (cb->args[1] == -EMSGSIZE) + break; cops = NULL; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5bf88f58bee7..8616d1147c93 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2948,6 +2948,24 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page, EXPORT_SYMBOL_GPL(skb_append_pagefrags); /** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update + * @len: length of data pulled + * + * This function performs an skb_push on the packet and updates + * the CHECKSUM_COMPLETE checksum. 
It should be used on + * receive path processing instead of skb_push unless you know + * that the checksum difference is zero (e.g., a valid IP header) + * or you are setting ip_summed to CHECKSUM_NONE. + */ +static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; +} + +/** * skb_pull_rcsum - pull skb and update receive checksum * @skb: buffer to update * @len: length of data pulled @@ -4084,9 +4102,9 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, if (!pskb_may_pull(skb_chk, offset)) goto err; - __skb_pull(skb_chk, offset); + skb_pull_rcsum(skb_chk, offset); ret = skb_chkf(skb_chk); - __skb_push(skb_chk, offset); + skb_push_rcsum(skb_chk, offset); if (ret) goto err; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 05e4cba14162..b3086cf27027 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -356,9 +356,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) skb_dst_set(skb, &rt->dst); skb->dev = dev; - skb->reserved_tailroom = skb_end_offset(skb) - - min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); + skb_tailroom_reserve(skb, mtu, tlen); skb_reset_network_header(skb); pip = ip_hdr(skb); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 64878efa045c..565bf64b2b7d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1236,13 +1236,16 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, if (!skb) return -EINVAL; - cork->length += size; if ((size + skb->len > mtu) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO)) { + if (skb->ip_summed != CHECKSUM_PARTIAL) + return -EOPNOTSUPP; + skb_shinfo(skb)->gso_size = mtu - fragheaderlen; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; } + cork->length += size; while (size > 0) { if (skb_is_gso(skb)) { diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 89e8861e05fc..336e6892a93c 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -661,6 +661,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, inner_iph = (const struct iphdr *)skb_inner_network_header(skb); connected = (tunnel->parms.iph.daddr != 0); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + dst = tnl_params->daddr; if (dst == 0) { /* NBMA tunnel */ @@ -758,7 +760,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { tunnel->err_count--; - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); dst_link_failure(skb); } else tunnel->err_count = 0; diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index c8cbc2b4b792..a726d7853ce5 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -550,7 +550,7 @@ reset: */ if (crtt > tp->srtt_us) { /* Set RTO like tcp_rtt_estimator(), but from cached RTT. 
*/ - crtt /= 8 * USEC_PER_MSEC; + crtt /= 8 * USEC_PER_SEC / HZ; inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk)); } else if (tp->srtt_us == 0) { /* RFC6298: 5.7 We've failed to get a valid RTT sample from diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 75632a925824..9b02af2139d3 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1; - newtp->segs_in = 0; + newtp->segs_in = 1; newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; @@ -815,6 +815,7 @@ int tcp_child_process(struct sock *parent, struct sock *child, int ret = 0; int state = child->sk_state; + tcp_sk(child)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs); if (!sock_owned_by_user(child)) { ret = tcp_rcv_state_process(child, skb); /* Wakeup parent, send SIGIO */ diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 0ec08814f37d..96599d1a1318 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -89,6 +89,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb uh->source = src_port; uh->len = htons(skb->len); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + udp_set_csum(nocheck, skb, src, dst, skb->len); iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet); diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 5c5d23e59da5..9508a20fbf61 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, *fragoff = _frag_off; return hp->nexthdr; } - return -ENOENT; + if (!found) + return -ENOENT; + if (fragoff) + *fragoff = _frag_off; + break; } hdrlen = 8; } else if (nexthdr == NEXTHDR_AUTH) { diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index a69aad1e29d1..c0d4dc1c5ea4 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -777,6 +777,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) __u32 mtu; int err; + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 137fca42aaa6..6c5dfec7a377 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1180,6 +1180,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) u8 tproto; int err; + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + tproto = ACCESS_ONCE(t->parms.proto); if (tproto != IPPROTO_IPIP && tproto != 0) return -1; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 5ee56d0a8699..d64ee7e83664 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) return NULL; skb->priority = TC_PRIO_CONTROL; - skb->reserved_tailroom = skb_end_offset(skb) - - min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); + skb_tailroom_reserve(skb, mtu, tlen); if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 22e28a44e3c8..422dd014aa2c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -962,11 +962,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); - /* a return value > 0 means to resubmit the 
input, but - * it wants the return to be -protocol, or 0 - */ + /* a return value > 0 means to resubmit the input */ if (ret > 0) - return -ret; + return ret; return 0; } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 10ad4ac1fa0b..367784be5df2 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -291,7 +291,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, } /* prepare A-MPDU MLME for Rx aggregation */ - tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); + tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); if (!tid_agg_rx) goto end; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b84f6aa32c08..f006f4a44c0e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -92,7 +92,7 @@ struct ieee80211_fragment_entry { u16 extra_len; u16 last_frag; u8 rx_queue; - bool ccmp; /* Whether fragments were encrypted with CCMP */ + bool check_sequential_pn; /* needed for CCMP/GCMP */ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ }; diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3ece7d1034c8..b54f398cda5d 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) * computing cur_tp */ tmp_mrs = &mi->r[idx].stats; - tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma); + tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10; tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024; return tmp_cur_tp; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 3928dbd24e25..370d677b547b 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -414,15 +414,16 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index) (max_tp_group != MINSTREL_CCK_GROUP)) return; + max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES; + max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; + max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma; + if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) { cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, mrs->prob_ewma); if (cur_tp_avg > tmp_tp_avg) mi->max_prob_rate = index; - max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES; - max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES; - max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma; max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group, max_gpr_idx, max_gpr_prob); @@ -431,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index) } else { if (mrs->prob_ewma > tmp_prob) mi->max_prob_rate = index; - if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma) + if (mrs->prob_ewma > max_gpr_prob) mg->max_group_prob_rate = index; } } @@ -691,7 +692,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) if (likely(sta->ampdu_mlme.tid_tx[tid])) return; - ieee80211_start_tx_ba_session(pubsta, tid, 5000); + ieee80211_start_tx_ba_session(pubsta, tid, 0); } static void @@ -871,7 +872,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, * - if station is in dynamic SMPS (and streams > 1) * - for fallback rates, to increase chances of getting through */ - if (offset > 0 && + if (offset > 0 || (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC && group->streams > 1)) { ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; @@ -1334,7 +1335,8 @@ 
static u32 minstrel_ht_get_expected_throughput(void *priv_sta) prob = mi->groups[i].rates[j].prob_ewma; /* convert tp_avg from pkt per second in kbps */ - tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024; + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10; + tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024; return tp_avg; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bc081850ac0e..60d093f40f1d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1753,7 +1753,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, entry->seq = seq; entry->rx_queue = rx_queue; entry->last_frag = frag; - entry->ccmp = 0; + entry->check_sequential_pn = false; entry->extra_len = 0; return entry; @@ -1849,15 +1849,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) rx->seqno_idx, &(rx->skb)); if (rx->key && (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || - rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) && + rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && ieee80211_has_protected(fc)) { int queue = rx->security_idx; - /* Store CCMP PN so that we can verify that the next - * fragment has a sequential PN value. */ - entry->ccmp = 1; + + /* Store CCMP/GCMP PN so that we can verify that the + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; memcpy(entry->last_pn, rx->key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN); + BUILD_BUG_ON(offsetof(struct ieee80211_key, + u.ccmp.rx_pn) != + offsetof(struct ieee80211_key, + u.gcmp.rx_pn)); + BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); } return RX_QUEUED; } @@ -1872,15 +1884,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; } - /* Verify that MPDUs within one MSDU have sequential PN values. - * (IEEE 802.11i, 8.3.3.4.5) */ - if (entry->ccmp) { + /* "The receiver shall discard MSDUs and MMPDUs whose constituent + * MPDU PN values are not incrementing in steps of 1." 
+ * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) + * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) + */ + if (entry->check_sequential_pn) { int i; u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; int queue; + if (!rx->key || (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && - rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256)) + rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) return RX_DROP_UNUSABLE; memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { @@ -3366,6 +3384,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; /* ignore action frames to TDLS-peers */ if (ieee80211_is_action(hdr->frame_control) && + !is_broadcast_ether_addr(bssid) && !ether_addr_equal(bssid, hdr->addr1)) return false; } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index d05869646515..6b70399ab781 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -62,6 +62,7 @@ static void ipt_destroy_target(struct xt_entry_target *t) struct xt_tgdtor_param par = { .target = t->u.kernel.target, .targinfo = t->data, + .family = NFPROTO_IPV4, }; if (par.target->destroy != NULL) par.target->destroy(&par); @@ -195,6 +196,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, par.hooknum = ipt->tcfi_hook; par.target = ipt->tcfi_t->u.kernel.target; par.targinfo = ipt->tcfi_t->data; + par.family = NFPROTO_IPV4; ret = par.target->target(skb, &par); switch (ret) { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ec529121f38a..ce46f1c7f133 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -526,6 +526,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, } return 0; } + if (addr1->v6.sin6_port != addr2->v6.sin6_port) + return 0; if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) return 0; /* If this is a linklocal address, compare the scope_id. 
*/ diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ded7d931a6a5..963dffcc2618 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -482,7 +482,7 @@ static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) { struct sctp_association *assoc; - struct sctp_transport *tsp; + struct sctp_transport *transport, *tsp; if (v == SEQ_START_TOKEN) { seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " @@ -490,10 +490,10 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) return 0; } - tsp = (struct sctp_transport *)v; - if (!sctp_transport_hold(tsp)) + transport = (struct sctp_transport *)v; + if (!sctp_transport_hold(transport)) return 0; - assoc = tsp->asoc; + assoc = transport->asoc; list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, transports) { @@ -546,7 +546,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "\n"); } - sctp_transport_put(tsp); + sctp_transport_put(transport); return 0; } diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 47f7da58a7f0..8b5833c1ff2e 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -1093,8 +1093,11 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, .cb = cb, .idx = idx, }; + int err; - switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb); + err = switchdev_port_obj_dump(dev, &dump.fdb.obj, + switchdev_port_fdb_dump_cb); + cb->args[1] = err; return dump.idx; } EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 69c29050f14a..4d420bb27396 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -673,7 +673,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, struct tipc_sock *tsk = tipc_sk(sk); struct net *net = sock_net(sk); struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff_head *pktchain = &sk->sk_write_queue; + struct sk_buff_head pktchain; struct iov_iter save = msg->msg_iter; uint mtu; int rc; @@ -687,14 +687,16 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, msg_set_nameupper(mhdr, seq->upper); msg_set_hdr_sz(mhdr, MCAST_H_SIZE); + skb_queue_head_init(&pktchain); + new_mtu: mtu = tipc_bcast_get_mtu(net); - rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain); + rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain); if (unlikely(rc < 0)) return rc; do { - rc = tipc_bcast_xmit(net, pktchain); + rc = tipc_bcast_xmit(net, &pktchain); if (likely(!rc)) return dsz; @@ -704,7 +706,7 @@ new_mtu: if (!rc) continue; } - __skb_queue_purge(pktchain); + __skb_queue_purge(&pktchain); if (rc == -EMSGSIZE) { msg->msg_iter = save; goto new_mtu; @@ -863,7 +865,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) struct net *net = sock_net(sk); struct tipc_msg *mhdr = &tsk->phdr; u32 dnode, dport; - struct sk_buff_head *pktchain = &sk->sk_write_queue; + struct sk_buff_head pktchain; struct sk_buff *skb; struct tipc_name_seq *seq; struct iov_iter save; @@ -924,17 +926,18 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) msg_set_hdr_sz(mhdr, BASIC_H_SIZE); } + skb_queue_head_init(&pktchain); save = m->msg_iter; new_mtu: mtu = tipc_node_get_mtu(net, dnode, tsk->portid); - rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain); + rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain); if (rc < 0) return rc; do { - skb = skb_peek(pktchain); + skb = skb_peek(&pktchain); 
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; - rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid); + rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid); if (likely(!rc)) { if (sock->state != SS_READY) sock->state = SS_CONNECTING; @@ -946,7 +949,7 @@ new_mtu: if (!rc) continue; } - __skb_queue_purge(pktchain); + __skb_queue_purge(&pktchain); if (rc == -EMSGSIZE) { m->msg_iter = save; goto new_mtu; @@ -1016,7 +1019,7 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) struct net *net = sock_net(sk); struct tipc_sock *tsk = tipc_sk(sk); struct tipc_msg *mhdr = &tsk->phdr; - struct sk_buff_head *pktchain = &sk->sk_write_queue; + struct sk_buff_head pktchain; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); u32 portid = tsk->portid; int rc = -EINVAL; @@ -1044,17 +1047,19 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz) timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); dnode = tsk_peer_node(tsk); + skb_queue_head_init(&pktchain); next: save = m->msg_iter; mtu = tsk->max_pkt; send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); - rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain); + rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain); if (unlikely(rc < 0)) return rc; + do { if (likely(!tsk_conn_cong(tsk))) { - rc = tipc_node_xmit(net, pktchain, dnode, portid); + rc = tipc_node_xmit(net, &pktchain, dnode, portid); if (likely(!rc)) { tsk->sent_unacked++; sent += send; @@ -1063,7 +1068,7 @@ next: goto next; } if (rc == -EMSGSIZE) { - __skb_queue_purge(pktchain); + __skb_queue_purge(&pktchain); tsk->max_pkt = tipc_node_get_mtu(net, dnode, portid); m->msg_iter = save; @@ -1077,7 +1082,7 @@ next: rc = tipc_wait_for_sndpkt(sock, &timeo); } while (!rc); - __skb_queue_purge(pktchain); + __skb_queue_purge(&pktchain); return sent ? sent : rc; } diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 69ee2eeef968..f9ff73a8d815 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid, if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub)) return tipc_conn_terminate(tn->topsrv, subscrb->conid); - tipc_nametbl_subscribe(sub); + if (sub) + tipc_nametbl_subscribe(sub); } /* Handle one request to establish a new subscriber */ diff --git a/net/wireless/core.c b/net/wireless/core.c index b0915515640e..8f0bac7e03c4 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1147,6 +1147,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, return NOTIFY_DONE; } + wireless_nlevent_flush(); + return NOTIFY_OK; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d4786f2802aa..711cb7ad6ae0 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7547,7 +7547,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) && no_ht) { - kfree(connkeys); + kzfree(connkeys); return -EINVAL; } } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 8020b5b094d4..d49ed7666d4c 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -917,6 +917,12 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); + /* stop critical protocol if supported */ + if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) { + rdev->crit_proto_nlportid = 0; + rdev_crit_proto_stop(rdev, wdev); + } + /* * Delete all the keys ... 
pairwise keys can't really * exist any more anyway, but default keys might. diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index c8717c1d082e..b50ee5d622e1 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -342,6 +342,40 @@ static const int compat_event_type_size[] = { /* IW event code */ +void wireless_nlevent_flush(void) +{ + struct sk_buff *skb; + struct net *net; + + ASSERT_RTNL(); + + for_each_net(net) { + while ((skb = skb_dequeue(&net->wext_nlevents))) + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, + GFP_KERNEL); + } +} +EXPORT_SYMBOL_GPL(wireless_nlevent_flush); + +static int wext_netdev_notifier_call(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + /* + * When a netdev changes state in any way, flush all pending messages + * to avoid them going out in a strange order, e.g. RTM_NEWLINK after + * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() + * or similar - all of which could otherwise happen due to delays from + * schedule_work(). + */ + wireless_nlevent_flush(); + + return NOTIFY_OK; +} + +static struct notifier_block wext_netdev_notifier = { + .notifier_call = wext_netdev_notifier_call, +}; + static int __net_init wext_pernet_init(struct net *net) { skb_queue_head_init(&net->wext_nlevents); @@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = { static int __init wireless_nlevent_init(void) { - return register_pernet_subsys(&wext_pernet_ops); + int err = register_pernet_subsys(&wext_pernet_ops); + + if (err) + return err; + + return register_netdevice_notifier(&wext_netdev_notifier); } subsys_initcall(wireless_nlevent_init); @@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init); /* Process events generated by the wireless layer or the driver. */ static void wireless_nlevent_process(struct work_struct *work) { - struct sk_buff *skb; - struct net *net; - rtnl_lock(); - - for_each_net(net) { - while ((skb = skb_dequeue(&net->wext_nlevents))) - rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, - GFP_KERNEL); - } - + wireless_nlevent_flush(); rtnl_unlock(); } |
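Note on the mlxsw_pci hunk earlier in this series: the (u16) cast added to the queue-full test matters because the producer and consumer counters are free-running 16-bit values. Without the cast, the subtraction is promoted to int, so once the producer wraps past 0xffff the difference goes negative and the "ring full" condition is never detected. The standalone sketch below is plain C using uint16_t in place of the kernel's u16; the ring type and the concrete counter values are made up purely to demonstrate the promotion effect.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Free-running 16-bit ring counters, as in the mlxsw PCI queue code. */
struct ring {
	uint16_t producer_counter;
	uint16_t consumer_counter;
	uint16_t count;			/* number of descriptors in the ring */
};

/* Broken: "producer - consumer" is promoted to int, so after the
 * producer wraps past 0xffff the difference is negative and never
 * compares equal to count.
 */
static bool ring_full_broken(const struct ring *q)
{
	return (q->producer_counter - q->consumer_counter) == q->count;
}

/* Fixed: reduce the difference back to 16 bits first, exactly what the
 * (u16) cast in the patch does.
 */
static bool ring_full(const struct ring *q)
{
	return (uint16_t)(q->producer_counter - q->consumer_counter) == q->count;
}

int main(void)
{
	/* The producer has wrapped: 65536 descriptors handed out in total
	 * (stored as 0), while the consumer has completed 65520, leaving
	 * 16 outstanding -- a 16-entry ring is exactly full.
	 */
	struct ring q = {
		.producer_counter = 0,		/* 65536 mod 2^16 */
		.consumer_counter = 0xfff0,	/* 65520 */
		.count = 16,
	};

	assert(!ring_full_broken(&q));	/* misses the full ring */
	assert(ring_full(&q));		/* detects it correctly */
	return 0;
}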