From e7efc2ce3d0789cd7c21b70ff00cd7838d382639 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 14 Jul 2021 16:23:43 +0100 Subject: liquidio: Fix unintentional sign extension issue on left shift of u16 Shifting the u16 integer oct->pcie_port left by CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29) bits first promotes it to a 32 bit signed int, and that intermediate result is then sign-extended to a u64. In the cases where bit 2 of oct->pcie_port is set (i.e. values 4..7) the shifted value is sign extended and the top 32 bits of the result end up set. Fix this by casting the u16 value to a u64 before the 29-bit left shift. Addresses-Coverity: ("Unintended sign extension") Fixes: 3451b97cce2d ("liquidio: CN23XX register setup") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 4cddd628d41b..9ed3d1ab2ca5 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) * bits 32:47 indicate the PVF num. */ for (q_no = 0; q_no < ern; q_no++) { - reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; + reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; /* for VF assigned queues. */ if (q_no < oct->sriov_info.pf_srn) { -- cgit v1.2.3 From 6206b7981a36476f4695d661ae139f7db36a802d Mon Sep 17 00:00:00 2001 From: Jia He Date: Thu, 15 Jul 2021 16:08:21 +0800 Subject: qed: fix possible unpaired spin_{un}lock_bh in _qed_mcp_cmd_and_union() Lijian reported a BUG_ON hit on a ThunderX2 arm64 server with a FastLinQ QL41000 ethernet controller: BUG: scheduling while atomic: kworker/0:4/531/0x00000200 [qed_probe:488()]hw prepare failed kernel BUG at mm/vmalloc.c:2355! Internal error: Oops - BUG: 0 [#1] SMP CPU: 0 PID: 531 Comm: kworker/0:4 Tainted: G W 5.4.0-77-generic #86-Ubuntu pstate: 00400009 (nzcv daif +PAN -UAO) Call trace: vunmap+0x4c/0x50 iounmap+0x48/0x58 qed_free_pci+0x60/0x80 [qed] qed_probe+0x35c/0x688 [qed] __qede_probe+0x88/0x5c8 [qede] qede_probe+0x60/0xe0 [qede] local_pci_probe+0x48/0xa0 work_for_cpu_fn+0x24/0x38 process_one_work+0x1d0/0x468 worker_thread+0x238/0x4e0 kthread+0xf0/0x118 ret_from_fork+0x10/0x18 In this case, qed_hw_prepare() returns an error due to a hw/fw error, but in theory the work queue should be running in process context rather than interrupt context. The root cause might be the unpaired spin_{un}lock_bh() in _qed_mcp_cmd_and_union(), which incorrectly leaves bottom halves disabled. Reported-by: Lijian Zhang Signed-off-by: Jia He Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 4387292c37e2..79d879a5d663 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -474,14 +474,18 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); - if (!qed_mcp_has_pending_cmd(p_hwfn)) + if (!qed_mcp_has_pending_cmd(p_hwfn)) { + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); break; + } rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); - if (!rc) + if (!rc) { + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); break; - else if (rc != -EAGAIN) + } else if (rc != -EAGAIN) { goto err; + } spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); @@ -498,6 +502,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EAGAIN; } + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + /* Send the mailbox command */ qed_mcp_reread_offsets(p_hwfn, p_ptt); seq_num = ++p_hwfn->mcp_info->drv_mb_seq; @@ -524,14 +530,18 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); - if (p_cmd_elem->b_is_completed) + if (p_cmd_elem->b_is_completed) { + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); break; + } rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); - if (!rc) + if (!rc) { + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); break; - else if (rc != -EAGAIN) + } else if (rc != -EAGAIN) { goto err; + } spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); } while (++cnt < max_retries); @@ -554,6 +564,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EAGAIN; } + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); -- cgit v1.2.3 From c81cfb6256d90ea5ba4a6fb280ea3b171be4e05c Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Sun, 18 Jul 2021 15:36:25 -0400 Subject: bnxt_en: don't disable an already disabled PCI device If device is already disabled in reset path and PCI io error is detected before the device could be enabled, driver could call pci_disable_device() for already disabled device. Fix this problem by calling pci_disable_device() only if the device is already enabled. Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") Signed-off-by: Kalesh AP Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f56245eeef7b..fdfb75a1608d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -13436,7 +13436,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) bnxt_close(netdev); - pci_disable_device(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; -- cgit v1.2.3 From c08c59653415201ac46ab791c936ae804c45a11b Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Sun, 18 Jul 2021 15:36:26 -0400 Subject: bnxt_en: reject ETS settings that will starve a TC ETS proportions are presented to HWRM_QUEUE_COS2BW_CFG as minimum bandwidth constraints. Thus, zero is a legal value for a given TC. 
However, if all the other TCs sum up to 100%, then at least one hardware queue will starve, resulting in guaranteed TX timeouts. Reject such nonsensical configurations. Reviewed-by: Pavan Chebbi Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 8e90224c43a2..8a68df4d9e59 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) { int total_ets_bw = 0; + bool zero = false; u8 max_tc = 0; int i; @@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) break; case IEEE_8021QAZ_TSA_ETS: total_ets_bw += ets->tc_tx_bw[i]; + zero = zero || !ets->tc_tx_bw[i]; break; default: return -ENOTSUPP; } } - if (total_ets_bw > 100) + if (total_ets_bw > 100) { + netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n"); return -EINVAL; + } + if (zero && total_ets_bw == 100) { + netdev_warn(bp->dev, "rejecting ETS config starving a TC\n"); + return -EINVAL; + } if (max_tc >= bp->max_tc) *tc = bp->max_tc; -- cgit v1.2.3 From 2c9f046bc377efd1f5e26e74817d5f96e9506c86 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 18 Jul 2021 15:36:27 -0400 Subject: bnxt_en: Refresh RoCE capabilities in bnxt_ulp_probe() The capabilities can change after firmware upgrade/downgrade, so we should get the up-to-date RoCE capabilities everytime bnxt_ulp_probe() is called. Fixes: 2151fe0830fd ("bnxt_en: Handle RESET_NOTIFY async event from firmware.") Reviewed-by: Somnath Kotur Reviewed-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index a918e374f3c5..187ff643ad2a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) if (!edev) return ERR_PTR(-ENOMEM); edev->en_ops = &bnxt_en_ops_tbl; - if (bp->flags & BNXT_FLAG_ROCEV1_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; - if (bp->flags & BNXT_FLAG_ROCEV2_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; edev->net = dev; edev->pdev = bp->pdev; edev->l2_db_size = bp->db_size; edev->l2_db_size_nc = bp->db_size; bp->edev = edev; } + edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP; + if (bp->flags & BNXT_FLAG_ROCEV1_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; + if (bp->flags & BNXT_FLAG_ROCEV2_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; return bp->edev; } EXPORT_SYMBOL(bnxt_ulp_probe); -- cgit v1.2.3 From 6cd657cb3ee6f4de57e635b126ffbe0e51d00f1a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 18 Jul 2021 15:36:28 -0400 Subject: bnxt_en: Add missing check for BNXT_STATE_ABORT_ERR in bnxt_fw_rset_task() In the BNXT_FW_RESET_STATE_POLL_VF state in bnxt_fw_reset_task() after all VFs have unregistered, we need to check for BNXT_STATE_ABORT_ERR after we acquire the rtnl_lock. 
If the flag is set, we need to abort. Fixes: 230d1f0de754 ("bnxt_en: Handle firmware reset.") Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fdfb75a1608d..39908a3d9460 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11992,6 +11992,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_timestamp = jiffies; rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + rtnl_unlock(); + goto fw_reset_abort; + } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; -- cgit v1.2.3 From 3958b1da725a477b4a222183d16a14d85445d4b6 Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Sun, 18 Jul 2021 15:36:29 -0400 Subject: bnxt_en: fix error path of FW reset When bnxt_open() fails in the firmware reset path, the driver needs to gracefully abort, but it is executing code that should be invoked only in the success path. Define a function to abort FW reset and consolidate all error paths to call this new function. Fixes: dab62e7c2de7 ("bnxt_en: Implement faster recovery for firmware fatal error.") Signed-off-by: Somnath Kotur Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 39908a3d9460..f2f1136fd492 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11959,10 +11959,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp) (bp->fw_reset_max_dsecs * HZ / 10)); } +static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) +{ + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { + bnxt_ulp_start(bp, rc); + bnxt_dl_health_status_update(bp, false); + } + bp->fw_reset_state = 0; + dev_close(bp->dev); +} + static void bnxt_fw_reset_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); - int rc; + int rc = 0; if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); @@ -11993,8 +12004,9 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_timestamp = jiffies; rtnl_lock(); if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); - goto fw_reset_abort; + return; } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { @@ -12043,6 +12055,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (val == 0xffff) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + rc = -ETIMEDOUT; goto fw_reset_abort; } bnxt_queue_fw_reset_work(bp, HZ / 1000); @@ -12052,6 +12065,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); if (pci_enable_device(bp->pdev)) { netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + rc = -ENODEV; goto fw_reset_abort; } pci_set_master(bp->pdev); @@ -12078,9 +12092,10 @@ static void 
bnxt_fw_reset_task(struct work_struct *work) } rc = bnxt_open(bp->dev); if (rc) { - netdev_err(bp->dev, "bnxt_open_nic() failed\n"); - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - dev_close(bp->dev); + netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; } bp->fw_reset_state = 0; @@ -12107,12 +12122,8 @@ fw_reset_abort_status: netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); } fw_reset_abort: - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) - bnxt_dl_health_status_update(bp, false); - bp->fw_reset_state = 0; rtnl_lock(); - dev_close(bp->dev); + bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); } -- cgit v1.2.3 From 96bdd4b9ea7ef9a12db8fdd0ce90e37dffbd3703 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 18 Jul 2021 15:36:30 -0400 Subject: bnxt_en: Validate vlan protocol ID on RX packets Only pass supported VLAN protocol IDs for stripped VLAN tags to the stack. The stack will hit WARN() if the protocol ID is unsupported. Existing firmware sets up the chip to strip 0x8100, 0x88a8, 0x9100. Only the 1st two protocols are supported by the kernel. Fixes: a196e96bb68f ("bnxt_en: clean up VLAN feature bit handling") Reviewed-by: Somnath Kotur Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f2f1136fd492..169f093e01de 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1671,11 +1671,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { - u16 vlan_proto = tpa_info->metadata >> - RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + return NULL; + } } skb_checksum_none_assert(skb); @@ -1897,9 +1902,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(meta_data >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + goto next_rx; + } } skb_checksum_none_assert(skb); -- cgit v1.2.3 From 11a39259ff79b74bc99f8b7c44075a2d6d5e7ab1 Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Sun, 18 Jul 2021 15:36:31 -0400 Subject: bnxt_en: Check abort error state in bnxt_half_open_nic() bnxt_half_open_nic() is called during during ethtool self test and is protected by rtnl_lock. Firmware reset can be happening at the same time. Only critical portions of the entire firmware reset sequence are protected by the rtnl_lock. 
It is possible that bnxt_half_open_nic() can be called when the firmware reset sequence is aborting. In that case, bnxt_half_open_nic() needs to check if the ABORT_ERR flag is set and abort if it is. The ethtool self test will fail but the NIC will be brought to a consistent IF_DOWN state. Without this patch, if bnxt_half_open_nic() were to continue in this error state, it may crash like this: bnxt_en 0000:82:00.1 enp130s0f1np1: FW reset in progress during close, FW reset will be aborted Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 ... Process ethtool (pid: 333327, stack limit = 0x0000000046476577) Call trace: bnxt_alloc_mem+0x444/0xef0 [bnxt_en] bnxt_half_open_nic+0x24/0xb8 [bnxt_en] bnxt_self_test+0x2dc/0x390 [bnxt_en] ethtool_self_test+0xe0/0x1f8 dev_ethtool+0x1744/0x22d0 dev_ioctl+0x190/0x3e0 sock_ioctl+0x238/0x480 do_vfs_ioctl+0xc4/0x758 ksys_ioctl+0x84/0xb8 __arm64_sys_ioctl+0x28/0x38 el0_svc_handler+0xb0/0x180 el0_svc+0x8/0xc Fixes: a1301f08c5ac ("bnxt_en: Check abort error state in bnxt_open_nic().") Signed-off-by: Somnath Kotur Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 169f093e01de..31eb3c00851a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10208,6 +10208,12 @@ int bnxt_half_open_nic(struct bnxt *bp) { int rc = 0; + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); + rc = -ENODEV; + goto half_open_err; + } + rc = bnxt_alloc_mem(bp, false); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); -- cgit v1.2.3 From d7859afb6880249039b178fdfb1bef94fd954cf2 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 18 Jul 2021 15:36:32 -0400 Subject: bnxt_en: Move bnxt_ptp_init() to bnxt_open() The device needs to be in ifup state for PTP to function, so move bnxt_ptp_init() to bnxt_open(). This means that the PHC will be registered during bnxt_open(). This also makes firmware reset work correctly. PTP configurations may change after firmware upgrade or downgrade. bnxt_open() will be called after firmware reset, so it will work properly. bnxt_ptp_start() is now incorporated into bnxt_ptp_init(). We now also need to call bnxt_ptp_clear() in bnxt_close(). Fixes: 93cb62d98e9c ("bnxt_en: Enable hardware PTP support") Cc: Richard Cochran Reviewed-by: Pavan Chebbi Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 +++++++++------- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 24 +++++++----------------- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 1 - 3 files changed, 16 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 31eb3c00851a..b8b73c210995 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10134,7 +10134,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_ptp_start(bp); rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); @@ -10273,9 +10272,16 @@ static int bnxt_open(struct net_device *dev) rc = bnxt_hwrm_if_change(bp, true); if (rc) return rc; + + if (bnxt_ptp_init(bp)) { + netdev_warn(dev, "PTP initialization failed.\n"); + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } rc = __bnxt_open_nic(bp, true, true); if (rc) { bnxt_hwrm_if_change(bp, false); + bnxt_ptp_clear(bp); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -10366,6 +10372,7 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); + bnxt_ptp_clear(bp); bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); @@ -11352,6 +11359,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp) bnxt_clear_int_mode(bp); pci_disable_device(bp->pdev); } + bnxt_ptp_clear(bp); __bnxt_close_nic(bp, true, false); bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); @@ -12694,7 +12702,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port); - bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); @@ -13278,11 +13285,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc); } - if (bnxt_ptp_init(bp)) { - netdev_warn(dev, "PTP initialization failed.\n"); - kfree(bp->ptp_cfg); - bp->ptp_cfg = NULL; - } bnxt_inv_fw_health_reg(bp); bnxt_dl_register(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f698b6bd4ff8..9089e7f3fbd4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -385,22 +385,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) return 0; } -void bnxt_ptp_start(struct bnxt *bp) -{ - struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - - if (!ptp) - return; - - if (bp->flags & BNXT_FLAG_CHIP_P5) { - spin_lock_bh(&ptp->ptp_lock); - ptp->current_time = bnxt_refclk_read(bp, NULL); - WRITE_ONCE(ptp->old_time, ptp->current_time); - spin_unlock_bh(&ptp->ptp_lock); - ptp_schedule_worker(ptp->ptp_clock, 0); - } -} - static const struct ptp_clock_info bnxt_ptp_caps = { .owner = THIS_MODULE, .name = "bnxt clock", @@ -450,7 +434,13 @@ int bnxt_ptp_init(struct bnxt *bp) bnxt_unmap_ptp_regs(bp); return err; } - + if (bp->flags & BNXT_FLAG_CHIP_P5) { + spin_lock_bh(&ptp->ptp_lock); + ptp->current_time = bnxt_refclk_read(bp, NULL); + WRITE_ONCE(ptp->old_time, ptp->current_time); + spin_unlock_bh(&ptp->ptp_lock); + ptp_schedule_worker(ptp->ptp_clock, 0); + } return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 6b6245750e20..4135ea3ec788 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -75,7 +75,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); -void bnxt_ptp_start(struct bnxt *bp); int bnxt_ptp_init(struct bnxt *bp); void bnxt_ptp_clear(struct bnxt *bp); #endif -- cgit v1.2.3 From de5bf19414fec860168f05d00d574562bd9d86d1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 18 Jul 2021 15:36:33 -0400 Subject: bnxt_en: Fix PTP capability discovery The current PTP initialization logic does not account for firmware reset that may cause PTP capability to change. The valid pointer bp->ptp_cfg is used to indicate that the device is capable of PTP and that it has been initialized. So we must clean up bp->ptp_cfg and free it if the firmware after reset does not support PTP. Fixes: 93cb62d98e9c ("bnxt_en: Enable hardware PTP support") Cc: Richard Cochran Reviewed-by: Pavan Chebbi Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b8b73c210995..4db162cee911 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7574,8 +7574,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; - if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) + if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { __bnxt_hwrm_ptp_qcfg(bp); + } else { + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; -- cgit v1.2.3 From b16f3299ae1aa3c327e1fb742d0379ae4d6e86f2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 18 Jul 2021 13:38:34 -0700 Subject: net: hisilicon: rename CACHE_LINE_MASK to avoid redefinition Building on ARCH=arc causes a "redefined" warning, so rename this driver's CACHE_LINE_MASK to avoid the warning. ../drivers/net/ethernet/hisilicon/hip04_eth.c:134: warning: "CACHE_LINE_MASK" redefined 134 | #define CACHE_LINE_MASK 0x3F In file included from ../include/linux/cache.h:6, from ../include/linux/printk.h:9, from ../include/linux/kernel.h:19, from ../include/linux/list.h:9, from ../include/linux/module.h:12, from ../drivers/net/ethernet/hisilicon/hip04_eth.c:7: ../arch/arc/include/asm/cache.h:17: note: this is the location of the previous definition 17 | #define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1)) Fixes: d413779cdd93 ("net: hisilicon: Add an tx_desc to adapt HI13X1_GMAC") Signed-off-by: Randy Dunlap Cc: Vineet Gupta Cc: Jiangfeng Xiao Cc: "David S. Miller" Cc: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hip04_eth.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 12f6c2442a7a..e53512f6878a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -131,7 +131,7 @@ /* buf unit size is cache_line_size, which is 64, so the shift is 6 */ #define PPE_BUF_SIZE_SHIFT 6 #define PPE_TX_BUF_HOLD BIT(31) -#define CACHE_LINE_MASK 0x3F +#define SOC_CACHE_LINE_MASK 0x3F #else #define PPE_CFG_QOS_VMID_GRP_SHIFT 8 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11 @@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) #if defined(CONFIG_HI13X1_GMAC) desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT); - desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK); - desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK); + desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK); + desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK); #else desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV); desc->send_addr = (__force u32)cpu_to_be32(phys); -- cgit v1.2.3 From fa2bf6baf2b1d8350e5193ce4014bdddc51a25d0 Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Date: Mon, 19 Jul 2021 14:29:32 +0530 Subject: octeontx2-af: Enable transmit side LBK link For enabling VF-VF switching the packets egressing out of CGX mapped VFs needed to be sent to LBK so that same packets are received back to the system. But the LBK link also needs to be enabled in addition to a VF's mapped CGX_LMAC link otherwise hardware raises send error interrupt indicating selected LBK link is not enabled in NIX_AF_TL3_TL2X_LINKX_CFG register. Hence this patch enables all LBK links in TL3_TL2_LINKX_CFG registers. Also to enable packet flow between PFs/VFs of NIX0 to PFs/VFs of NIX1(in 98xx silicon) the NPC TX DMAC rules has to be installed such that rules must be hit for any TX interface i.e., NIX0-TX or NIX1-TX provided DMAC match creteria is met. Hence this patch changes the behavior such that MCAM is programmed to match with any NIX0/1-TX interface for TX rules. Signed-off-by: Subbaraya Sundeep Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 6 ++++ drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2 ++ .../net/ethernet/marvell/octeontx2/af/rvu_nix.c | 32 ++++++++++++++++++++++ .../net/ethernet/marvell/octeontx2/af/rvu_npc.c | 15 ++++++++-- .../net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 9 +++++- 5 files changed, 61 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 10cddf1ac7b9..086eb6d283ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2859,6 +2859,12 @@ static int rvu_enable_sriov(struct rvu *rvu) if (!vfs) return 0; + /* LBK channel number 63 is used for switching packets between + * CGX mapped VFs. Hence limit LBK pairs till 62 only. + */ + if (vfs > 62) + vfs = 62; + /* Save VFs number for reference in VF interrupts handlers. * Since interrupts might start arriving during SRIOV enablement * ordinary API cannot be used to get number of enabled VFs. 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 10e58a5d5861..e53f530e5e31 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -415,6 +415,8 @@ struct npc_kpu_profile_adapter { size_t kpus; }; +#define RVU_SWITCH_LBK_CHAN 63 + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index aeae37704428..a2d69eaac4f8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1952,6 +1952,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, + u16 pcifunc, struct nix_txsch *txsch) +{ + struct rvu_hwinfo *hw = rvu->hw; + int lbk_link_start, lbk_links; + u8 pf = rvu_get_pf(pcifunc); + int schq; + + if (!is_pf_cgxmapped(rvu, pf)) + return; + + lbk_link_start = hw->cgx_links; + + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + /* Enable all LBK links with channel 63 by default so that + * packets can be sent to LBK with a NPC TX MCAM rule + */ + lbk_links = hw->lbk_links; + while (lbk_links--) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, + lbk_link_start + + lbk_links), + BIT_ULL(12) | RVU_SWITCH_LBK_CHAN); + } +} + int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) @@ -2040,6 +2069,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_write64(rvu, blkaddr, reg, regval); } + rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, + &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); + return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 3612e0a2cab3..16c557cbe6a0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -468,6 +468,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, { int bank = npc_get_bank(mcam, index); int kw = 0, actbank, actindex; + u8 tx_intf_mask = ~intf & 0x3; + u8 tx_intf = intf; u64 cam0, cam1; actbank = bank; /* Save bank id, to set action later on */ @@ -488,12 +490,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, */ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { /* Interface should be set in all banks */ + if (is_npc_intf_tx(intf)) { + /* Last bit must be set and rest don't care + * for TX interfaces + */ + tx_intf_mask = 0x1; + tx_intf = intf & tx_intf_mask; + tx_intf_mask = ~tx_intf & tx_intf_mask; + } + rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), - intf); + tx_intf); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), - ~intf & 0x3); + tx_intf_mask); /* Set the match key */ npc_get_keyword(entry, kw, &cam0, &cam1); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 68633145a8b8..92d64bdff0ea 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -949,9 +949,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, 
struct npc_install_flow_req *req, u16 target) { struct nix_tx_action action; + u64 mask = ~0ULL; + + /* If AF is installing then do not care about + * PF_FUNC in Send Descriptor + */ + if (is_pffunc_af(req->hdr.pcifunc)) + mask = 0; npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), - 0, ~0ULL, 0, NIX_INTF_TX); + 0, mask, 0, NIX_INTF_TX); *(u64 *)&action = 0x00; action.op = req->op; -- cgit v1.2.3 From cb7a6b3bac1d0d773f2b6cc35f6bab61eb5de5ef Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Date: Mon, 19 Jul 2021 14:29:33 +0530 Subject: octeontx2-af: Prepare for allocating MCAM rules for AF AF till now only manages the allocation and freeing of MCAM rules for other PF/VFs in system. To implement L2 switching between all CGX mapped PF and VFs, AF requires MCAM entries for DMAC rules for each PF and VF. This patch modifies AF driver such that AF can also allocate MCAM rules and install rules for other PFs and VFs. All the checks like channel verification for RX rules and PF_FUNC verification for TX rules are relaxed in case AF is allocating or installing rules. Also all the entry and counter to owner mappings are set to NPC_MCAM_INVALID_MAP when they are free indicating those are not allocated to AF nor PF/VFs. This patch also ensures that AF allocated and installed entries are displayed in debugfs. Signed-off-by: Subbaraya Sundeep Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. Miller --- .../ethernet/marvell/octeontx2/af/rvu_debugfs.c | 5 +--- .../net/ethernet/marvell/octeontx2/af/rvu_npc.c | 32 ++++++++++++++++------ .../net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 11 ++++---- 3 files changed, 30 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 370d4ca1e5ed..9b2dfbf90e51 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -2113,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, int entry_acnt, entry_ecnt; int cntr_acnt, cntr_ecnt; - /* Skip PF0 */ - if (!pcifunc) - return; rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, &entry_acnt, &entry_ecnt); rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, @@ -2298,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { - if (rule->intf == NIX_INTF_TX) { + if (is_npc_intf_tx(rule->intf)) { switch (rule->tx_action.op) { case NIX_TX_ACTIONOP_DROP: seq_puts(s, "\taction: Drop\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 16c557cbe6a0..1097291aaa45 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; /* do nothing when target is LBK/PF or owner is not PF */ - if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) || + if (is_pffunc_af(owner) || is_afvf(target_func) || + (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; @@ -661,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + 
req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ @@ -810,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.entry = index; req.hdr.pcifunc = 0; /* AF is requester */ @@ -1756,6 +1759,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; int rsvd, err; + u16 index; + int cntr; u64 cfg; /* Actual number of MCAM entries vary by entry size */ @@ -1856,6 +1861,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (!mcam->entry2target_pffunc) goto free_mem; + for (index = 0; index < mcam->bmap_entries; index++) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; + } + + for (cntr = 0; cntr < mcam->counters.max; cntr++) + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mutex_init(&mcam->lock); return 0; @@ -2573,7 +2586,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, } /* Alloc request from PFFUNC with no NIXLF attached should be denied */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_ALLOC_DENIED; return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); @@ -2593,7 +2606,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* Free request from PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); @@ -2605,7 +2618,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, if (rc) goto exit; - mcam->entry2pfvf_map[req->entry] = 0; + mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; mcam->entry2target_pffunc[req->entry] = 0x0; npc_mcam_clear_bit(mcam, req->entry); npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); @@ -2690,13 +2703,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - pcifunc)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } @@ -2847,7 +2861,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* If the request is from a PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; /* Since list of allocated counter IDs needs to be sent to requester, @@ -3092,7 +3106,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (rc) { /* Free allocated MCAM entry */ mutex_lock(&mcam->lock); - mcam->entry2pfvf_map[entry] = 0; + mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; npc_mcam_clear_bit(mcam, entry); mutex_unlock(&mcam->lock); return rc; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 
92d64bdff0ea..c1f35a0971ad 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -913,11 +913,9 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, u16 target) { struct nix_rx_action action; - u64 chan_mask; - chan_mask = req->chan_mask ? req->chan_mask : ~0ULL; - npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0, - NIX_INTF_RX); + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, + 0, NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; @@ -1171,7 +1169,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, if (err) return err; - if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) + /* Skip channel validation if AF is installing */ + if (!is_pffunc_af(req->hdr.pcifunc) && + npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, target); @@ -1187,6 +1187,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, eth_broadcast_addr((u8 *)&req->mask.dmac); } + /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) return -EINVAL; -- cgit v1.2.3 From 23109f8dd06d0bd04c9360cf7c501c97b0ab1545 Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Date: Mon, 19 Jul 2021 14:29:34 +0530 Subject: octeontx2-af: Introduce internal packet switching As of now any communication between CGXs PFs and their VFs within the system is possible only by external switches sending packets back to the system. This patch adds internal switching support. Broadcast packet replication is not covered here. RVU admin function (AF) maintains MAC addresses of all interfaces in the system. When switching is enabled, MCAM entries are allocated to install rules such that packets with DMAC matching any of the internal interface MAC addresses is punted back into the system via the loopback channel. On the receive side the default unicast rules are modified to not check for ingress channel. So any packet with matching DMAC irrespective of which interface it is coming from will be forwarded to the respective PF/VF interface. The transmit side rules and default unicast rules are updated if user changes MAC address of an interface. Signed-off-by: Subbaraya Sundeep Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/octeontx2/af/Makefile | 2 +- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 4 +- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 19 ++ .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 3 + .../ethernet/marvell/octeontx2/af/rvu_devlink.c | 48 +++- .../net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4 + .../net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 9 +- .../net/ethernet/marvell/octeontx2/af/rvu_switch.c | 258 +++++++++++++++++++++ 8 files changed, 336 insertions(+), 11 deletions(-) create mode 100644 drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 1a3455620b38..cc8ac36cf687 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 086eb6d283ee..017163fb3cd5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1314,7 +1314,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu, return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } -static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr = BLKADDR_NIX0, vf; @@ -3007,6 +3007,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize debugfs */ rvu_dbg_init(rvu); + mutex_init(&rvu->rswitch.switch_lock); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index e53f530e5e31..91503fb2762c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -417,6 +417,14 @@ struct npc_kpu_profile_adapter { #define RVU_SWITCH_LBK_CHAN 63 +struct rvu_switch { + struct mutex switch_lock; /* Serialize flow installation */ + u32 used_entries; + u16 *entry2pcifunc; + u16 mode; + u16 start_entry; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -447,6 +455,7 @@ struct rvu { /* CGX */ #define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */ u8 cgx_mapped_pfs; u8 cgx_cnt_max; /* CGX port count max */ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ @@ -479,6 +488,9 @@ struct rvu { struct rvu_debugfs rvu_dbg; #endif struct rvu_devlink *rvu_dl; + + /* RVU switch implementation over NPC with DMAC rules */ + struct rvu_switch rswitch; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -693,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_req *aq_req, struct nix_cn10k_aq_enq_rsp *aq_rsp, u16 pcifunc, u8 ctype, u32 qidx); +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -770,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu); static inline void rvu_dbg_init(struct rvu *rvu) {} 
static inline void rvu_dbg_exit(struct rvu *rvu) {} #endif + +/* RVU Switch */ +void rvu_switch_enable(struct rvu *rvu); +void rvu_switch_disable(struct rvu *rvu); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); + #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6cc8fbb7190c..fe99ac4a4dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) unsigned long lmac_bmap; int size, free_pkind; int cgx, lmac, iter; + int numvfs, hwvfs; if (!cgx_cnt_max) return 0; @@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs); + rvu->cgx_mapped_vfs += numvfs; pf++; } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 10a98bcb7c54..2688186066d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_nix_health_reporters_destroy(rvu_dl); } +static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + *mode = rswitch->mode; + + return 0; +} + +static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (rswitch->mode == mode) + return 0; + rswitch->mode = mode; + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + rvu_switch_enable(rvu); + else + rvu_switch_disable(rvu); + break; + default: + return -EINVAL; + } + + return 0; +} + static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { @@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req static const struct devlink_ops rvu_devlink_ops = { .info_get = rvu_devlink_info_get, + .eswitch_mode_get = rvu_devlink_eswitch_mode_get, + .eswitch_mode_set = rvu_devlink_eswitch_mode_set, }; int rvu_register_dl(struct rvu *rvu) @@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu) struct devlink *dl; int err; - rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL); - if (!rvu_dl) - return -ENOMEM; - dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); - kfree(rvu_dl); return -ENOMEM; } @@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu) if (err) { dev_err(rvu->dev, "devlink register failed with error %d\n", err); devlink_free(dl); - kfree(rvu_dl); return err; } + rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; @@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu) rvu_health_reporters_destroy(rvu); devlink_unregister(dl); devlink_free(dl); - kfree(rvu_dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a2d69eaac4f8..0933699a0d2d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -3212,6 +3212,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); + rvu_switch_update_rules(rvu, pcifunc); + return 0; } @@ -3881,6 +3883,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); + rvu_switch_update_rules(rvu, pcifunc); + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index c1f35a0971ad..5c01cf4a9c5b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -910,10 +910,15 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct mcam_entry *entry, - struct npc_install_flow_req *req, u16 target) + struct npc_install_flow_req *req, + u16 target, bool pf_set_vfs_mac) { + struct rvu_switch *rswitch = &rvu->rswitch; struct nix_rx_action action; + if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) + req->chan_mask = 0x0; /* Do not care channel */ + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, 0, NIX_INTF_RX); @@ -1007,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, req->intf); if (is_npc_intf_rx(req->intf)) - npc_update_rx_entry(rvu, pfvf, entry, req, target); + npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); else npc_update_tx_entry(rvu, pfvf, entry, req, target); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c new file mode 100644 index 000000000000..2e5379710aa5 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. + */ + +#include +#include "rvu.h" + +static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 chan_mask) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.features = BIT_ULL(NPC_DMAC); + req.channel = pfvf->rx_chan_base; + req.chan_mask = chan_mask; + req.intf = pfvf->nix_rx_intf; + req.op = NIX_RX_ACTION_DEFAULT; + req.default_rule = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + u8 lbkid; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. 
+ * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1; + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.entry = entry; + req.features = BIT_ULL(NPC_DMAC); + req.intf = pfvf->nix_tx_intf; + req.op = NIX_TX_ACTIONOP_UCAST_CHAN; + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; + req.set_cntr = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_rules(struct rvu *rvu) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u16 start = rswitch->start_entry; + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf; + u16 pcifunc, entry = 0; + int err; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + /* rvu_get_nix_blkaddr sets up the corresponding NIX block + * address and NIX RX and TX interfaces for a pcifunc. + * Generally it is called during attach call of a pcifunc but it + * is called here since we are pre-installing rules before + * nixlfs are attached + */ + rvu_get_nix_blkaddr(rvu, pcifunc); + + /* MCAM RX rule for a PF/VF already exists as default unicast + * rules installed by AF. Hence change the channel in those + * rules to ignore channel so that packets with the required + * DMAC received from LBK(by other PF/VFs in system) or from + * external world (from wire) are accepted. + */ + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry); + if (err) { + dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + for (vf = 0; vf < numvfs; vf++, hwvf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + rvu_get_nix_blkaddr(rvu, pcifunc); + + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, + "RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, + start + entry); + if (err) { + dev_err(rvu->dev, + "TX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + } + } + + return 0; +} + +void rvu_switch_enable(struct rvu *rvu) +{ + struct npc_mcam_alloc_entry_req alloc_req = { 0 }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 }; + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct msg_rsp rsp; + int ret; + + alloc_req.contig = true; + alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (ret) { + dev_err(rvu->dev, + "Unable to allocate MCAM entries\n"); + goto exit; + } + + if (alloc_rsp.count != alloc_req.count) { + dev_err(rvu->dev, + "Unable to allocate %d MCAM entries, got %d\n", + alloc_req.count, alloc_rsp.count); + goto free_entries; + } + + rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16), + GFP_KERNEL); + if (!rswitch->entry2pcifunc) + goto free_entries; + + rswitch->used_entries = alloc_rsp.count; + rswitch->start_entry = alloc_rsp.entry; 
+ + ret = rvu_switch_install_rules(rvu); + if (ret) + goto uninstall_rules; + + return; + +uninstall_rules: + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + kfree(rswitch->entry2pcifunc); +free_entries: + free_req.all = 1; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); +exit: + return; +} + +void rvu_switch_disable(struct rvu *rvu) +{ + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf; + struct msg_rsp rsp; + u16 pcifunc; + int err; + + if (!rswitch->used_entries) + return; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%d failed(%d)\n", + pf, err); + + for (vf = 0; vf < numvfs; vf++, hwvf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + } + } + + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + free_req.all = 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); + rswitch->used_entries = 0; + kfree(rswitch->entry2pcifunc); +} + +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u32 max = rswitch->used_entries; + u16 entry; + + if (!rswitch->used_entries) + return; + + for (entry = 0; entry < max; entry++) { + if (rswitch->entry2pcifunc[entry] == pcifunc) + break; + } + + if (entry >= max) + return; + + rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry); + rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); +} -- cgit v1.2.3 From 1b713d14dc3c077ec45e65dab4ea01a8bc41b8c1 Mon Sep 17 00:00:00 2001 From: Chengwen Feng Date: Mon, 19 Jul 2021 17:13:05 +0800 Subject: net: hns3: fix possible mismatches resp of mailbox Currently, the synchronous mailbox communication between VF and PF uses the following fields to maintain communication: 1. Origin_mbx_msg, which is combined from the message code and subcode and is used to match request and response. 2. Received_resp, which indicates whether a response has been received. A mismatch is possible in the following situation: 1. VF sends message A with code=1 subcode=1. 2. PF is blocked for about 500ms while processing message A. 3. VF detects a timeout for message A because it can't get the response within 500ms. 4. VF sends message B with code=1 subcode=1, which equals message A. 5. PF processes the first message A and sends the response message to VF. 6. VF identifies the response as matching message B because the code/subcode is the same. This leads to a mismatch between request and response. To fix the above bug, the following scheme is used: 1. The message sent from the VF is labelled with a match_id, which is a unique 16-bit non-zero value. 2. The response sent from the PF is labelled with the match_id taken from the request. 3. The VF uses the match_id to match request and response messages. As for the PF driver, it only needs to copy the match_id from the request to the response.
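The reason the new field can be added without growing the mailbox descriptor is that it is carved out of bytes that were previously reserved. A rough stand-alone illustration of that idea, using simplified stand-in structs whose field names and sizes are assumptions for this sketch rather than the driver's real layout:

#include <stdint.h>

/* old layout: three reserved bytes follow msg_len */
struct mbx_cmd_old {
    uint8_t code;
    uint8_t subcode;
    uint8_t msg_len;
    uint8_t rsv[3];
    uint8_t data[10];
};

/* new layout: one reserved byte plus a 16-bit match_id, same total size */
struct mbx_cmd_new {
    uint8_t code;
    uint8_t subcode;
    uint8_t msg_len;
    uint8_t rsv;
    uint16_t match_id;   /* lands on an even offset, so no padding is added */
    uint8_t data[10];
};

/* the wire format must not change: both layouts occupy the same bytes */
_Static_assert(sizeof(struct mbx_cmd_old) == sizeof(struct mbx_cmd_new),
               "descriptor size must not change");

int main(void) { return 0; }

Compiled as C11, the assertion documents the constraint that the real patch satisfies by shrinking a three-byte reserved field to one byte plus a u16 match_id.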
Fixes: dde1a86e93ca ("net: hns3: Add mailbox support to PF driver") Signed-off-by: Chengwen Feng Signed-off-by: Guangbin Huang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 6 ++++-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 0a6cda309b24..56b573e47072 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -143,7 +143,8 @@ struct hclge_mbx_vf_to_pf_cmd { u8 mbx_need_resp; u8 rsv1[1]; u8 msg_len; - u8 rsv2[3]; + u8 rsv2; + u16 match_id; struct hclge_vf_to_pf_msg msg; }; @@ -153,7 +154,8 @@ struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; u8 msg_len; - u8 rsv1[3]; + u8 rsv1; + u16 match_id; struct hclge_pf_to_vf_msg msg; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index e10a2c36b706..c0a478ae9583 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id; resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; -- cgit v1.2.3 From 4671042f1ef0d37137884811afcc4ae67685ce07 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Mon, 19 Jul 2021 17:13:06 +0800 Subject: net: hns3: add match_id to check mailbox response from PF to VF When the VF needs a response from the PF, the VF waits (1us - 1s) to receive the response, or it times out and the VF action fails. If the VF does not receive the response to the 1st action because of a timeout, the 2nd action may receive the response for the 1st action and get incorrect response data. The VF must receive the right response from the PF, or it will cause unexpected errors. This patch adds a match_id to check the mailbox response from PF to VF, to make sure the VF gets the right response: 1. The message sent from the VF is labelled with a match_id, which is a unique 16-bit non-zero value. 2. The response sent from the PF is labelled with the match_id taken from the request. 3. The VF uses the match_id to match request and response messages. This scheme depends on the PF driver supporting match_id; if the PF driver does not support it, the VF falls back to the original scheme.
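The VF-side rule that results from this scheme can be sketched as a tiny stand-alone routine; the names below are hypothetical, and the real driver keeps this state in hdev->mbx_resp and applies it from its mailbox handler:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* next id for an outgoing request; 0 is reserved to mean "no match_id",
 * so skip it when the counter wraps around
 */
static uint16_t next_match_id(uint16_t last)
{
    uint16_t next = (uint16_t)(last + 1);

    return next ? next : 1;
}

/* accept a response only if the PF echoed our match_id back, or if the
 * PF is an old one that never fills match_id in (it stays zero)
 */
static bool response_accepted(uint16_t sent_id, uint16_t resp_id)
{
    if (resp_id == 0)
        return true;    /* legacy PF: fall back to code/subcode matching */

    return resp_id == sent_id;
}

int main(void)
{
    uint16_t id = next_match_id(0xFFFF);    /* wraps to 1, never 0 */

    printf("stale response accepted: %d, matching response accepted: %d\n",
           response_accepted(id, 2), response_accepted(id, id));
    return 0;
}

A stale response carrying an older non-zero match_id is simply ignored, and the timed-out slot is reclaimed when the next request resets the response state.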
Signed-off-by: Peng Li Signed-off-by: Guangbin Huang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1 + .../net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 56b573e47072..aa86a81c8f4a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status { u32 origin_mbx_msg; bool received_resp; int resp_status; + u16 match_id; u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 9b17735b9f4c..772b2f8acd2e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) return resp_code ? -resp_code : 0; } +#define HCLGEVF_MBX_MATCH_ID_START 1 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held @@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; hdev->mbx_resp.resp_status = 0; + hdev->mbx_resp.match_id++; + /* Update match_id and ensure the value of match_id is not zero */ + if (hdev->mbx_resp.match_id == 0) + hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START; memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE); } @@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); + req->match_id = hdev->mbx_resp.match_id; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) resp->additional_info[i] = *temp; temp++; } + + /* If match_id is not zero, it means PF support + * match_id. If the match_id is right, VF get the + * right response, otherwise ignore the response. + * Driver will clear hdev->mbx_resp when send + * next message which need response. + */ + if (req->match_id) { + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: -- cgit v1.2.3 From 184cd221a86321e53df9389c4b35a247b60c1e77 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Mon, 19 Jul 2021 17:13:07 +0800 Subject: net: hns3: disable port VLAN filter when support function level VLAN filter control Due to a hardware limitation, the port VLAN filter is port level and takes effect for all functions of the port. So if port VLAN bypass is not supported, it is necessary to disable the port VLAN filter in order to support function-level VLAN filter control.
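The policy that falls out of this constraint is easier to see as a small decision helper; this is a sketch with hypothetical names, not the driver's actual interface:

#include <stdbool.h>
#include <stdio.h>

/* what the shared port-level VLAN filter should be set to when one
 * function asks for VLAN filtering
 */
static bool port_vlan_filter_state(bool has_port_vlan_bypass,
                                   bool has_func_level_ctrl,
                                   bool func_wants_filter)
{
    if (has_port_vlan_bypass)
        return func_wants_filter;   /* each function is handled on its own */

    if (has_func_level_ctrl)
        return false;               /* shared filter must stay off */

    return func_wants_filter;       /* legacy: port-wide on/off only */
}

int main(void)
{
    printf("no bypass, function-level control -> port filter %s\n",
           port_vlan_filter_state(false, true, true) ? "on" : "off");
    return 0;
}

Without port VLAN bypass, turning the port filter on for one function would also filter traffic for every other function of the same port, which is the situation the patch avoids.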
Fixes: 2ba306627f59 ("net: hns3: add support for modify VLAN filter state") Signed-off-by: Jian Shen Signed-off-by: Guangbin Huang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index dd3354a57c62..ebeaf12e409b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) if (ret) return ret; - if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) + if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, !enable); - else if (!vport->vport_id) + } else if (!vport->vport_id) { + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + enable = false; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, HCLGE_FILTER_FE_INGRESS, enable, 0); + } return ret; } -- cgit v1.2.3 From bbfd4506f962e7e6fff8f37f017154a3c3791264 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Mon, 19 Jul 2021 17:13:08 +0800 Subject: net: hns3: fix rx VLAN offload state inconsistent issue Currently, the VF doesn't enable rx VLAN offload when initializing, and the PF does it for VFs. If the user disables rx VLAN offload for a VF with ethtool -K and then reloads the VF driver, it may leave the rx VLAN offload state inconsistent between hardware and software. Fix it by enabling rx VLAN offload when the VF initializes. Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support") Signed-off-by: Jian Shen Signed-off-by: Guangbin Huang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 52eaf82b7cd7..8784d61e833f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) { + struct hnae3_handle *nic = &hdev->nic; + int ret; + + ret = hclgevf_en_hw_strip_rxvtag(nic, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to enable rx vlan offload, ret = %d\n", ret); + return ret; + } + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, false); } -- cgit v1.2.3 From 75d5641497a60bb5d36ff77fd3f526906cbc148c Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Tue, 20 Jul 2021 08:08:38 +0300 Subject: fsl/fman: Add fibre support Set SUPPORTED_FIBRE to mac_dev->if_support. It allows proper usage of PHYs with optical/fiber support. Signed-off-by: Maxim Kochetkov Acked-by: Madalin Bucur Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/fman/mac.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 46ecb42f2ef8..d9fc5c456bf3 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev) | SUPPORTED_Autoneg \ | SUPPORTED_Pause \ | SUPPORTED_Asym_Pause \ + | SUPPORTED_FIBRE \ | SUPPORTED_MII) static DEFINE_MUTEX(eth_lock); -- cgit v1.2.3 From 91bed5565bba03b2a9f7334b58ae4be9df7c3840 Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 20 Jul 2021 21:26:55 +0800 Subject: Revert "qed: fix possible unpaired spin_{un}lock_bh in _qed_mcp_cmd_and_union()" This reverts commit 6206b7981a36476f4695d661ae139f7db36a802d. That patch added additional spin_{un}lock_bh(), which was harmless but pointless. The original code path already guarantees that the spin_{un}lock_bh() calls are paired. We'd better revert it until we find the exact root cause of the bug_on mentioned in that patch. Fixes: 6206b7981a36 ("qed: fix possible unpaired spin_{un}lock_bh in _qed_mcp_cmd_and_union()") Cc: David S. Miller Cc: Prabhakar Kushwaha Signed-off-by: Jia He Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 79d879a5d663..4387292c37e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -474,18 +474,14 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); - if (!qed_mcp_has_pending_cmd(p_hwfn)) { - spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + if (!qed_mcp_has_pending_cmd(p_hwfn)) break; - } rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); - if (!rc) { - spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + if (!rc) break; - } else if (rc != -EAGAIN) { + else if (rc != -EAGAIN) goto err; - } spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); @@ -502,8 +498,6 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EAGAIN; } - spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); - /* Send the mailbox command */ qed_mcp_reread_offsets(p_hwfn, p_ptt); seq_num = ++p_hwfn->mcp_info->drv_mb_seq; @@ -530,18 +524,14 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); - if (p_cmd_elem->b_is_completed) { - spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + if (p_cmd_elem->b_is_completed) break; - } rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); - if (!rc) { - spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + if (!rc) break; - } else if (rc != -EAGAIN) { + else if (rc != -EAGAIN) goto err; - } spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); } while (++cnt < max_retries); @@ -564,7 +554,6 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EAGAIN; } - spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); -- cgit v1.2.3 From 09cfae9f13d51700b0fecf591dcd658fc5375428 Mon Sep 17 00:00:00 2001 From: Markus Boehme Date: Tue, 20 Jul 2021 16:26:19 -0700 Subject: ixgbe: Fix packet corruption due to missing DMA sync When receiving a packet with multiple fragments, hardware may still touch the first fragment until the entire packet has been received.
The driver therefore keeps the first fragment mapped for DMA until end of packet has been asserted, and delays its dma_sync call until then. The driver tries to fit multiple receive buffers on one page. When using 3K receive buffers (e.g. using Jumbo frames and legacy-rx is turned off/build_skb is being used) on an architecture with 4K pages, the driver allocates an order 1 compound page and uses one page per receive buffer. To determine the correct offset for a delayed DMA sync of the first fragment of a multi-fragment packet, the driver then cannot just use PAGE_MASK on the DMA address but has to construct a mask based on the actual size of the backing page. Using PAGE_MASK in the 3K RX buffer/4K page architecture configuration will always sync the first page of a compound page. With the SWIOTLB enabled this can lead to corrupted packets (zeroed out first fragment, re-used garbage from another packet) and various consequences, such as slow/stalling data transfers and connection resets. For example, testing on a link with MTU exceeding 3058 bytes on a host with SWIOTLB enabled (e.g. "iommu=soft swiotlb=262144,force") TCP transfers quickly fizzle out without this patch. Cc: stable@vger.kernel.org Fixes: 0c5661ecc5dd7 ("ixgbe: fix crash in build_skb Rx code path") Signed-off-by: Markus Boehme Tested-by: Tony Brelinski Signed-off-by: Tony Nguyen Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 913253f8ecb4..14aea40da50f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { if (ring_uses_build_skb(rx_ring)) { - unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; + unsigned long offset = (unsigned long)(skb->data) & mask; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, -- cgit v1.2.3 From e9a72f874d5b95cef0765bafc56005a50f72c5fe Mon Sep 17 00:00:00 2001 From: Sayanta Pattanayak Date: Tue, 20 Jul 2021 17:17:40 +0100 Subject: r8169: Avoid duplicate sysfs entry creation error When registering the MDIO bus for a r8169 device, we use the PCI bus/device specifier as a (seemingly) unique device identifier. However the very same BDF number can be used on another PCI segment, which makes the driver fail probing: [ 27.544136] r8169 0002:07:00.0: enabling device (0000 -> 0003) [ 27.559734] sysfs: cannot create duplicate filename '/class/mdio_bus/r8169-700' .... [ 27.684858] libphy: mii_bus r8169-700 failed to register [ 27.695602] r8169: probe of 0002:07:00.0 failed with error -22 Add the segment number to the device name to make it more unique. This fixes operation on ARM N1SDP boards, with two boards connected together to form an SMP system, and all on-board devices showing up twice, just on different PCI segments. A similar issue would occur on large systems with many PCI slots and multiple RTL8169 NICs. Fixes: f1e911d5d0dfd ("r8169: add basic phylib support") Signed-off-by: Sayanta Pattanayak [Andre: expand commit message, use pci_domain_nr()] Signed-off-by: Andre Przywara Acked-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index f744557c33a3..c7af5bc3b8af 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5084,7 +5084,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_MAC_INTERRUPT; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev)); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; -- cgit v1.2.3 From bb55362bd6976631b662ca712779b6532d8de0a6 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Tue, 20 Jul 2021 19:34:39 -0700 Subject: ibmvnic: Remove the proper scrq flush Commit 65d6470d139a ("ibmvnic: clean pending indirect buffs during reset") intended to remove the call to ibmvnic_tx_scrq_flush() when the ->resetting flag is true and was tested that way. But during the final rebase to net-next, the hunk got applied to a block few lines below (which happened to have the same diff context) and the wrong call to ibmvnic_tx_scrq_flush() got removed. Fix that by removing the correct ibmvnic_tx_scrq_flush() and restoring the one that was incorrectly removed. Fixes: 65d6470d139a ("ibmvnic: clean pending indirect buffs during reset") Reported-by: Dany Madden Signed-off-by: Sukadev Bhattiprolu Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ed77191d19f4..a775c69e4fd7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } @@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); ret = NETDEV_TX_OK; goto out; } -- cgit v1.2.3 From 161dcc02428858fe338b7493158ed6f5fc2a8f26 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 21 Jul 2021 17:19:32 +0200 Subject: net: ixp46x: fix ptp build failure The rework of the ixp46x cpu detection left the network driver in a half broken state: drivers/net/ethernet/xscale/ptp_ixp46x.c: In function 'ptp_ixp_init': drivers/net/ethernet/xscale/ptp_ixp46x.c:290:51: error: 'IXP4XX_TIMESYNC_BASE_VIRT' undeclared (first use in this function) 290 | (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; | ^~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/ethernet/xscale/ptp_ixp46x.c:290:51: note: each undeclared identifier is reported only once for each function it appears in drivers/net/ethernet/xscale/ptp_ixp46x.c: At top level: drivers/net/ethernet/xscale/ptp_ixp46x.c:323:1: error: data definition has no type or storage class [-Werror] 323 | module_init(ptp_ixp_init); I have patches to complete the transition for a future release, but for the moment, add the missing include statements to get it to build again. 
Fixes: 09aa9aabdcc4 ("soc: ixp4xx: move cpu detection to linux/soc/ixp4xx/cpu.h") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/xscale/ptp_ixp46x.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index 99d4d9439d05..a6fb88fd42f7 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include "ixp46x_ts.h" -- cgit v1.2.3 From 291d0a2c1fa6ff437c8f1156646fdd2525714c80 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Wed, 21 Jul 2021 19:17:21 +0100 Subject: ravb: Fix a typo in comment Fix the typo RX->TX in the comment, as the code following the comment processes TX and not RX. Signed-off-by: Biju Das Reviewed-by: Lad Prabhakar Reviewed-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 69c50f81e1cb..805397088850 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) if (ravb_rx(ndev, &quota, q)) goto out; - /* Processing RX Descriptor Ring */ + /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); -- cgit v1.2.3 From 9f061b9acbb0bdf5317b301c1608ca55be522c92 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Wed, 21 Jul 2021 19:21:26 +0100 Subject: ravb: Remove extra TAB Align the member description comments for struct ravb_desc by removing the extra TAB. Signed-off-by: Biju Das Reviewed-by: Lad Prabhakar Reviewed-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 86a1eb0634e8..80e62ca2e3d3 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -864,7 +864,7 @@ enum GECMR_BIT { /* The Ethernet AVB descriptor definitions. */ struct ravb_desc { - __le16 ds; /* Descriptor size */ + __le16 ds; /* Descriptor size */ u8 cc; /* Content control MSBs (reserved) */ u8 die_dt; /* Descriptor interrupt enable and type */ __le32 dptr; /* Descriptor pointer */ -- cgit v1.2.3 From 98c5b13f3a878066741a907a9d0f1f388556ed5c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 21 Jul 2021 15:33:36 -0700 Subject: net: sparx5: fix unmet dependencies warning WARNING: unmet direct dependencies detected for PHY_SPARX5_SERDES Depends on [n]: (ARCH_SPARX5 || COMPILE_TEST [=n]) && OF [=y] && HAS_IOMEM [=y] Selected by [y]: - SPARX5_SWITCH [=y] && NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_MICROCHIP [=y] && NET_SWITCHDEV [=y] && HAS_IOMEM [=y] && OF [=y] Signed-off-by: Randy Dunlap Cc: Lars Povlsen Cc: Steen Hegelund Cc: UNGLinuxDriver@microchip.com Cc: "David S. Miller" Cc: Jakub Kicinski Signed-off-by: David S.
Miller --- drivers/net/ethernet/microchip/sparx5/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index ac403d43c74c..7bdbb2d09a14 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -3,6 +3,7 @@ config SPARX5_SWITCH depends on NET_SWITCHDEV depends on HAS_IOMEM depends on OF + depends on ARCH_SPARX5 || COMPILE_TEST select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER -- cgit v1.2.3 From 7aaa0f311e2df2704fa8ddb8ed681a3b5841d0bf Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Thu, 22 Jul 2021 15:15:51 +0300 Subject: dpaa2-switch: seed the buffer pool after allocating the swp Any interaction with the buffer pool (seeding a buffer, acquiring one) is made through a software portal (SWP, a DPIO object). There are circumstances where the dpaa2-switch driver probes on a DPSW before any DPIO devices have been probed. In this case, seeding of the buffer pool will lead to a panic since no SWPs are initialized. To fix this, seed the buffer pool after making sure that the software portals have been probed and are ready to be used. Fixes: 0b1b71370458 ("staging: dpaa2-switch: handle Rx path on control interface") Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index f3d12d0714fb..68b78642c045 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) if (err) return err; - err = dpaa2_switch_seed_bp(ethsw); - if (err) - goto err_free_dpbp; - err = dpaa2_switch_alloc_rings(ethsw); if (err) - goto err_drain_dpbp; + goto err_free_dpbp; err = dpaa2_switch_setup_dpio(ethsw); if (err) goto err_destroy_rings; + err = dpaa2_switch_seed_bp(ethsw); + if (err) + goto err_deregister_dpio; + err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); if (err) { dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); - goto err_deregister_dpio; + goto err_drain_dpbp; } return 0; +err_drain_dpbp: + dpaa2_switch_drain_bp(ethsw); err_deregister_dpio: dpaa2_switch_free_dpio(ethsw); err_destroy_rings: dpaa2_switch_destroy_rings(ethsw); -err_drain_dpbp: - dpaa2_switch_drain_bp(ethsw); err_free_dpbp: dpaa2_switch_free_dpbp(ethsw); -- cgit v1.2.3
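The reordering follows the usual kernel pattern that error labels unwind in the reverse order of setup, with the portal-dependent step moved after the portal exists. A generic stand-alone sketch of that pattern, using hypothetical step names rather than the dpaa2 API:

#include <stdio.h>

static int setup_pool(void)   { return 0; }
static int setup_rings(void)  { return 0; }
static int setup_portal(void) { return 0; }
static int seed_pool(void)    { return 0; }  /* needs the portal, so it runs last */

static void free_portal(void) { }
static void free_rings(void)  { }
static void free_pool(void)   { }

static int ctrl_if_setup(void)
{
    int err;

    err = setup_pool();
    if (err)
        return err;

    err = setup_rings();
    if (err)
        goto err_free_pool;

    err = setup_portal();
    if (err)
        goto err_free_rings;

    /* only now is there a software portal to seed buffers through */
    err = seed_pool();
    if (err)
        goto err_free_portal;

    return 0;

err_free_portal:
    free_portal();
err_free_rings:
    free_rings();
err_free_pool:
    free_pool();
    return err;
}

int main(void)
{
    printf("ctrl_if_setup: %d\n", ctrl_if_setup());
    return 0;
}

Keeping the labels in strict reverse order of the setup steps is what lets a step like buffer-pool seeding move later without leaking anything on the error paths.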