author     Mark Brown   2017-04-25 16:35:35 +0100
committer  Mark Brown   2017-04-25 16:35:35 +0100
commit     9095bf25ea08135a5b74875dd0e3eeaddc4218a0 (patch)
tree       a4e6976eab0e7a5b6258281077cd5853c7e69514 /drivers/net/ethernet/broadcom/bnxt/bnxt.c
parent     cdf4275e957c6bad3756e98942341667f1d7de7d (diff)
parent     c1ae3cfa0e89fa1a7ecc4c99031f5e9ae99d9201 (diff)
Merge tag 'v4.11-rc1' into regulator-arizona
Linux 4.11-rc1
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  1453
1 file changed, 1040 insertions, 413 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f08a20b921e7..235733e91c79 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,15 +34,13 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
+#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
@@ -52,8 +51,11 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
+#include "bnxt_dcb.h"
+#include "bnxt_xdp.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -97,6 +99,8 @@ enum board_idx {
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
@@ -131,6 +135,8 @@ static const struct {
{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
@@ -166,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
@@ -186,11 +194,11 @@ static const u16 bnxt_vf_req_snif[] = {
};
static const u16 bnxt_async_events_arr[] = {
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
- HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
static bool bnxt_vf_pciid(enum board_idx idx)
@@ -211,16 +219,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
-{
- /* Tell compiler to fetch tx indices from memory. */
- barrier();
-
- return bp->tx_ring_size -
- ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
-}
-
-static const u16 bnxt_lhint_arr[] = {
+const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
TX_BD_FLAGS_LHINT_1024_TO_2047,
@@ -263,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- txr = &bp->tx_ring[i];
txq = netdev_get_tx_queue(dev, i);
+ txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
free_size = bnxt_tx_avail(bp, txr);
@@ -510,8 +509,7 @@ tx_dma_error:
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int index = txr - &bp->tx_ring[0];
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
int i;
@@ -574,6 +572,25 @@ next_tx_int:
}
}
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ struct device *dev = &bp->pdev->dev;
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ if (dma_mapping_error(dev, *mapping)) {
+ __free_page(page);
+ return NULL;
+ }
+ *mapping += bp->rx_dma_offset;
+ return page;
+}
+
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
@@ -584,8 +601,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
if (!data)
return NULL;
- *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
- bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+ *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+ bp->rx_buf_use_size, bp->rx_dir);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
@@ -594,29 +611,37 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
return data;
}
-static inline int bnxt_alloc_rx_data(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
- u8 *data;
dma_addr_t mapping;
- data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
- if (!data)
- return -ENOMEM;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
- rx_buf->data = data;
- dma_unmap_addr_set(rx_buf, mapping, mapping);
+ if (!page)
+ return -ENOMEM;
- rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rx_buf->data = page;
+ rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+ } else {
+ u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+
+ if (!data)
+ return -ENOMEM;
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bp->rx_offset;
+ }
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
return 0;
}
-static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
- u8 *data)
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -626,9 +651,9 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
+ prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
- dma_unmap_addr_set(prod_rx_buf, mapping,
- dma_unmap_addr(cons_rx_buf, mapping));
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
@@ -754,13 +779,60 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int payload = offset_and_len >> 16;
+ unsigned int len = offset_and_len & 0xffff;
+ struct skb_frag_struct *frag;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int off, err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+ if (unlikely(!payload))
+ payload = eth_get_headlen(data_ptr, len);
+
+ skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+
+ off = (void *)data_ptr - page_address(page);
+ skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+ memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+ payload + NET_IP_ALIGN);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_size_sub(frag, payload);
+ frag->page_offset += payload;
+ skb->data_len -= payload;
+ skb->tail += payload;
+
+ return skb;
+}
+
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr, u16 cons,
- u16 prod, u8 *data, dma_addr_t dma_addr,
- unsigned int len)
+ void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
{
- int err;
+ u16 prod = rxr->rx_prod;
struct sk_buff *skb;
+ int err;
err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
@@ -770,14 +842,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ bp->rx_dir);
if (!skb) {
kfree(data);
return NULL;
}
- skb_reserve(skb, BNXT_RX_OFFSET);
- skb_put(skb, len);
+ skb_reserve(skb, bp->rx_offset);
+ skb_put(skb, offset_and_len & 0xffff);
return skb;
}
@@ -813,7 +885,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ mapping = cons_rx_buf->mapping;
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
@@ -876,14 +948,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping,
- bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ bp->rx_dir);
- memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+ memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+ len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping,
- bp->rx_copy_thresh,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ bp->rx_dir);
skb_put(skb, len);
return skb;
@@ -952,17 +1024,19 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
}
prod_rx_buf->data = tpa_info->data;
+ prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
- dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ prod_rx_buf->mapping = mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
tpa_info->data = cons_rx_buf->data;
+ tpa_info->data_ptr = cons_rx_buf->data_ptr;
cons_rx_buf->data = NULL;
- tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ tpa_info->mapping = cons_rx_buf->mapping;
tpa_info->len =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
@@ -1097,7 +1171,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int len, nw_off, tcp_opt_len;
+ int len, nw_off, tcp_opt_len = 0;
if (tcp_ts)
tcp_opt_len = 12;
@@ -1128,7 +1202,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
dev_kfree_skb_any(skb);
return NULL;
}
- tcp_gro_complete(skb);
if (nw_off) { /* tunnel */
struct udphdr *uh = NULL;
@@ -1178,6 +1251,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+ if (likely(skb))
+ tcp_gro_complete(skb);
#endif
return skb;
}
@@ -1187,17 +1262,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
- bool *agg_event)
+ u8 *event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
- u8 *data, agg_bufs;
+ u8 *data_ptr, agg_bufs;
u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ void *data;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
@@ -1209,7 +1285,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info = &rxr->rx_tpa[agg_id];
data = tpa_info->data;
- prefetch(data);
+ data_ptr = tpa_info->data_ptr;
+ prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
@@ -1220,7 +1297,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
return ERR_PTR(-EBUSY);
- *agg_event = true;
+ *event |= BNXT_AGG_EVENT;
cp_cons = NEXT_CMP(cp_cons);
}
@@ -1232,7 +1309,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
@@ -1248,18 +1325,19 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
tpa_info->data = new_data;
+ tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ bp->rx_dir);
if (!skb) {
kfree(data);
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
}
- skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
@@ -1305,7 +1383,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
* -EIO - packet aborted due to hw error indicated in BD
*/
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
- bool *agg_event)
+ u8 *event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -1316,10 +1394,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
- u8 *data, agg_bufs, cmp_type;
+ u8 *data_ptr, agg_bufs, cmp_type;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ void *data;
int rc = 0;
+ u32 misc;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1340,13 +1420,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
+ *event |= BNXT_RX_EVENT;
goto next_rx_no_prod;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
- (struct rx_tpa_end_cmp_ext *)rxcmp1,
- agg_event);
+ (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
if (unlikely(IS_ERR(skb)))
return -EBUSY;
@@ -1354,37 +1434,36 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = -ENOMEM;
if (likely(skb)) {
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_napi_id(skb, &bnapi->napi);
- if (bnxt_busy_polling(bnapi))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&bnapi->napi, skb);
+ napi_gro_receive(&bnapi->napi, skb);
rc = 1;
}
+ *event |= BNXT_RX_EVENT;
goto next_rx_no_prod;
}
cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
}
- prefetch(data);
+ prefetch(data_ptr);
- agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
- RX_CMP_AGG_BUFS_SHIFT;
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
return -EBUSY;
cp_cons = NEXT_CMP(cp_cons);
- *agg_event = true;
+ *event |= BNXT_AGG_EVENT;
}
+ *event |= BNXT_RX_EVENT;
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
@@ -1397,17 +1476,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
- dma_addr = dma_unmap_addr(rx_buf, mapping);
+ dma_addr = rx_buf->mapping;
+
+ if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
}
} else {
- skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ u32 payload;
+
+ if (rx_buf->data_ptr == data_ptr)
+ payload = misc & RX_CMP_PAYLOAD_OFFSET;
+ else
+ payload = 0;
+ skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+ payload | len);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1458,11 +1549,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
skb_record_rx_queue(skb, bnapi->index);
- skb_mark_napi_id(skb, &bnapi->napi);
- if (bnxt_busy_polling(bnapi))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&bnapi->napi, skb);
+ napi_gro_receive(&bnapi->napi, skb);
rc = 1;
next_rx:
@@ -1476,8 +1563,8 @@ next_rx_no_prod:
}
#define BNXT_GET_EVENT_PORT(data) \
- ((data) & \
- HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+ ((data) & \
+ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
@@ -1486,7 +1573,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
struct bnxt_link_info *link_info = &bp->link_info;
@@ -1499,15 +1586,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
netdev_warn(bp->dev, "Link speed %d no longer supported\n",
speed);
}
+ set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
}
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+ case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
u32 data1 = le32_to_cpu(cmpl->event_data1);
u16 port_id = BNXT_GET_EVENT_PORT(data1);
@@ -1520,18 +1608,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
break;
}
- case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
if (BNXT_PF(bp))
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
default:
- netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
- event_id);
goto async_event_process_exit;
}
schedule_work(&bp->sp_task);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -1635,8 +1722,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
u32 cons;
int tx_pkts = 0;
int rx_pkts = 0;
- bool rx_event = false;
- bool agg_event = false;
+ u8 event = 0;
struct tx_cmp *txcmp;
while (1) {
@@ -1658,12 +1744,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (unlikely(tx_pkts > bp->tx_wake_thresh))
rx_pkts = budget;
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
if (likely(rc >= 0))
rx_pkts += rc;
else if (rc == -EBUSY) /* partial completion */
break;
- rx_event = true;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE) ||
(TX_CMP_TYPE(txcmp) ==
@@ -1678,6 +1763,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
break;
}
+ if (event & BNXT_TX_EVENT) {
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ void __iomem *db = txr->tx_doorbell;
+ u16 prod = txr->tx_prod;
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ writel(DB_KEY_TX | prod, db);
+ writel(DB_KEY_TX | prod, db);
+ }
+
cpr->cp_raw_cons = raw_cons;
/* ACK completion ring before freeing tx ring and producing new
* buffers in rx/agg rings to prevent overflowing the completion
@@ -1686,14 +1783,14 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
if (tx_pkts)
- bnxt_tx_int(bp, bnapi, tx_pkts);
+ bnapi->tx_int(bp, bnapi, tx_pkts);
- if (rx_event) {
+ if (event & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (agg_event) {
+ if (event & BNXT_AGG_EVENT) {
writel(DB_KEY_RX | rxr->rx_agg_prod,
rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod,
@@ -1714,7 +1811,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
u32 rx_pkts = 0;
- bool agg_event = false;
+ u8 event = 0;
while (1) {
int rc;
@@ -1738,7 +1835,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
if (likely(rc == -EIO))
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
@@ -1761,13 +1858,13 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
- if (agg_event) {
+ if (event & BNXT_AGG_EVENT) {
writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
}
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_pkts);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
}
return rx_pkts;
@@ -1780,9 +1877,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
- if (!bnxt_lock_napi(bnapi))
- return budget;
-
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
@@ -1790,42 +1884,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
if (!bnxt_has_work(bp, cpr)) {
- napi_complete(napi);
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ if (napi_complete_done(napi, work_done))
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
}
}
mmiowb();
- bnxt_unlock_napi(bnapi);
return work_done;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int bnxt_busy_poll(struct napi_struct *napi)
-{
- struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int rx_work, budget = 4;
-
- if (atomic_read(&bp->intr_sem) != 0)
- return LL_FLUSH_FAILED;
-
- if (!bp->link_info.link_up)
- return LL_FLUSH_FAILED;
-
- if (!bnxt_lock_poll(bnapi))
- return LL_FLUSH_BUSY;
-
- rx_work = bnxt_poll_work(bp, bnapi, budget);
-
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-
- bnxt_unlock_poll(bnapi);
- return rx_work;
-}
-#endif
-
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
@@ -1903,11 +1971,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!data)
continue;
- dma_unmap_single(
- &pdev->dev,
- dma_unmap_addr(tpa_info, mapping),
- bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir);
tpa_info->data = NULL;
@@ -1917,19 +1983,20 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
for (j = 0; j < max_idx; j++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
- u8 *data = rx_buf->data;
+ void *data = rx_buf->data;
if (!data)
continue;
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_use_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, rx_buf->mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
rx_buf->data = NULL;
- kfree(data);
+ if (BNXT_RX_PAGE_MODE(bp))
+ __free_page(data);
+ else
+ kfree(data);
}
for (j = 0; j < max_agg_idx; j++) {
@@ -1940,8 +2007,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!page)
continue;
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(rx_agg_buf, mapping),
+ dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
@@ -2032,6 +2098,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ if (rxr->xdp_prog)
+ bpf_prog_put(rxr->xdp_prog);
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2170,6 +2239,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
ring->queue_id = bp->q_info[j].queue_id;
+ if (i < bp->tx_nr_rings_xdp)
+ continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
j++;
}
@@ -2317,6 +2388,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
+ if (IS_ERR(rxr->xdp_prog)) {
+ int rc = PTR_ERR(rxr->xdp_prog);
+
+ rxr->xdp_prog = NULL;
+ return rc;
+ }
+ }
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
@@ -2363,6 +2443,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
return -ENOMEM;
rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
rxr->rx_tpa[i].mapping = mapping;
}
} else {
@@ -2378,6 +2459,14 @@ static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+ bp->rx_dma_offset = XDP_PACKET_HEADROOM;
+ } else {
+ bp->rx_offset = BNXT_RX_OFFSET;
+ bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+ }
+
for (i = 0; i < bp->rx_nr_rings; i++) {
rc = bnxt_init_one_rx_ring(bp, i);
if (rc)
@@ -2501,9 +2590,11 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
return pages;
}
-static void bnxt_set_tpa_flags(struct bnxt *bp)
+void bnxt_set_tpa_flags(struct bnxt *bp)
{
bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
if (bp->dev->features & NETIF_F_GRO)
@@ -2533,7 +2624,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
- if (rx_space > PAGE_SIZE) {
+ if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
u32 jumbo_factor;
bp->flags |= BNXT_FLAG_JUMBO;
@@ -2585,6 +2676,27 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ if (page_mode) {
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+ return -EOPNOTSUPP;
+ bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bp->rx_dir = DMA_BIDIRECTIONAL;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ } else {
+ bp->dev->max_mtu = BNXT_MAX_MTU;
+ bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+ bp->rx_dir = DMA_FROM_DEVICE;
+ bp->rx_skb_func = bnxt_rx_skb;
+ }
+ return 0;
+}
+
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
int i;
@@ -2667,6 +2779,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
+ if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ !(vnic->flags & BNXT_VNIC_RSS_FLAG))
+ continue;
+
/* Allocate rss table and hash key */
vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&vnic->rss_table_dma_addr,
@@ -2890,6 +3006,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
+ kfree(bp->tx_ring_map);
+ bp->tx_ring_map = NULL;
kfree(bp->tx_ring);
bp->tx_ring = NULL;
kfree(bp->rx_ring);
@@ -2942,6 +3060,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (!bp->tx_ring)
return -ENOMEM;
+ bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+
+ if (!bp->tx_ring_map)
+ return -ENOMEM;
+
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
j = 0;
else
@@ -2950,6 +3074,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
bp->tx_ring[i].bnapi = bp->bnapi[j];
bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+ bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+ if (i >= bp->tx_nr_rings_xdp) {
+ bp->tx_ring[i].txq_index = i -
+ bp->tx_nr_rings_xdp;
+ bp->bnapi[j]->tx_int = bnxt_tx_int;
+ } else {
+ bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
+ bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ }
}
rc = bnxt_alloc_stats(bp);
@@ -2991,6 +3124,47 @@ alloc_mem_err:
return rc;
}
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID)
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
@@ -3115,27 +3289,46 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
return rc;
}
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+ int bmap_size)
{
struct hwrm_func_drv_rgtr_input req = {0};
- int i;
DECLARE_BITMAP(async_events_bmap, 256);
u32 *events = (u32 *)async_events_bmap;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
req.enables =
- cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
- FUNC_DRV_RGTR_REQ_ENABLES_VER |
- FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ if (bmap && bmap_size) {
+ for (i = 0; i < bmap_size; i++) {
+ if (test_bit(i, bmap))
+ __set_bit(i, async_events_bmap);
+ }
+ }
+
for (i = 0; i < 8; i++)
req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER);
+
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req.ver_maj = DRV_VER_MAJ;
req.ver_min = DRV_VER_MIN;
@@ -3144,6 +3337,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
if (BNXT_PF(bp)) {
DECLARE_BITMAP(vf_req_snif_bmap, 256);
u32 *data = (u32 *)vf_req_snif_bmap;
+ int i;
memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
@@ -3270,6 +3464,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
+#define BNXT_NTP_TUNNEL_FLTR_FLAG \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
@@ -3290,10 +3487,31 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.ip_protocol = keys->basic.ip_proto;
- req.src_ipaddr[0] = keys->addrs.v4addrs.src;
- req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
+ int i;
+
+ req.ethertype = htons(ETH_P_IPV6);
+ req.ip_addr_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
+ *(struct in6_addr *)&req.src_ipaddr[0] =
+ keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req.dst_ipaddr[0] =
+ keys->addrs.v6addrs.dst;
+ for (i = 0; i < 4; i++) {
+ req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ }
+ } else {
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
+ if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+ req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+ req.tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+ }
req.src_port = keys->ports.src;
req.src_port_mask = cpu_to_be16(0xffff);
@@ -3433,13 +3651,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
- vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
-
- req.hash_type = cpu_to_le32(vnic->hash_type);
-
+ req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
max_rings = bp->rx_nr_rings - 1;
@@ -3531,7 +3743,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
return rc;
}
-static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -3546,6 +3758,12 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
+ } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
+ req.rss_rule =
+ cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
} else {
req.rss_rule = cpu_to_le16(0xffff);
}
@@ -3579,6 +3797,9 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ req.flags |=
+ cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -3646,6 +3867,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
return rc;
}
+static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+ struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_vnic_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10600)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ if (resp->flags &
+ cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+ bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
u16 i;
@@ -3749,7 +3991,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req.length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
@@ -3768,7 +4010,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (rc || err) {
switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_CMPL:
+ case RING_FREE_REQ_RING_TYPE_L2_CMPL:
netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
rc, err);
return -1;
@@ -3792,6 +4034,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
return rc;
}
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+ int rc;
+
+ if (BNXT_PF(bp)) {
+ struct hwrm_func_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = cpu_to_le16(idx);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ } else {
+ struct hwrm_func_vf_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables =
+ cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = cpu_to_le16(idx);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ }
+ return rc;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
@@ -3808,6 +4074,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
goto err_out;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+ }
}
for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -3882,7 +4154,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
if (rc || error_code) {
switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_CMPL:
+ case RING_FREE_REQ_RING_TYPE_L2_CMPL:
netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
@@ -3958,6 +4230,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ /* The completion rings are about to be freed. After that the
+ * IRQ doorbell will not work anymore. So we need to disable
+ * IRQ here.
+ */
+ bnxt_disable_int_sync(bp);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -3965,7 +4243,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_CMPL,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -3973,6 +4251,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+/* Caller must hold bp->hwrm_cmd_lock */
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(fid);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ req.num_tx_rings = cpu_to_le16(*tx_rings);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
@@ -4156,7 +4478,7 @@ func_qcfg_exit:
return rc;
}
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
@@ -4170,6 +4492,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+ if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
bp->tx_push_thresh = 0;
if (resp->flags &
cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
@@ -4225,7 +4552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(bp->dev->dev_addr);
+ eth_hw_addr_random(bp->dev);
rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
return rc;
@@ -4266,12 +4593,16 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
goto qportcfg_exit;
}
bp->max_tc = resp->max_configurable_queues;
+ bp->max_lltc = resp->max_configurable_lossless_queues;
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
+ if (bp->max_lltc > bp->max_tc)
+ bp->max_lltc = bp->max_tc;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4435,8 +4766,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
+ if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
+ goto skip_rss_ctx;
+
/* allocate context for vnic */
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
@@ -4456,6 +4791,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
bp->rsscos_nr_ctxs++;
}
+skip_rss_ctx:
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
@@ -4490,13 +4826,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
int i, rc = 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
if (vnic_id >= bp->nr_vnics)
break;
- bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ vnic = &bp->vnic_info[vnic_id];
+ vnic->flags |= BNXT_VNIC_RFS_FLAG;
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
@@ -4670,40 +5010,13 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
return bnxt_init_chip(bp, irq_re_init);
}
-static void bnxt_disable_int(struct bnxt *bp)
-{
- int i;
-
- if (!bp->bnapi)
- return;
-
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- }
-}
-
-static void bnxt_enable_int(struct bnxt *bp)
-{
- int i;
-
- atomic_set(&bp->intr_sem, 0);
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
- }
-}
-
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
int rc;
struct net_device *dev = bp->dev;
- rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+ bp->tx_nr_rings_xdp);
if (rc)
return rc;
@@ -4743,16 +5056,147 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
-static int bnxt_setup_msix(struct bnxt *bp)
+static void bnxt_setup_msix(struct bnxt *bp)
{
- struct msix_entry *msix_ent;
+ const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
- int i, total_vecs, rc = 0, min = 1;
+ int tcs, i;
+
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1) {
+ int i, off, count;
+
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ char *attr;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ attr = "TxRx";
+ else if (i < bp->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+ i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+}
+
+static void bnxt_setup_inta(struct bnxt *bp)
+{
const int len = sizeof(bp->irq_tbl[0].name);
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
- total_vecs = bp->cp_nr_rings;
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+ snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
+ 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ bnxt_setup_msix(bp);
+ else
+ bnxt_setup_inta(bp);
+
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_rsscos_ctxs;
+#endif
+ return bp->pf.max_rsscos_ctxs;
+}
+
+static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_vnics;
+#endif
+ return bp->pf.max_vnics;
+}
+#endif
+
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_stat_ctxs;
+#endif
+ return bp->pf.max_stat_ctxs;
+}
+
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_stat_ctxs = max;
+ else
+#endif
+ bp->pf.max_stat_ctxs = max;
+}
+
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_cp_rings;
+#endif
+ return bp->pf.max_cp_rings;
+}
+
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_cp_rings = max;
+ else
+#endif
+ bp->pf.max_cp_rings = max;
+}
+
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ return bp->vf.max_irqs;
+#endif
+ return bp->pf.max_irqs;
+}
+
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ if (BNXT_VF(bp))
+ bp->vf.max_irqs = max_irqs;
+ else
+#endif
+ bp->pf.max_irqs = max_irqs;
+}
+
+static int bnxt_init_msix(struct bnxt *bp)
+{
+ int i, total_vecs, rc = 0, min = 1;
+ struct msix_entry *msix_ent;
+
+ total_vecs = bnxt_get_max_func_irqs(bp);
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
@@ -4773,8 +5217,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
if (bp->irq_tbl) {
- int tcs;
+ for (i = 0; i < total_vecs; i++)
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
total_vecs, min == 1);
@@ -4782,43 +5228,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
goto msix_setup_exit;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- tcs = netdev_get_num_tc(dev);
- if (tcs > 1) {
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
- if (bp->tx_nr_rings_per_tc == 0) {
- netdev_reset_tc(dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- } else {
- int i, off, count;
-
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
- for (i = 0; i < tcs; i++) {
- count = bp->tx_nr_rings_per_tc;
- off = i * count;
- netdev_set_tc_queue(dev, i, count, off);
- }
- }
- }
- bp->cp_nr_rings = total_vecs;
-
- for (i = 0; i < bp->cp_nr_rings; i++) {
- char *attr;
-
- bp->irq_tbl[i].vector = msix_ent[i].vector;
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- attr = "TxRx";
- else if (i < bp->rx_nr_rings)
- attr = "rx";
- else
- attr = "tx";
+ bp->cp_nr_rings = (min == 1) ?
+ max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+ bp->tx_nr_rings + bp->rx_nr_rings;
- snprintf(bp->irq_tbl[i].name, len,
- "%s-%s-%d", dev->name, attr, i);
- bp->irq_tbl[i].handler = bnxt_msix;
- }
- rc = bnxt_set_real_num_queues(bp);
- if (rc)
- goto msix_setup_exit;
} else {
rc = -ENOMEM;
goto msix_setup_exit;
@@ -4828,52 +5241,54 @@ static int bnxt_setup_msix(struct bnxt *bp)
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
pci_disable_msix(bp->pdev);
kfree(msix_ent);
return rc;
}
-static int bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_inta(struct bnxt *bp)
{
- int rc;
- const int len = sizeof(bp->irq_tbl[0].name);
-
- if (netdev_get_num_tc(bp->dev))
- netdev_reset_tc(bp->dev);
-
bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl) {
- rc = -ENOMEM;
- return rc;
- }
+ if (!bp->irq_tbl)
+ return -ENOMEM;
+
+ bp->total_irqs = 1;
bp->rx_nr_rings = 1;
bp->tx_nr_rings = 1;
bp->cp_nr_rings = 1;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
bp->flags |= BNXT_FLAG_SHARED_RINGS;
bp->irq_tbl[0].vector = bp->pdev->irq;
- snprintf(bp->irq_tbl[0].name, len,
- "%s-%s-%d", bp->dev->name, "TxRx", 0);
- bp->irq_tbl[0].handler = bnxt_inta;
- rc = bnxt_set_real_num_queues(bp);
- return rc;
+ return 0;
}
-static int bnxt_setup_int_mode(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
int rc = 0;
if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_setup_msix(bp);
+ rc = bnxt_init_msix(bp);
if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
- rc = bnxt_setup_inta(bp);
+ rc = bnxt_init_inta(bp);
}
return rc;
}
+static void bnxt_clear_int_mode(struct bnxt *bp)
+{
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -4892,10 +5307,6 @@ static void bnxt_free_irq(struct bnxt *bp)
free_irq(irq->vector, bp->bnapi[i]);
irq->requested = 0;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
- kfree(bp->irq_tbl);
- bp->irq_tbl = NULL;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -4967,7 +5378,6 @@ static void bnxt_init_napi(struct bnxt *bp)
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
bnxt_poll_nitroa0, 64);
- napi_hash_add(&bnapi->napi);
}
} else {
bnapi = bp->bnapi[0];
@@ -4982,10 +5392,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0; i < bp->cp_nr_rings; i++)
napi_disable(&bp->bnapi[i]->napi);
- bnxt_disable_poll(bp->bnapi[i]);
- }
}
static void bnxt_enable_napi(struct bnxt *bp)
@@ -4994,12 +5402,11 @@ static void bnxt_enable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
bp->bnapi[i]->in_reset = false;
- bnxt_enable_poll(bp->bnapi[i]);
napi_enable(&bp->bnapi[i]->napi);
}
}
-static void bnxt_tx_disable(struct bnxt *bp)
+void bnxt_tx_disable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
@@ -5017,7 +5424,7 @@ static void bnxt_tx_disable(struct bnxt *bp)
netif_carrier_off(bp->dev);
}
-static void bnxt_tx_enable(struct bnxt *bp)
+void bnxt_tx_enable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
@@ -5038,7 +5445,7 @@ static void bnxt_report_link(struct bnxt *bp)
if (bp->link_info.link_up) {
const char *duplex;
const char *flow_ctrl;
- u16 speed;
+ u16 speed, fec;
netif_carrier_on(bp->dev);
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5060,6 +5467,12 @@ static void bnxt_report_link(struct bnxt *bp)
netdev_info(bp->dev, "EEE is %s\n",
bp->eee.eee_active ? "active" :
"not active");
+ fec = bp->link_info.fec_cfg;
+ if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
+ netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+ (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
+ (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
+ (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
@@ -5109,6 +5522,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u8 link_up = link_info->link_up;
+ u16 diff;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@ -5183,6 +5597,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
}
}
+
+ link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
+ if (bp->hwrm_spec_code >= 0x10504)
+ link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
+
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -5196,6 +5615,18 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->link_up = 0;
}
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ diff = link_info->support_auto_speeds ^ link_info->advertising;
+ if ((link_info->support_auto_speeds | diff) !=
+ link_info->support_auto_speeds) {
+ /* An advertised speed is no longer supported, so we need to
+ * update the advertisement settings. Caller holds RTNL
+ * so we can modify link settings.
+ */
+ link_info->advertising = link_info->support_auto_speeds;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ bnxt_hwrm_set_link_setting(bp, true, false);
+ }
return 0;
}
@@ -5259,7 +5690,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
{
u8 autoneg = bp->link_info.autoneg;
u16 fw_link_speed = bp->link_info.req_link_speed;
- u32 advertising = bp->link_info.advertising;
+ u16 advertising = bp->link_info.advertising;
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
@@ -5360,10 +5791,49 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
- req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
+ req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
+ bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+ __le16 caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -5402,6 +5872,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
rc);
return rc;
}
+ if (!BNXT_SINGLE_PF(bp))
+ return 0;
+
if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
link_info->req_flow_ctrl)
@@ -5423,6 +5896,12 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
update_link = true;
}
+	/* The last close may have shut down the link, so we need to call
+ * PHY_CFG to bring it back up.
+ */
+ if (!netif_carrier_ok(bp->dev))
+ update_link = true;
+
if (!bnxt_eee_config_ok(bp))
update_eee = true;
@@ -5543,38 +6022,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
- if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
- rc);
- rc = -EBUSY;
- return rc;
- }
- /* Do func_reset during the 1st PF open only to prevent killing
- * the VFs when the PF is brought down and up.
- */
- if (BNXT_PF(bp))
- set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
- }
return __bnxt_open_nic(bp, true, true);
}
-static void bnxt_disable_int_sync(struct bnxt *bp)
-{
- int i;
-
- atomic_inc(&bp->intr_sem);
- if (!netif_running(bp->dev))
- return;
-
- bnxt_disable_int(bp);
- for (i = 0; i < bp->cp_nr_rings; i++)
- synchronize_irq(bp->irq_tbl[i].vector);
-}
-
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -5596,13 +6047,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
msleep(20);
- /* Flush rings before disabling interrupts */
+	/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
@@ -5649,16 +6099,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static struct rtnl_link_stats64 *
+static void
bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
u32 i;
struct bnxt *bp = netdev_priv(dev);
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
if (!bp->bnapi)
- return stats;
+ return;
/* TODO check if we need to synchronize with bnxt_close path */
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -5705,8 +6153,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
stats->tx_errors = le64_to_cpu(tx->tx_err);
}
-
- return stats;
}
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
@@ -5859,20 +6305,36 @@ skip_uc:
return rc;
}
+/* If the chip and firmware support RFS */
+static bool bnxt_rfs_supported(struct bnxt *bp)
+{
+ if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return true;
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ return true;
+ return false;
+}
+
+/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
- struct bnxt_pf_info *pf = &bp->pf;
- int vnics;
+ int vnics, max_vnics, max_rss_ctxs;
- if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
return false;
vnics = 1 + bp->rx_nr_rings;
- if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+ max_vnics = bnxt_get_max_func_vnics(bp);
+ max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
+
+ /* RSS contexts not a limiting factor */
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ max_rss_ctxs = max_vnics;
+ if (vnics > max_vnics || vnics > max_rss_ctxs) {
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
@@ -5928,6 +6390,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ flags &= ~BNXT_FLAG_TPA;
+
if (features & NETIF_F_HW_VLAN_CTAG_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
@@ -6079,29 +6544,37 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- * If there is a parallel dev_close(), bnxt_close() may be holding
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
* rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
* must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
- int rc;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -6115,12 +6588,6 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
- if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- rc = bnxt_update_link(bp, true);
- if (rc)
- netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
- rc);
- }
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6141,22 +6608,99 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
+	/* The functions below will clear BNXT_STATE_IN_SP_TASK. They
+	 * must be the last functions called before exiting.
+ */
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc = 0;
+
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ rc = bnxt_update_link(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_get_port_module_status(bp);
+ bnxt_rtnl_unlock_sp(bp);
+ }
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
- bnxt_get_port_module_status(bp);
-
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
- bnxt_hwrm_port_qstats(bp);
-
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
+/* Under rtnl_lock */
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
+{
+ int max_rx, max_tx, tx_sets = 1;
+ int tx_rings_needed;
+ bool sh = true;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ sh = false;
+
+ if (tcs)
+ tx_sets = tcs;
+
+ rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
+ if (rc)
+ return rc;
+
+ if (max_rx < rx)
+ return -ENOMEM;
+
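+	/* One set of TX rings per traffic class, plus any dedicated XDP TX rings. */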
+ tx_rings_needed = tx * tx_sets + tx_xdp;
+ if (max_tx < tx_rings_needed)
+ return -ENOMEM;
+
+ if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
+ tx_rings_needed < (tx * tx_sets + tx_xdp))
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
+{
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+}
+
+static void bnxt_cleanup_pci(struct bnxt *bp)
+{
+ bnxt_unmap_bars(bp, bp->pdev);
+ pci_release_regions(bp->pdev);
+ pci_disable_device(bp->pdev);
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -6244,25 +6788,10 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
-
return 0;
init_err_release:
- if (bp->bar2) {
- pci_iounmap(pdev, bp->bar2);
- bp->bar2 = NULL;
- }
-
- if (bp->bar1) {
- pci_iounmap(pdev, bp->bar1);
- bp->bar1 = NULL;
- }
-
- if (bp->bar0) {
- pci_iounmap(pdev, bp->bar0);
- bp->bar0 = NULL;
- }
-
+ bnxt_unmap_bars(bp, pdev);
pci_release_regions(pdev);
init_err_disable:
@@ -6303,9 +6832,6 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9500)
- return -EINVAL;
-
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
@@ -6318,20 +6844,14 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
- struct tc_to_netdev *ntc)
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- u8 tc;
-
- if (ntc->type != TC_SETUP_MQPRIO)
- return -EINVAL;
-
- tc = ntc->tc;
+ int rc;
if (tc > bp->max_tc) {
- netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+ netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
tc, bp->max_tc);
return -EINVAL;
}
@@ -6342,13 +6862,10 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if (tc) {
- int max_rx_rings, max_tx_rings, rc;
-
- rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
- if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
- return -ENOMEM;
- }
+ rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ tc, bp->tx_nr_rings_xdp);
+ if (rc)
+ return rc;
/* Needs to close the device and do hw resource re-allocations */
if (netif_running(bp->dev))
@@ -6371,6 +6888,15 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return 0;
}
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *ntc)
+{
+ if (ntc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return bnxt_setup_mq_tc(dev, ntc->tc);
+}
+
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
@@ -6383,6 +6909,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
keys1->ports.ports == keys2->ports.ports &&
keys1->basic.ip_proto == keys2->basic.ip_proto &&
keys1->basic.n_proto == keys2->basic.n_proto &&
+ keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
return true;
@@ -6400,9 +6927,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
- if (skb->encapsulation)
- return -EPROTONOSUPPORT;
-
if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int off = 0, j;
@@ -6429,12 +6953,23 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
- if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
+ fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
((fkeys->basic.ip_proto != IPPROTO_TCP) &&
(fkeys->basic.ip_proto != IPPROTO_UDP))) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
+ bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+ bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
@@ -6641,9 +7176,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = bnxt_busy_poll,
-#endif
+ .ndo_xdp = bnxt_xdp,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -6659,15 +7192,16 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
+ bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
- pci_iounmap(pdev, bp->bar2);
- pci_iounmap(pdev, bp->bar1);
- pci_iounmap(pdev, bp->bar0);
+ bnxt_dcb_free(bp);
+ kfree(bp->edev);
+ bp->edev = NULL;
+ if (bp->xdp_prog)
+ bpf_prog_put(bp->xdp_prog);
+ bnxt_cleanup_pci(bp);
free_netdev(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
static int bnxt_probe_phy(struct bnxt *bp)
@@ -6772,6 +7306,48 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
+static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+ if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ /* Not enough rings, try disabling agg rings. */
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+ if (rc)
+ return rc;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
+
+ if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnxt_get_max_func_cp_rings(bp);
+ max_stat = bnxt_get_max_func_stat_ctxs(bp);
+ max_irq = bnxt_get_max_func_irqs(bp);
+ if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNXT_MIN_ROCE_CP_RINGS;
+ max_irq -= BNXT_MIN_ROCE_CP_RINGS;
+ max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(int, max_cp, max_irq);
+ max_cp = min_t(int, max_cp, max_stat);
+ rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+ return rc;
+}
+
static int bnxt_set_dflt_rings(struct bnxt *bp)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -6780,11 +7356,16 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
- rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+ rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+
+ rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
@@ -6796,6 +7377,13 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
}
+void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+{
+ ASSERT_RTNL();
+ bnxt_hwrm_func_qcaps(bp);
+ bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -6819,7 +7407,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bnxt *bp;
int rc, max_irqs;
- if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+ if (pci_is_bridge(pdev))
return -ENODEV;
if (version_printed++ == 0)
@@ -6845,17 +7433,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
-
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
rc = bnxt_hwrm_ver_get(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
bnxt_hwrm_fw_set_time(bp);
@@ -6884,6 +7471,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 60 - 9500 */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = BNXT_MAX_MTU;
+
+ bnxt_dcb_init(bp);
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
@@ -6893,7 +7486,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
+
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ goto init_err_pci_clean;
+
+ bp->ulp_probe = bnxt_ulp_probe;
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
@@ -6901,7 +7500,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
rc);
rc = -1;
- goto init_err;
+ goto init_err_pci_clean;
}
rc = bnxt_hwrm_queue_qportcfg(bp);
@@ -6909,22 +7508,38 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
rc);
rc = -1;
- goto init_err;
+ goto init_err_pci_clean;
}
bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
- if (BNXT_PF(bp))
- bp->pf.max_irqs = max_irqs;
-#if defined(CONFIG_BNXT_SRIOV)
- else
- bp->vf.max_irqs = max_irqs;
-#endif
- bnxt_set_dflt_rings(bp);
+ bnxt_set_max_func_irqs(bp, max_irqs);
+ rc = bnxt_set_dflt_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ goto init_err_pci_clean;
+ }
+
+ /* Default RSS hash cfg. */
+ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
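+	/* UDP RSS hashing is only enabled on newer chips running
+	 * HWRM spec 0x10501 or later.
+	 */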
+ if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
+ !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+ bp->hwrm_spec_code >= 0x10501) {
+ bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ }
- if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnxt_hwrm_vnic_qcaps(bp);
+ if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
@@ -6937,11 +7552,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = bnxt_probe_phy(bp);
if (rc)
- goto init_err;
+ goto init_err_pci_clean;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ goto init_err_pci_clean;
rc = register_netdev(dev);
if (rc)
- goto init_err;
+ goto init_err_clr_int;
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
@@ -6951,10 +7574,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-init_err:
- pci_iounmap(pdev, bp->bar0);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+init_err_clr_int:
+ bnxt_clear_int_mode(bp);
+
+init_err_pci_clean:
+ bnxt_cleanup_pci(bp);
init_err_free:
free_netdev(dev);
@@ -6980,6 +7604,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
netif_device_detach(netdev);
+ bnxt_ulp_stop(bp);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
@@ -6988,8 +7614,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
- /* So that func_reset will be done during slot_reset */
- clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
pci_disable_device(pdev);
rtnl_unlock();
@@ -7023,11 +7647,14 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
- if (netif_running(netdev))
+ err = bnxt_hwrm_func_reset(bp);
+ if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- if (!err)
+ if (!err) {
result = PCI_ERS_RESULT_RECOVERED;
+ bnxt_ulp_start(bp);
+ }
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))