Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h          | 146
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c  |  14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c     | 222
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c   |  14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h   |   5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c      | 128
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h      |  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c            |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c          |   5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c            | 181
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c          | 149
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h         |  32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h  |   5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h        |  29
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h       |   8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h   |  12
16 files changed, 617 insertions, 352 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2125903043fb..163543b1ea0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -263,6 +263,11 @@ struct tp_params {
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+ /* cached TP_OUT_CONFIG compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ int rx_pkt_encap;
+
/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
* subset of the set of fields which may be present in the Compressed
* Filter Tuple portion of filters and TCP TCB connections. The
@@ -581,22 +586,6 @@ struct sge_rspq { /* state for an SGE response queue */
rspq_handler_t handler;
rspq_flush_handler_t flush_handler;
struct t4_lro_mgr lro_mgr;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define CXGB_POLL_STATE_IDLE 0
-#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
-#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */
-#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */
-#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */
-#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \
- CXGB_POLL_STATE_POLL_YIELD)
-#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \
- CXGB_POLL_STATE_POLL)
-#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \
- CXGB_POLL_STATE_POLL_YIELD)
- unsigned int bpoll_state;
- spinlock_t bpoll_lock; /* lock for busy poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
};
struct sge_eth_stats { /* Ethernet queue statistics */
@@ -635,6 +624,7 @@ struct tx_sw_desc;
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int q_type; /* Q type Eth/Ctrl/Ofld */
unsigned int size; /* # of descriptors */
unsigned int cidx; /* SW consumer index */
unsigned int pidx; /* producer index */
@@ -665,7 +655,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
-struct sge_ofld_txq { /* state for an SGE offload Tx queue */
+struct sge_uld_txq { /* state for an SGE offload Tx queue */
struct sge_txq q;
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
@@ -693,14 +683,20 @@ struct sge_uld_rxq_info {
u8 uld; /* uld type */
};
+struct sge_uld_txq_info {
+ struct sge_uld_txq *uldtxq; /* Txq's for ULD */
+ atomic_t users; /* num users */
+ u16 ntxq; /* # of egress uld queues */
+};
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
- struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_uld_rxq_info **uld_rxq_info;
+ struct sge_uld_txq_info **uld_txq_info;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
spinlock_t intrq_lock;
@@ -775,6 +771,10 @@ struct vf_info {
bool pf_set_mac;
};
+struct mbox_list {
+ struct list_head list;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -837,6 +837,10 @@ struct adapter {
struct work_struct db_drop_task;
bool tid_release_task_busy;
+ /* lock for mailbox cmd list */
+ spinlock_t mbox_lock;
+ struct mbox_list mlist;
+
/* support for mailbox command/reply logging */
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
@@ -1153,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
return netdev2pinfo(dev)->adapter;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
- spin_lock_init(&q->bpoll_lock);
- q->bpoll_state = CXGB_POLL_STATE_IDLE;
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
- bool rc = true;
-
- spin_lock(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_LOCKED) {
- q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
- rc = false;
- } else {
- q->bpoll_state = CXGB_POLL_STATE_NAPI;
- }
- spin_unlock(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
- bool rc = false;
-
- spin_lock(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
- rc = true;
- q->bpoll_state = CXGB_POLL_STATE_IDLE;
- spin_unlock(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
- bool rc = true;
-
- spin_lock_bh(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_LOCKED) {
- q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
- rc = false;
- } else {
- q->bpoll_state |= CXGB_POLL_STATE_POLL;
- }
- spin_unlock_bh(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
- bool rc = false;
-
- spin_lock_bh(&q->bpoll_lock);
- if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
- rc = true;
- q->bpoll_state = CXGB_POLL_STATE_IDLE;
- spin_unlock_bh(&q->bpoll_lock);
- return rc;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
- return q->bpoll_state & CXGB_POLL_USER_PEND;
-}
-#else
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
- return true;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
- return false;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
- return false;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
- return false;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
- return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* Return a version number to identify the type of adapter. The scheme is:
* - bits 0..9: chip version
* - bits 10..15: chip revision
@@ -1298,13 +1206,13 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
unsigned int cmplqid);
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
unsigned int cmplqid);
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
- struct net_device *dev, unsigned int iqid);
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
-int cxgb_busy_poll(struct napi_struct *napi);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
extern int dbfifo_int_thresh;
@@ -1480,6 +1388,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_shutdown_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter,
@@ -1661,4 +1570,7 @@ int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
+void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap);
+void free_txq(struct adapter *adap, struct sge_txq *q);
#endif /* __CXGB4_H__ */
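
The new q_type field in struct sge_txq lets the Tx-restart path dispatch on an explicit queue type instead of the old pointer-range check against sge.ofldtxq, which goes away once ULD Tx queues are allocated dynamically. A minimal userspace sketch of that dispatch pattern follows; the trimmed struct layouts and the wake/resume counters are illustrative stand-ins, not the driver's real fields.

/* Userspace model (not driver code) of q_type-based dispatch, as used by
 * fwevtq_handler() in cxgb4_main.c after this change.
 */
#include <stddef.h>
#include <stdio.h>

enum { CXGB4_TXQ_ETH, CXGB4_TXQ_ULD, CXGB4_TXQ_CTRL };

struct sge_txq { unsigned int q_type; };
struct sge_eth_txq { int wake_cnt; struct sge_txq q; };
struct sge_uld_txq { int resume_cnt; struct sge_txq q; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void restart_txq(struct sge_txq *txq)
{
	if (txq->q_type == CXGB4_TXQ_ETH)
		container_of(txq, struct sge_eth_txq, q)->wake_cnt++;
	else
		container_of(txq, struct sge_uld_txq, q)->resume_cnt++;
}

int main(void)
{
	struct sge_eth_txq eth = { .q.q_type = CXGB4_TXQ_ETH };
	struct sge_uld_txq uld = { .q.q_type = CXGB4_TXQ_ULD };

	restart_txq(&eth.q);
	restart_txq(&uld.q);
	printf("eth wakes %d, uld resumes %d\n", eth.wake_cnt, uld.resume_cnt);
	return 0;
}
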
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 20455d082cb8..f6e739da7bb7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1416,7 +1416,7 @@ static unsigned int xdigit2int(unsigned char c)
* <pattern data>[/<pattern mask>][@<anchor>]
*
* Up to 2 filter patterns can be specified. If 2 are supplied the first one
- * must be anchored at 0. An omited mask is taken as a mask of 1s, an omitted
+ * must be anchored at 0. An omitted mask is taken as a mask of 1s, an omitted
* anchor is taken as 0.
*/
static ssize_t mps_trc_write(struct file *file, const char __user *buf,
@@ -2512,18 +2512,6 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (ofld_idx < ofld_entries) {
- const struct sge_ofld_txq *tx =
- &adap->sge.ofldtxq[ofld_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
-
- S("QType:", "OFLD-Txq");
- T("TxQ ID:", q.cntxt_id);
- T("TxQ size:", q.size);
- T("TxQ inuse:", q.in_use);
- T("TxQ CIDX:", q.cidx);
- T("TxQ PIDX:", q.pidx);
-
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
int n = min(4, adap->params.nports - 4 * ctrl_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 19dc9e25aa72..afb0967d2ce6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -63,7 +63,7 @@
#include <net/addrconf.h>
#include <net/bonding.h>
#include <net/addrconf.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include "cxgb4.h"
@@ -134,24 +134,6 @@ MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
- * Normally we're willing to become the firmware's Master PF but will be happy
- * if another PF has already become the Master and initialized the adapter.
- * Setting "force_init" will cause this driver to forcibly establish itself as
- * the Master PF and initialize the adapter.
- */
-static uint force_init;
-
-module_param(force_init, uint, 0644);
-MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
- "deprecated parameter");
-
-static int dflt_msg_enable = DFLT_MSG_ENABLE;
-
-module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
- "deprecated parameter");
-
-/*
* The driver uses the best interrupt scheme available on a platform in the
* order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
* of these schemes the driver may consider as follows:
@@ -179,16 +161,6 @@ MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
*/
static int rx_dma_offset = 2;
-#ifdef CONFIG_PCI_IOV
-/* Configure the number of PCI-E Virtual Function which are to be instantiated
- * on SR-IOV Capable Physical Functions.
- */
-static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
-
-module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
-#endif
-
/* TX Queue select used to determine what algorithm to use for selecting TX
* queue. Select between the kernel provided function (select_queue=0) or user
* cxgb_select_queue function (select_queue=1)
@@ -216,18 +188,24 @@ static void link_report(struct net_device *dev)
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
- case 10000:
- s = "10Gbps";
+ case 100:
+ s = "100Mbps";
break;
case 1000:
- s = "1000Mbps";
+ s = "1Gbps";
break;
- case 100:
- s = "100Mbps";
+ case 10000:
+ s = "10Gbps";
+ break;
+ case 25000:
+ s = "25Gbps";
break;
case 40000:
s = "40Gbps";
break;
+ case 100000:
+ s = "100Gbps";
+ break;
default:
pr_info("%s: unsupported speed: %d\n",
dev->name, p->link_cfg.speed);
@@ -530,15 +508,15 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
txq->restarts++;
- if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
+ if (txq->q_type == CXGB4_TXQ_ETH) {
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
netif_tx_wake_queue(eq->txq);
} else {
- struct sge_ofld_txq *oq;
+ struct sge_uld_txq *oq;
- oq = container_of(txq, struct sge_ofld_txq, q);
+ oq = container_of(txq, struct sge_uld_txq, q);
tasklet_schedule(&oq->qresume_tsk);
}
} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
@@ -766,14 +744,8 @@ static void quiesce_rx(struct adapter *adap)
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler) {
+ if (q && q->handler)
napi_disable(&q->napi);
- local_bh_disable();
- while (!cxgb_poll_lock_napi(q))
- mdelay(1);
- local_bh_enable();
- }
-
}
}
@@ -804,10 +776,9 @@ static void enable_rx(struct adapter *adap)
if (!q)
continue;
- if (q->handler) {
- cxgb_busy_poll_init_lock(q);
+ if (q->handler)
napi_enable(&q->napi);
- }
+
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
@@ -885,15 +856,6 @@ static int setup_sge_queues(struct adapter *adap)
}
}
- j = s->ofldqsets / adap->params.nports; /* iscsi queues per channel */
- for_each_ofldtxq(s, i) {
- err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
- adap->port[i / j],
- s->fw_evtq.cntxt_id);
- if (err)
- goto freeout;
- }
-
for_each_port(adap, i) {
/* Note that cmplqid below is 0 if we don't
* have RDMA queues, and that's the right value.
@@ -1843,7 +1805,7 @@ static void check_neigh_update(struct neighbour *neigh)
const struct device *parent;
const struct net_device *netdev = neigh->dev;
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(netdev))
netdev = vlan_dev_real_dev(netdev);
parent = netdev->dev.parent;
if (parent && parent->driver == &cxgb4_driver.driver)
@@ -1922,8 +1884,18 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_ofldtxq(&adap->sge, i)
- disable_txq_db(&adap->sge.ofldtxq[i].q);
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+
+ if (txq_info) {
+ for_each_ofldtxq(&adap->sge, i) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ disable_txq_db(&txq->q);
+ }
+ }
+ }
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
}
@@ -1934,8 +1906,18 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldtxq(&adap->sge, i)
- enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+
+ if (txq_info) {
+ for_each_ofldtxq(&adap->sge, i) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ enable_txq_db(adap, &txq->q);
+ }
+ }
+ }
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
@@ -2006,8 +1988,17 @@ static void recover_all_queues(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldtxq(&adap->sge, i)
- sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+ if (txq_info) {
+ for_each_ofldtxq(&adap->sge, i) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ sync_txq_pidx(adap, &txq->q);
+ }
+ }
+ }
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
@@ -2120,7 +2111,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
#if IS_ENABLED(CONFIG_BONDING)
struct adapter *adap;
#endif
- if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(event_dev))
event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
if (event_dev->flags & IFF_MASTER) {
@@ -2377,8 +2368,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
-static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *ns)
+static void cxgb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *ns)
{
struct port_stats stats;
struct port_info *p = netdev_priv(dev);
@@ -2391,7 +2382,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
spin_lock(&adapter->stats_lock);
if (!netif_device_present(dev)) {
spin_unlock(&adapter->stats_lock);
- return ns;
+ return;
}
t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
&p->stats_base);
@@ -2409,7 +2400,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
ns->rx_over_errors = 0;
ns->rx_crc_errors = stats.rx_fcs_err;
ns->rx_frame_errors = stats.rx_symbol_err;
- ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
+ ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
stats.rx_ovflow2 + stats.rx_ovflow3 +
stats.rx_trunc0 + stats.rx_trunc1 +
stats.rx_trunc2 + stats.rx_trunc3;
@@ -2425,7 +2416,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
ns->tx_errors = stats.tx_error_frames;
ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
- return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -2502,8 +2492,6 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
int ret;
struct port_info *pi = netdev_priv(dev);
- if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
- return -EINVAL;
ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
@@ -2588,6 +2576,19 @@ static int cxgb_get_vf_config(struct net_device *dev,
ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
return 0;
}
+
+static int cxgb_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct port_info *pi = netdev_priv(dev);
+ unsigned int phy_port_id;
+
+ phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
+ ppid->id_len = sizeof(phy_port_id);
+ memcpy(ppid->id, &phy_port_id, ppid->id_len);
+ return 0;
+}
+
#endif
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2755,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_fcoe_enable = cxgb_fcoe_enable,
.ndo_fcoe_disable = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = cxgb_busy_poll,
-#endif
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
};
@@ -2767,6 +2765,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_open = dummy_open,
.ndo_set_vf_mac = cxgb_set_vf_mac,
.ndo_get_vf_config = cxgb_get_vf_config,
+ .ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
@@ -2787,8 +2786,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
void t4_fatal_err(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
- t4_intr_disable(adap);
+ int port;
+
+ /* Disable the SGE since ULDs are going to free resources that
+ * could be exposed to the adapter. RDMA MWs for example...
+ */
+ t4_shutdown_adapter(adap);
+ for_each_port(adap, port) {
+ struct net_device *dev = adap->port[port];
+
+ /* If we get here in very early initialization the network
+ * devices may not have been set up yet.
+ */
+ if (!dev)
+ continue;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ }
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
@@ -3993,7 +4008,7 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int i, n10g = 0, qidx = 0;
+ int i = 0, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
@@ -4008,8 +4023,7 @@ static void cfg_queues(struct adapter *adap)
adap->params.crypto = 0;
}
- for_each_port(adap, i)
- n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -4077,9 +4091,6 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
s->ctrlq[i].q.size = 512;
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
- s->ofldtxq[i].q.size = 1024;
-
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
@@ -4411,9 +4422,9 @@ static void print_port_info(const struct net_device *dev)
spd = " 8 GT/s";
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
+ bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
+ bufp += sprintf(bufp, "1G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
@@ -4525,12 +4536,14 @@ static int config_mgmt_dev(struct pci_dev *pdev)
int err;
snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
+ netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
+ dummy_setup);
if (!netdev)
return -ENOMEM;
pi = netdev_priv(netdev);
pi->adapter = adap;
+ pi->port_id = adap->pf % adap->params.nports;
SET_NETDEV_DEV(netdev, &pdev->dev);
adap->port[0] = netdev;
@@ -4620,6 +4633,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
+#ifdef CONFIG_PCI_IOV
+ u32 v, port_vec;
+#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4715,12 +4731,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->name = pci_name(pdev);
adapter->mbox = func;
adapter->pf = func;
- adapter->msg_enable = dflt_msg_enable;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
+ spin_lock_init(&adapter->mbox_lock);
+
+ INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -4803,6 +4822,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* MTU range: 81 - 9600 */
+ netdev->min_mtu = 81;
+ netdev->max_mtu = MAX_MTU;
+
netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
netdev->dcbnl_ops = &cxgb4_dcb_ops;
@@ -4884,8 +4907,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"continuing\n");
adapter->params.offload = 0;
} else {
- adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
- CXGB4_MAX_LINK_HANDLE);
+ adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
if (!adapter->tc_u32)
dev_warn(&pdev->dev,
"could not offload tc u32, continuing\n");
@@ -4971,17 +4993,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sriov:
#ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
- dev_warn(&pdev->dev,
- "Enabling SR-IOV VFs using the num_vf module "
- "parameter is deprecated - please use the pci sysfs "
- "interface instead.\n");
- if (pci_enable_sriov(pdev, num_vf[func]) == 0)
- dev_info(&pdev->dev,
- "instantiated %u virtual functions\n",
- num_vf[func]);
- }
-
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
@@ -5003,6 +5014,19 @@ sriov:
err = -ENOMEM;
goto free_adapter;
}
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
+ &v, &port_vec);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "Could not fetch port params\n");
+ goto free_adapter;
+ }
+
+ adapter->params.nports = hweight32(port_vec);
pci_set_drvdata(pdev, adapter);
return 0;
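
The new cxgb_get_phys_port_id() callback above derives the id as adap_idx * 10 + port_id and copies it byte-wise into the netdev_phys_item_id buffer. A minimal userspace sketch of that encoding, with an assumed struct standing in for the kernel's netdev_phys_item_id:

/* Userspace model of the cxgb_get_phys_port_id() encoding. */
#include <stdio.h>
#include <string.h>

struct phys_item_id { unsigned char id[32]; size_t id_len; };

static void get_phys_port_id(unsigned int adap_idx, unsigned int port_id,
			     struct phys_item_id *ppid)
{
	unsigned int phy_port_id = adap_idx * 10 + port_id;

	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
}

int main(void)
{
	struct phys_item_id a, b;

	get_phys_port_id(2, 1, &a);	/* adapter 2, port 1 -> 21 */
	get_phys_port_id(2, 3, &b);	/* adapter 2, port 3 -> 23 */
	printf("ids differ across ports: %d\n",
	       memcmp(a.id, b.id, a.id_len) != 0);
	return 0;
}
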
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 49d2debb334e..a1b19422b339 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -113,7 +113,7 @@ static int fill_action_fields(struct adapter *adap,
}
/* Re-direct to specified port in hardware. */
- if (is_tcf_mirred_redirect(a)) {
+ if (is_tcf_mirred_egress_redirect(a)) {
struct net_device *n_dev;
unsigned int i, index;
bool found = false;
@@ -437,28 +437,26 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
t4_free_mem(adap->tc_u32);
}
-struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
- unsigned int size)
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
+ unsigned int max_tids = adap->tids.nftids;
struct cxgb4_tc_u32_table *t;
unsigned int i;
- if (!size)
+ if (!max_tids)
return NULL;
t = t4_alloc_mem(sizeof(*t) +
- (size * sizeof(struct cxgb4_link)));
+ (max_tids * sizeof(struct cxgb4_link)));
if (!t)
return NULL;
- t->size = size;
+ t->size = max_tids;
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
unsigned int bmap_size;
- unsigned int max_tids;
- max_tids = adap->tids.nftids;
bmap_size = BITS_TO_LONGS(max_tids);
link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
if (!link->tid_map)
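
cxgb4_init_tc_u32() now sizes both the link table and each link's tid bitmap from adap->tids.nftids. A small userspace sketch of the BITS_TO_LONGS() rounding used for link->tid_map; the tid count is an assumed example value:

/* Userspace sketch of the per-link bitmap sizing above. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int max_tids = 496;	/* e.g. adap->tids.nftids */

	printf("%u tids -> %zu longs (%zu bytes) per link bitmap\n",
	       max_tids, BITS_TO_LONGS(max_tids),
	       BITS_TO_LONGS(max_tids) * sizeof(long));
	return 0;
}
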
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
index 6bdc885eff22..021261a41c13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
@@ -37,8 +37,6 @@
#include <net/pkt_cls.h>
-#define CXGB4_MAX_LINK_HANDLE 32
-
static inline bool can_tc_u32_offload(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
@@ -52,6 +50,5 @@ int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
struct tc_cls_u32_offload *cls);
void cxgb4_cleanup_tc_u32(struct adapter *adapter);
-struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
- unsigned int size);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap);
#endif /* __CXGB4_TC_U32_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 2471ff465d5c..d0868c2320da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
if (!q)
return;
- if (q->handler) {
- cxgb_busy_poll_init_lock(q);
+ if (q->handler)
napi_enable(&q->napi);
- }
+
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
SEINTARM_V(q->intr_params) |
@@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
- if (q && q->handler) {
+ if (q && q->handler)
napi_disable(&q->napi);
- local_bh_disable();
- while (!cxgb_poll_lock_napi(q))
- mdelay(1);
- local_bh_enable();
- }
}
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -447,6 +441,106 @@ static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
+static void
+free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
+{
+ int nq = txq_info->ntxq;
+ int i;
+
+ for (i = 0; i < nq; i++) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ if (txq && txq->q.desc) {
+ tasklet_kill(&txq->qresume_tsk);
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+ txq->q.cntxt_id);
+ free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+ kfree(txq->q.sdesc);
+ __skb_queue_purge(&txq->sendq);
+ free_txq(adap, &txq->q);
+ }
+ }
+}
+
+static int
+alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
+ unsigned int uld_type)
+{
+ struct sge *s = &adap->sge;
+ int nq = txq_info->ntxq;
+ int i, j, err;
+
+ j = nq / adap->params.nports;
+ for (i = 0; i < nq; i++) {
+ struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+ txq->q.size = 1024;
+ err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
+ s->fw_evtq.cntxt_id, uld_type);
+ if (err)
+ goto freeout;
+ }
+ return 0;
+freeout:
+ free_sge_txq_uld(adap, txq_info);
+ return err;
+}
+
+static void
+release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
+{
+ struct sge_uld_txq_info *txq_info = NULL;
+ int tx_uld_type = TX_ULD(uld_type);
+
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+
+ if (txq_info && atomic_dec_and_test(&txq_info->users)) {
+ free_sge_txq_uld(adap, txq_info);
+ kfree(txq_info->uldtxq);
+ kfree(txq_info);
+ adap->sge.uld_txq_info[tx_uld_type] = NULL;
+ }
+}
+
+static int
+setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
+ const struct cxgb4_uld_info *uld_info)
+{
+ struct sge_uld_txq_info *txq_info = NULL;
+ int tx_uld_type, i;
+
+ tx_uld_type = TX_ULD(uld_type);
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+
+ if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
+ (atomic_inc_return(&txq_info->users) > 1))
+ return 0;
+
+ txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
+ if (!txq_info)
+ return -ENOMEM;
+
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+
+ txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
+ GFP_KERNEL);
+ if (!txq_info->uldtxq) {
+ kfree(txq_info);
+ return -ENOMEM;
+ }
+
+ if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
+ kfree(txq_info->uldtxq);
+ kfree(txq_info);
+ return -ENOMEM;
+ }
+
+ atomic_inc(&txq_info->users);
+ adap->sge.uld_txq_info[tx_uld_type] = txq_info;
+ return 0;
+}
+
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
@@ -472,7 +566,15 @@ int t4_uld_mem_alloc(struct adapter *adap)
if (!s->uld_rxq_info)
goto err_uld;
+ s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
+ sizeof(struct sge_uld_txq_info *),
+ GFP_KERNEL);
+ if (!s->uld_txq_info)
+ goto err_uld_rx;
return 0;
+
+err_uld_rx:
+ kfree(s->uld_rxq_info);
err_uld:
kfree(adap->uld);
return -ENOMEM;
@@ -482,13 +584,13 @@ void t4_uld_mem_free(struct adapter *adap)
{
struct sge *s = &adap->sge;
+ kfree(s->uld_txq_info);
kfree(s->uld_rxq_info);
kfree(adap->uld);
}
void t4_uld_clean_up(struct adapter *adap)
{
- struct sge_uld_rxq_info *rxq_info;
unsigned int i;
if (!adap->uld)
@@ -496,7 +598,6 @@ void t4_uld_clean_up(struct adapter *adap)
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)
continue;
- rxq_info = adap->sge.uld_rxq_info[i];
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, i);
if (adap->flags & USING_MSIX)
@@ -616,6 +717,9 @@ int cxgb4_register_uld(enum cxgb4_uld type,
ret = -EBUSY;
goto free_irq;
}
+ ret = setup_sge_txq_uld(adap, type, p);
+ if (ret)
+ goto free_irq;
adap->uld[type] = *p;
uld_attach(adap, type);
adap_idx++;
@@ -644,6 +748,7 @@ out:
break;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
+ release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
@@ -679,6 +784,7 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
+ release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
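
setup_sge_txq_uld() and release_sge_txq_uld() above share one CXGB4_TX_OFLD queue set among all non-crypto ULDs through the atomic users count: the first registrant allocates, later registrants just bump the count, and the last unregister frees. A minimal single-threaded userspace model of that refcounting, with locking and error paths elided:

/* Userspace model of the shared ULD Tx queue-set refcounting. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct txq_info { atomic_int users; };

static struct txq_info *shared;	/* models sge.uld_txq_info[CXGB4_TX_OFLD] */

static int setup_txq(void)
{
	/* Already allocated: just become another user. */
	if (shared && atomic_fetch_add(&shared->users, 1) >= 1)
		return 0;

	shared = calloc(1, sizeof(*shared));
	if (!shared)
		return -1;
	atomic_store(&shared->users, 1);	/* first user */
	return 0;
}

static void release_txq(void)
{
	if (shared && atomic_fetch_sub(&shared->users, 1) == 1) {
		free(shared);			/* last user frees */
		shared = NULL;
	}
}

int main(void)
{
	setup_txq();	/* e.g. iSCSI ULD registers */
	setup_txq();	/* e.g. RDMA ULD registers, shares the set */
	release_txq();
	release_txq();	/* set is freed here */
	printf("shared set freed: %d\n", shared == NULL);
	return 0;
}
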
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 2996793b1aaa..4c856605fdfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -77,6 +77,8 @@ enum {
/* Special asynchronous notification message */
#define CXGB4_MSG_AN ((void *)1)
+#define TX_ULD(uld) (((uld) != CXGB4_ULD_CRYPTO) ? CXGB4_TX_OFLD :\
+ CXGB4_TX_CRYPTO)
struct serv_entry {
void *data;
@@ -223,6 +225,19 @@ enum cxgb4_uld {
CXGB4_ULD_MAX
};
+enum cxgb4_tx_uld {
+ CXGB4_TX_OFLD,
+ CXGB4_TX_CRYPTO,
+ CXGB4_TX_MAX
+};
+
+enum cxgb4_txq_type {
+ CXGB4_TXQ_ETH,
+ CXGB4_TXQ_ULD,
+ CXGB4_TXQ_CTRL,
+ CXGB4_TXQ_MAX
+};
+
enum cxgb4_state {
CXGB4_STATE_UP,
CXGB4_STATE_START_RECOVERY,
@@ -316,6 +331,7 @@ struct cxgb4_uld_info {
void *handle;
unsigned int nrxq;
unsigned int rxq_size;
+ unsigned int ntxq;
bool ciq;
bool lro;
void *(*add)(const struct cxgb4_lld_info *p);
@@ -333,6 +349,7 @@ struct cxgb4_uld_info {
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
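
TX_ULD() routes every ULD except the crypto ULD onto the shared CXGB4_TX_OFLD queue set. A minimal userspace check of that mapping; only an illustrative subset of enum cxgb4_uld is declared here:

/* Userspace check of the TX_ULD() mapping above. */
#include <stdio.h>

enum cxgb4_uld { CXGB4_ULD_RDMA, CXGB4_ULD_ISCSI, CXGB4_ULD_CRYPTO };
enum cxgb4_tx_uld { CXGB4_TX_OFLD, CXGB4_TX_CRYPTO, CXGB4_TX_MAX };

#define TX_ULD(uld) (((uld) != CXGB4_ULD_CRYPTO) ? CXGB4_TX_OFLD : \
		     CXGB4_TX_CRYPTO)

int main(void)
{
	printf("rdma -> %d, iscsi -> %d, crypto -> %d\n",
	       TX_ULD(CXGB4_ULD_RDMA), TX_ULD(CXGB4_ULD_ISCSI),
	       TX_ULD(CXGB4_ULD_CRYPTO));
	return 0;
}
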
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 60a26037a1c6..7c8c5b9a3c22 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -432,7 +432,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
else
lport = netdev2pinfo(physdev)->lport;
- if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(neigh->dev))
vlan = vlan_dev_vlan_id(neigh->dev);
else
vlan = VLAN_NONE;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index cbd68a8fe2e4..c9026352a842 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
struct ch_sched_params info;
struct ch_sched_params tp;
- memset(&info, 0, sizeof(info));
- memset(&tp, 0, sizeof(tp));
-
memcpy(&tp, p, sizeof(tp));
/* Don't try to match class parameter */
tp.u.params.class = SCHED_CLS_NONE;
@@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
if (e->state == SCHED_STATE_UNUSED)
continue;
- memset(&info, 0, sizeof(info));
memcpy(&info, &e->info, sizeof(info));
/* Don't try to match class parameter */
info.u.params.class = SCHED_CLS_NONE;
@@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (!e)
goto out;
- memset(&np, 0, sizeof(np));
memcpy(&np, p, sizeof(np));
np.u.params.class = e->idx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e19a0ca8e5dd..f05f0d400324 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -43,9 +43,7 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
-#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -377,8 +375,8 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
* Reclaims Tx descriptors from an SGE Tx queue and frees the associated
* Tx buffers. Called with the Tx queue lock held.
*/
-static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
- unsigned int n, bool unmap)
+void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap)
{
struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
@@ -1543,7 +1541,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
* inability to map packets. A periodic timer attempts to restart
* queues so marked.
*/
-static void txq_stop_maperr(struct sge_ofld_txq *q)
+static void txq_stop_maperr(struct sge_uld_txq *q)
{
q->mapping_err++;
q->q.stops++;
@@ -1559,7 +1557,7 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
* Stops an offload Tx queue that has become full and modifies the packet
* being written to request a wakeup.
*/
-static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
{
struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
@@ -1586,7 +1584,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
* boolean "service_ofldq_running" to make sure that only one instance
* is ever running at a time ...
*/
-static void service_ofldq(struct sge_ofld_txq *q)
+static void service_ofldq(struct sge_uld_txq *q)
{
u64 *pos, *before, *end;
int credits;
@@ -1706,7 +1704,7 @@ static void service_ofldq(struct sge_ofld_txq *q)
*
* Send an offload packet through an SGE offload queue.
*/
-static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
{
skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
spin_lock(&q->sendq.lock);
@@ -1735,7 +1733,7 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
*/
static void restart_ofldq(unsigned long data)
{
- struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+ struct sge_uld_txq *q = (struct sge_uld_txq *)data;
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
@@ -1767,8 +1765,11 @@ static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
return skb->queue_mapping & 1;
}
-static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
+ unsigned int tx_uld_type)
{
+ struct sge_uld_txq_info *txq_info;
+ struct sge_uld_txq *txq;
unsigned int idx = skb_txq(skb);
if (unlikely(is_ctrl_pkt(skb))) {
@@ -1777,7 +1778,15 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
}
- return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+
+ txq_info = adap->sge.uld_txq_info[tx_uld_type];
+ if (unlikely(!txq_info)) {
+ WARN_ON(true);
+ return NET_XMIT_DROP;
+ }
+
+ txq = &txq_info->uldtxq[idx];
+ return ofld_xmit(txq, skb);
}
/**
@@ -1794,7 +1803,7 @@ int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
int ret;
local_bh_disable();
- ret = ofld_send(adap, skb);
+ ret = uld_send(adap, skb, CXGB4_TX_OFLD);
local_bh_enable();
return ret;
}
@@ -1813,6 +1822,39 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL(cxgb4_ofld_send);
+/**
+ * t4_crypto_send - send crypto packet
+ * @adap: the adapter
+ * @skb: the packet
+ *
+ * Sends crypto packet. We use the packet queue_mapping to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-15 select the queue.
+ */
+static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * cxgb4_crypto_send - send crypto packet
+ * @dev: the net device
+ * @skb: the packet
+ *
+ * Sends crypto packet. This is an exported version of @t4_crypto_send,
+ * intended for ULDs.
+ */
+int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
+{
+ return t4_crypto_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_crypto_send);
+
static inline void copy_frags(struct sk_buff *skb,
const struct pkt_gl *gl, unsigned int offset)
{
@@ -1999,16 +2041,22 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
+ u16 err_vec;
struct port_info *pi;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ /* Compressed error vector is enabled for T6 only */
+ if (q->adap->params.tp.rx_pkt_encap)
+ err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
+ else
+ err_vec = be16_to_cpu(pkt->err_vec);
+
+ csum_ok = pkt->csum_calc && !err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP_F)) &&
- !(cxgb_poll_busy_polling(q)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
return 0;
@@ -2053,7 +2101,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
(pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
- if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
+ if (q->adap->params.tp.rx_pkt_encap)
+ csum_ok = err_vec &
+ T6_COMPR_RXERR_SUM_F;
+ else
+ csum_ok = err_vec & RXERR_CSUM_F;
+ if (!csum_ok)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
@@ -2234,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget)
return budget - budget_left;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-int cxgb_busy_poll(struct napi_struct *napi)
-{
- struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
- unsigned int params, work_done;
- u32 val;
-
- if (!cxgb_poll_lock_poll(q))
- return LL_FLUSH_BUSY;
-
- work_done = process_responses(q, 4);
- params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
- q->next_intr_params = params;
- val = CIDXINC_V(work_done) | SEINTARM_V(params);
-
- /* If we don't have access to the new User GTS (T5+), use the old
- * doorbell mechanism; otherwise use the new BAR2 mechanism.
- */
- if (unlikely(!q->bar2_addr))
- t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
- val | INGRESSQID_V((u32)q->cntxt_id));
- else {
- writel(val | INGRESSQID_V(q->bar2_qid),
- q->bar2_addr + SGE_UDB_GTS);
- wmb();
- }
-
- cxgb_poll_unlock_poll(q);
- return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* napi_rx_handler - the NAPI handler for Rx processing
* @napi: the napi instance
@@ -2284,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int work_done;
u32 val;
- if (!cxgb_poll_lock_napi(q))
- return budget;
-
work_done = process_responses(q, budget);
if (likely(work_done < budget)) {
int timer_index;
@@ -2326,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
q->bar2_addr + SGE_UDB_GTS);
wmb();
}
- cxgb_poll_unlock_napi(q);
return work_done;
}
@@ -2479,7 +2496,7 @@ static void sge_tx_timer_cb(unsigned long data)
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
unsigned long id = __ffs(m) + i * BITS_PER_LONG;
- struct sge_ofld_txq *txq = s->egr_map[id];
+ struct sge_uld_txq *txq = s->egr_map[id];
clear_bit(id, s->txq_maperr);
tasklet_schedule(&txq->qresume_tsk);
@@ -2799,6 +2816,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
+ txq->q.q_type = CXGB4_TXQ_ETH;
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
@@ -2852,6 +2870,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return ret;
}
+ txq->q.q_type = CXGB4_TXQ_CTRL;
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
@@ -2872,13 +2891,15 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
- struct net_device *dev, unsigned int iqid)
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
{
int ret, nentries;
struct fw_eq_ofld_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
+ int cmd = FW_EQ_OFLD_CMD;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -2891,7 +2912,9 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+ if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+ cmd = FW_EQ_CTRL_CMD;
+ c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
FW_EQ_OFLD_CMD_VFN_V(0));
@@ -2919,6 +2942,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return ret;
}
+ txq->q.q_type = CXGB4_TXQ_ULD;
init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
@@ -2928,7 +2952,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return 0;
}
-static void free_txq(struct adapter *adap, struct sge_txq *q)
+void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
@@ -3025,21 +3049,6 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- /* clean up offload Tx queues */
- for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
- struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
-
- if (q->q.desc) {
- tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
- q->q.cntxt_id);
- free_tx_desc(adap, &q->q, q->q.in_use, false);
- kfree(q->q.sdesc);
- __skb_queue_purge(&q->sendq);
- free_txq(adap, &q->q);
- }
- }
-
/* clean up control Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
@@ -3092,12 +3101,34 @@ void t4_sge_stop(struct adapter *adap)
if (s->tx_timer.function)
del_timer_sync(&s->tx_timer);
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
- struct sge_ofld_txq *q = &s->ofldtxq[i];
+ if (is_offload(adap)) {
+ struct sge_uld_txq_info *txq_info;
+
+ txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+ if (txq_info) {
+ struct sge_uld_txq *txq = txq_info->uldtxq;
+
+ for_each_ofldtxq(&adap->sge, i) {
+ if (txq->q.desc)
+ tasklet_kill(&txq->qresume_tsk);
+ }
+ }
+ }
+
+ if (is_pci_uld(adap)) {
+ struct sge_uld_txq_info *txq_info;
- if (q->q.desc)
- tasklet_kill(&q->qresume_tsk);
+ txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ if (txq_info) {
+ struct sge_uld_txq *txq = txq_info->uldtxq;
+
+ for_each_ofldtxq(&adap->sge, i) {
+ if (txq->q.desc)
+ tasklet_kill(&txq->qresume_tsk);
+ }
+ }
}
+
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
struct sge_ctrl_txq *cq = &s->ctrlq[i];
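
uld_send() relies on the queue_mapping convention documented at t4_ofld_send(): bit 0 marks a control packet, bits 1-15 carry the queue index. A tiny userspace sketch of that encoding, mirroring skb_txq()/is_ctrl_pkt() in spirit; the helper names here are illustrative:

/* Userspace sketch of the skb->queue_mapping convention. */
#include <stdio.h>

static unsigned int set_wr_txq(unsigned int idx, int ctrl)
{
	return (idx << 1) | (ctrl & 1);	/* queue index in bits 1..15 */
}

static unsigned int skb_txq(unsigned int qm)     { return qm >> 1; }
static unsigned int is_ctrl_pkt(unsigned int qm) { return qm & 1; }

int main(void)
{
	unsigned int qm = set_wr_txq(5, 0);	/* data packet, queue 5 */

	printf("queue %u, ctrl %u\n", skb_txq(qm), is_ctrl_pkt(qm));
	return 0;
}
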
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8139514d32c..87000cd39737 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
+ struct mbox_list entry;
u16 access = 0;
u16 execute = 0;
u32 v;
@@ -311,11 +312,62 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
timeout = -timeout;
}
+ /* Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ spin_lock(&adap->mbox_lock);
+ list_add_tail(&entry.list, &adap->mlist.list);
+ spin_unlock(&adap->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /* If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+	 * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
+ t4_record_mbox(adap, cmd, size, access, ret);
+ return ret;
+ }
+
+ /* If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (list_first_entry(&adap->mlist.list, struct mbox_list,
+ list) == &entry)
+ break;
+
+ /* Delay for a bit before checking again ... */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ mdelay(ms);
+ }
+ }
+
+ /* Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
-
if (v != MBOX_OWNER_DRV) {
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
return ret;
@@ -366,6 +418,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
MBOX_LEN, access, execute);
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -375,6 +430,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
+ spin_lock(&adap->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
+ t4_fatal_err(adap);
return ret;
}
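
The list-based serialization added above gives mailbox callers FIFO access: each caller links a stack-allocated mbox_list entry onto adap->mlist, may start the mailbox protocol only once its entry reaches the head, and unlinks it again on every exit path. A minimal userspace model of that queue discipline; the driver's mbox_lock spinlock is elided here:

/* Userspace model of the FIFO mailbox-access list. */
#include <stdio.h>

struct mbox_entry { struct mbox_entry *next; const char *owner; };

static struct mbox_entry *head, **tail = &head;

static void mbox_enqueue(struct mbox_entry *e)
{
	e->next = NULL;
	*tail = e;
	tail = &e->next;
}

static int mbox_at_head(const struct mbox_entry *e) { return head == e; }

static void mbox_dequeue(struct mbox_entry *e)
{
	struct mbox_entry **pp;

	for (pp = &head; *pp; pp = &(*pp)->next)
		if (*pp == e) {
			*pp = e->next;
			if (!*pp)	/* removed the last entry */
				tail = pp;
			break;
		}
}

int main(void)
{
	struct mbox_entry a = { .owner = "cmd A" }, b = { .owner = "cmd B" };

	mbox_enqueue(&a);
	mbox_enqueue(&b);
	printf("A at head: %d, B at head: %d\n", mbox_at_head(&a),
	       mbox_at_head(&b));
	mbox_dequeue(&a);	/* cmd A finishes its mailbox access */
	printf("now B at head: %d\n", mbox_at_head(&b));
	return 0;
}
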
@@ -5382,22 +5441,28 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
static const char *const port_type_description[] = {
- "R XFI",
- "R XAUI",
- "T SGMII",
- "T XFI",
- "T XAUI",
+ "Fiber_XFI",
+ "Fiber_XAUI",
+ "BT_SGMII",
+ "BT_XFI",
+ "BT_XAUI",
"KX4",
"CX4",
"KX",
"KR",
- "R SFP+",
- "KR/KX",
- "KR/KX/KX4",
- "R QSFP_10G",
- "R QSA",
- "R QSFP",
- "R BP40_BA",
+ "SFP",
+ "BP_AP",
+ "BP4_AP",
+ "QSFP_10G",
+ "QSA",
+ "QSFP",
+ "BP40_BA",
+ "KR4_100G",
+ "CR4_QSFP",
+ "CR_QSFP",
+ "CR2_QSFP",
+ "SFP28",
+ "KR_SFP28",
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -5438,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -5469,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & COUNTPAUSESTATTX_F) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & COUNTPAUSEMCTX_F)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
p->rx_octets = GET_STAT(RX_PORT_BYTES);
p->rx_frames = GET_STAT(RX_PORT_FRAMES);
p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
@@ -5497,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & COUNTPAUSESTATRX_F) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & COUNTPAUSEMCRX_F)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -7477,6 +7560,39 @@ int t4_prep_adapter(struct adapter *adapter)
}
/**
+ * t4_shutdown_adapter - shut down adapter, host & wire
+ * @adapter: the adapter
+ *
+ * Perform an emergency shutdown of the adapter and stop it from
+ * continuing any further communication on the ports or DMA to the
+ * host. This is typically used when the adapter and/or firmware
+ * have crashed and we want to prevent any further accidental
+ * communication with the rest of the world. This will also force
+ * the port Link Status to go down -- if register writes work --
+ * which should help our peers figure out that we're down.
+ */
+int t4_shutdown_adapter(struct adapter *adapter)
+{
+ int port;
+
+ t4_intr_disable(adapter);
+ t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
+ for_each_port(adapter, port) {
+ u32 a_port_cfg = PORT_REG(port,
+ is_t4(adapter->params.chip)
+ ? XGMAC_PORT_CFG_A
+ : MAC_PORT_CFG_A);
+
+ t4_write_reg(adapter, a_port_cfg,
+ t4_read_reg(adapter, a_port_cfg)
+ & ~SIGNAL_DET_V(1));
+ }
+ t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
+
+ return 0;
+}
+
+/**
* t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
@@ -7686,6 +7802,13 @@ int t4_init_tp_params(struct adapter *adap)
&adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A);
}
+ /* For T6, cache the adapter's compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, TP_OUT_CONFIG_A);
+ adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
+ }
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fba3b2ad382d..8098c93cd16e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -76,6 +76,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_RX_ISCSI_CMP = 0x45,
CPL_TRACE_PKT_T5 = 0x48,
CPL_RX_ISCSI_DDP = 0x49,
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
__u8 status;
};
+struct cpl_rx_iscsi_cmp {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
struct cpl_tx_data_iso {
__be32 op_to_scsi;
__u8 reserved1;
@@ -1162,6 +1175,21 @@ struct cpl_rx_pkt {
#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S)
#define RXERR_CSUM_F RXERR_CSUM_V(1U)
+#define T6_COMPR_RXERR_LEN_S 1
+#define T6_COMPR_RXERR_LEN_V(x) ((x) << T6_COMPR_RXERR_LEN_S)
+#define T6_COMPR_RXERR_LEN_F T6_COMPR_RXERR_LEN_V(1U)
+
+#define T6_COMPR_RXERR_VEC_S 0
+#define T6_COMPR_RXERR_VEC_M 0x3F
+#define T6_COMPR_RXERR_VEC_V(x) ((x) << T6_COMPR_RXERR_LEN_S)
+#define T6_COMPR_RXERR_VEC_G(x) \
+ (((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M)
+
+/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */
+#define T6_COMPR_RXERR_SUM_S 4
+#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S)
+#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
@@ -1336,6 +1364,10 @@ struct cpl_tx_data {
#define TX_FORCE_S 13
#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+#define T6_TX_FORCE_S 20
+#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
+#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
+
enum {
ULP_TX_MEM_READ = 2,
ULP_TX_MEM_WRITE = 3,
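
On T6 with rx_pkt_encap set, t4_ethrx_handler() extracts the compressed error vector with T6_COMPR_RXERR_VEC_G() and tests the summary checksum bit instead of the full T5-style vector. A quick userspace check of the bit layout defined above; the sample err_vec value is assumed:

/* Userspace check of the T6 compressed error-vector layout. */
#include <stdio.h>

#define T6_COMPR_RXERR_VEC_S 0
#define T6_COMPR_RXERR_VEC_M 0x3F
#define T6_COMPR_RXERR_VEC_G(x) \
	(((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M)

#define T6_COMPR_RXERR_SUM_F (1U << 4)	/* OR of RX_ERROR_CSUM, CSIP */

int main(void)
{
	unsigned int err_vec = 0x10;	/* e.g. only the csum summary bit */

	printf("vec %#x, csum error %d\n", T6_COMPR_RXERR_VEC_G(err_vec),
	       !!(err_vec & T6_COMPR_RXERR_SUM_F));
	return 0;
}
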
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index ecf3ccc257bc..a323185507ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -169,6 +169,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/
+ CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
/* T6 adapters:
*/
@@ -185,6 +188,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6011),
CH_PCI_ID_TABLE_FENTRY(0x6014),
CH_PCI_ID_TABLE_FENTRY(0x6015),
+ CH_PCI_ID_TABLE_FENTRY(0x6080),
+ CH_PCI_ID_TABLE_FENTRY(0x6081),
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 9fea255c7e87..3348d33c36fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -855,6 +855,14 @@
#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
+#define DBG_GPIO_EN_A 0x6010
+#define XGMAC_PORT_CFG_A 0x1000
+#define MAC_PORT_CFG_A 0x800
+
+#define SIGNAL_DET_S 14
+#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S)
+#define SIGNAL_DET_F SIGNAL_DET_V(1U)
+
#define MC_ECC_STATUS_A 0x751c
#define MC_P_ECC_STATUS_A 0x4131c
@@ -1276,6 +1284,10 @@
#define DBGLARPTR_M 0x7fU
#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
+#define CRXPKTENC_S 3
+#define CRXPKTENC_V(x) ((x) << CRXPKTENC_S)
+#define CRXPKTENC_F CRXPKTENC_V(1U)
+
#define TP_DBG_LA_DATAL_A 0x7ed8
#define TP_DBG_LA_CONFIG_A 0x7ed4
#define TP_OUT_CONFIG_A 0x7d04
@@ -1794,12 +1806,29 @@
#define MPS_CMN_CTL_A 0x9000
+#define COUNTPAUSEMCRX_S 5
+#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S)
+#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U)
+
+#define COUNTPAUSESTATRX_S 4
+#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S)
+#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U)
+
+#define COUNTPAUSEMCTX_S 3
+#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S)
+#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U)
+
+#define COUNTPAUSESTATTX_S 2
+#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S)
+#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U)
+
#define NUMPORTS_S 0
#define NUMPORTS_M 0x3U
#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
#define MPS_INT_CAUSE_A 0x9008
#define MPS_TX_INT_CAUSE_A 0x9408
+#define MPS_STAT_CTL_A 0x9600
#define FRMERR_S 15
#define FRMERR_V(x) ((x) << FRMERR_S)
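
t4_get_port_stats() uses the COUNTPAUSE* fields above to detect whether the MAC folded pause frames into the frame/octet counters and, on T5 and later, backs them out; each pause frame counts as 64 octets. A small userspace sketch of that adjustment with assumed counter values:

/* Userspace sketch of the pause-frame stat correction. */
#include <stdio.h>

#define COUNTPAUSESTATTX_F (1U << 2)

int main(void)
{
	unsigned int stat_ctl = COUNTPAUSESTATTX_F; /* assumed MPS_STAT_CTL */
	unsigned long long tx_frames = 1000, tx_octets = 96000, tx_pause = 10;

	if (stat_ctl & COUNTPAUSESTATTX_F) {
		tx_frames -= tx_pause;
		tx_octets -= tx_pause * 64;	/* 64 octets per pause frame */
	}
	printf("tx_frames %llu, tx_octets %llu\n", tx_frames, tx_octets);
	return 0;
}
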
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 8d9e4b7a8e84..ccc05f874419 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -3385,6 +3385,14 @@ struct fw_crypto_lookaside_wr {
#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \
(((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M)
+#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_S 15
+#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_M 0xff
+#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(x) \
+ ((x) << FW_CRYPTO_LOOKASIDE_WR_FQIDX_S)
+#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_G(x) \
+ (((x) >> FW_CRYPTO_LOOKASIDE_WR_FQIDX_S) & \
+ FW_CRYPTO_LOOKASIDE_WR_FQIDX_M)
+
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3
#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \
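
A quick userspace check of the new FQIDX helpers above: pack the 8-bit field at bit 15 with the _V() macro and recover it with _G(). The sample value is arbitrary:

/* Userspace check of the FQIDX pack/extract macros. */
#include <stdio.h>

#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_S 15
#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_M 0xff
#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(x) \
	((x) << FW_CRYPTO_LOOKASIDE_WR_FQIDX_S)
#define FW_CRYPTO_LOOKASIDE_WR_FQIDX_G(x) \
	(((x) >> FW_CRYPTO_LOOKASIDE_WR_FQIDX_S) & \
	 FW_CRYPTO_LOOKASIDE_WR_FQIDX_M)

int main(void)
{
	unsigned int wr = FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(7u);

	printf("fqidx %u\n", FW_CRYPTO_LOOKASIDE_WR_FQIDX_G(wr));
	return 0;
}
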
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 2accab386323..fa376444e57c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0F
-#define T4FW_VERSION_MICRO 0x25
+#define T4FW_VERSION_MINOR 0x10
+#define T4FW_VERSION_MICRO 0x21
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0F
-#define T5FW_VERSION_MICRO 0x25
+#define T5FW_VERSION_MINOR 0x10
+#define T5FW_VERSION_MICRO 0x21
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0F
-#define T6FW_VERSION_MICRO 0x25
+#define T6FW_VERSION_MINOR 0x10
+#define T6FW_VERSION_MICRO 0x21
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00