author    David S. Miller  2016-11-01 10:58:10 -0400
committer David S. Miller  2016-11-01 10:58:10 -0400
commit    c5870942fc20a879ba47e23d6a18a2d0b7b02c7c (patch)
tree      b008512f6679b52e1c48854024029658db246714 /drivers
parent    bd68a2a854ad5a85f0c8d0a9c8048ca3f6391efb (diff)
parent    3aa7b74dbeedfb32406fec70cfd76d797209e8c9 (diff)
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-10-31

This series contains updates to i40e and i40evf.

Colin Ian King fixes a minor issue with a dev_err message where a newline character was missing from the end of the message.

Jake provides most of the changes in the series, starting with dropping the is_vf and is_netdev fields in the i40e_mac_filter structure, since they are not needed (along with the checks that used these fields). The reason is that we use separate VSIs for SR-IOV VFs and for netdev VSIs, so a single VSI should only have one type of filter. He then simplifies our .set_rx_mode handler by using the kernel-provided __dev_uc_sync and __dev_mc_sync functions for notification of filter additions and deletions. He refactored i40e_put_mac_in_vlan() to resolve an issue where the function was arbitrarily modifying all filters to have the same VLAN, which is incorrect because it could modify active filters without putting them into the new state. He also refactored the delete-filter logic so that the functionality can be re-used, where appropriate, without having to search for the filter twice, reduced the latency of operations that search for specific MAC filters by using a static hash table instead of a list, and reduced code duplication in the adminq commands that add and delete filters. Finally, he fixed an issue where the TSYNVALID bit was not being checked as the true indicator of whether a packet has an associated timestamp, and cleaned up a second msleep() call by simply re-ordering the code so that the extra wait is no longer needed.

Alan provides an additional fix to the work Jake has been doing, resolving a bug where adding at least one VLAN and then removing all VLANs leaves the MAC filters for the VSI with an incorrect value for the VID that indicates the MAC filter's VLAN status.

Alex adds a common method for finding a VSI by type and also cleans up the logic for coalescing RS bits, which was convoluted and larger than it needed to be.

Mitch fixes an issue with the failure to add filters when the VF driver is reloaded, by simply setting the number of filters to 0 when freeing VF resources.

Maciej implements an I40E_NVMUPD_STATE_ERROR state for NVM update, so that the driver has the ability to return NVM image write failures.

Filip removes unreachable code, found using static analysis, where "if" statements could never be true or false, and cleans up the unnecessary if statements.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
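The central data-structure change in the series is keying each VSI's MAC filters by a 64-bit hash of the MAC address instead of walking a flat list. A minimal, self-contained sketch of that pattern is shown below; the demo_* names are illustrative only, and the driver's real i40e_mac_filter additionally tracks per-filter sync state.

#include <linux/etherdevice.h>
#include <linux/hashtable.h>
#include <linux/slab.h>

/* 2^8 buckets, the same order as the driver's mac_filter_hash */
static DEFINE_HASHTABLE(demo_filter_hash, 8);

struct demo_filter {
        struct hlist_node hlist;
        u8 macaddr[ETH_ALEN];
        s16 vlan;
};

/* Copy the 6-byte address into a u64 so it can serve as the hash key */
static u64 demo_addr_to_hkey(const u8 *macaddr)
{
        u64 key = 0;

        ether_addr_copy((u8 *)&key, macaddr);
        return key;
}

static struct demo_filter *demo_find_filter(const u8 *macaddr, s16 vlan)
{
        struct demo_filter *f;

        /* Only the bucket selected by the key is walked, not the whole table */
        hash_for_each_possible(demo_filter_hash, f, hlist,
                               demo_addr_to_hkey(macaddr)) {
                if (ether_addr_equal(macaddr, f->macaddr) && vlan == f->vlan)
                        return f;
        }
        return NULL;
}

static struct demo_filter *demo_add_filter(const u8 *macaddr, s16 vlan)
{
        struct demo_filter *f = demo_find_filter(macaddr, vlan);

        if (f)
                return f;
        f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return NULL;
        ether_addr_copy(f->macaddr, macaddr);
        f->vlan = vlan;
        hash_add(demo_filter_hash, &f->hlist, demo_addr_to_hkey(macaddr));
        return f;
}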
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |   74
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c       |    4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c       |    6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c      |   92
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c      |   30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c         |   12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         | 1015
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c          |   16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c          |  137
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c         |  119
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h         |    3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h         |    1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  |   89
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c     |    4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c       |  105
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h       |    1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h       |    1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c     |    8
18 files changed, 867 insertions, 850 deletions
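The .set_rx_mode rework mentioned above leans on the kernel's address-sync helpers, which invoke the callbacks only for unicast/multicast addresses added or removed since the previous sync. A rough sketch of that handler shape follows; the demo_* names are illustrative, and the real driver additionally handles VLAN mode and holds its filter hash lock around the sync calls.

#include <linux/netdevice.h>

/* Called once per newly added address; return 0 on success */
static int demo_addr_sync(struct net_device *netdev, const u8 *addr)
{
        /* program 'addr' into the device's filter table here */
        return 0;
}

/* Called once per address that disappeared from the netdev lists */
static int demo_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        /* remove 'addr' from the device's filter table here */
        return 0;
}

static void demo_set_rx_mode(struct net_device *netdev)
{
        /* Unchanged addresses are skipped; only deltas reach the callbacks.
         * The __dev_*_sync variants expect the caller (the ndo_set_rx_mode
         * path) to already hold netdev->addr_list_lock.
         */
        __dev_uc_sync(netdev, demo_addr_sync, demo_addr_unsync);
        __dev_mc_sync(netdev, demo_addr_sync, demo_addr_unsync);
}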
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5a6f8518b4e1..29c23183a0e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -39,6 +39,7 @@
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -428,11 +429,13 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
- unsigned long last_rx_ptp_check;
- spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+ u32 latch_event_flags;
+ spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+ unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */
@@ -445,6 +448,20 @@ struct i40e_pf {
u16 phy_led_val;
};
+/**
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * @macaddr: the MAC Address as the base key
+ *
+ * Simply copies the address and returns it as a u64 for hashing
+ **/
+static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
+{
+ u64 key = 0;
+
+ ether_addr_copy((u8 *)&key, macaddr);
+ return key;
+}
+
enum i40e_filter_state {
I40E_FILTER_INVALID = 0, /* Invalid state */
I40E_FILTER_NEW, /* New, not sent to FW yet */
@@ -454,13 +471,10 @@ enum i40e_filter_state {
/* There is no 'removed' state; the filter struct is freed */
};
struct i40e_mac_filter {
- struct list_head list;
+ struct hlist_node hlist;
u8 macaddr[ETH_ALEN];
#define I40E_VLAN_ANY -1
s16 vlan;
- u8 counter; /* number of instances of this filter */
- bool is_vf; /* filter belongs to a VF */
- bool is_netdev; /* filter belongs to a netdev */
enum i40e_filter_state state;
};
@@ -501,9 +515,11 @@ struct i40e_vsi {
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
- /* Per VSI lock to protect elements/list (MAC filter) */
- spinlock_t mac_filter_list_lock;
- struct list_head mac_filter_list;
+ /* Per VSI lock to protect elements/hash (MAC filter) */
+ spinlock_t mac_filter_hash_lock;
+ /* Fixed size hash table with 2^8 buckets for MAC filters */
+ DECLARE_HASHTABLE(mac_filter_hash, 8);
+ bool has_vlan_filter;
/* VSI stats */
struct rtnl_link_stats64 net_stats;
@@ -707,6 +723,25 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
+/**
+ * i40e_find_vsi_by_type - Find and return the first VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ **/
+static inline struct i40e_vsi *
+i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct i40e_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
+}
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
@@ -723,10 +758,8 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
-void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev);
+ const u8 *macaddr, s16 vlan);
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -740,7 +773,8 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
-int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_vsi_start_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
@@ -816,14 +850,12 @@ int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
-int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
+ const u8 *macaddr);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
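Several hunks in i40e_main.c below share one more idiom: while mac_filter_hash_lock is held, entries marked for removal are unhooked from the hash onto a temporary hlist, and the slower work (firmware admin-queue calls, kfree) happens after the lock is dropped. A simplified sketch of that move-then-process pattern, with illustrative demo_* names:

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_HASHTABLE(demo_hash, 8);
static DEFINE_SPINLOCK(demo_lock);

struct demo_entry {
        struct hlist_node hlist;
        bool remove;
};

static void demo_flush_removed(void)
{
        struct demo_entry *e;
        struct hlist_node *h;
        HLIST_HEAD(tmp_del_list);
        int bkt;

        /* Collect doomed entries while holding the lock ... */
        spin_lock_bh(&demo_lock);
        hash_for_each_safe(demo_hash, bkt, h, e, hlist) {
                if (!e->remove)
                        continue;
                hash_del(&e->hlist);
                hlist_add_head(&e->hlist, &tmp_del_list);
        }
        spin_unlock_bh(&demo_lock);

        /* ... then do the potentially slow teardown outside of it */
        hlist_for_each_entry_safe(e, h, &tmp_del_list, hlist) {
                hlist_del(&e->hlist);
                kfree(e);
        }
}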
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 738b42a44f20..56fb27298936 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -964,11 +964,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index a47594603d69..98791ba57211 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3313,8 +3313,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0354632fe2f8..b8a03a05c4e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -134,7 +134,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct rtnl_link_stats64 *nstat;
struct i40e_mac_filter *f;
struct i40e_vsi *vsi;
- int i;
+ int i, bkt;
vsi = i40e_dbg_find_vsi(pf, seid);
if (!vsi) {
@@ -166,11 +166,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
pf->hw.mac.addr,
pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev,
- " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n",
- f->macaddr, f->vlan, f->is_netdev, f->is_vf,
- f->counter, i40e_filter_state_string[f->state]);
+ " mac_filter_hash: %pM vid=%d, state %s\n",
+ f->macaddr, f->vlan,
+ i40e_filter_state_string[f->state]);
}
dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
@@ -867,86 +867,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
-
- } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- struct i40e_mac_filter *f;
- int vlan = 0;
- u8 ma[6];
- int ret;
-
- cnt = sscanf(&cmd_buf[11],
- "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
- &vsi_seid,
- &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
- &vlan);
- if (cnt == 7) {
- vlan = 0;
- } else if (cnt != 8) {
- dev_info(&pf->pdev->dev,
- "add macaddr: bad command string, cnt=%d\n",
- cnt);
- goto command_write_done;
- }
-
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add macaddr: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, ma, vlan, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- ret = i40e_sync_vsi_filters(vsi);
- if (f && !ret)
- dev_info(&pf->pdev->dev,
- "add macaddr: %pM vlan=%d added to VSI %d\n",
- ma, vlan, vsi_seid);
- else
- dev_info(&pf->pdev->dev,
- "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
- ma, vlan, vsi_seid, f, ret);
-
- } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- int vlan = 0;
- u8 ma[6];
- int ret;
-
- cnt = sscanf(&cmd_buf[11],
- "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
- &vsi_seid,
- &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
- &vlan);
- if (cnt == 7) {
- vlan = 0;
- } else if (cnt != 8) {
- dev_info(&pf->pdev->dev,
- "del macaddr: bad command string, cnt=%d\n",
- cnt);
- goto command_write_done;
- }
-
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "del macaddr: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_filter(vsi, ma, vlan, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- ret = i40e_sync_vsi_filters(vsi);
- if (!ret)
- dev_info(&pf->pdev->dev,
- "del macaddr: %pM vlan=%d removed from VSI %d\n",
- ma, vlan, vsi_seid);
- else
- dev_info(&pf->pdev->dev,
- "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
- ma, vlan, vsi_seid, ret);
-
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
i40e_status ret;
u16 vid;
@@ -1615,8 +1535,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
- dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
- dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
dev_info(&pf->pdev->dev, " dump switch\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index fb4fb524eab2..b9e1162d927f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -216,7 +216,6 @@ enum i40e_ethtool_test_id {
I40E_ETH_TEST_REG = 0,
I40E_ETH_TEST_EEPROM,
I40E_ETH_TEST_INTR,
- I40E_ETH_TEST_LOOPBACK,
I40E_ETH_TEST_LINK,
};
@@ -224,7 +223,6 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Eeprom test (offline)",
"Interrupt test (offline)",
- "Loopback test (offline)",
"Link test (on/offline)"
};
@@ -1744,17 +1742,6 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_loopback_test(struct net_device *netdev, u64 *data)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
-
- netif_info(pf, hw, netdev, "loopback test not implemented\n");
- *data = 0;
-
- return *data;
-}
-
static inline bool i40e_active_vfs(struct i40e_pf *pf)
{
struct i40e_vf *vfs = pf->vf;
@@ -1768,17 +1755,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
{
- struct i40e_vsi **vsi = pf->vsi;
- int i;
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!vsi[i])
- continue;
- if (vsi[i]->type == I40E_VSI_VMDQ2)
- return true;
- }
-
- return false;
+ return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
}
static void i40e_diag_test(struct net_device *netdev,
@@ -1800,7 +1777,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LOOPBACK] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
@@ -1828,9 +1804,6 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
/* run reg test last, a reset is required after it */
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1851,7 +1824,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 0;
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
- data[I40E_ETH_TEST_LOOPBACK] = 0;
}
skip_ol_tests:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 58e6c1570335..b077ef8b00fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1522,12 +1522,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function.
*/
netdev->dev_port = 1;
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
- i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
- i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, hw->mac.san_addr, 0);
+ i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0);
+ i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0);
+ i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d78a4dc7b00b..5c6a5ceb8a91 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1145,25 +1145,22 @@ void i40e_update_stats(struct i40e_vsi *vsi)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL
**/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+ const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (vlan == f->vlan) &&
- (!is_vf || f->is_vf) &&
- (!is_netdev || f->is_netdev))
+ (vlan == f->vlan))
return f;
}
return NULL;
@@ -1173,24 +1170,21 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
* i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns the first filter with the provided MAC address or NULL if
* MAC address was not found
**/
-struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (!is_vf || f->is_vf) &&
- (!is_netdev || f->is_netdev))
+ key = i40e_addr_to_hkey(macaddr);
+ hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
+ if ((ether_addr_equal(macaddr, f->macaddr)))
return f;
}
return NULL;
@@ -1204,86 +1198,31 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
**/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f;
+ /* If we have a PVID, always operate in VLAN mode */
+ if (vsi->info.pvid)
+ return true;
- /* Only -1 for all the filters denotes not in vlan mode
- * so we have to go through all the list in order to make sure
+ /* We need to operate in VLAN mode whenever we have any filters with
+ * a VLAN other than I40E_VLAN_ANY. We could check the table each
+ * time, incurring search cost repeatedly. However, we can notice two
+ * things:
+ *
+ * 1) the only place where we can gain a VLAN filter is in
+ * i40e_add_filter.
+ *
+ * 2) the only place where filters are actually removed is in
+ * i40e_vsi_sync_filters_subtask.
+ *
+ * Thus, we can simply use a boolean value, has_vlan_filter, which we
+ * will set to true when we add a VLAN filter in i40e_add_filter. Then
+ * we have to perform the full search after deleting filters in
+ * i40e_vsi_sync_filters_subtask, but we already have to search
+ * filters here and can perform the check at the same time. This
+ * results in avoiding embedding a loop for VLAN mode inside another
+ * loop over all the filters, and should maintain correctness as noted
+ * above.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (f->vlan >= 0 || vsi->info.pvid)
- return true;
- }
-
- return false;
-}
-
-/**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
- * @vsi: the VSI to be searched
- * @macaddr: the mac address to be filtered
- * @is_vf: true if it is a VF
- * @is_netdev: true if it is a netdev
- *
- * Goes through all the macvlan filters and adds a
- * macvlan filter for each unique vlan that already exists
- *
- * Returns first filter found on success, else NULL
- **/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
-{
- struct i40e_mac_filter *f;
-
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (vsi->info.pvid)
- f->vlan = le16_to_cpu(vsi->info.pvid);
- if (!i40e_find_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev)) {
- if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
- return NULL;
- }
- }
-
- return list_first_entry_or_null(&vsi->mac_filter_list,
- struct i40e_mac_filter, list);
-}
-
-/**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
- * @vsi: the VSI to be searched
- * @macaddr: the mac address to be removed
- * @is_vf: true if it is a VF
- * @is_netdev: true if it is a netdev
- *
- * Removes a given MAC address from a VSI, regardless of VLAN
- *
- * Returns 0 for success, or error
- **/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
- bool is_vf, bool is_netdev)
-{
- struct i40e_mac_filter *f = NULL;
- int changed = 0;
-
- WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
- "Missing mac_filter_list_lock\n");
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if ((ether_addr_equal(macaddr, f->macaddr)) &&
- (is_vf == f->is_vf) &&
- (is_netdev == f->is_netdev)) {
- f->counter--;
- changed = 1;
- if (f->counter == 0)
- f->state = I40E_FILTER_REMOVE;
- }
- }
- if (changed) {
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
- return 0;
- }
- return -ENOENT;
+ return vsi->has_vlan_filter;
}
/**
@@ -1291,20 +1230,17 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a VF filter, else doesn't matter
- * @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+ const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
- int changed = false;
+ u64 key;
if (!vsi || !macaddr)
return NULL;
@@ -1316,11 +1252,17 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (is_broadcast_ether_addr(macaddr))
return NULL;
- f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ f = i40e_find_filter(vsi, macaddr, vlan);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
- goto add_filter_out;
+ return NULL;
+
+ /* Update the boolean indicating if we need to function in
+ * VLAN mode.
+ */
+ if (vlan >= 0)
+ vsi->has_vlan_filter = true;
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
@@ -1332,100 +1274,148 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
- changed = true;
- INIT_LIST_HEAD(&f->list);
- list_add_tail(&f->list, &vsi->mac_filter_list);
- }
+ INIT_HLIST_NODE(&f->hlist);
- /* increment counter and add a new flag if needed */
- if (is_vf) {
- if (!f->is_vf) {
- f->is_vf = true;
- f->counter++;
- }
- } else if (is_netdev) {
- if (!f->is_netdev) {
- f->is_netdev = true;
- f->counter++;
- }
- } else {
- f->counter++;
- }
+ key = i40e_addr_to_hkey(macaddr);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
- if (changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
-add_filter_out:
+ /* If we're asked to add a filter that has been marked for removal, it
+ * is safe to simply restore it to active state. __i40e_del_filter
+ * will have simply deleted any filters which were previously marked
+ * NEW or FAILED, so if it is currently marked REMOVE it must have
+ * previously been ACTIVE. Since we haven't yet run the sync filters
+ * task, just restore this filter to the ACTIVE state so that the
+ * sync task leaves it in place
+ */
+ if (f->state == I40E_FILTER_REMOVE)
+ f->state = I40E_FILTER_ACTIVE;
+
return f;
}
/**
- * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * __i40e_del_filter - Remove a specific filter from the VSI
+ * @vsi: VSI to remove from
+ * @f: the filter to remove from the list
+ *
+ * This function should be called instead of i40e_del_filter only if you know
+ * the exact filter you will remove already, such as via i40e_find_filter or
+ * i40e_find_mac.
+ *
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
+ * being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
+ **/
+static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+{
+ if (!f)
+ return;
+
+ if ((f->state == I40E_FILTER_FAILED) ||
+ (f->state == I40E_FILTER_NEW)) {
+ /* this one never got added by the FW. Just remove it,
+ * no need to sync anything.
+ */
+ hash_del(&f->hlist);
+ kfree(f);
+ } else {
+ f->state = I40E_FILTER_REMOVE;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
- * @vlan: the vlan
- * @is_vf: make sure it's a VF filter, else doesn't matter
- * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ * @vlan: the VLAN
*
- * NOTE: This function is expected to be called with mac_filter_list_lock
+ * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
* ANOTHER NOTE: This function MUST be called from within the context of
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry().
**/
-void i40e_del_filter(struct i40e_vsi *vsi,
- u8 *macaddr, s16 vlan,
- bool is_vf, bool is_netdev)
+void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct i40e_mac_filter *f;
if (!vsi || !macaddr)
return;
- f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
- if (!f || f->counter == 0)
- return;
+ f = i40e_find_filter(vsi, macaddr, vlan);
+ __i40e_del_filter(vsi, f);
+}
- if (is_vf) {
- if (f->is_vf) {
- f->is_vf = false;
- f->counter--;
- }
- } else if (is_netdev) {
- if (f->is_netdev) {
- f->is_netdev = false;
- f->counter--;
- }
- } else {
- /* make sure we don't remove a filter in use by VF or netdev */
- int min_f = 0;
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ *
+ * Goes through all the macvlan filters and adds a macvlan filter for each
+ * unique vlan that already exists. If a PVID has been assigned, instead only
+ * add the macaddr to that VLAN.
+ *
+ * Returns last filter added on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
+ const u8 *macaddr)
+{
+ struct i40e_mac_filter *f, *add = NULL;
+ struct hlist_node *h;
+ int bkt;
- min_f += (f->is_vf ? 1 : 0);
- min_f += (f->is_netdev ? 1 : 0);
+ if (vsi->info.pvid)
+ return i40e_add_filter(vsi, macaddr,
+ le16_to_cpu(vsi->info.pvid));
- if (f->counter > min_f)
- f->counter--;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add = i40e_add_filter(vsi, macaddr, f->vlan);
+ if (!add)
+ return NULL;
}
- /* counter == 0 tells sync_filters_subtask to
- * remove the filter from the firmware's list
- */
- if (f->counter == 0) {
- if ((f->state == I40E_FILTER_FAILED) ||
- (f->state == I40E_FILTER_NEW)) {
- /* this one never got added by the FW. Just remove it,
- * no need to sync anything.
- */
- list_del(&f->list);
- kfree(f);
- } else {
- f->state = I40E_FILTER_REMOVE;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ return add;
+}
+
+/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
+ "Missing mac_filter_hash_lock\n");
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (ether_addr_equal(macaddr, f->macaddr)) {
+ __i40e_del_filter(vsi, f);
+ found = true;
}
}
+
+ if (found)
+ return 0;
+ else
+ return -ENOENT;
}
/**
@@ -1466,10 +1456,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
- i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
+ i40e_put_mac_in_vlan(vsi, addr->sa_data);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -1633,6 +1623,52 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
}
/**
+ * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_mac_filter *f;
+
+ if (i40e_is_vsi_in_vlan(vsi))
+ f = i40e_put_mac_in_vlan(vsi, addr);
+ else
+ f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
+
+ if (f)
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+/**
+ * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_del_mac_all_vlan(vsi, addr);
+ else
+ i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
+
+ return 0;
+}
+
+/**
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
@@ -1643,62 +1679,14 @@ static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
- struct netdev_hw_addr *uca;
- struct netdev_hw_addr *mca;
- struct netdev_hw_addr *ha;
-
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- /* add addr if not already in the filter list */
- netdev_for_each_uc_addr(uca, netdev) {
- if (!i40e_find_mac(vsi, uca->addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, uca->addr,
- false, true);
- else
- i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
- false, true);
- }
- }
- netdev_for_each_mc_addr(mca, netdev) {
- if (!i40e_find_mac(vsi, mca->addr, false, true)) {
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_put_mac_in_vlan(vsi, mca->addr,
- false, true);
- else
- i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
- false, true);
- }
- }
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- /* remove filter if not in netdev list */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
+ __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
- if (!f->is_netdev)
- continue;
-
- netdev_for_each_mc_addr(mca, netdev)
- if (ether_addr_equal(mca->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- netdev_for_each_uc_addr(uca, netdev)
- if (ether_addr_equal(uca->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- for_each_dev_addr(netdev, ha)
- if (ether_addr_equal(ha->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
-
-bottom_of_search_loop:
- continue;
- }
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1713,21 +1701,26 @@ bottom_of_search_loop:
}
/**
- * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
- * @vsi: pointer to vsi struct
+ * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to VSI struct
* @from: Pointer to list which contains MAC filter entries - changes to
* those entries needs to be undone.
*
- * MAC filter entries from list were slated to be removed from device.
+ * MAC filter entries from list were slated to be sent to firmware, either for
+ * addition or deletion.
**/
-static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
- struct list_head *from)
+static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(f, h, from, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
- list_for_each_entry_safe(f, ftmp, from, list) {
/* Move the element back into MAC filter list*/
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
}
@@ -1756,7 +1749,9 @@ i40e_update_filter_state(int count,
/* Everything's good, mark all filters active. */
for (i = 0; i < count ; i++) {
add_head->state = I40E_FILTER_ACTIVE;
- add_head = list_next_entry(add_head, list);
+ add_head = hlist_entry(add_head->hlist.next,
+ typeof(struct i40e_mac_filter),
+ hlist);
}
} else if (aq_err == I40E_AQ_RC_ENOSPC) {
/* Device ran out of filter space. Check the return value
@@ -1770,20 +1765,98 @@ i40e_update_filter_state(int count,
add_head->state = I40E_FILTER_ACTIVE;
retval++;
}
- add_head = list_next_entry(add_head, list);
+ add_head = hlist_entry(add_head->hlist.next,
+ typeof(struct i40e_mac_filter),
+ hlist);
}
} else {
/* Some other horrible thing happened, fail all filters */
retval = 0;
for (i = 0; i < count ; i++) {
add_head->state = I40E_FILTER_FAILED;
- add_head = list_next_entry(add_head, list);
+ add_head = hlist_entry(add_head->hlist.next,
+ typeof(struct i40e_mac_filter),
+ hlist);
}
}
return retval;
}
/**
+ * i40e_aqc_del_filters - Request firmware to delete a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @num_del: the number of filters to delete
+ * @retval: Set to -EIO on failure to delete
+ *
+ * Send a request to firmware via AdminQ to delete a set of filters. Uses
+ * *retval instead of a return value so that success does not force ret_val to
+ * be set to 0. This ensures that a sequence of calls to this function
+ * preserve the previous value of *retval on successful delete.
+ */
+static
+void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_remove_macvlan_element_data *list,
+ int num_del, int *retval)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ i40e_status aq_ret;
+ int aq_err;
+
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
+
+ /* Explicitly ignore and do not report when firmware returns ENOENT */
+ if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ *retval = -EIO;
+ dev_info(&vsi->back->pdev->dev,
+ "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+ vsi_name, i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
+ }
+}
+
+/**
+ * i40e_aqc_add_filters - Request firmware to add a set of filters
+ * @vsi: ptr to the VSI
+ * @vsi_name: name to display in messages
+ * @list: the list of filters to send to firmware
+ * @add_head: Position in the add hlist
+ * @num_add: the number of filters to add
+ * @promisc_changed: set to true on exit if promiscuous mode was forced on
+ *
+ * Send a request to firmware via AdminQ to add a chunk of filters. Will set
+ * promisc_changed to true if the firmware has run out of space for more
+ * filters.
+ */
+static
+void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_aqc_add_macvlan_element_data *list,
+ struct i40e_mac_filter *add_head,
+ int num_add, bool *promisc_changed)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ i40e_status aq_ret;
+ int aq_err, fcnt;
+
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add, list, add_head, aq_ret);
+ vsi->active_filters += fcnt;
+
+ if (fcnt != num_add) {
+ *promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err),
+ vsi_name);
+ }
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -1793,22 +1866,25 @@ i40e_update_filter_state(int count,
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
- struct list_head tmp_add_list, tmp_del_list;
+ struct hlist_head tmp_add_list, tmp_del_list;
+ struct i40e_mac_filter *f, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
+ unsigned int vlan_any_filters = 0;
+ unsigned int non_vlan_filters = 0;
+ unsigned int vlan_filters = 0;
bool promisc_changed = false;
char vsi_name[16] = "PF";
int filter_list_len = 0;
- u32 changed_flags = 0;
i40e_status aq_ret = 0;
- int retval = 0;
+ u32 changed_flags = 0;
+ struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
- int aq_err = 0;
+ int retval = 0;
u16 cmd_flags;
int list_size;
- int fcnt;
+ int bkt;
/* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list;
@@ -1823,8 +1899,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags;
}
- INIT_LIST_HEAD(&tmp_add_list);
- INIT_LIST_HEAD(&tmp_del_list);
+ INIT_HLIST_HEAD(&tmp_add_list);
+ INIT_HLIST_HEAD(&tmp_del_list);
if (vsi->type == I40E_VSI_SRIOV)
snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
@@ -1834,41 +1910,98 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* Create a list of filters to delete. */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE) {
- WARN_ON(f->counter != 0);
/* Move the element into temporary del_list */
- list_move_tail(&f->list, &tmp_del_list);
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_del_list);
vsi->active_filters--;
+
+ /* Avoid counting removed filters */
+ continue;
}
if (f->state == I40E_FILTER_NEW) {
- WARN_ON(f->counter == 0);
- /* Move the element into temporary add_list */
- list_move_tail(&f->list, &tmp_add_list);
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_add_list);
}
+
+ /* Count the number of each type of filter we have
+ * remaining, ignoring any filters we're about to
+ * delete.
+ */
+ if (f->vlan > 0)
+ vlan_filters++;
+ else if (!f->vlan)
+ non_vlan_filters++;
+ else
+ vlan_any_filters++;
+ }
+
+ /* We should never have VLAN=-1 filters at the same time as we
+ * have either VLAN=0 or VLAN>0 filters, so warn about this
+ * case here to help catch any issues.
+ */
+ WARN_ON(vlan_any_filters && (vlan_filters + non_vlan_filters));
+
+ /* If we only have VLAN=0 filters remaining, and don't have
+ * any other VLAN filters, we need to convert these VLAN=0
+ * filters into VLAN=-1 (I40E_VLAN_ANY) so that we operate
+ * correctly in non-VLAN mode and receive all traffic tagged
+ * or untagged.
+ */
+ if (non_vlan_filters && !vlan_filters) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
+ hlist) {
+ /* Only replace VLAN=0 filters */
+ if (f->vlan)
+ continue;
+
+ /* Allocate a replacement element */
+ add_head = kzalloc(sizeof(*add_head),
+ GFP_KERNEL);
+ if (!add_head)
+ goto err_no_memory_locked;
+
+ /* Copy the filter, with new state and VLAN */
+ *add_head = *f;
+ add_head->state = I40E_FILTER_NEW;
+ add_head->vlan = I40E_VLAN_ANY;
+
+ /* Move the replacement to the add list */
+ INIT_HLIST_NODE(&add_head->hlist);
+ hlist_add_head(&add_head->hlist,
+ &tmp_add_list);
+
+ /* Move the original to the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, &tmp_del_list);
+ vsi->active_filters--;
+ }
+
+ /* Also update any filters on the tmp_add list */
+ hlist_for_each_entry(f, &tmp_add_list, hlist) {
+ if (!f->vlan)
+ f->vlan = I40E_VLAN_ANY;
+ }
+ add_head = NULL;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
/* Now process 'del_list' outside the lock */
- if (!list_empty(&tmp_del_list)) {
+ if (!hlist_empty(&tmp_del_list)) {
filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
del_list = kzalloc(list_size, GFP_ATOMIC);
- if (!del_list) {
- /* Undo VSI's MAC filter entry element updates */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- retval = -ENOMEM;
- goto out;
- }
+ if (!del_list)
+ goto err_no_memory;
- list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
+ hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
cmd_flags = 0;
/* add to delete list */
@@ -1887,68 +2020,47 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- del_list,
- num_del, NULL);
- aq_err = hw->aq.asq_last_status;
- num_del = 0;
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
memset(del_list, 0, list_size);
-
- /* Explicitly ignore and do not report when
- * firmware returns ENOENT.
- */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
- retval = -EIO;
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
- }
+ num_del = 0;
}
/* Release memory for MAC filter entries which were
* synced up with HW.
*/
- list_del(&f->list);
+ hlist_del(&f->hlist);
kfree(f);
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
- num_del, NULL);
- aq_err = hw->aq.asq_last_status;
- num_del = 0;
-
- /* Explicitly ignore and do not report when firmware
- * returns ENOENT.
- */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
- retval = -EIO;
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error on %s, err %s aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
- }
+ i40e_aqc_del_filters(vsi, vsi_name, del_list,
+ num_del, &retval);
}
kfree(del_list);
del_list = NULL;
}
- if (!list_empty(&tmp_add_list)) {
+ /* After finishing notifying firmware of the deleted filters, update
+ * the cached value of vsi->has_vlan_filter. Note that we are safe to
+ * use just !!vlan_filters here because if we only have VLAN=0 (that
+ * is, non_vlan_filters) these will all be converted to VLAN=-1 in the
+ * logic above already so this value would still be correct.
+ */
+ vsi->has_vlan_filter = !!vlan_filters;
+
+ if (!hlist_empty(&tmp_add_list)) {
/* Do all the adds now. */
filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data);
list_size = filter_list_len *
sizeof(struct i40e_aqc_add_macvlan_element_data);
add_list = kzalloc(list_size, GFP_ATOMIC);
- if (!add_list) {
- retval = -ENOMEM;
- goto out;
- }
+ if (!add_list)
+ goto err_no_memory;
+
num_add = 0;
- list_for_each_entry(f, &tmp_add_list, list) {
+ hlist_for_each_entry(f, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
f->state = I40E_FILTER_FAILED;
@@ -1973,57 +2085,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
- add_list, num_add,
- NULL);
- aq_err = hw->aq.asq_last_status;
- fcnt = i40e_update_filter_state(num_add,
- add_list,
- add_head,
- aq_ret);
- vsi->active_filters += fcnt;
-
- if (fcnt != num_add) {
- promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- vsi->promisc_threshold =
- (vsi->active_filters * 3) / 4;
- dev_warn(&pf->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err),
- vsi_name);
- }
+ i40e_aqc_add_filters(vsi, vsi_name, add_list,
+ add_head, num_add,
+ &promisc_changed);
memset(add_list, 0, list_size);
num_add = 0;
}
}
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
- add_list, num_add, NULL);
- aq_err = hw->aq.asq_last_status;
- fcnt = i40e_update_filter_state(num_add, add_list,
- add_head, aq_ret);
- vsi->active_filters += fcnt;
- if (fcnt != num_add) {
- promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- vsi->promisc_threshold =
- (vsi->active_filters * 3) / 4;
- dev_warn(&pf->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err), vsi_name);
- }
+ i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
+ num_add, &promisc_changed);
}
/* Now move all of the filters from the temp add list back to
* the VSI's list.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
- list_move_tail(&f->list, &vsi->mac_filter_list);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ u64 key = i40e_addr_to_hkey(f->macaddr);
+
+ hlist_del(&f->hlist);
+ hash_add(vsi->mac_filter_hash, &f->hlist, key);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
add_list = NULL;
}
@@ -2035,12 +2118,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* See if we have any failed filters. We can't drop out of
* promiscuous until these have all been deleted.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->state == I40E_FILTER_FAILED)
failed_count++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (!failed_count) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
@@ -2168,6 +2251,18 @@ out:
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
return retval;
+
+err_no_memory:
+ /* Restore elements on the temporary add and delete lists */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+err_no_memory_locked:
+ i40e_undo_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_filter_entries(vsi, &tmp_add_list);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
}
/**
@@ -2322,34 +2417,33 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
**/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
- struct i40e_mac_filter *f, *ftmp, *add_f;
- bool is_netdev, is_vf;
-
- is_vf = (vsi->type == I40E_VSI_SRIOV);
- is_netdev = !!(vsi->netdev);
+ struct i40e_mac_filter *f, *add_f, *del_f;
+ struct hlist_node *h;
+ int bkt;
/* Locked once because all functions invoked below iterates list*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- if (is_netdev) {
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
- is_vf, is_netdev);
+ if (vsi->netdev) {
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
+ continue;
+ add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
@@ -2359,19 +2453,17 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
* with 0, so we now accept untagged and specified tagged traffic
* (and not all tags along with untagged)
*/
- if (vid > 0) {
- if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY,
- is_vf, is_netdev)) {
- i40e_del_filter(vsi, vsi->netdev->dev_addr,
- I40E_VLAN_ANY, is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
- is_vf, is_netdev);
+ if (vid > 0 && vsi->netdev) {
+ del_f = i40e_find_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY);
+ if (del_f) {
+ __i40e_del_filter(vsi, del_f);
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
vsi->netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
@@ -2379,25 +2471,26 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev))
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_REMOVE)
continue;
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr,
- 0, is_vf, is_netdev);
+ del_f = i40e_find_filter(vsi, f->macaddr,
+ I40E_VLAN_ANY);
+ if (!del_f)
+ continue;
+ __i40e_del_filter(vsi, del_f);
+ add_f = i40e_add_filter(vsi, f->macaddr, 0);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM;
}
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
* applying the new filter changes
@@ -2410,79 +2503,31 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
* i40e_vsi_kill_vlan - Remove vsi membership for given vlan
* @vsi: the vsi being configured
* @vid: vlan id to be removed (0 = untagged only , -1 = any)
- *
- * Return: 0 on success or negative otherwise
**/
-int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct net_device *netdev = vsi->netdev;
- struct i40e_mac_filter *f, *ftmp, *add_f;
- bool is_vf, is_netdev;
- int filter_count = 0;
-
- is_vf = (vsi->type == I40E_VSI_SRIOV);
- is_netdev = !!(netdev);
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
/* Locked once because all functions invoked below iterates list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
-
- if (is_netdev)
- i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
-
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
- /* go through all the filters for this VSI and if there is only
- * vid == 0 it means there are no other filters, so vid 0 must
- * be replaced with -1. This signifies that we should from now
- * on accept any traffic (with any tag present, or untagged)
- */
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (is_netdev) {
- if (f->vlan &&
- ether_addr_equal(netdev->dev_addr, f->macaddr))
- filter_count++;
- }
-
- if (f->vlan)
- filter_count++;
- }
-
- if (!filter_count && is_netdev) {
- i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
- f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- if (!f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter %d for %pM\n",
- I40E_VLAN_ANY, netdev->dev_addr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
+ if (vsi->netdev)
+ i40e_del_filter(vsi, netdev->dev_addr, vid);
- if (!filter_count) {
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter %d for %pM\n",
- I40E_VLAN_ANY, f->macaddr);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- return -ENOMEM;
- }
- }
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->vlan == vid)
+ __i40e_del_filter(vsi, f);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
i40e_service_event_schedule(vsi->back);
- return 0;
}
/**
@@ -3969,30 +4014,36 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
}
/**
- * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * i40e_vsi_start_rings - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
int ret = 0;
/* do rx first for enable and last for disable */
- if (request) {
- ret = i40e_vsi_control_rx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_tx(vsi, request);
- } else {
- /* Ignore return value, we need to shutdown whatever we can */
- i40e_vsi_control_tx(vsi, request);
- i40e_vsi_control_rx(vsi, request);
- }
+ ret = i40e_vsi_control_rx(vsi, true);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_tx(vsi, true);
return ret;
}
/**
+ * i40e_vsi_stop_rings - Stop a VSI's rings
+ * @vsi: the VSI being configured
+ **/
+void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
+{
+ /* Tx is stopped first and Rx last (the reverse of the start order).
+ * Ignore return values; we need to shut down whatever we can.
+ */
+ i40e_vsi_control_tx(vsi, false);
+ i40e_vsi_control_rx(vsi, false);
+}
+
+/**
* i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
**/
@@ -5190,7 +5241,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
i40e_configure_msi_and_legacy(vsi);
/* start rings */
- err = i40e_vsi_control_rings(vsi, true);
+ err = i40e_vsi_start_rings(vsi);
if (err)
return err;
@@ -5287,7 +5338,7 @@ void i40e_down(struct i40e_vsi *vsi)
netif_tx_disable(vsi->netdev);
}
i40e_vsi_disable_irq(vsi);
- i40e_vsi_control_rings(vsi, false);
+ i40e_vsi_stop_rings(vsi);
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -6670,7 +6721,6 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- int i;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
@@ -6681,6 +6731,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
0x95b3a76d};
+ int i;
for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
@@ -6690,13 +6741,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
return;
/* find existing VSI and see if it needs configuring */
- vsi = NULL;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
/* create a new VSI if none exists */
if (!vsi) {
@@ -6718,15 +6763,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
**/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
- int i;
+ struct i40e_vsi *vsi;
i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_release(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_release(vsi);
}
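Both hunks above (and an i40e_txrx.c hunk later in this patch) replace open-coded scans of pf->vsi[] with the new i40e_find_vsi_by_type() helper, whose definition is outside this excerpt. A plausible shape for it, inferred from the loops it replaces (a sketch, not the committed implementation):

static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++) {
		struct i40e_vsi *vsi = pf->vsi[i];

		/* return the first allocated VSI of the requested type */
		if (vsi && vsi->type == type)
			return vsi;
	}

	return NULL;
}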
/**
@@ -7354,7 +7396,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
- INIT_LIST_HEAD(&vsi->mac_filter_list);
+ hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
@@ -7369,7 +7411,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
/* Initialize VSI lock */
- spin_lock_init(&vsi->mac_filter_list_lock);
+ spin_lock_init(&vsi->mac_filter_hash_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
@@ -9154,18 +9196,18 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
ether_addr_copy(netdev->dev_addr, mac_addr);
@@ -9254,7 +9296,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
@@ -9453,13 +9497,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->active_filters = 0;
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -9489,11 +9533,12 @@ err:
**/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
struct i40e_veb *veb = NULL;
struct i40e_pf *pf;
u16 uplink_seid;
- int i, n;
+ int i, n, bkt;
pf = vsi->back;
@@ -9523,11 +9568,19 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, f->vlan,
- f->is_vf, f->is_netdev);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
+ /* clear the sync flag on all filters */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
+
+ /* make sure any remaining filters are marked for deletion */
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ __i40e_del_filter(vsi, f);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_sync_vsi_filters(vsi);
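The __dev_uc_unsync()/__dev_mc_unsync() calls above clear the kernel's "synced address" bookkeeping before the remaining filters are marked for deletion. They are the teardown half of the __dev_uc_sync()/__dev_mc_sync() scheme that the reworked .ndo_set_rx_mode handler (not shown in this excerpt) is built on; a minimal sketch of that pattern, with hypothetical example_* helpers:

#include <linux/netdevice.h>

/* Hypothetical driver helpers, assumed to exist for this sketch only. */
static bool example_add_mac_filter(struct net_device *netdev, const u8 *addr);
static void example_del_mac_filter(struct net_device *netdev, const u8 *addr);

static int example_addr_sync(struct net_device *netdev, const u8 *addr)
{
	/* Called once for each newly appearing unicast/multicast address. */
	return example_add_mac_filter(netdev, addr) ? 0 : -ENOMEM;
}

static int example_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	/* Called once for each address that has gone away. */
	example_del_mac_filter(netdev, addr);
	return 0;
}

static void example_set_rx_mode(struct net_device *netdev)
{
	/* The core walks netdev->uc/mc and invokes the callbacks only for
	 * addresses added or removed since the last call.
	 */
	__dev_uc_sync(netdev, example_addr_sync, example_addr_unsync);
	__dev_mc_sync(netdev, example_addr_sync, example_addr_unsync);
}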
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 954efe3118db..38ee18f11124 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -722,9 +722,20 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return 0;
}
+ /* Clear the error status even if it was not read, and log it */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1074,6 +1085,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
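Taken together, the two i40e_nvm.c hunks above add the error round trip summarized in this descriptive comment (a summary for the reader, not part of the patch):

/*
 * i40e_nvmupd_check_wait_event():
 *     aq.arq_last_status != OK  ->  nvmupd_state = I40E_NVMUPD_STATE_ERROR
 *
 * i40e_nvmupd_command():
 *     status read by the tool   ->  state is reported, then cleared to INIT
 *     any other command arrives ->  state is cleared to INIT and logged
 */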
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index f1feceab758a..5e2272c9e717 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -159,16 +159,15 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec64 now, then;
- unsigned long flags;
then = ns_to_timespec64(delta);
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now);
now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -184,11 +183,10 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, ts);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -205,11 +203,10 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- unsigned long flags;
- spin_lock_irqsave(&pf->tmreg_lock, flags);
+ mutex_lock(&pf->tmreg_lock);
i40e_ptp_write(pf, ts);
- spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+ mutex_unlock(&pf->tmreg_lock);
return 0;
}
@@ -230,6 +227,47 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
}
/**
+ * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events
+ * @pf: the PF data structure
+ *
+ * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
+ * for any newly noticed latch events. This allows the driver to track the first
+ * time a latch event was noticed, which is later used to help clear out Rx
+ * timestamps for packets that were dropped or lost.
+ *
+ * This function will return the current value of I40E_PRTTSYN_STAT_1 and is
+ * expected to be called only while under the ptp_rx_lock.
+ **/
+static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 prttsyn_stat, new_latch_events;
+ int i;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ new_latch_events = prttsyn_stat & ~pf->latch_event_flags;
+
+ /* Update the jiffies time for any newly latched timestamp. This
+ * ensures that we store the time that we first discovered a timestamp
+ * was latched by the hardware. The service task will later determine
+ * if we should free the latch and drop that timestamp should too much
+ * time pass. This flow ensures that we only update jiffies for new
+ * events latched since the last time we checked, and not all events
+ * currently latched, so that the service task accounting remains
+ * accurate.
+ */
+ for (i = 0; i < 4; i++) {
+ if (new_latch_events & BIT(i))
+ pf->latch_events[i] = jiffies;
+ }
+
+ /* Finally, we store the current status of the Rx timestamp latches */
+ pf->latch_event_flags = prttsyn_stat;
+
+ return prttsyn_stat;
+}
+
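A worked example of the new-event masking in i40e_ptp_get_rx_events() above, with illustrative values:

/*
 *   prttsyn_stat          = 0b0110   latches 1 and 2 are currently set
 *   pf->latch_event_flags = 0b0010   latch 1 was already known
 *   new_latch_events      = 0b0100   only latch 2 gets a fresh jiffies stamp
 */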
+/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @vsi: The VSI with the rings relevant to 1588
*
@@ -242,10 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- struct i40e_ring *rx_ring;
- unsigned long rx_event;
- u32 prttsyn_stat;
- int n;
+ int i;
/* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is
@@ -255,42 +290,30 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return;
- prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ spin_lock_bh(&pf->ptp_rx_lock);
- /* Unless all four receive timestamp registers are latched, we are not
- * concerned about a possible PTP Rx hang, so just update the timeout
- * counter and exit.
- */
- if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
- I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
- I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
- I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
- (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
- I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
- pf->last_rx_ptp_check = jiffies;
- return;
- }
+ /* Update current latch times for Rx events */
+ i40e_ptp_get_rx_events(pf);
- /* Determine the most recent watchdog or rx_timestamp event. */
- rx_event = pf->last_rx_ptp_check;
- for (n = 0; n < vsi->num_queue_pairs; n++) {
- rx_ring = vsi->rx_rings[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
+ /* Check all the currently latched Rx events and see whether they have
+ * been latched for over a second. It is assumed that any timestamp
+ * should have been cleared within this time, or else it was captured
+ * for a dropped frame that the driver never received. Thus, we will
+ * clear any timestamp that has been latched for over 1 second.
+ */
+ for (i = 0; i < 4; i++) {
+ if ((pf->latch_event_flags & BIT(i)) &&
+ time_is_before_jiffies(pf->latch_events[i] + HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_event_flags &= ~BIT(i);
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&pf->pdev->dev,
+ "Clearing a missed Rx timestamp event for RXTIME[%d]\n",
+ i);
+ }
}
- /* Only need to read the high RXSTMP register to clear the lock */
- if (time_is_before_jiffies(rx_event + 5 * HZ)) {
- rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
- rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
- pf->last_rx_ptp_check = jiffies;
- pf->rx_hwtstamp_cleared++;
- WARN_ONCE(1, "Detected Rx timestamp register hang\n");
- }
+ spin_unlock_bh(&pf->ptp_rx_lock);
}
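The latch bookkeeping used above lives in new i40e_pf members whose declarations (in i40e.h) are outside this excerpt. A minimal stand-in inferred from their uses here; the field names follow the code above, while the wrapping struct is purely illustrative:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ptp_rx_state {
	spinlock_t ptp_rx_lock;		/* serializes the Rx latch bookkeeping */
	u32 latch_event_flags;		/* bitmask of currently latched RXTIME registers */
	unsigned long latch_events[4];	/* jiffies when each latch was first noticed */
};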
/**
@@ -353,14 +376,25 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
hw = &pf->hw;
- prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+ spin_lock_bh(&pf->ptp_rx_lock);
- if (!(prttsyn_stat & BIT(index)))
+ /* Get current Rx events and update latch times */
+ prttsyn_stat = i40e_ptp_get_rx_events(pf);
+
+ /* TODO: Should we warn about missing Rx timestamp event? */
+ if (!(prttsyn_stat & BIT(index))) {
+ spin_unlock_bh(&pf->ptp_rx_lock);
return;
+ }
+
+ /* Clear the latched event since we're about to read its register */
+ pf->latch_event_flags &= ~BIT(index);
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+ spin_unlock_bh(&pf->ptp_rx_lock);
+
ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
@@ -514,12 +548,15 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
}
/* Clear out all 1588-related registers to clear and unlatch them. */
+ spin_lock_bh(&pf->ptp_rx_lock);
rd32(hw, I40E_PRTTSYN_STAT_0);
rd32(hw, I40E_PRTTSYN_TXTIME_H);
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
/* Enable/disable the Tx timestamp interrupt based on user input. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
@@ -658,10 +695,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
return;
}
- /* we have to initialize the lock first, since we can't control
- * when the user will enter the PHC device entry points
- */
- spin_lock_init(&pf->tmreg_lock);
+ mutex_init(&pf->tmreg_lock);
+ spin_lock_init(&pf->ptp_rx_lock);
/* ensure we have a clock device */
err = i40e_ptp_create_clock(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index daade4fe80d6..5544b509832f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -125,10 +125,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
u16 i;
/* find existing FDIR VSI */
- vsi = NULL;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
- vsi = pf->vsi[i];
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
if (!vsi)
return -ENOENT;
@@ -619,7 +616,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -735,7 +732,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
- ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+ ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
@@ -1410,13 +1407,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
+ u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (unlikely(rsyn)) {
- i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (unlikely(tsynvalid))
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
@@ -2704,9 +2700,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- u16 desc_count = 0;
- bool tail_bump = true;
- bool do_rs = false;
+ u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2789,8 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2798,66 +2791,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We can OR these values together because both are checked against
+ * WB_STRIDE (4) below, and after this if/else block desc_count is only
+ * used as a boolean (zero vs. non-zero).
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
/* Algorithm to optimize tail and RS bit setting:
- * if xmit_more is supported
- * if xmit_more is true
- * do not update tail and do not mark RS bit.
- * if xmit_more is false and last xmit_more was false
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * update tail and set RS bit on every packet.
- * if xmit_more is false and last_xmit_more was true
- * update tail and set RS bit.
+ * if queue is stopped
+ * mark RS bit
+ * reset packet counter
+ * else if xmit_more is supported and is true
+ * advance packet counter to 4
+ * reset desc_count to 0
*
- * Optimization: wmb to be issued only in case of tail update.
- * Also optimize the Descriptor WB path for RS bit with the same
- * algorithm.
+ * if desc_count >= 4
+ * mark RS bit
+ * reset packet counter
+ * if desc_count > 0
+ * update tail
*
- * Note: If there are less than 4 packets
+ * Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
- if (skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring))) {
- tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- tail_bump = false;
- } else if (!skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring)) &&
- (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
- (tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE)) {
- tx_ring->packet_stride++;
- } else {
+ if (netif_xmit_stopped(txring_txq(tx_ring))) {
+ goto do_rs;
+ } else if (skb->xmit_more) {
+ /* set stride to arm on next packet and reset desc_count */
+ tx_ring->packet_stride = WB_STRIDE;
+ desc_count = 0;
+ } else if (desc_count >= WB_STRIDE) {
+do_rs:
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
- tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- do_rs = true;
}
- if (do_rs)
- tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
- I40E_TX_DESC_CMD_EOP) <<
- I40E_TXD_QW1_CMD_SHIFT);
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (!tail_bump) {
- prefetchw(tx_desc + 1);
- } else {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ if (desc_count) {
writel(i, tx_ring->tail);
+
+ /* We need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems.
+ */
+ mmiowb();
}
+
return;
dma_error:
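To make the reworked arming scheme concrete, here is a small userspace trace (an illustrative sketch, not driver code) that mimics the desc_count/packet_stride bookkeeping from the hunk above for a run of single-descriptor packets. It shows the RS bit being armed whenever a batch flushes, and tail bumps being suppressed while xmit_more batches frames:

#include <stdbool.h>
#include <stdio.h>

#define WB_STRIDE 4

int main(void)
{
	unsigned char packet_stride = 0;	/* persists across packets, like tx_ring->packet_stride */
	int pkt;

	for (pkt = 1; pkt <= 8; pkt++) {
		unsigned short desc_count = 1;		/* this packet used one descriptor */
		bool queue_stopped = false;		/* netif_xmit_stopped() stand-in */
		bool xmit_more = (pkt % 3 != 0);	/* pretend every 3rd skb flushes the batch */
		bool rs = false;

		desc_count |= ++packet_stride;

		if (queue_stopped) {
			rs = true;			/* do_rs: flush everything */
			packet_stride = 0;
		} else if (xmit_more) {
			packet_stride = WB_STRIDE;	/* arm RS on the next non-batched packet */
			desc_count = 0;			/* and skip the tail bump */
		} else if (desc_count >= WB_STRIDE) {
			rs = true;
			packet_stride = 0;
		}

		printf("pkt %d: RS=%d tail_bump=%d\n", pkt, rs, desc_count != 0);
	}

	return 0;
}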
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 508840585645..de8550f4e3a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -307,15 +307,12 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
- unsigned long last_rx_timestamp;
-
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index bd5f13bef83c..d9a266041bf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -366,6 +366,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 54b8ee2583f1..53b46553dd8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -686,17 +686,17 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ vf->port_vlan_id ?
+ vf->port_vlan_id : -1);
if (!f)
dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
(u32)hena);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
@@ -811,6 +811,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
+ vf->num_mac = 0;
}
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -990,7 +991,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (vf->lan_vsi_idx == 0)
goto complete_reset;
- i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
@@ -1031,8 +1032,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- false);
+ i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -1449,9 +1449,9 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
- int num_vlans = 0;
+ int num_vlans = 0, bkt;
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
num_vlans++;
}
@@ -1481,6 +1481,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
struct i40e_vsi *vsi;
bool alluni = false;
int aq_err = 0;
+ int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
@@ -1507,7 +1508,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue;
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
@@ -1557,7 +1558,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id,
NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
aq_ret = 0;
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
aq_ret =
@@ -1757,7 +1758,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
+ if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the VF */
@@ -1796,8 +1797,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
- aq_ret = I40E_ERR_TIMEOUT;
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
error_param:
/* send the response to the VF */
@@ -1927,20 +1927,18 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* Lock once, because all functions inside the for loop access the VSI's
* MAC filter list, which needs to be protected by the same lock.
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
- f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+ f = i40e_find_mac(vsi, al->list[i].addr);
if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
- true, false);
+ f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
else
- f = i40e_add_filter(vsi, al->list[i].addr, -1,
- true, false);
+ f = i40e_add_filter(vsi, al->list[i].addr, -1);
}
if (!f) {
@@ -1948,13 +1946,13 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
}
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
@@ -2003,18 +2001,18 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+ if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac--;
}
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
@@ -2139,9 +2137,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
for (i = 0; i < vfl->num_elements; i++) {
- int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
- if (!ret)
- vf->num_vlan--;
+ i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ vf->num_vlan--;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
@@ -2153,11 +2150,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
false,
vfl->vlan_id[i],
NULL);
-
- if (ret)
- dev_err(&pf->pdev->dev,
- "Unable to delete VLAN filter %d for VF %d, error %d\n",
- vfl->vlan_id[i], vf->vf_id, ret);
}
error_param:
@@ -2689,6 +2681,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ int bkt;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
@@ -2715,23 +2708,22 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* Lock once because the add/del_filter functions invoked below require
- * mac_filter_list_lock to be held
+ * mac_filter_hash_lock to be held
*/
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
i40e_del_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
+ vf->port_vlan_id ? vf->port_vlan_id : -1);
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- list_for_each_entry(f, &vsi->mac_filter_list, list)
- i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
+ i40e_del_filter(vsi, f->macaddr, f->vlan);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2803,9 +2795,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
- spin_lock_bh(&vsi->mac_filter_list_lock);
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev,
@@ -2835,13 +2827,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
if (vsi->info.pvid) {
/* kill old VLAN */
- ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "remove VLAN failed, ret=%d, aq_err=%d\n",
- ret, pf->hw.aq.asq_last_status);
- }
+ i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
}
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
@@ -2940,7 +2927,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (max_tx_rate > speed) {
- dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
max_tx_rate, vf->vf_id);
ret = -EINVAL;
goto error;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 44f7ed7583dd..96385156b824 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -912,11 +912,11 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index e2d362238fd7..c4b174afd253 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget &&
- ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+ ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
@@ -1950,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- u16 desc_count = 0;
- bool tail_bump = true;
- bool do_rs = false;
+ u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2035,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2044,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We can OR these values together because both are checked against
+ * WB_STRIDE (4) below, and after this if/else block desc_count is only
+ * used as a boolean (zero vs. non-zero).
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
/* Algorithm to optimize tail and RS bit setting:
- * if xmit_more is supported
- * if xmit_more is true
- * do not update tail and do not mark RS bit.
- * if xmit_more is false and last xmit_more was false
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * update tail and set RS bit on every packet.
- * if xmit_more is false and last_xmit_more was true
- * update tail and set RS bit.
+ * if queue is stopped
+ * mark RS bit
+ * reset packet counter
+ * else if xmit_more is supported and is true
+ * advance packet counter to 4
+ * reset desc_count to 0
*
- * Optimization: wmb to be issued only in case of tail update.
- * Also optimize the Descriptor WB path for RS bit with the same
- * algorithm.
+ * if desc_count >= 4
+ * mark RS bit
+ * reset packet counter
+ * if desc_count > 0
+ * update tail
*
- * Note: If there are less than 4 packets
+ * Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
- if (skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring))) {
- tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- tail_bump = false;
- } else if (!skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring)) &&
- (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
- (tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE)) {
- tx_ring->packet_stride++;
- } else {
+ if (netif_xmit_stopped(txring_txq(tx_ring))) {
+ goto do_rs;
+ } else if (skb->xmit_more) {
+ /* set stride to arm on next packet and reset desc_count */
+ tx_ring->packet_stride = WB_STRIDE;
+ desc_count = 0;
+ } else if (desc_count >= WB_STRIDE) {
+do_rs:
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
- tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- do_rs = true;
}
- if (do_rs)
- tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
- I40E_TX_DESC_CMD_EOP) <<
- I40E_TXD_QW1_CMD_SHIFT);
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (!tail_bump) {
- prefetchw(tx_desc + 1);
- } else {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ if (desc_count) {
writel(i, tx_ring->tail);
+
+ /* We need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems.
+ */
+ mmiowb();
}
+
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index abcdecabbc56..a586e19cfd1d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -309,7 +309,6 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 97f96e0d9c4c..ca7afe59c55f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -348,6 +348,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index bcb1cafdf28a..db36744c6691 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1746,15 +1746,17 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ /* sleep first to make sure a minimum wait time is met */
+ msleep(I40EVF_RESET_WAIT_MS);
+
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE)
break;
- msleep(I40EVF_RESET_WAIT_MS);
}
+
pci_set_master(adapter->pdev);
- /* extra wait to make sure minimum wait is met */
- msleep(I40EVF_RESET_WAIT_MS);
+
if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;