author     David S. Miller  2012-07-22 12:23:18 -0700
committer  David S. Miller  2012-07-22 12:23:18 -0700
commit     fd183f6a73fc0ff3e32c636dbb539e35d4c530c9 (patch)
tree       64c63653710ddbc15e81d81c53d47a3928953b51 /drivers/net/ethernet
parent     5dc7c77967a64a1c2cb1a45f45f580fa18927a4c (diff)
parent     76886596921dd0e058f7f0a16de6151629390d15 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

--------------------
This series contains updates to ixgbe and ixgbevf.
...

Akeem G. Abodunrin (1):
  igb: reset PHY in the link_up process to recover PHY setting after power down.

Alexander Duyck (8):
  ixgbe: Drop probe_vf and merge functionality into ixgbe_enable_sriov
  ixgbe: Change how we check for pre-existing and assigned VFs
  ixgbevf: Add lock around mailbox ops to prevent simultaneous access
  ixgbevf: Add support for PCI error handling
  ixgbe: Fix handling of FDIR_HASH flag
  ixgbe: Reduce Rx header size to what is actually used
  ixgbe: Use num_tcs.pg_tcs as upper limit for TC when checking based on UP
  ixgbe: Use 1TC DCB instead of disabling DCB for MSI and legacy interrupts

Don Skidmore (1):
  ixgbe: add support for new 82599 device

Greg Rose (1):
  ixgbevf: Fix namespace issue with ixgbe_write_eitr

John Fastabend (2):
  ixgbe: fix RAR entry counting for generic and fdb_add()
  ixgbe: remove extra unused queues in DCB + FCoE case
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c              3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h               16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c           12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c           41
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c          140
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c         151
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h         1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h          1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h           3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c      164
10 files changed, 323 insertions(+), 209 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8adeca9787ca..1050411e7ca3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1500,11 +1500,12 @@ static void igb_configure(struct igb_adapter *adapter)
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
+ igb_reset_phy(&adapter->hw);
+
if (adapter->hw.phy.media_type == e1000_media_type_copper)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
- igb_reset_phy(&adapter->hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5a286adc65c0..b9623e9ea895 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -77,17 +77,18 @@
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
+#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
*/
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
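
A rough worked version of the sizing arithmetic in the NOTE above (illustrative only; the exact totals vary with kernel config and architecture):

	/*
	 * Per-buffer overhead quoted in the NOTE:
	 *
	 *	 64 bytes  reserved by netdev_alloc_skb (NET_SKB_PAD)
	 *	 64 bytes  additional reserve for NET_IP_ALIGN
	 *	320 bytes  struct skb_shared_info
	 *	---------
	 *	448 bytes  total overhead
	 *
	 * With IXGBE_RXBUFFER_256 the header buffer therefore consumes
	 * roughly 256 + 448 = 704 bytes of page-fragment space, which is
	 * consistent with the quoted truesize of 960 or less once the
	 * sk_buff structure itself is counted.
	 */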
@@ -130,7 +131,6 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
- struct pci_dev *vfdev;
};
struct vf_macvlans {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 5442b359141e..9bc17c0cb972 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -232,18 +232,22 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct tc_configuration *tc_config = &cfg->tc_config[0];
u8 prio_mask = 1 << up;
- u8 tc;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
/*
- * Test for TCs 7 through 1 and report the first match we find. If
+ * Test from maximum TC to 1 and report the first match we find. If
* we find no match we can assume that the TC is 0 since the TC must
* be set for all user priorities
*/
- for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--) {
+ for (tc--; tc; tc--) {
if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
break;
}
-
+out:
return tc;
}
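
A minimal, self-contained sketch of the user-priority to traffic-class lookup the hunk above implements; the bitmap values are hypothetical and this is an illustration, not driver code:

	#include <stdio.h>

	/* Scan from the highest configured TC down to 1 and return the first
	 * TC whose bitmap carries the priority; fall through to TC 0, or
	 * return 0 outright when DCB is not configured at all.
	 */
	static unsigned char get_tc_from_up(const unsigned char *up_to_tc_bitmap,
					    unsigned char num_tcs, unsigned char up)
	{
		unsigned char prio_mask = 1 << up;
		unsigned char tc = num_tcs;

		if (!tc)
			return 0;

		for (tc--; tc; tc--)
			if (prio_mask & up_to_tc_bitmap[tc])
				break;

		return tc;
	}

	int main(void)
	{
		/* hypothetical mapping: priorities 0-3 -> TC0, 4-5 -> TC1, 6-7 -> TC2 */
		unsigned char map[8] = { 0x0f, 0x30, 0xc0, 0, 0, 0, 0, 0 };

		printf("up 5 -> TC %u\n", get_tc_from_up(map, 3, 5));	/* TC 1 */
		printf("up 2 -> TC %u\n", get_tc_from_up(map, 3, 2));	/* TC 0 */
		return 0;
	}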
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 38d1b65777ad..17ecbcedd548 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -370,6 +370,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = 1;
adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
adapter->num_rx_pools = vmdq_i;
adapter->num_rx_queues_per_pool = tcs;
@@ -450,6 +453,9 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
f->indices = rss_i;
f->mask = rss_m;
+ /* disable ATR as it is not supported when multiple TCs are enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
#ifdef IXGBE_FCOE
/* FCoE enabled queues require special configuration indexed
* by feature specific indices and offset. Here we map FCoE
@@ -606,16 +612,22 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
f->indices = rss_i;
f->mask = IXGBE_RSS_16Q_MASK;
+ /* disable ATR by default, it will be configured below */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
/*
* Use Flow Director in addition to RSS to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
*/
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
f->indices = min_t(u16, num_online_cpus(), f->limit);
rss_i = max_t(u16, rss_i, f->indices);
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
#ifdef IXGBE_FCOE
@@ -1053,18 +1065,27 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
return;
}
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- e_err(probe,
- "ATR is not supported while multiple "
- "queues are disabled. Disabling Flow Director\n");
+ /* disable DCB if number of TCs exceeds 1 */
+ if (netdev_get_num_tc(adapter->netdev) > 1) {
+ e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
+ netdev_reset_tc(adapter->netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
}
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 0;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+
+ /* disable SR-IOV */
+ ixgbe_disable_sriov(adapter);
+ /* disable RSS */
adapter->ring_feature[RING_F_RSS].limit = 1;
+
ixgbe_set_num_queues(adapter);
adapter->num_q_vectors = 1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f4e53c1a7338..3b6784cf134a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1517,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = skb_frag_size(frag);
- if (pull_len > 256)
- pull_len = ixgbe_get_headlen(va, pull_len);
+ if (pull_len > IXGBE_RX_HDR_SIZE)
+ pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -2688,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
- adapter->atr_sample_rate) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ring->atr_sample_rate = adapter->atr_sample_rate;
ring->atr_count = 0;
set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
@@ -3442,14 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
+ unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
+ /* In SR-IOV mode significantly less RAR entries are available */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
+
/* return ENOMEM indicating insufficient memory for addresses */
if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (!netdev_uc_empty(netdev) && rar_entries) {
+ if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
/* return error if we do not support writing to RAR table */
if (!hw->mac.ops.set_rar)
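
As a rough worked example of the RAR accounting above (assuming the usual 82599 values of num_rar_entries = 128 and IXGBE_MAX_PF_MACVLANS = 15, which are not stated in this patch): without SR-IOV the PF may now program up to 128 - 1 = 127 unicast addresses, with one entry held back for the primary MAC, while with SR-IOV enabled the VFs claim most of the table and the PF is limited to IXGBE_MAX_PF_MACVLANS - 1 = 14 additional entries.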
@@ -4419,7 +4422,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
/* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].limit =
IXGBE_MAX_FDIR_INDICES;
@@ -4490,6 +4492,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
+#ifdef CONFIG_PCI_IOV
+ /* assign number of SR-IOV VFs */
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+
+#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -6695,12 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Multiple traffic classes requires multiple queues */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- e_err(drv, "Enable failed, needs MSI-X\n");
- return -EINVAL;
- }
-
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&
@@ -6720,7 +6722,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_set_prio_tc_map(adapter);
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
@@ -6733,7 +6734,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -6802,20 +6802,40 @@ static int ixgbe_set_features(struct net_device *netdev,
* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
*/
- if (!(features & NETIF_F_NTUPLE)) {
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- /* turn off Flow Director, set ATR and reset */
- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- }
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
/* turn off ATR, enable perfect filters and reset */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ need_reset = true;
+
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ need_reset = true;
+
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+ /* We cannot enable ATR if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ break;
+
+ /* We cannot enable ATR if we have 2 or more traffic classes */
+ if (netdev_get_num_tc(netdev) > 1)
+ break;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ /* A sample rate of 0 indicates ATR disabled */
+ if (!adapter->atr_sample_rate)
+ break;
+
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
}
if (features & NETIF_F_HW_VLAN_RX)
@@ -6839,7 +6859,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
u16 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
if (ndm->ndm_state & NUD_PERMANENT) {
pr_info("%s: FDB only supports static addresses\n",
@@ -6847,13 +6870,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
return -EINVAL;
}
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
+ if (is_unicast_ether_addr(addr)) {
+ u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
else
- err = -EINVAL;
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
}
/* Only return duplicate errors if NLM_F_EXCL is set */
@@ -6942,26 +6969,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
};
-static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
-{
-#ifdef CONFIG_PCI_IOV
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- /* The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
- ixgbe_enable_sriov(adapter, ii);
-#endif /* CONFIG_PCI_IOV */
-}
-
/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
@@ -6988,6 +6995,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
if (hw->bus.func != 0)
break;
case IXGBE_SUBDEV_ID_82599_SFP:
+ case IXGBE_SUBDEV_ID_82599_RNDC:
is_wol_supported = 1;
break;
}
@@ -7035,6 +7043,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
int i, err, pci_using_dac;
u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
+ unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7084,15 +7093,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_save_state(pdev);
#ifdef CONFIG_IXGBE_DCB
- indices *= MAX_TRAFFIC_CLASS;
+ if (ii->mac == ixgbe_mac_82598EB)
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_RSS_INDICES);
+ else
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_FDIR_INDICES);
#endif
if (ii->mac == ixgbe_mac_82598EB)
-#ifdef CONFIG_IXGBE_DCB
- indices = min_t(unsigned int, indices, MAX_TRAFFIC_CLASS * 4);
-#else
indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
-#endif
else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
@@ -7100,6 +7110,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
+ indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
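
A worked example of the queue-count arithmetic in the hunks above, assuming the usual bounds of MAX_TRAFFIC_CLASS = 8, IXGBE_MAX_RSS_INDICES = 16, IXGBE_MAX_FDIR_INDICES = 64 and IXGBE_MAX_FCOE_INDICES = 8 (illustrative values, not taken from this patch): on an 82599 with 16 possible CPUs and DCB built in, dcb_max = min(16 * 8, 64) = 64; the non-DCB path gives indices = min(16, 64) = 16; FCoE adds min(16, 8) = 8 for a running total of 24; and the final indices = max(64, 24) = 64 queues passed to alloc_etherdev_mq(). On an 82598 the DCB bound is instead min(16 * 8, 16) = 16.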
@@ -7206,8 +7217,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
- ixgbe_probe_vf(adapter, ii);
+#ifdef CONFIG_PCI_IOV
+ ixgbe_enable_sriov(adapter, ii);
+#endif
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -7411,8 +7424,7 @@ err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(hw->hw_addr);
err_ioremap:
@@ -7465,13 +7477,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (!(ixgbe_check_vf_assignment(adapter)))
- ixgbe_disable_sriov(adapter);
- else
- e_dev_warn("Unloading driver while VFs are assigned "
- "- VFs will not be deallocated\n");
- }
+ ixgbe_disable_sriov(adapter);
ixgbe_clear_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index a825d4808cd2..47b2ce740ec1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -44,50 +44,15 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- int device_id;
- int vfs_found = 0;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn &&
- (pvfdev->bus->number >= pdev->bus->number))
- vfs_found++;
- vf_devfn += 2;
- pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- device_id, pvfdev);
- }
-
- return vfs_found;
-}
-
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
int pre_existing_vfs = 0;
- pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
if (!pre_existing_vfs && !adapter->num_vfs)
return;
@@ -106,10 +71,21 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
"enabled for this device - Please reload all "
"VF drivers to avoid spoofed packet errors\n");
} else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function. If the user requests greater than
+ * 63 VFs then it is an error - reset to default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
if (err) {
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- goto err_novfs;
+ adapter->num_vfs = 0;
+ return;
}
}
@@ -193,20 +169,48 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* Oh oh */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
- pci_disable_sriov(adapter->pdev);
+ ixgbe_disable_sriov(adapter);
+}
-err_novfs:
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->num_vfs = 0;
+static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *vfdev;
+ int dev_id;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ dev_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ dev_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ return false;
+ }
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
+ }
+
+ return false;
}
-#endif /* #ifdef CONFIG_PCI_IOV */
+#endif /* #ifdef CONFIG_PCI_IOV */
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 gpie;
u32 vmdctl;
- int i;
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
@@ -219,7 +223,20 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
kfree(adapter->mv_list);
adapter->mv_list = NULL;
+ /* if SR-IOV is already disabled then there is nothing to do */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
#ifdef CONFIG_PCI_IOV
+ /*
+ * If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (ixgbe_vfs_are_assigned(adapter)) {
+ e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return;
+ }
+
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
#endif
@@ -244,12 +261,6 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
/* take a breather then clean up driver data */
msleep(100);
- /* Release reference to VF devices */
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev)
- pci_dev_put(adapter->vfinfo[i].vfdev);
- }
-
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
@@ -483,28 +494,11 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
return 0;
}
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
- int i;
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-#endif
- return false;
-}
-
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
- (pdev->devfn & 1);
bool enable = ((event_mask & 0x10000000U) != 0);
@@ -517,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
* for it later.
*/
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- device_id, pvfdev);
- }
- if (pvfdev)
- adapter->vfinfo[vfn].vfdev = pvfdev;
- else
- e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2ab38d5fda92..1be1d30e4e78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index fe0a19d91d4a..400f86a31174 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -54,6 +54,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e167d1bb6dea..98cadb0c4dab 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -249,6 +249,8 @@ struct ixgbevf_adapter {
bool link_up;
struct work_struct watchdog_task;
+
+ spinlock_t mbx_lock;
};
enum ixbgevf_state_t {
@@ -284,7 +286,6 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-void ixgbevf_write_eitr(struct ixgbevf_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2dc78d7e297a..3f9841d619ad 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -540,6 +540,25 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
return 0;
}
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
@@ -662,30 +681,6 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
ring_container->itr = itr_setting;
}
-/**
- * ixgbevf_write_eitr - write VTEITR register in hardware specific way
- * @q_vector: structure containing interrupt and ring information
- *
- * This function is made to be called by ethtool and by the driver
- * when it needs to update VTEITR registers at runtime. Hardware
- * specific quirks/differences are taken care of here.
- */
-void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- int v_idx = q_vector->v_idx;
- u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
-
- /*
- * set the WDIS bit to not clear the timer bits and cause an
- * immediate assertion of the interrupt
- */
- itr_reg |= IXGBE_EITR_CNT_WDIS;
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
-}
-
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
u32 new_itr = q_vector->itr;
@@ -1120,9 +1115,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* add VID to filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
+
+ spin_unlock(&adapter->mbx_lock);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -1133,9 +1133,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -1190,11 +1195,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* reprogram multicast list */
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
+
+ spin_unlock(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1339,6 +1348,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar) {
if (is_valid_ether_addr(hw->mac.addr))
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
@@ -1350,6 +1361,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mbx.ops.write_posted(hw, msg, 2);
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1562,11 +1575,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
+ spin_unlock(&adapter->mbx_lock);
+
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1893,6 +1910,9 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->netdev->addr_len);
}
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
+
/* Enable dynamic interrupt throttling rates */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -2032,8 +2052,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* no LSC interrupt
*/
if (hw->mac.ops.check_link) {
- if ((hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false)) != 0) {
+ s32 need_reset;
+
+ spin_lock(&adapter->mbx_lock);
+
+ need_reset = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
+ if (need_reset) {
adapter->link_up = link_up;
adapter->link_speed = link_speed;
netif_carrier_off(netdev);
@@ -2813,9 +2841,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar)
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ spin_unlock(&adapter->mbx_lock);
+
return 0;
}
@@ -3152,12 +3184,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/**
+ * ixgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the ixgbevf_resume routine.
+ */
+static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+
+ ixgbevf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the ixgbevf_resume routine.
+ */
+static void ixgbevf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers ixgbevf_err_handler = {
+ .error_detected = ixgbevf_io_error_detected,
+ .slot_reset = ixgbevf_io_slot_reset,
+ .resume = ixgbevf_io_resume,
+};
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
.shutdown = ixgbevf_shutdown,
+ .err_handler = &ixgbevf_err_handler
};
/**