 drivers/net/ethernet/intel/ice/ice.h                 |  13
 drivers/net/ethernet/intel/ice/ice_base.c            |   4
 drivers/net/ethernet/intel/ice/ice_eswitch.c         | 161
 drivers/net/ethernet/intel/ice/ice_ethtool.c         |  20
 drivers/net/ethernet/intel/ice/ice_lib.c             | 203
 drivers/net/ethernet/intel/ice/ice_lib.h             |   3
 drivers/net/ethernet/intel/ice/ice_main.c            |  64
 drivers/net/ethernet/intel/ice/ice_repr.c            |  54
 drivers/net/ethernet/intel/ice/ice_txrx.c            |   2
 drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c |  19
 drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c   |  13
 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c     | 787
 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h     |  83
 13 files changed, 872 insertions(+), 554 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 1ca1273a7cd3..dc42ff92dbad 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -109,7 +109,6 @@
 /* All VF control VSIs share the same IRQ, so assign a unique ID for them */
 #define ICE_RES_VF_CTRL_VEC_ID	(ICE_RES_RDMA_VEC_ID - 1)
 #define ICE_INVAL_Q_INDEX	0xffff
-#define ICE_INVAL_VFID		256
 
 #define ICE_MAX_RXQS_PER_TC	256	/* Used when setting VSI context per TC Rx queues */
 
@@ -333,7 +332,7 @@ struct ice_vsi {
 	u16 vsi_num;			/* HW (absolute) index of this VSI */
 	u16 idx;			/* software index in pf->vsi[] */
 
-	s16 vf_id;			/* VF ID for SR-IOV VSIs */
+	struct ice_vf *vf;		/* VF associated with this VSI */
 
 	u16 ethtype;			/* Ethernet protocol for pause frame */
 	u16 num_gfltr;
@@ -529,15 +528,7 @@ struct ice_pf {
 	struct ice_vsi **vsi;		/* VSIs created by the driver */
 	struct ice_sw *first_sw;	/* first switch created by firmware */
 	u16 eswitch_mode;		/* current mode of eswitch */
-	/* Virtchnl/SR-IOV config info */
-	struct ice_vf *vf;
-	u16 num_alloc_vfs;		/* actual number of VFs allocated */
-	u16 num_vfs_supported;		/* num VFs supported for this PF */
-	u16 num_qps_per_vf;
-	u16 num_msix_per_vf;
-	/* used to ratelimit the MDD event logging */
-	unsigned long last_printed_mdd_jiffies;
-	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+	struct ice_vfs vfs;
 	DECLARE_BITMAP(features, ICE_F_MAX);
 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 2360e6abdb1e..a3094470d31d 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -323,7 +323,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
 		break;
 	case ICE_VSI_VF:
 		/* Firmware expects vmvf_num to be absolute VF ID */
-		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
 		break;
 	case ICE_VSI_SWITCHDEV_CTRL:
@@ -429,7 +429,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	 */
 	if (ice_is_dvm_ena(hw))
 		if (vsi->type == ICE_VSI_VF &&
-		    ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id]))
+		    ice_vf_is_port_vlan_ena(vsi->vf))
 			rlan_ctx.l2tsel = 1;
 		else
 			rlan_ctx.l2tsel = 0;
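The two hunks above capture the shape of the whole patch: per-VF state moves out of the flat pf->vf array into a pf->vfs container, and each VSI now carries a struct ice_vf back-pointer instead of an s16 vf_id sentinel. The full definition of struct ice_vfs lives in ice_virtchnl_pf.h, whose hunks are not shown in this capture; based on the fields referenced throughout the patch it plausibly looks like the following sketch (field order, comments, and the hash-table size are illustrative, not verbatim):

	/* illustrative sketch of the new VF container, assuming a
	 * linux/hashtable.h table keyed by VF ID
	 */
	struct ice_vfs {
		struct mutex table_lock;	/* serializes add/remove of entries */
		u16 num_supported;		/* max VFs this PF supports */
		u16 num_qps_per;		/* queue pairs allotted per VF */
		u16 num_msix_per;		/* MSI-X vectors allotted per VF */
		unsigned long last_printed_mdd_jiffies;	/* MDD log rate limit */
		DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); /* malicious VF tracking */
		DECLARE_HASHTABLE(table, 8);	/* VF entries keyed by vf_id */
	};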
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 95c81fc9ec9f..9a84d746a6c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -176,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 	int q_id;
 
 	ice_for_each_txq(vsi, q_id) {
-		struct ice_repr *repr = pf->vf[q_id].repr;
-		struct ice_q_vector *q_vector = repr->q_vector;
-		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
-		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+		struct ice_q_vector *q_vector;
+		struct ice_tx_ring *tx_ring;
+		struct ice_rx_ring *rx_ring;
+		struct ice_repr *repr;
+		struct ice_vf *vf;
+
+		vf = ice_get_vf_by_id(pf, q_id);
+		if (WARN_ON(!vf))
+			continue;
+
+		repr = vf->repr;
+		q_vector = repr->q_vector;
+		tx_ring = vsi->tx_rings[q_id];
+		rx_ring = vsi->rx_rings[q_id];
 
 		q_vector->vsi = vsi;
 		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -199,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 		rx_ring->q_vector = q_vector;
 		rx_ring->next = NULL;
 		rx_ring->netdev = repr->netdev;
+
+		ice_put_vf(vf);
+	}
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_vsi *vsi = vf->repr->src_vsi;
+
+		/* Skip VFs that aren't configured */
+		if (!vf->repr->dst)
+			continue;
+
+		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+		metadata_dst_free(vf->repr->dst);
+		vf->repr->dst = NULL;
+		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+					       ICE_FWD_TO_VSI);
+
+		netif_napi_del(&vf->repr->q_vector->napi);
 	}
 }
 
@@ -210,11 +252,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 {
 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
 	int max_vsi_num = 0;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
-	ice_for_each_vf(pf, i) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_vsi *vsi = vf->repr->src_vsi;
 
 		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
 		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -231,6 +275,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 						       vf->hw_lan_addr.addr,
 						       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
+			vf->repr->dst = NULL;
 			goto err;
 		}
 
@@ -239,6 +284,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 						       vf->hw_lan_addr.addr,
 						       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
+			vf->repr->dst = NULL;
 			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 			goto err;
 		}
@@ -252,8 +298,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 		netif_keep_dst(vf->repr->netdev);
 	}
 
-	ice_for_each_vf(pf, i) {
-		struct ice_repr *repr = pf->vf[i].repr;
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_repr *repr = vf->repr;
 		struct ice_vsi *vsi = repr->src_vsi;
 		struct metadata_dst *dst;
 
@@ -266,43 +312,12 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 	return 0;
 
 err:
-	for (i = i - 1; i >= 0; i--) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-		metadata_dst_free(vf->repr->dst);
-		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
-					       ICE_FWD_TO_VSI);
-	}
+	ice_eswitch_release_reprs(pf, ctrl_vsi);
 
 	return -ENODEV;
 }
 
 /**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
- * @pf: poiner to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
- */
-static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
-{
-	int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-		metadata_dst_free(vf->repr->dst);
-		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
-					       ICE_FWD_TO_VSI);
-
-		netif_napi_del(&vf->repr->q_vector->napi);
-	}
-}
-
-/**
  * ice_eswitch_update_repr - reconfigure VF port representor
  * @vsi: VF VSI for which port representor is configured
  */
@@ -316,7 +331,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
 	if (!ice_is_switchdev_running(pf))
 		return;
 
-	vf = &pf->vf[vsi->vf_id];
+	vf = vsi->vf;
 	repr = vf->repr;
 	repr->src_vsi = vsi;
 	repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -324,7 +339,8 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
 	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
 	if (ret) {
 		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
 					       ICE_FWD_TO_VSI);
-		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
+			vsi->vf->vf_id);
 	}
 }
 
@@ -408,7 +424,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
 static struct ice_vsi *
 ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
 }
 
 /**
@@ -417,10 +433,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
  */
 static void ice_eswitch_napi_del(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
-	ice_for_each_vf(pf, i)
-		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf)
+		netif_napi_del(&vf->repr->q_vector->napi);
 }
 
 /**
@@ -429,10 +448,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
  */
 static void ice_eswitch_napi_enable(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
-	ice_for_each_vf(pf, i)
-		napi_enable(&pf->vf[i].repr->q_vector->napi);
+	ice_for_each_vf(pf, bkt, vf)
+		napi_enable(&vf->repr->q_vector->napi);
 }
 
 /**
@@ -441,10 +463,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
  */
 static void ice_eswitch_napi_disable(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
-	ice_for_each_vf(pf, i)
-		napi_disable(&pf->vf[i].repr->q_vector->napi);
+	ice_for_each_vf(pf, bkt, vf)
+		napi_disable(&vf->repr->q_vector->napi);
 }
 
 /**
@@ -522,7 +547,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (pf->eswitch_mode == mode)
 		return 0;
 
-	if (pf->num_alloc_vfs) {
+	if (ice_has_vfs(pf)) {
 		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
 		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
 		return -EOPNOTSUPP;
@@ -613,16 +638,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
 */
 static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
 {
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
 	if (test_bit(ICE_DOWN, pf->state))
 		return;
 
-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_start_tx_queues(repr);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->repr)
+			ice_repr_start_tx_queues(vf->repr);
 	}
 }
 
@@ -632,16 +658,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
 */
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
 {
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
 
 	if (test_bit(ICE_DOWN, pf->state))
 		return;
 
-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_stop_tx_queues(repr);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->repr)
+			ice_repr_stop_tx_queues(vf->repr);
 	}
 }
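The eswitch hunks establish the first of the two iteration contracts this patch introduces: ice_for_each_vf() walks every hash-table entry and is only safe while pf->vfs.table_lock is held, which is why each helper now documents the requirement with lockdep_assert_held(). A minimal sketch of the calling pattern (example_walk_vfs is a hypothetical helper, not part of the patch):

	static void example_walk_vfs(struct ice_pf *pf)
	{
		struct ice_vf *vf;
		unsigned int bkt;

		mutex_lock(&pf->vfs.table_lock);
		ice_for_each_vf(pf, bkt, vf) {
			/* entries cannot be added or removed while the mutex
			 * is held, so no per-VF reference is needed here
			 */
		}
		mutex_unlock(&pf->vfs.table_lock);
	}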
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a3492754d0d3..399625892f9e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -316,16 +316,20 @@ out:
 */
 static bool ice_active_vfs(struct ice_pf *pf)
 {
-	unsigned int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
+	bool active = false;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
-		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
-			return true;
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf) {
+		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+			active = true;
+			break;
+		}
 	}
+	rcu_read_unlock();
 
-	return false;
+	return active;
 }
 
 /**
@@ -1298,7 +1302,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	}
 
 	if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
-	    pf->num_alloc_vfs) {
+	    ice_has_vfs(pf)) {
 		dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
 		/* toggle bit back to previous state */
 		change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
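ice_active_vfs() above shows the second iteration flavor: read-only walks that only peek at VF state can use ice_for_each_vf_rcu() under rcu_read_lock() instead of taking the mutex. A hypothetical counter in the same style (not part of the patch):

	static u16 example_count_active_vfs(struct ice_pf *pf)
	{
		struct ice_vf *vf;
		unsigned int bkt;
		u16 active = 0;

		rcu_read_lock();
		ice_for_each_vf_rcu(pf, bkt, vf)
			if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
				active++;
		rcu_read_unlock();

		return active;
	}

Pointers obtained this way are only valid until rcu_read_unlock(), so nothing may be kept past the critical section without taking a reference via ice_get_vf_by_id().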
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 662493d1002b..113a2c56c14c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -166,21 +166,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 /**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
- * @vf_id: ID of the VF being configured
+ * @vf: the VF associated with this VSI, if any
 *
 * Return 0 on success and a negative value on error
 */
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
 {
+	enum ice_vsi_type vsi_type = vsi->type;
 	struct ice_pf *pf = vsi->back;
-	struct ice_vf *vf = NULL;
 
-	if (vsi->type == ICE_VSI_VF)
-		vsi->vf_id = vf_id;
-	else
-		vsi->vf_id = ICE_INVAL_VFID;
+	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+		return;
 
-	switch (vsi->type) {
+	switch (vsi_type) {
 	case ICE_VSI_PF:
 		if (vsi->req_txq) {
 			vsi->alloc_txq = vsi->req_txq;
@@ -217,22 +215,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
 		/* The number of queues for ctrl VSI is equal to number of VFs.
 		 * Each ring is associated to the corresponding VF_PR netdev.
 		 */
-		vsi->alloc_txq = pf->num_alloc_vfs;
-		vsi->alloc_rxq = pf->num_alloc_vfs;
+		vsi->alloc_txq = ice_get_num_vfs(pf);
+		vsi->alloc_rxq = vsi->alloc_txq;
 		vsi->num_q_vectors = 1;
 		break;
 	case ICE_VSI_VF:
-		vf = &pf->vf[vsi->vf_id];
 		if (vf->num_req_qs)
 			vf->num_vf_qs = vf->num_req_qs;
 		vsi->alloc_txq = vf->num_vf_qs;
 		vsi->alloc_rxq = vf->num_vf_qs;
-		/* pf->num_msix_per_vf includes (VF miscellaneous vector +
+		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
 		 * data queue interrupts). Since vsi->num_q_vectors is number
 		 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
 		 * original vector count
 		 */
-		vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
+		vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
 		break;
 	case ICE_VSI_CTRL:
 		vsi->alloc_txq = 1;
@@ -248,7 +245,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
 		vsi->alloc_rxq = 1;
 		break;
 	default:
-		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
+		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
 		break;
 	}
 
@@ -299,7 +296,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
 		return;
 
 	if (vsi->type == ICE_VSI_VF)
-		ctxt->vf_num = vsi->vf_id;
+		ctxt->vf_num = vsi->vf->vf_id;
 	ctxt->vsi_num = vsi->vsi_num;
 
 	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
@@ -384,8 +381,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
 	pf->vsi[vsi->idx] = NULL;
 	if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
 		pf->next_vsi = vsi->idx;
-	if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
-	    vsi->vf_id != ICE_INVAL_VFID)
+	if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
 		pf->next_vsi = vsi->idx;
 
 	ice_vsi_free_arrays(vsi);
@@ -437,13 +433,16 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
 {
 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
 	struct ice_pf *pf = q_vector->vsi->back;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
 	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
 		return IRQ_HANDLED;
 
-	ice_for_each_vf(pf, i)
-		napi_schedule(&pf->vf[i].repr->q_vector->napi);
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf)
+		napi_schedule(&vf->repr->q_vector->napi);
+	rcu_read_unlock();
 
 	return IRQ_HANDLED;
 }
@@ -453,17 +452,24 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
 * @pf: board private structure
 * @vsi_type: type of VSI
 * @ch: ptr to channel
- * @vf_id: ID of the VF being configured
+ * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
+ *
+ * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
+ * it may be NULL in the case there is no association with a VF. For
+ * ICE_VSI_VF the VF pointer *must not* be NULL.
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
 static struct ice_vsi *
 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
-	      struct ice_channel *ch, u16 vf_id)
+	      struct ice_channel *ch, struct ice_vf *vf)
 {
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_vsi *vsi = NULL;
 
+	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+		return NULL;
+
 	/* Need to protect the allocation of the VSIs at the PF level */
 	mutex_lock(&pf->sw_mutex);
@@ -485,9 +491,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
 	set_bit(ICE_VSI_DOWN, vsi->state);
 
 	if (vsi_type == ICE_VSI_VF)
-		ice_vsi_set_num_qs(vsi, vf_id);
+		ice_vsi_set_num_qs(vsi, vf);
 	else if (vsi_type != ICE_VSI_CHNL)
-		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+		ice_vsi_set_num_qs(vsi, NULL);
 
 	switch (vsi->type) {
 	case ICE_VSI_SWITCHDEV_CTRL:
@@ -510,10 +516,16 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
 
 		/* Setup ctrl VSI MSIX irq handler */
 		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+
+		/* For the PF control VSI this is NULL, for the VF control VSI
+		 * this will be the first VF to allocate it.
+		 */
+		vsi->vf = vf;
 		break;
 	case ICE_VSI_VF:
 		if (ice_vsi_alloc_arrays(vsi))
 			goto err_rings;
+		vsi->vf = vf;
 		break;
 	case ICE_VSI_CHNL:
 		if (!ch)
@@ -531,7 +543,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
 		goto unlock_pf;
 	}
 
-	if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+	if (vsi->type == ICE_VSI_CTRL && !vf) {
 		/* Use the last VSI slot as the index for PF control VSI */
 		vsi->idx = pf->num_alloc_vsi - 1;
 		pf->ctrl_vsi_idx = vsi->idx;
@@ -546,8 +558,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
 			      pf->next_vsi);
 	}
 
-	if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
-		pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
+	if (vsi->type == ICE_VSI_CTRL && vf)
+		vf->ctrl_vsi_idx = vsi->idx;
 	goto unlock_pf;
 
 err_rings:
@@ -1130,7 +1142,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
 	case ICE_VSI_VF:
 		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
 		/* VF number here is the absolute VF number (0-255) */
-		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
 		break;
 	default:
 		ret = -ENODEV;
@@ -1322,6 +1334,36 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
 }
 
 /**
+ * ice_get_vf_ctrl_res - Get VF control VSI resource
+ * @pf: pointer to the PF structure
+ * @vsi: the VSI to allocate a resource for
+ *
+ * Look up whether another VF has already allocated the control VSI resource.
+ * If so, re-use this resource so that we share it among all VFs.
+ *
+ * Otherwise, allocate the resource and return it.
+ */
+static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+	int base;
+
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf) {
+		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+			base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+			rcu_read_unlock();
+			return base;
+		}
+	}
+	rcu_read_unlock();
+
+	return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
+			   ICE_RES_VF_CTRL_VEC_ID);
+}
+
+/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
@@ -1353,20 +1395,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 	num_q_vectors = vsi->num_q_vectors;
 	/* reserve slots from OS requested IRQs */
-	if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
-		int i;
-
-		ice_for_each_vf(pf, i) {
-			struct ice_vf *vf = &pf->vf[i];
-
-			if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
-				base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
-				break;
-			}
-		}
-		if (i == pf->num_alloc_vfs)
-			base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
-					   ICE_RES_VF_CTRL_VEC_ID);
+	if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+		base = ice_get_vf_ctrl_res(pf, vsi);
 	} else {
 		base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
 				   vsi->idx);
@@ -2218,7 +2248,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
 		}
 
 		if (vsi->type == ICE_VSI_VF) {
-			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+			struct ice_vf *vf = vsi->vf;
 
 			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
 		} else {
@@ -2403,9 +2433,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vsi_type: VSI type
- * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
- *	   used only for ICE_VSI_VF VSI type. For other VSI types, should
- *	   fill-in ICE_INVAL_VFID as input.
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ *      for the ICE_VSI_VF type. Other VSI types should pass NULL.
 * @ch: ptr to channel
 *
 * This allocates the sw VSI structure and its queue resources.
@@ -2415,7 +2444,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
 */
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
+	      enum ice_vsi_type vsi_type, struct ice_vf *vf,
+	      struct ice_channel *ch)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct device *dev = ice_pf_to_dev(pf);
@@ -2423,11 +2453,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	int ret, i;
 
 	if (vsi_type == ICE_VSI_CHNL)
-		vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+		vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
 	else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
-		vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
+		vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
 	else
-		vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
+		vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
 
 	if (!vsi) {
 		dev_err(dev, "could not allocate VSI\n");
@@ -2439,9 +2469,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	if (vsi->type == ICE_VSI_PF)
 		vsi->ethtype = ETH_P_PAUSE;
 
-	if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
-		vsi->vf_id = vf_id;
-
 	ice_alloc_fd_res(vsi);
 
 	if (vsi_type != ICE_VSI_CHNL) {
@@ -2862,6 +2889,37 @@ void ice_napi_del(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
+ *
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
+ */
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf) {
+		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+			rcu_read_unlock();
+			return;
+		}
+	}
+	rcu_read_unlock();
+
+	/* No other VFs left that have control VSI. It is now safe to reclaim
+	 * SW interrupts back to the common pool.
+	 */
+	ice_free_res(pf->irq_tracker, vsi->base_vector,
+		     ICE_RES_VF_CTRL_VEC_ID);
+	pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
@@ -2904,23 +2962,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
 	 * cleared in the same manner.
 	 */
-	if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
-		int i;
-
-		ice_for_each_vf(pf, i) {
-			struct ice_vf *vf = &pf->vf[i];
-
-			if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
-				break;
-		}
-		if (i == pf->num_alloc_vfs) {
-			/* No other VFs left that have control VSI, reclaim SW
-			 * interrupts back to the common pool
-			 */
-			ice_free_res(pf->irq_tracker, vsi->base_vector,
-				     ICE_RES_VF_CTRL_VEC_ID);
-			pf->num_avail_sw_msix += vsi->num_q_vectors;
-		}
+	if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+		ice_free_vf_ctrl_res(pf, vsi);
 	} else if (vsi->type != ICE_VSI_VF) {
 		/* reclaim SW interrupts back to the common pool */
 		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -3104,7 +3147,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_coalesce_stored *coalesce;
 	int prev_num_q_vectors = 0;
-	struct ice_vf *vf = NULL;
 	enum ice_vsi_type vtype;
 	struct ice_pf *pf;
 	int ret, i;
@@ -3114,8 +3156,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 
 	pf = vsi->back;
 	vtype = vsi->type;
-	if (vtype == ICE_VSI_VF)
-		vf = &pf->vf[vsi->vf_id];
+	if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+		return -EINVAL;
 
 	ice_vsi_init_vlan_ops(vsi);
@@ -3154,9 +3196,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi);
 	if (vtype == ICE_VSI_VF)
-		ice_vsi_set_num_qs(vsi, vf->vf_id);
+		ice_vsi_set_num_qs(vsi, vsi->vf);
 	else
-		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+		ice_vsi_set_num_qs(vsi, NULL);
 
 	ret = ice_vsi_alloc_arrays(vsi);
 	if (ret < 0)
@@ -4013,9 +4055,14 @@ static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
 #define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
 	/* no VLAN 0 filter is created when a port VLAN is active */
-	if (vsi->type == ICE_VSI_VF &&
-	    ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id]))
-		return 0;
+	if (vsi->type == ICE_VSI_VF) {
+		if (WARN_ON(!vsi->vf))
+			return 0;
+
+		if (ice_vf_is_port_vlan_ena(vsi->vf))
+			return 0;
+	}
+
 	if (ice_is_dvm_ena(&vsi->back->hw))
 		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
 	else
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 491f13f98797..0095329949d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -52,7 +52,8 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
 
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
+	      enum ice_vsi_type vsi_type, struct ice_vf *vf,
+	      struct ice_channel *ch);
 
 void ice_napi_del(struct ice_vsi *vsi);
 
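With the ice_vsi_setup() signature change, callers identify the owning VF by pointer rather than through the ICE_INVAL_VFID sentinel. Side by side (illustrative, mirroring the call sites updated elsewhere in this patch):

	/* old: sentinel-based */
	vsi = ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);

	/* new: NULL means "no VF"; a VF VSI passes the VF itself */
	vsi = ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);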
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 95dfd04e8c3a..289e5c99e313 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -505,7 +505,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 {
 	struct ice_hw *hw = &pf->hw;
 	struct ice_vsi *vsi;
-	unsigned int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
 
@@ -520,8 +521,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 	ice_vc_notify_reset(pf);
 
 	/* Disable VFs until reset is completed */
-	ice_for_each_vf(pf, i)
-		ice_set_vf_state_qs_dis(&pf->vf[i]);
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf)
+		ice_set_vf_state_qs_dis(vf);
+	mutex_unlock(&pf->vfs.table_lock);
 
 	if (ice_is_eswitch_mode_switchdev(pf)) {
 		if (reset_type != ICE_RESET_PFR)
@@ -1666,7 +1669,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 {
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_hw *hw = &pf->hw;
-	unsigned int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 	u32 reg;
 
 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1754,47 +1758,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 	/* Check to see if one of the VFs caused an MDD event, and then
 	 * increment counters and set print pending
 	 */
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		reg = rd32(hw, VP_MDET_TX_PQM(i));
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf) {
+		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
 		if (reg & VP_MDET_TX_PQM_VALID_M) {
-			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
 			vf->mdd_tx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_tx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 		}
 
-		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
-			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
 			vf->mdd_tx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_tx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 		}
 
-		reg = rd32(hw, VP_MDET_TX_TDPU(i));
+		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
-			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
 			vf->mdd_tx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_tx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 		}
 
-		reg = rd32(hw, VP_MDET_RX(i));
+		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
 		if (reg & VP_MDET_RX_VALID_M) {
-			wr32(hw, VP_MDET_RX(i), 0xFFFF);
+			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
 			vf->mdd_rx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_rx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 
 			/* Since the queue is disabled on VF Rx MDD events, the
 			 * PF can be configured to reset the VF through ethtool
@@ -1805,12 +1808,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 				 * reset, so print the event prior to reset.
 				 */
 				ice_print_vf_rx_mdd_event(vf);
-				mutex_lock(&pf->vf[i].cfg_lock);
-				ice_reset_vf(&pf->vf[i], false);
-				mutex_unlock(&pf->vf[i].cfg_lock);
+				mutex_lock(&vf->cfg_lock);
+				ice_reset_vf(vf, false);
+				mutex_unlock(&vf->cfg_lock);
 			}
 		}
 	}
+	mutex_unlock(&pf->vfs.table_lock);
 
 	ice_print_vfs_mdd_events(pf);
 }
@@ -2439,7 +2443,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
 			/* skip this unused q_vector */
 			continue;
 		}
-		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
 					       IRQF_SHARED, q_vector->name,
 					       q_vector);
@@ -3386,14 +3390,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
 static struct ice_vsi *
 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
 }
 
 static struct ice_vsi *
 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		   struct ice_channel *ch)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
 }
 
 /**
@@ -3407,7 +3411,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 static struct ice_vsi *
 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
 }
 
 /**
@@ -3421,7 +3425,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 struct ice_vsi *
 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
 }
 
 /**
@@ -3680,6 +3684,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
 	mutex_destroy(&pf->sw_mutex);
 	mutex_destroy(&pf->tc_mutex);
 	mutex_destroy(&pf->avail_q_mutex);
+	mutex_destroy(&pf->vfs.table_lock);
 
 	if (pf->avail_txqs) {
 		bitmap_free(pf->avail_txqs);
@@ -3712,7 +3717,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
 	if (func_caps->common_cap.sr_iov_1_1) {
 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
-		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
+		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
 					      ICE_MAX_VF_COUNT);
 	}
 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -3779,6 +3784,9 @@ static int ice_init_pf(struct ice_pf *pf)
 		return -ENOMEM;
 	}
 
+	mutex_init(&pf->vfs.table_lock);
+	hash_init(pf->vfs.table);
+
 	return 0;
 }
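ice_init_pf() above initializes the table and its mutex before any VFs can exist. The two iterators used throughout the patch are declared in ice_virtchnl_pf.h, which this capture does not show in full; given the linux/hashtable.h API they wrap, they plausibly reduce to something like the following sketch (the `entry` member is assumed to be the hlist node added via hash_add_rcu() later in this patch):

	/* illustrative sketch of the iterator macros, not verbatim */
	#define ice_for_each_vf(pf, bkt, vf) \
		hash_for_each((pf)->vfs.table, (bkt), (vf), entry)

	#define ice_for_each_vf_rcu(pf, bkt, vf) \
		hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)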
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index dcc310e29300..2adfaf21e056 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -284,6 +284,8 @@ static int ice_repr_add(struct ice_vf *vf)
 
 	devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
 
+	ice_vc_change_ops_to_repr(&vf->vc_ops);
+
 	return 0;
 
 err_netdev:
@@ -311,6 +313,9 @@ err_alloc_rule:
 */
 static void ice_repr_rem(struct ice_vf *vf)
 {
+	if (!vf->repr)
+		return;
+
 	ice_devlink_destroy_vf_port(vf);
 	kfree(vf->repr->q_vector);
 	vf->repr->q_vector = NULL;
@@ -323,6 +328,23 @@ static void ice_repr_rem(struct ice_vf *vf)
 #endif
 	kfree(vf->repr);
 	vf->repr = NULL;
+
+	ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf)
+		ice_repr_rem(vf);
 }
 
 /**
@@ -331,49 +353,27 @@ static void ice_repr_rem(struct ice_vf *vf)
 */
 int ice_repr_add_for_all_vfs(struct ice_pf *pf)
 {
+	struct ice_vf *vf;
+	unsigned int bkt;
 	int err;
-	int i;
 
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
+	lockdep_assert_held(&pf->vfs.table_lock);
 
+	ice_for_each_vf(pf, bkt, vf) {
 		err = ice_repr_add(vf);
 		if (err)
 			goto err;
-
-		ice_vc_change_ops_to_repr(&vf->vc_ops);
 	}
 
 	return 0;
 
 err:
-	for (i = i - 1; i >= 0; i--) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_repr_rem(vf);
-		ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-	}
+	ice_repr_rem_from_all_vfs(pf);
 
 	return err;
 }
 
 /**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
-	int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_repr_rem(vf);
-		ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-	}
-}
-
-/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index ff93ec71aed6..853f57a9589a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1165,7 +1165,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			struct ice_vsi *ctrl_vsi = rx_ring->vsi;
 
 			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
-			    ctrl_vsi->vf_id != ICE_INVAL_VFID)
+			    ctrl_vsi->vf)
 				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
 			ice_put_rx_buf(rx_ring, NULL, 0);
 			cleaned_count++;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 39f2d36cabba..b16f946185f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -34,9 +34,10 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
 {
 	struct ice_vsi_vlan_ops *vlan_ops;
 	struct ice_pf *pf = vsi->back;
-	struct ice_vf *vf;
+	struct ice_vf *vf = vsi->vf;
 
-	vf = &pf->vf[vsi->vf_id];
+	if (WARN_ON(!vf))
+		return;
 
 	if (ice_is_dvm_ena(&pf->hw)) {
 		vlan_ops = &vsi->outer_vlan_ops;
@@ -126,9 +127,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
 */
 void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
 {
-	struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
-	struct device *dev = ice_pf_to_dev(vf->pf);
 	struct ice_vsi_vlan_ops *vlan_ops;
+	struct ice_vf *vf = vsi->vf;
+	struct device *dev;
+
+	if (WARN_ON(!vf))
+		return;
+
+	dev = ice_pf_to_dev(vf->pf);
 
 	if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
 		return;
@@ -192,7 +198,10 @@ void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
 */
 void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi)
 {
-	struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+	struct ice_vf *vf = vsi->vf;
+
+	if (WARN_ON(!vf))
+		return;
 
 	if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
 		return;
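A recurring shape in the VLAN-ops hunks: every VSI-centric helper now reaches the VF through vsi->vf and guards a missing association with WARN_ON, since a NULL pointer on a VF VSI indicates a driver logic error rather than a runtime condition. The same defensive idiom applies to any code touching VF VSIs (hypothetical example, not from the patch):

	static void example_touch_vf_vsi(struct ice_vsi *vsi)
	{
		struct ice_vf *vf = vsi->vf;

		if (WARN_ON(!vf))
			return;	/* VF VSIs must always carry their VF */

		/* ... safe to dereference vf from here on ... */
	}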
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index d64df81d4893..07989f1d08ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -1288,15 +1288,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
 			union ice_32b_rx_flex_desc *rx_desc)
 {
 	struct ice_pf *pf = ctrl_vsi->back;
+	struct ice_vf *vf = ctrl_vsi->vf;
 	struct ice_vf_fdir_ctx *ctx_done;
 	struct ice_vf_fdir_ctx *ctx_irq;
 	struct ice_vf_fdir *fdir;
 	unsigned long flags;
 	struct device *dev;
-	struct ice_vf *vf;
 	int ret;
 
-	vf = &pf->vf[ctrl_vsi->vf_id];
+	if (WARN_ON(!vf))
+		return;
 
 	fdir = &vf->fdir;
 	ctx_done = &fdir->ctx_done;
@@ -1571,15 +1572,16 @@ err_exit:
 */
 void ice_flush_fdir_ctx(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
 	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
 		return;
 
-	ice_for_each_vf(pf, i) {
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf) {
 		struct device *dev = ice_pf_to_dev(pf);
 		enum virtchnl_fdir_prgm_status status;
-		struct ice_vf *vf = &pf->vf[i];
 		struct ice_vf_fdir_ctx *ctx;
 		unsigned long flags;
 		int ret;
@@ -1633,6 +1635,7 @@ err_exit:
 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
 	}
+	mutex_unlock(&pf->vfs.table_lock);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 455ce5dd0031..4840570c494d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -175,18 +175,107 @@ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
 }
 
 /**
- * ice_validate_vf_id - helper to check if VF ID is valid
- * @pf: pointer to the PF structure
- * @vf_id: the ID of the VF to check
+ * ice_get_vf_by_id - Get pointer to VF by ID
+ * @pf: the PF private structure
+ * @vf_id: the VF ID to locate
+ *
+ * Locate and return a pointer to the VF structure associated with a given ID.
+ * Returns NULL if the ID does not have a valid VF structure associated with
+ * it.
+ *
+ * This function takes a reference to the VF, which must be released by
+ * calling ice_put_vf() once the caller is finished accessing the VF structure
+ * returned.
 */
-static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
 {
-	/* vf_id range is only valid for 0-255, and should always be unsigned */
-	if (vf_id >= pf->num_alloc_vfs) {
-		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
-		return -EINVAL;
+	struct ice_vf *vf;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
+		if (vf->vf_id == vf_id) {
+			struct ice_vf *found;
+
+			if (kref_get_unless_zero(&vf->refcnt))
+				found = vf;
+			else
+				found = NULL;
+
+			rcu_read_unlock();
+			return found;
+		}
 	}
-	return 0;
+	rcu_read_unlock();
+
+	return NULL;
+}
+
+/**
+ * ice_release_vf - Release VF associated with a refcount
+ * @ref: the kref decremented to zero
+ *
+ * Callback function for kref_put to release a VF once its reference count has
+ * hit zero.
+ */
+static void ice_release_vf(struct kref *ref)
+{
+	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+
+	mutex_destroy(&vf->cfg_lock);
+
+	kfree_rcu(vf, rcu);
+}
+
+/**
+ * ice_put_vf - Release a reference to a VF
+ * @vf: the VF structure to decrease reference count on
+ *
+ * This must be called after ice_get_vf_by_id() once the reference to the VF
+ * structure is no longer used. Otherwise, the VF structure will never be
+ * freed.
+ */
+void ice_put_vf(struct ice_vf *vf)
+{
+	kref_put(&vf->refcnt, ice_release_vf);
+}
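ice_get_vf_by_id() and ice_put_vf() form the reference-counted lookup pair that replaces direct pf->vf[vf_id] indexing. A typical caller looks like the following (illustrative only; example_handle_vf_msg is not part of the patch):

	static int example_handle_vf_msg(struct ice_pf *pf, u16 vf_id)
	{
		struct ice_vf *vf;

		vf = ice_get_vf_by_id(pf, vf_id);
		if (!vf)
			return -ENOENT;	/* VF removed or never existed */

		mutex_lock(&vf->cfg_lock);
		/* ... operate on the VF ... */
		mutex_unlock(&vf->cfg_lock);

		ice_put_vf(vf);	/* drop the lookup reference */
		return 0;
	}

kref_get_unless_zero() in the lookup is what makes this safe against concurrent teardown: if the last reference is already gone, the lookup fails cleanly instead of resurrecting a dying object.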
+
+/**
+ * ice_has_vfs - Return true if the PF has any associated VFs
+ * @pf: the PF private structure
+ *
+ * Return whether or not the PF has any allocated VFs.
+ *
+ * Note that this function only guarantees that there are no VFs at the point
+ * of calling it. It does not guarantee that no more VFs will be added.
+ */
+bool ice_has_vfs(struct ice_pf *pf)
+{
+	/* A simple check that the hash table is not empty does not require
+	 * the mutex or rcu_read_lock.
+	 */
+	return !hash_empty(pf->vfs.table);
+}
+
+/**
+ * ice_get_num_vfs - Get number of allocated VFs
+ * @pf: the PF private structure
+ *
+ * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
+ * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
+ * the output of this function.
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+	u16 num_vfs = 0;
+
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf)
+		num_vfs++;
+	rcu_read_unlock();
+
+	return num_vfs;
 }
 
 /**
@@ -205,6 +294,32 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
 }
 
 /**
+ * ice_free_vf_entries - Free all VF entries from the hash table
+ * @pf: pointer to the PF structure
+ *
+ * Iterate over the VF hash table, removing and releasing all VF entries.
+ * Called during VF teardown or as cleanup during failed VF initialization.
+ */
+static void ice_free_vf_entries(struct ice_pf *pf)
+{
+	struct ice_vfs *vfs = &pf->vfs;
+	struct hlist_node *tmp;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	/* Remove all VFs from the hash table and release their main
+	 * reference. Once all references to the VF are dropped, ice_put_vf()
+	 * will call ice_release_vf which will remove the VF memory.
+	 */
+	lockdep_assert_held(&vfs->table_lock);
+
+	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+		hash_del_rcu(&vf->entry);
+		ice_put_vf(vf);
+	}
+}
+
+/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
@@ -217,11 +332,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
 		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
 {
 	struct ice_hw *hw = &pf->hw;
-	unsigned int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
+	struct ice_vf *vf;
+	unsigned int bkt;
 
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf) {
 		/* Not all vfs are enabled so skip the ones that are not */
 		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
 		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
@@ -233,6 +348,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
 		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
 				      msglen, NULL);
 	}
+	mutex_unlock(&pf->vfs.table_lock);
 }
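ice_free_vf_entries() pairs hash_del_rcu() with ice_put_vf(): unhashing first guarantees new lookups miss the entry, while concurrent RCU readers remain safe because ice_release_vf() frees the memory via kfree_rcu() only after the final kref drops and a grace period elapses. Condensed (illustrative) removal sequence for a single VF:

	lockdep_assert_held(&pf->vfs.table_lock);

	hash_del_rcu(&vf->entry);	/* 1: new lookups now miss */
	ice_put_vf(vf);			/* 2: drop the table's reference */
	/* 3: once every outstanding reference is gone, ice_release_vf()
	 *    runs and kfree_rcu() defers the free past existing readers
	 */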
@@ -381,7 +497,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
 		vf->num_mac = 0;
 	}
 
-	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
+	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
 
 	/* clear VF MDD event information */
 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
@@ -417,7 +533,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
 
 	first = vf->first_vector_idx;
-	last = first + pf->num_msix_per_vf - 1;
+	last = first + pf->vfs.num_msix_per - 1;
 	for (v = first; v <= last; v++) {
 		u32 reg;
 
@@ -499,14 +615,14 @@ static void ice_dis_vf_qs(struct ice_vf *vf)
 void ice_free_vfs(struct ice_pf *pf)
 {
 	struct device *dev = ice_pf_to_dev(pf);
+	struct ice_vfs *vfs = &pf->vfs;
 	struct ice_hw *hw = &pf->hw;
-	unsigned int tmp, i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
-	if (!pf->vf)
+	if (!ice_has_vfs(pf))
 		return;
 
-	ice_eswitch_release(pf);
-
 	while (test_and_set_bit(ICE_VF_DIS, pf->state))
 		usleep_range(1000, 2000);
 
@@ -519,12 +635,11 @@ void ice_free_vfs(struct ice_pf *pf)
 	else
 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 
-	tmp = pf->num_alloc_vfs;
-	pf->num_qps_per_vf = 0;
-	pf->num_alloc_vfs = 0;
-	for (i = 0; i < tmp; i++) {
-		struct ice_vf *vf = &pf->vf[i];
+	mutex_lock(&vfs->table_lock);
 
+	ice_eswitch_release(pf);
+
+	ice_for_each_vf(pf, bkt, vf) {
 		mutex_lock(&vf->cfg_lock);
 
 		ice_dis_vf_qs(vf);
@@ -536,42 +651,30 @@ void ice_free_vfs(struct ice_pf *pf)
 			ice_free_vf_res(vf);
 		}
 
-		mutex_unlock(&vf->cfg_lock);
+		if (!pci_vfs_assigned(pf->pdev)) {
+			u32 reg_idx, bit_idx;
+
+			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+		}
 
-		mutex_destroy(&vf->cfg_lock);
+		/* clear malicious info since the VF is getting released */
+		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+					ICE_MAX_VF_COUNT, vf->vf_id))
+			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+				vf->vf_id);
+
+		mutex_unlock(&vf->cfg_lock);
 	}
 
 	if (ice_sriov_free_msix_res(pf))
 		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
 
-	devm_kfree(dev, pf->vf);
-	pf->vf = NULL;
-
-	/* This check is for when the driver is unloaded while VFs are
-	 * assigned. Setting the number of VFs to 0 through sysfs is caught
-	 * before this function ever gets called.
-	 */
-	if (!pci_vfs_assigned(pf->pdev)) {
-		unsigned int vf_id;
-
-		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
-		 * work correctly when SR-IOV gets re-enabled.
-		 */
-		for (vf_id = 0; vf_id < tmp; vf_id++) {
-			u32 reg_idx, bit_idx;
-
-			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
-			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-		}
-	}
+	vfs->num_qps_per = 0;
+	ice_free_vf_entries(pf);
 
-	/* clear malicious info if the VFs are getting released */
-	for (i = 0; i < tmp; i++)
-		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
-					ICE_MAX_VF_COUNT, i))
-			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
-				i);
+	mutex_unlock(&vfs->table_lock);
 
 	clear_bit(ICE_VF_DIS, pf->state);
 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
@@ -667,7 +770,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
 	struct ice_pf *pf = vf->pf;
 	struct ice_vsi *vsi;
 
-	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
+	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
 
 	if (!vsi) {
 		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -694,7 +797,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
 	struct ice_pf *pf = vf->pf;
 	struct ice_vsi *vsi;
 
-	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
+	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
 	if (!vsi) {
 		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
 		ice_vf_ctrl_invalidate_vsi(vf);
@@ -717,7 +820,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
 */
 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
 {
-	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
+	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
 }
 
 /**
@@ -974,12 +1077,12 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
 
 	hw = &pf->hw;
 	pf_based_first_msix = vf->first_vector_idx;
-	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
+	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
 
 	device_based_first_msix = pf_based_first_msix +
 		pf->hw.func_caps.common_cap.msix_vector_first_id;
 	device_based_last_msix =
-		(device_based_first_msix + pf->num_msix_per_vf) - 1;
+		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
 	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
 	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
@@ -1070,45 +1173,6 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 }
 
 /**
- * ice_determine_res
- * @pf: pointer to the PF structure
- * @avail_res: available resources in the PF structure
- * @max_res: maximum resources that can be given per VF
- * @min_res: minimum resources that can be given per VF
- *
- * Returns non-zero value if resources (queues/vectors) are available or
- * returns zero if PF cannot accommodate for all num_alloc_vfs.
- */
-static int
-ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
-{
-	bool checked_min_res = false;
-	int res;
-
-	/* start by checking if PF can assign max number of resources for
-	 * all num_alloc_vfs.
-	 * if yes, return number per VF
-	 * If no, divide by 2 and roundup, check again
-	 * repeat the loop till we reach a point where even minimum resources
-	 * are not available, in that case return 0
-	 */
-	res = max_res;
-	while ((res >= min_res) && !checked_min_res) {
-		int num_all_res;
-
-		num_all_res = pf->num_alloc_vfs * res;
-		if (num_all_res <= avail_res)
-			return res;
-
-		if (res == min_res)
-			checked_min_res = true;
-
-		res = DIV_ROUND_UP(res, 2);
-	}
-	return 0;
-}
-
-/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
@@ -1123,7 +1187,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
 	pf = vf->pf;
 
 	/* always add one to account for the OICR being the first MSIX */
-	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
+	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
 	       q_vector->v_idx + 1;
 }
 
@@ -1187,6 +1251,7 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
 /**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
@@ -1205,20 +1270,22 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
-static int ice_set_per_vf_res(struct ice_pf *pf)
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
 {
 	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
 	int msix_avail_per_vf, msix_avail_for_sriov;
 	struct device *dev = ice_pf_to_dev(pf);
-	u16 num_msix_per_vf, num_txq, num_rxq;
 
-	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	if (!num_vfs || max_valid_res_idx < 0)
 		return -EINVAL;
 
 	/* determine MSI-X resources per VF */
 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
 		pf->irq_tracker->num_entries;
-	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
+	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
@@ -1230,40 +1297,43 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
 	} else {
 		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
 			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
-			pf->num_alloc_vfs);
+			num_vfs);
 		return -EIO;
 	}
 
-	/* determine queue resources per VF */
-	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
-				    min_t(u16,
-					  num_msix_per_vf - ICE_NONQ_VECS_VF,
-					  ICE_MAX_RSS_QS_PER_VF),
-				    ICE_MIN_QS_PER_VF);
+	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+			ICE_MAX_RSS_QS_PER_VF);
+	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+	if (!avail_qs)
+		num_txq = 0;
+	else if (num_txq > avail_qs)
+		num_txq = rounddown_pow_of_two(avail_qs);
 
-	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
-				    min_t(u16,
-					  num_msix_per_vf - ICE_NONQ_VECS_VF,
-					  ICE_MAX_RSS_QS_PER_VF),
-				    ICE_MIN_QS_PER_VF);
+	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+			ICE_MAX_RSS_QS_PER_VF);
+	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+	if (!avail_qs)
+		num_rxq = 0;
+	else if (num_rxq > avail_qs)
+		num_rxq = rounddown_pow_of_two(avail_qs);
 
-	if (!num_txq || !num_rxq) {
+	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
 		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
-			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
+			ICE_MIN_QS_PER_VF, num_vfs);
 		return -EIO;
 	}
 
-	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
+	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs)) {
 		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
-			pf->num_alloc_vfs);
+			num_vfs);
 		return -EINVAL;
 	}
 
 	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
-	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
-	pf->num_msix_per_vf = num_msix_per_vf;
+	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
+	pf->vfs.num_msix_per = num_msix_per_vf;
 	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
-		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
+		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
 
 	return 0;
 }
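The open-coded replacement for ice_determine_res() divides the available queues evenly across VFs and, when the MSI-X-derived per-VF maximum does not fit, rounds down to a power of two. A worked example with assumed numbers: for 8 VFs with 16 queue-capable vectors per VF and 52 available Tx queues, num_txq starts at min(16, ICE_MAX_RSS_QS_PER_VF) = 16, avail_qs = 52 / 8 = 6, and since 16 > 6 the result is rounddown_pow_of_two(6) = 4 queue pairs per VF.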
@@ -1510,24 +1580,30 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_hw *hw = &pf->hw;
 	struct ice_vf *vf;
-	int v, i;
+	unsigned int bkt;
 
 	/* If we don't have any VFs, then there is nothing to reset */
-	if (!pf->num_alloc_vfs)
+	if (!ice_has_vfs(pf))
 		return false;
 
+	mutex_lock(&pf->vfs.table_lock);
+
 	/* clear all malicious info if the VFs are getting reset */
-	ice_for_each_vf(pf, i)
-		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
-			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
+	ice_for_each_vf(pf, bkt, vf)
+		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+					ICE_MAX_VF_COUNT, vf->vf_id))
+			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+				vf->vf_id);
 
 	/* If VFs have been disabled, there is no need to reset */
-	if (test_and_set_bit(ICE_VF_DIS, pf->state))
+	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+		mutex_unlock(&pf->vfs.table_lock);
 		return false;
+	}
 
 	/* Begin reset on all VFs at once */
-	ice_for_each_vf(pf, v)
-		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
+	ice_for_each_vf(pf, bkt, vf)
+		ice_trigger_vf_reset(vf, is_vflr, true);
 
 	/* HW requires some time to make sure it can flush the FIFO for a VF
 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1535,36 +1611,34 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	 * the VFs using a simple iterator that increments once that VF has
 	 * finished resetting.
 	 */
-	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
-		/* Check each VF in sequence */
-		while (v < pf->num_alloc_vfs) {
-			u32 reg;
-
-			vf = &pf->vf[v];
-			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
-			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
-				/* only delay if the check failed */
-				usleep_range(10, 20);
+	ice_for_each_vf(pf, bkt, vf) {
+		bool done = false;
+		unsigned int i;
+		u32 reg;
+
+		for (i = 0; i < 10; i++) {
+			reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
+			if (reg & VPGEN_VFRSTAT_VFRD_M) {
+				done = true;
 				break;
 			}
 
-			/* If the current VF has finished resetting, move on
-			 * to the next VF in sequence.
+			/* only delay if check failed */
+			usleep_range(10, 20);
+		}
+
+		if (!done) {
+			/* Display a warning if at least one VF didn't manage
+			 * to reset in time, but continue on with the
+			 * operation.
 			 */
-			v++;
+			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+			break;
 		}
 	}
 
-	/* Display a warning if at least one VF didn't manage to reset in
-	 * time, but continue on with the operation.
-	 */
-	if (v < pf->num_alloc_vfs)
-		dev_warn(dev, "VF reset check timeout\n");
-
 	/* free VF resources to begin resetting the VSI state */
-	ice_for_each_vf(pf, v) {
-		vf = &pf->vf[v];
-
+	ice_for_each_vf(pf, bkt, vf) {
 		mutex_lock(&vf->cfg_lock);
 
 		vf->driver_caps = 0;
@@ -1592,6 +1666,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	ice_flush(hw);
 	clear_bit(ICE_VF_DIS, pf->state);
 
+	mutex_unlock(&pf->vfs.table_lock);
+
 	return true;
 }
 
@@ -1728,7 +1804,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 	ice_eswitch_replay_vf_mac_rule(vf);
 
 	/* if the VF has been reset allow it to come up again */
-	if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
+	if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+				ICE_MAX_VF_COUNT, vf->vf_id))
 		dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
 
 	return true;
@@ -1740,10 +1817,13 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 */
 void ice_vc_notify_link_state(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 
-	ice_for_each_vf(pf, i)
-		ice_vc_notify_vf_link_state(&pf->vf[i]);
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf)
+		ice_vc_notify_vf_link_state(vf);
+	mutex_unlock(&pf->vfs.table_lock);
 }
 
 /**
@@ -1756,7 +1836,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
 {
 	struct virtchnl_pf_event pfe;
 
-	if (!pf->num_alloc_vfs)
+	if (!ice_has_vfs(pf))
 		return;
 
 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
@@ -1772,14 +1852,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
 {
 	struct virtchnl_pf_event pfe;
-	struct ice_pf *pf;
-
-	if (!vf)
-		return;
-
-	pf = vf->pf;
-	if (ice_validate_vf_id(pf, vf->vf_id))
-		return;
+	struct ice_pf *pf = vf->pf;
 
 	/* Bail out if VF is in disabled state, neither initialized, nor active
 	 * state - otherwise proceed with notifications
@@ -1865,11 +1938,14 @@ release_vsi:
 static int ice_start_vfs(struct ice_pf *pf)
 {
 	struct ice_hw *hw = &pf->hw;
-	int retval, i;
+	unsigned int bkt, it_cnt;
+	struct ice_vf *vf;
+	int retval;
 
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
+	lockdep_assert_held(&pf->vfs.table_lock);
 
+	it_cnt = 0;
+	ice_for_each_vf(pf, bkt, vf) {
 		ice_clear_vf_reset_trigger(vf);
 
 		retval = ice_init_vf_vsi_res(vf);
@@ -1882,40 +1958,63 @@ static int ice_start_vfs(struct ice_pf *pf)
 		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 		ice_ena_vf_mappings(vf);
 		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+		it_cnt++;
 	}
 
 	ice_flush(hw);
 	return 0;
 
 teardown:
= i - 1; i >= 0; i--) { - struct ice_vf *vf = &pf->vf[i]; + ice_for_each_vf(pf, bkt, vf) { + if (it_cnt == 0) + break; ice_dis_vf_mappings(vf); ice_vf_vsi_release(vf); + it_cnt--; } return retval; } /** - * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation - * @pf: PF holding reference to all VFs for default configuration + * ice_create_vf_entries - Allocate and insert VF entries + * @pf: pointer to the PF structure + * @num_vfs: the number of VFs to allocate + * + * Allocate new VF entries and insert them into the hash table. Set some + * basic default fields for initializing the new VFs. + * + * After this function exits, the hash table will have num_vfs entries + * inserted. + * + * Returns 0 on success or an integer error code on failure. */ -static void ice_set_dflt_settings_vfs(struct ice_pf *pf) +static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) { - int i; + struct ice_vfs *vfs = &pf->vfs; + struct ice_vf *vf; + u16 vf_id; + int err; + + lockdep_assert_held(&vfs->table_lock); - ice_for_each_vf(pf, i) { - struct ice_vf *vf = &pf->vf[i]; + for (vf_id = 0; vf_id < num_vfs; vf_id++) { + vf = kzalloc(sizeof(*vf), GFP_KERNEL); + if (!vf) { + err = -ENOMEM; + goto err_free_entries; + } + kref_init(&vf->refcnt); vf->pf = pf; - vf->vf_id = i; + vf->vf_id = vf_id; + vf->vf_sw_id = pf->first_sw; /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); vf->spoofchk = true; - vf->num_vf_qs = pf->num_qps_per_vf; + vf->num_vf_qs = pf->vfs.num_qps_per; ice_vc_set_default_allowlist(vf); /* ctrl_vsi_idx will be set to a valid value only when VF @@ -1927,27 +2026,15 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) ice_vc_set_dflt_vf_ops(&vf->vc_ops); mutex_init(&vf->cfg_lock); - } -} - -/** - * ice_alloc_vfs - allocate num_vfs in the PF structure - * @pf: PF to store the allocated VFs in - * @num_vfs: number of VFs to allocate - */ -static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs) -{ - struct ice_vf *vfs; - - vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs), - GFP_KERNEL); - if (!vfs) - return -ENOMEM; - pf->vf = vfs; - pf->num_alloc_vfs = num_vfs; + hash_add_rcu(vfs->table, &vf->entry, vf_id); + } return 0; + +err_free_entries: + ice_free_vf_entries(pf); + return err; } /** @@ -1968,28 +2055,29 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_vfs); - if (ret) { - pf->num_alloc_vfs = 0; + if (ret) goto err_unroll_intr; - } - ret = ice_alloc_vfs(pf, num_vfs); - if (ret) - goto err_pci_disable_sriov; + mutex_lock(&pf->vfs.table_lock); - if (ice_set_per_vf_res(pf)) { + if (ice_set_per_vf_res(pf, num_vfs)) { dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", num_vfs); ret = -ENOSPC; goto err_unroll_sriov; } - ice_set_dflt_settings_vfs(pf); + ret = ice_create_vf_entries(pf, num_vfs); + if (ret) { + dev_err(dev, "Failed to allocate VF entries for %d VFs\n", + num_vfs); + goto err_unroll_sriov; + } if (ice_start_vfs(pf)) { dev_err(dev, "Failed to start VF(s)\n"); ret = -EAGAIN; - goto err_unroll_sriov; + goto err_unroll_vf_entries; } clear_bit(ICE_VF_DIS, pf->state); @@ -2002,13 +2090,14 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) ice_irq_dynamic_ena(hw, NULL, NULL); + mutex_unlock(&pf->vfs.table_lock); + return 0; +err_unroll_vf_entries: + ice_free_vf_entries(pf); err_unroll_sriov: - devm_kfree(dev, pf->vf); - pf->vf = NULL; - pf->num_alloc_vfs = 0; 
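ice_create_vf_entries() initializes each entry's refcount with kref_init() and publishes it with hash_add_rcu(), so teardown has to go through the refcount rather than a plain kfree(). The matching release side is not part of the hunks shown here; a hedged sketch, consistent with the kref and rcu_head fields added to struct ice_vf later in this patch:

    /* Sketch only: final release once the last reference is dropped.
     * The real ice_put_vf()/ice_free_vf_entries() bodies live outside
     * these hunks.
     */
    static void ice_release_vf(struct kref *ref)
    {
        struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

        mutex_destroy(&vf->cfg_lock);
        kfree_rcu(vf, rcu); /* wait for RCU readers before freeing */
    }

    void ice_put_vf(struct ice_vf *vf)
    {
        kref_put(&vf->refcnt, ice_release_vf);
    }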
@@ -2035,9 +2124,9 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 return 0;

- if (num_vfs > pf->num_vfs_supported) {
+ if (num_vfs > pf->vfs.num_supported) {
 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
- num_vfs, pf->num_vfs_supported);
+ num_vfs, pf->vfs.num_supported);
 return -EOPNOTSUPP;
 }

@@ -2135,19 +2224,20 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
 void ice_process_vflr_event(struct ice_pf *pf)
 {
 struct ice_hw *hw = &pf->hw;
- unsigned int vf_id;
+ struct ice_vf *vf;
+ unsigned int bkt;
 u32 reg;

 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
- !pf->num_alloc_vfs)
+ !ice_has_vfs(pf))
 return;

- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
 u32 reg_idx, bit_idx;

- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
 if (reg & BIT(bit_idx)) {
@@ -2157,6 +2247,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
 mutex_unlock(&vf->cfg_lock);
 }
 }
+ mutex_unlock(&pf->vfs.table_lock);
 }
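The lookups converted below all lean on ice_get_vf_by_id(), whose body is also outside these hunks. Given a table keyed by vf_id and the per-entry kref, a plausible implementation (hedged; the in-tree version may differ in detail) looks like this:

    struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
    {
        struct ice_vf *vf;

        rcu_read_lock();
        hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
            if (vf->vf_id == vf_id) {
                struct ice_vf *found = NULL;

                /* skip entries already on their way out */
                if (kref_get_unless_zero(&vf->refcnt))
                    found = vf;

                rcu_read_unlock();
                return found;
            }
        }
        rcu_read_unlock();

        return NULL;
    }

ice_get_vf_from_pfq() in the next hunk applies the same kref_get_unless_zero() test inline, which is why its kernel-doc now documents a reference that the caller must drop.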
 /**
@@ -2176,22 +2267,36 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
+ *
+ * If this function returns non-NULL, it acquires a reference count of the VF
+ * structure. The caller is responsible for calling ice_put_vf() to drop this
+ * reference.
 */
 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
 {
- unsigned int vf_id;
+ struct ice_vf *vf;
+ unsigned int bkt;

- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
 struct ice_vsi *vsi;
 u16 rxq_idx;

 vsi = ice_get_vf_vsi(vf);

 ice_for_each_rxq(vsi, rxq_idx)
- if (vsi->rxq_map[rxq_idx] == pfq)
- return vf;
+ if (vsi->rxq_map[rxq_idx] == pfq) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+
+ rcu_read_unlock();
+ return found;
+ }
 }
+ rcu_read_unlock();

 return NULL;
 }

@@ -2235,6 +2340,8 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
 mutex_lock(&vf->cfg_lock);
 ice_vc_reset_vf(vf);
 mutex_unlock(&vf->cfg_lock);
+
+ ice_put_vf(vf);
 }

 /**
@@ -2255,13 +2362,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
 struct ice_pf *pf;
 int aq_ret;

- if (!vf)
- return -EINVAL;
-
 pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return -EINVAL;
-
 dev = ice_pf_to_dev(pf);

 /* single place to detect unsuccessful return values */
@@ -2452,7 +2553,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 vfres->num_vsis = 1;
 /* Tx and Rx queue are equal for VF */
 vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_msix_per_vf;
+ vfres->max_vectors = pf->vfs.num_msix_per;
 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
 vfres->max_mtu = ice_vc_get_max_frame_size(vf);
@@ -2526,7 +2627,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)

 vsi = ice_find_vsi_from_id(pf, vsi_id);

- return (vsi && (vsi->vf_id == vf->vf_id));
+ return (vsi && (vsi->vf == vf));
 }

 /**
@@ -3017,30 +3118,34 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 int ret;

 dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return -EINVAL;

- vf = &pf->vf[vf_id];
 ret = ice_check_vf_ready_for_cfg(vf);
 if (ret)
- return ret;
+ goto out_put_vf;

 vf_vsi = ice_get_vf_vsi(vf);
 if (!vf_vsi) {
 netdev_err(netdev, "VSI %d for VF %d is null\n",
 vf->lan_vsi_idx, vf->vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_vf;
 }

 if (vf_vsi->type != ICE_VSI_VF) {
 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_vf;
 }

 if (ena == vf->spoofchk) {
 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
- return 0;
+ ret = 0;
+ goto out_put_vf;
 }

 if (ena)
@@ -3053,6 +3158,8 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 else
 vf->spoofchk = ena;

+out_put_vf:
+ ice_put_vf(vf);
 return ret;
 }
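Every .ndo_* VF handler in the rest of the file is converted to the same shape as ice_set_vf_spoofchk() above. The skeleton, reduced to its control flow (illustrative only; ice_set_vf_foo is a hypothetical name):

    int ice_set_vf_foo(struct net_device *netdev, int vf_id, ...)
    {
        struct ice_pf *pf = ice_netdev_to_pf(netdev);
        struct ice_vf *vf;
        int ret;

        vf = ice_get_vf_by_id(pf, vf_id);  /* takes a reference */
        if (!vf)
            return -EINVAL;                /* no reference to drop */

        ret = ice_check_vf_ready_for_cfg(vf);
        if (ret)
            goto out_put_vf;

        /* ... actual configuration work ... */

    out_put_vf:
        ice_put_vf(vf);                    /* drops the reference */
        return ret;
    }

Note the asymmetry: only the failed lookup may return directly; every later exit must go through out_put_vf.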
"ON" : "OFF"); - return 0; + ret = 0; + goto out_put_vf; } if (ena) @@ -3053,6 +3158,8 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) else vf->spoofchk = ena; +out_put_vf: + ice_put_vf(vf); return ret; } @@ -3065,18 +3172,22 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) */ bool ice_is_any_vf_in_promisc(struct ice_pf *pf) { - int vf_idx; - - ice_for_each_vf(pf, vf_idx) { - struct ice_vf *vf = &pf->vf[vf_idx]; + bool is_vf_promisc = false; + struct ice_vf *vf; + unsigned int bkt; + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { /* found a VF that has promiscuous mode configured */ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || - test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) - return true; + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { + is_vf_promisc = true; + break; + } } + rcu_read_unlock(); - return false; + return is_vf_promisc; } /** @@ -3596,7 +3707,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->num_msix_per_vf < num_q_vectors_mapped || + pf->vfs.num_msix_per < num_q_vectors_mapped || !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -3618,7 +3729,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < pf->num_msix_per_vf) || + if (!(vector_id < pf->vfs.num_msix_per) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -4209,8 +4320,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, int ret; dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) - return -EINVAL; if (vlan_id >= VLAN_N_VID || qos > 7) { dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n", @@ -4224,10 +4333,13 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, return -EPROTONOSUPPORT; } - vf = &pf->vf[vf_id]; + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + ret = ice_check_vf_ready_for_cfg(vf); if (ret) - return ret; + goto out_put_vf; if (ice_vf_get_port_vlan_prio(vf) == qos && ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto && @@ -4235,7 +4347,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, /* duplicate request, so just return success */ dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n", vlan_id, qos, local_vlan_proto); - return 0; + ret = 0; + goto out_put_vf; } mutex_lock(&vf->cfg_lock); @@ -4250,7 +4363,9 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, ice_vc_reset_vf(vf); mutex_unlock(&vf->cfg_lock); - return 0; +out_put_vf: + ice_put_vf(vf); + return ret; } /** @@ -5772,12 +5887,13 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) int err = 0; dev = ice_pf_to_dev(pf); - if (ice_validate_vf_id(pf, vf_id)) { - err = -EINVAL; - goto error_handler; - } - vf = &pf->vf[vf_id]; + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) { + dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n", + vf_id, v_opcode, msglen); + return; + } /* Check if VF is disabled. 
 /**
@@ -5772,12 +5887,13 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
 int err = 0;

 dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id)) {
- err = -EINVAL;
- goto error_handler;
- }

- vf = &pf->vf[vf_id];
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
+ vf_id, v_opcode, msglen);
+ return;
+ }

 /* Check if VF is disabled. */
 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
@@ -5800,6 +5916,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
 ice_vc_send_msg_to_vf(vf, v_opcode,
 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
 0);
+ ice_put_vf(vf);
 return;
 }

@@ -5809,6 +5926,7 @@ error_handler:
 NULL, 0);
 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
 vf_id, v_opcode, msglen, err);
+ ice_put_vf(vf);
 return;
 }

@@ -5818,6 +5936,7 @@ error_handler:
 if (!mutex_trylock(&vf->cfg_lock)) {
 dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
 vf->vf_id);
+ ice_put_vf(vf);
 return;
 }

@@ -5932,6 +6051,7 @@ error_handler:
 }

 mutex_unlock(&vf->cfg_lock);
+ ice_put_vf(vf);
 }

 /**
@@ -5947,14 +6067,15 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
 {
 struct ice_pf *pf = ice_netdev_to_pf(netdev);
 struct ice_vf *vf;
+ int ret;

- if (ice_validate_vf_id(pf, vf_id))
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return -EINVAL;

- vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;

 ivi->vf = vf_id;
 ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
@@ -5975,7 +6096,10 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
 ivi->max_tx_rate = vf->max_tx_rate;
 ivi->min_tx_rate = vf->min_tx_rate;
- return 0;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
 }
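One behavioral nuance in ice_get_vf_cfg() above: the old code rejected only an uninitialized VF via ice_check_vf_init() (-EBUSY), while the converted version reuses ice_check_vf_ready_for_cfg(), so a VF that is disabled or mid-reset now also fails the query rather than reporting stale state. The gate itself is not part of this diff; a hedged sketch of what it plausibly checks, using only state bits that appear elsewhere in this patch:

    /* Sketch only; the real ice_check_vf_ready_for_cfg() body is not
     * shown in these hunks.
     */
    int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
    {
        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states))
            return -EBUSY;
        if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
            return -EINVAL;
        return 0;
    }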
 /**
@@ -6025,28 +6149,31 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 struct ice_vf *vf;
 int ret;

- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
 if (is_multicast_ether_addr(mac)) {
 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
 return -EINVAL;
 }

- vf = &pf->vf[vf_id];
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
 /* nothing left to do, unicast MAC already set */
 if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac))
- return 0;
+ ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ ret = 0;
+ goto out_put_vf;
+ }

 ret = ice_check_vf_ready_for_cfg(vf);
 if (ret)
- return ret;
+ goto out_put_vf;

 if (ice_unicast_mac_exists(pf, mac)) {
 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
 mac, vf_id, mac);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_vf;
 }

 mutex_lock(&vf->cfg_lock);
@@ -6070,7 +6197,10 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 ice_vc_reset_vf(vf);
 mutex_unlock(&vf->cfg_lock);
- return 0;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
 }

 /**
@@ -6092,17 +6222,19 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
 return -EOPNOTSUPP;
 }

- if (ice_validate_vf_id(pf, vf_id))
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return -EINVAL;

- vf = &pf->vf[vf_id];
 ret = ice_check_vf_ready_for_cfg(vf);
 if (ret)
- return ret;
+ goto out_put_vf;

 /* Check if already trusted */
- if (trusted == vf->trusted)
- return 0;
+ if (trusted == vf->trusted) {
+ ret = 0;
+ goto out_put_vf;
+ }

 mutex_lock(&vf->cfg_lock);

@@ -6113,7 +6245,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)

 mutex_unlock(&vf->cfg_lock);

- return 0;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
 }

 /**
@@ -6130,13 +6264,13 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
 struct ice_vf *vf;
 int ret;

- if (ice_validate_vf_id(pf, vf_id))
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return -EINVAL;

- vf = &pf->vf[vf_id];
 ret = ice_check_vf_ready_for_cfg(vf);
 if (ret)
- return ret;
+ goto out_put_vf;

 switch (link_state) {
 case IFLA_VF_LINK_STATE_AUTO:
@@ -6151,12 +6285,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
 vf->link_up = false;
 break;
 default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_vf;
 }

 ice_vc_notify_vf_link_state(vf);

- return 0;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
 }

 /**
@@ -6165,10 +6302,14 @@
 */
 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
 {
- int rate = 0, i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int rate = 0;

- ice_for_each_vf(pf, i)
- rate += pf->vf[i].min_tx_rate;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ rate += vf->min_tx_rate;
+ rcu_read_unlock();

 return rate;
 }
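ice_calc_all_vfs_min_tx_rate() above is a good model for when the RCU iterator is appropriate: a short, read-only walk with no sleeping calls. Anything that can sleep inside the loop must take the mutex instead. Side by side (sketch, not from the patch):

    /* Short, non-sleeping read: RCU is enough */
    rcu_read_lock();
    ice_for_each_vf_rcu(pf, bkt, vf)
        rate += vf->min_tx_rate;
    rcu_read_unlock();

    /* Sleeping work (mutexes, admin queue commands): hold table_lock */
    mutex_lock(&pf->vfs.table_lock);
    ice_for_each_vf(pf, bkt, vf) {
        mutex_lock(&vf->cfg_lock);
        /* ... work that may sleep ... */
        mutex_unlock(&vf->cfg_lock);
    }
    mutex_unlock(&pf->vfs.table_lock);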
@@ -6223,13 +6364,14 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 int ret;

 dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return -EINVAL;

- vf = &pf->vf[vf_id];
 ret = ice_check_vf_ready_for_cfg(vf);
 if (ret)
- return ret;
+ goto out_put_vf;

 vsi = ice_get_vf_vsi(vf);

@@ -6239,23 +6381,27 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 if (max_tx_rate && min_tx_rate > max_tx_rate) {
 dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
 min_tx_rate, max_tx_rate);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_vf;
 }

 if (min_tx_rate && ice_is_dcb_active(pf)) {
 dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out_put_vf;
 }

- if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
- return -EINVAL;
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }

 if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
 ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
 if (ret) {
 dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
 vf->vf_id);
- return ret;
+ goto out_put_vf;
 }

 vf->min_tx_rate = min_tx_rate;
@@ -6266,13 +6412,15 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 if (ret) {
 dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
 vf->vf_id);
- return ret;
+ goto out_put_vf;
 }

 vf->max_tx_rate = max_tx_rate;
 }

- return 0;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
 }

 /**
@@ -6290,17 +6438,19 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
 struct ice_vf *vf;
 int ret;

- if (ice_validate_vf_id(pf, vf_id))
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return -EINVAL;

- vf = &pf->vf[vf_id];
 ret = ice_check_vf_ready_for_cfg(vf);
 if (ret)
- return ret;
+ goto out_put_vf;

 vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }

 ice_update_eth_stats(vsi);
 stats = &vsi->eth_stats;
@@ -6318,7 +6468,9 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
 vf_stats->rx_dropped = stats->rx_discards;
 vf_stats->tx_dropped = stats->tx_discards;

- return 0;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
 }

 /**
@@ -6349,21 +6501,21 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
 {
 struct device *dev = ice_pf_to_dev(pf);
 struct ice_hw *hw = &pf->hw;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;

 /* check that there are pending MDD events to print */
 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
 return;

 /* VF MDD event logs are rate limited to one second intervals */
- if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
+ if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
 return;

- pf->last_printed_mdd_jiffies = jiffies;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ pf->vfs.last_printed_mdd_jiffies = jiffies;

+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
 /* only print Rx MDD event message if there are new events */
 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
 vf->mdd_rx_events.last_printed =
@@ -6377,10 +6529,11 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
 vf->mdd_tx_events.count;

 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, i,
+ vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
 vf->dev_lan_addr.addr);
 }
 }
+ mutex_unlock(&pf->vfs.table_lock);
 }
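Several checks earlier in the file (ice_vc_notify_reset(), ice_process_vflr_event()) now call ice_has_vfs() instead of reading num_alloc_vfs, and the header changes below declare it alongside ice_get_num_vfs(). Their bodies are not in these hunks; plausible implementations, hedged, would be:

    bool ice_has_vfs(struct ice_pf *pf)
    {
        /* a non-empty table means SR-IOV VF entries exist */
        return !hash_empty(pf->vfs.table);
    }

    u16 ice_get_num_vfs(struct ice_pf *pf)
    {
        struct ice_vf *vf;
        unsigned int bkt;
        u16 num_vfs = 0;

        rcu_read_lock();
        ice_for_each_vf_rcu(pf, bkt, vf)
            num_vfs++;
        rcu_read_unlock();

        return num_vfs;
    }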
 /**
@@ -6432,13 +6585,12 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
 struct ice_vf *vf;
 int status;

- if (ice_validate_vf_id(pf, vf_id))
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
 return false;

- vf = &pf->vf[vf_id];
-
 /* Check if VF is disabled. */
 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
+ goto out_put_vf;

 mbxdata.num_msg_proc = num_msg_proc;
 mbxdata.num_pending_arq = num_msg_pending;
@@ -6449,7 +6601,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
 /* check to see if we have a malicious VF */
 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
 if (status)
- return false;
+ goto out_put_vf;

 if (malvf) {
 bool report_vf = false;

 /* if the VF is malicious and we haven't let the user
 * know about it, then let them know now
 */
- status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
+ status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
 ICE_MAX_VF_COUNT, vf_id,
 &report_vf);
 if (status)
@@ -6471,12 +6623,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
 &vf->dev_lan_addr.addr[0],
 pf_vsi->netdev->dev_addr);
 }
-
- return true;
 }

- /* if there was an error in detection or the VF is not malicious then
- * return false
- */
- return false;
+out_put_vf:
+ ice_put_vf(vf);
+ return malvf;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 4f4961043638..02e3d306f6dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -39,8 +39,50 @@
 #define ICE_MAX_VF_RESET_TRIES 40
 #define ICE_MAX_VF_RESET_SLEEP_MS 20

-#define ice_for_each_vf(pf, i) \
- for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
+
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+ hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */ +#define ice_for_each_vf_rcu(pf, bkt, vf) \ + hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry) /* Specific VF states */ enum ice_vf_states { @@ -104,8 +146,22 @@ struct ice_vc_vf_ops { int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); }; +/* Virtchnl/SR-IOV config info */ +struct ice_vfs { + DECLARE_HASHTABLE(table, 8); /* table of VF entries */ + struct mutex table_lock; /* Lock for protecting the hash table */ + u16 num_supported; /* max supported VFs on this PF */ + u16 num_qps_per; /* number of queue pairs per VF */ + u16 num_msix_per; /* number of MSI-X vectors per VF */ + unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */ + DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); /* malicious VF indicator */ +}; + /* VF information structure */ struct ice_vf { + struct hlist_node entry; + struct rcu_head rcu; + struct kref refcnt; struct ice_pf *pf; /* Used during virtchnl message handling and NDO ops against the VF @@ -162,6 +218,10 @@ struct ice_vf { }; #ifdef CONFIG_PCI_IOV +struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id); +void ice_put_vf(struct ice_vf *vf); +bool ice_has_vfs(struct ice_pf *pf); +u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); @@ -221,6 +281,25 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); bool ice_vf_is_port_vlan_ena(struct ice_vf *vf); #else /* CONFIG_PCI_IOV */ +static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) +{ + return NULL; +} + +static inline void ice_put_vf(struct ice_vf *vf) +{ +} + +static inline bool ice_has_vfs(struct ice_pf *pf) +{ + return false; +} + +static inline u16 ice_get_num_vfs(struct ice_pf *pf) +{ + return 0; +} + static inline void ice_process_vflr_event(struct ice_pf *pf) { } static inline void ice_free_vfs(struct ice_pf *pf) { } static inline |