author    David S. Miller    2021-10-08 14:31:01 +0100
committer David S. Miller    2021-10-08 14:31:01 +0100
commit    faeb8e7a0aac8dc79c4aa9fc0f01a36518fc5004 (patch)
tree      952058ae9622807f8507e64920abc42743203115 /drivers
parent    9fe1155233c8290ca6e206053e531ef87f9026ea (diff)
parent    7aae80cef7ba4b5245d392e62de1ebf1fc035f49 (diff)
Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:
====================
100GbE Intel Wired LAN Driver Updates 2021-10-07
Michal Swiatkowski says:
The following patch series introduces basic switchdev model
support in the ice driver. It implements the following blocks of
the switchdev framework:
- VF port representor creation
- control plane VSI definition
- exception path (a.k.a. "slow path") - allowing a virtual
switch or Linux bridge to receive any packet that doesn't match
any HW filter
- link state management of virtual ports
- querying of virtual port statistics
Hardware offload support in switchdev mode is out of scope for
this patchset. The devlink interface is used to toggle between
the switchdev and legacy (the default) modes of the driver.
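For example (a minimal usage sketch; the PCI address is illustrative, and
the mode change is only accepted while no VFs exist):

    devlink dev eswitch set pci/0000:03:00.0 mode switchdev
    devlink dev eswitch show pci/0000:03:00.0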
---
Note: This series includes uses of enum ice_status; however, we have
patches in our queue to remove it from the driver [1]. We are working
through the patches that precede the removal series.
[1] https://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=265957
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
25 files changed, 1899 insertions, 94 deletions
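A rough end-to-end sketch of the flow the series enables (the PCI address,
interface name, and VF count below are illustrative, not taken from the
patches):

    # Switch the eswitch to switchdev before creating any VFs
    devlink dev eswitch set pci/0000:03:00.0 mode switchdev
    # Spawn VFs; one port representor netdev is created per VF
    echo 2 > /sys/class/net/ens1f0/device/sriov_numvfs
    # Each representor is also registered as a devlink port (PCI VF flavour)
    devlink port show
    ip link show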
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index ed8ea63bb172..0b274d8fa45b 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -313,6 +313,20 @@ config ICE To compile this driver as a module, choose M here. The module will be called ice. +config ICE_SWITCHDEV + bool "Switchdev Support" + default y + depends on ICE && NET_SWITCHDEV + help + Switchdev support provides internal SRIOV packet steering and switching. + + To enable it on running kernel use devlink tool: + #devlink dev eswitch set pci/0000:XX:XX.X mode switchdev + + Say Y here if you want to use Switchdev in the driver. + + If unsure, say N. + config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 4f538cdf42c1..1866be50095d 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -26,10 +26,12 @@ ice-y := ice_main.o \ ice_devlink.o \ ice_fw_update.o \ ice_lag.o \ - ice_ethtool.o + ice_ethtool.o \ + ice_repr.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o +ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 83413772b00c..0d44a3767bad 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -63,6 +63,7 @@ #include "ice_fdir.h" #include "ice_xsk.h" #include "ice_arfs.h" +#include "ice_repr.h" #include "ice_lag.h" #define ICE_BAR0 0 @@ -84,6 +85,7 @@ #define ICE_FDIR_MSIX 2 #define ICE_RDMA_NUM_AEQ_MSIX 4 #define ICE_MIN_RDMA_MSIX 2 +#define ICE_ESWITCH_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -311,10 +313,6 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; - /* devlink port data */ - struct devlink_port devlink_port; - bool devlink_port_registered; - u16 max_frame; u16 rx_buf_len; @@ -354,6 +352,8 @@ struct ice_vsi { u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + struct net_device **target_netdevs; + /* setup back reference, to which aggregator node this VSI * corresponds to */ @@ -413,6 +413,12 @@ enum ice_pf_flags { ICE_PF_FLAGS_NBITS /* must be last */ }; +struct ice_switchdev_info { + struct ice_vsi *control_vsi; + struct ice_vsi *uplink_vsi; + bool is_running; +}; + struct ice_agg_node { u32 agg_id; #define ICE_MAX_VSIS_IN_AGG_NODE 64 @@ -426,6 +432,9 @@ struct ice_pf { struct devlink_region *nvm_region; struct devlink_region *devcaps_region; + /* devlink port data */ + struct devlink_port devlink_port; + /* OS reserved IRQ details */ struct msix_entry *msix_entries; struct ice_res_tracker *irq_tracker; @@ -439,6 +448,7 @@ struct ice_pf { struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ + u16 eswitch_mode; /* current mode of eswitch */ /* Virtchnl/SR-IOV config info */ struct ice_vf *vf; u16 num_alloc_vfs; /* actual number of VFs allocated */ @@ -507,6 +517,8 @@ struct ice_pf { struct ice_link_default_override_tlv link_dflt_override; struct 
ice_lag *lag; /* Link Aggregation information */ + struct ice_switchdev_info switchdev; + #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 #define ICE_MAX_PF_AGG_NODES 32 @@ -518,6 +530,7 @@ struct ice_pf { struct ice_netdev_priv { struct ice_vsi *vsi; + struct ice_repr *repr; }; /** @@ -603,6 +616,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) } /** + * ice_get_netdev_priv_vsi - return VSI associated with netdev priv. + * @np: private netdev structure + */ +static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np) +{ + /* In case of port representor return source port VSI. */ + if (np->repr) + return np->repr->src_vsi; + else + return np->vsi; +} + +/** * ice_get_ctrl_vsi - Get the control VSI * @pf: PF instance */ @@ -616,6 +642,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) } /** + * ice_is_switchdev_running - check if switchdev is configured + * @pf: pointer to PF structure + * + * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV + * and switchdev is configured, false otherwise. + */ +static inline bool ice_is_switchdev_running(struct ice_pf *pf) +{ + return pf->switchdev.is_running; +} + +/** * ice_set_sriov_cap - enable SRIOV in PF flags * @pf: PF struct */ @@ -643,7 +681,9 @@ bool netif_is_ice(struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); int ice_vsi_open_ctrl(struct ice_vsi *vsi); +int ice_vsi_open(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_set_ethtool_repr_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index c36057efc7ae..d7a5ac9346bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -218,6 +218,30 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) } /** + * ice_eswitch_calc_q_handle + * @ring: pointer to ring which unique index is needed + * + * To correctly work with many netdevs ring->q_index of Tx rings on switchdev + * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it + * here by finding index in vsi->tx_rings of this ring. + * + * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen, + * because VSI is get from ring->vsi, so it has to be present in this VSI. 
+ */ +static u16 ice_eswitch_calc_q_handle(struct ice_ring *ring) +{ + struct ice_vsi *vsi = ring->vsi; + int i; + + ice_for_each_txq(vsi, i) { + if (vsi->tx_rings[i] == ring) + return i; + } + + return ICE_INVAL_Q_INDEX; +} + +/** * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * @@ -280,6 +304,9 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; + case ICE_VSI_SWITCHDEV_CTRL: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; + break; default: return; } @@ -746,7 +773,14 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, /* Add unique software queue handle of the Tx queue per * TC into the VSI Tx ring */ - ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + ring->q_handle = ice_eswitch_calc_q_handle(ring); + + if (ring->q_handle == ICE_INVAL_Q_INDEX) + return -ENODEV; + } else { + ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + } status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, 1, qg_buf, buf_len, NULL); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 26b4d5f579e6..73714685fb68 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -683,6 +683,11 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) vsi->idx); continue; } + /* no need to proceed with remaining cfg if it is switchdev + * VSI + */ + if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) + continue; ice_vsi_map_rings_to_vectors(vsi); if (vsi->type == ICE_VSI_PF) diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index cae1cd97a1ef..55353bf4cbef 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_lib.h" #include "ice_devlink.h" +#include "ice_eswitch.h" #include "ice_fw_update.h" /* context for devlink info version reporting */ @@ -423,6 +424,8 @@ ice_devlink_flash_update(struct devlink *devlink, static const struct devlink_ops ice_devlink_ops = { .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, + .eswitch_mode_get = ice_eswitch_mode_get, + .eswitch_mode_set = ice_eswitch_mode_set, .info_get = ice_devlink_info_get, .flash_update = ice_devlink_flash_update, }; @@ -484,60 +487,115 @@ void ice_devlink_unregister(struct ice_pf *pf) } /** - * ice_devlink_create_port - Create a devlink port for this VSI - * @vsi: the VSI to create a port for + * ice_devlink_create_pf_port - Create a devlink port for this PF + * @pf: the PF to create a devlink port for * - * Create and register a devlink_port for this VSI. + * Create and register a devlink_port for this PF. * * Return: zero on success or an error code on failure. 
*/ -int ice_devlink_create_port(struct ice_vsi *vsi) +int ice_devlink_create_pf_port(struct ice_pf *pf) { struct devlink_port_attrs attrs = {}; - struct ice_port_info *pi; + struct devlink_port *devlink_port; struct devlink *devlink; + struct ice_vsi *vsi; struct device *dev; - struct ice_pf *pf; int err; - /* Currently we only create devlink_port instances for PF VSIs */ - if (vsi->type != ICE_VSI_PF) - return -EINVAL; - - pf = vsi->back; - devlink = priv_to_devlink(pf); dev = ice_pf_to_dev(pf); - pi = pf->hw.port_info; + + devlink_port = &pf->devlink_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EIO; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pi->lport; - devlink_port_attrs_set(&vsi->devlink_port, &attrs); - err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx); + attrs.phys.port_number = pf->hw.bus.func; + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devlink_port_register(devlink, devlink_port, vsi->idx); if (err) { - dev_err(dev, "devlink_port_register failed: %d\n", err); + dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", + pf->hw.pf_id, err); return err; } - vsi->devlink_port_registered = true; + return 0; +} + +/** + * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. + */ +void ice_devlink_destroy_pf_port(struct ice_pf *pf) +{ + struct devlink_port *devlink_port; + + devlink_port = &pf->devlink_port; + + devlink_port_type_clear(devlink_port); + devlink_port_unregister(devlink_port); +} + +/** + * ice_devlink_create_vf_port - Create a devlink port for this VF + * @vf: the VF to create a port for + * + * Create and register a devlink_port for this VF. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_vf_port(struct ice_vf *vf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + struct ice_pf *pf; + int err; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + vsi = ice_get_vf_vsi(vf); + devlink_port = &vf->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = pf->hw.bus.func; + attrs.pci_vf.vf = vf->vf_id; + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devlink_port_register(devlink, devlink_port, vsi->idx); + if (err) { + dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", + vf->vf_id, err); + return err; + } return 0; } /** - * ice_devlink_destroy_port - Destroy the devlink_port for this VSI - * @vsi: the VSI to cleanup + * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF + * @vf: the VF to cleanup * - * Unregisters the devlink_port structure associated with this VSI. + * Unregisters the devlink_port structure associated with this VF. 
*/ -void ice_devlink_destroy_port(struct ice_vsi *vsi) +void ice_devlink_destroy_vf_port(struct ice_vf *vf) { - if (!vsi->devlink_port_registered) - return; + struct devlink_port *devlink_port; - devlink_port_type_clear(&vsi->devlink_port); - devlink_port_unregister(&vsi->devlink_port); + devlink_port = &vf->devlink_port; - vsi->devlink_port_registered = false; + devlink_port_type_clear(devlink_port); + devlink_port_unregister(devlink_port); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index e721d7b0d627..b7f9551e4fc4 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -8,8 +8,10 @@ struct ice_pf *ice_allocate_pf(struct device *dev); void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); -int ice_devlink_create_port(struct ice_vsi *vsi); -void ice_devlink_destroy_port(struct ice_vsi *vsi); +int ice_devlink_create_pf_port(struct ice_pf *pf); +void ice_devlink_destroy_pf_port(struct ice_pf *pf); +int ice_devlink_create_vf_port(struct ice_vf *vf); +void ice_devlink_destroy_vf_port(struct ice_vf *vf); void ice_devlink_init_regions(struct ice_pf *pf); void ice_devlink_destroy_regions(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c new file mode 100644 index 000000000000..477e3f2d616d --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_eswitch.h" +#include "ice_fltr.h" +#include "ice_repr.h" +#include "ice_devlink.h" + +/** + * ice_eswitch_setup_env - configure switchdev HW filters + * @pf: pointer to PF struct + * + * This function adds HW filters configuration specific for switchdev + * mode. 
+ */ +static int ice_eswitch_setup_env(struct ice_pf *pf) +{ + struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_port_info *pi = pf->hw.port_info; + bool rule_added = false; + + ice_vsi_manage_vlan_stripping(ctrl_vsi, false); + + ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + + if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI)) + goto err_def_rx; + + if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) { + if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi)) + goto err_def_rx; + rule_added = true; + } + + if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX)) + goto err_def_tx; + + if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) + goto err_override_uplink; + + if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) + goto err_override_control; + + if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id, + ICE_FLTR_TX, + ICE_SINGLE_ACT_LB_ENABLE)) + goto err_update_action; + + return 0; + +err_update_action: + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); +err_override_control: + ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); +err_override_uplink: + ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); +err_def_tx: + if (rule_added) + ice_clear_dflt_vsi(uplink_vsi->vsw); +err_def_rx: + ice_fltr_add_mac_and_broadcast(uplink_vsi, + uplink_vsi->port_info->mac.perm_addr, + ICE_FWD_TO_VSI); + return -ENODEV; +} + +/** + * ice_eswitch_remap_ring - reconfigure ring of switchdev ctrl VSI + * @ring: pointer to ring + * @q_vector: pointer of q_vector which is connected with this ring + * @netdev: netdevice connected with this ring + */ +static void +ice_eswitch_remap_ring(struct ice_ring *ring, struct ice_q_vector *q_vector, + struct net_device *netdev) +{ + ring->q_vector = q_vector; + ring->next = NULL; + ring->netdev = netdev; +} + +/** + * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI + * @pf: pointer to PF struct + * + * In switchdev number of allocated Tx/Rx rings is equal. + * + * This function fills q_vectors structures associated with representor and + * move each ring pairs to port representor netdevs. Each port representor + * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to + * number of VFs. + */ +static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +{ + struct ice_vsi *vsi = pf->switchdev.control_vsi; + int q_id; + + ice_for_each_txq(vsi, q_id) { + struct ice_repr *repr = pf->vf[q_id].repr; + struct ice_q_vector *q_vector = repr->q_vector; + struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + + q_vector->vsi = vsi; + q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; + + q_vector->num_ring_tx = 1; + q_vector->tx.ring = tx_ring; + ice_eswitch_remap_ring(tx_ring, q_vector, repr->netdev); + /* In switchdev mode, from OS stack perspective, there is only + * one queue for given netdev, so it needs to be indexed as 0. 
+ */ + tx_ring->q_index = 0; + + q_vector->num_ring_rx = 1; + q_vector->rx.ring = rx_ring; + ice_eswitch_remap_ring(rx_ring, q_vector, repr->netdev); + } +} + +/** + * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode + * @pf: pointer to PF struct + */ +static int ice_eswitch_setup_reprs(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + int max_vsi_num = 0; + int i; + + ice_for_each_vf(pf, i) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf->repr->dst) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + goto err; + } + + if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + metadata_dst_free(vf->repr->dst); + goto err; + } + + if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) { + ice_fltr_add_mac_and_broadcast(vsi, + vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + metadata_dst_free(vf->repr->dst); + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + goto err; + } + + if (max_vsi_num < vsi->vsi_num) + max_vsi_num = vsi->vsi_num; + + netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + + netif_keep_dst(vf->repr->netdev); + } + + kfree(ctrl_vsi->target_netdevs); + + ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1, + sizeof(*ctrl_vsi->target_netdevs), + GFP_KERNEL); + if (!ctrl_vsi->target_netdevs) + goto err; + + ice_for_each_vf(pf, i) { + struct ice_repr *repr = pf->vf[i].repr; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; + + ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev; + + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = repr->netdev; + ice_repr_set_traffic_vsi(repr, ctrl_vsi); + } + + return 0; + +err: + for (i = i - 1; i >= 0; i--) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + } + + return -ENODEV; +} + +/** + * ice_eswitch_release_reprs - clear PR VSIs configuration + * @pf: poiner to PF struct + * @ctrl_vsi: pointer to switchdev control VSI + */ +static void +ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +{ + int i; + + kfree(ctrl_vsi->target_netdevs); + ice_for_each_vf(pf, i) { + struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; + struct ice_vf *vf = &pf->vf[i]; + + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(vf->repr->dst); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ICE_FWD_TO_VSI); + + netif_napi_del(&vf->repr->q_vector->napi); + } +} + +/** + * ice_eswitch_update_repr - reconfigure VF port representor + * @vsi: VF VSI for which port representor is configured + */ +void ice_eswitch_update_repr(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_repr *repr; + struct ice_vf *vf; + int ret; + + if (!ice_is_switchdev_running(pf)) + return; + + vf = &pf->vf[vsi->vf_id]; + repr = vf->repr; + repr->src_vsi = vsi; + repr->dst->u.port_info.port_id = vsi->vsi_num; + + ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (ret) { + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); + 
dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id); + } +} + +/** + * ice_eswitch_port_start_xmit - callback for packets transmit + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_repr *repr; + struct ice_vsi *vsi; + + np = netdev_priv(netdev); + vsi = np->vsi; + + if (ice_is_reset_in_progress(vsi->back->state)) + return NETDEV_TX_BUSY; + + repr = ice_netdev_to_repr(netdev); + skb_dst_drop(skb); + dst_hold((struct dst_entry *)repr->dst); + skb_dst_set(skb, (struct dst_entry *)repr->dst); + skb->queue_mapping = repr->vf->vf_id; + + return ice_start_xmit(skb, netdev); +} + +/** + * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * @skb: pointer to send buffer + * @off: pointer to offload struct + */ +void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) +{ + struct metadata_dst *dst = skb_metadata_dst(skb); + u64 cd_cmd, dst_vsi; + + if (!dst) { + cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; + off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); + } else { + cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S; + dst_vsi = ((u64)dst->u.port_info.port_id << + ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M; + off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX; + } +} + +/** + * ice_eswitch_release_env - clear switchdev HW filters + * @pf: pointer to PF struct + * + * This function removes HW filters configuration specific for switchdev + * mode and restores default legacy mode settings. + */ +static void ice_eswitch_release_env(struct ice_pf *pf) +{ + struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); + ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); + ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); + ice_clear_dflt_vsi(uplink_vsi->vsw); + ice_fltr_add_mac_and_broadcast(uplink_vsi, + uplink_vsi->port_info->mac.perm_addr, + ICE_FWD_TO_VSI); +} + +/** + * ice_eswitch_vsi_setup - configure switchdev control VSI + * @pf: pointer to PF structure + * @pi: pointer to port_info structure + */ +static struct ice_vsi * +ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID); +} + +/** + * ice_eswitch_napi_del - remove NAPI handle for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_del(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + netif_napi_del(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_napi_enable - enable NAPI for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_enable(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + napi_enable(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_napi_disable - disable NAPI for all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_napi_disable(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) + napi_disable(&pf->vf[i].repr->q_vector->napi); +} + +/** + * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI + * @vsi: VSI to setup rxdid on + * @rxdid: flex descriptor id + */ +static void 
ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid) +{ + struct ice_hw *hw = &vsi->back->hw; + int i; + + ice_for_each_rxq(vsi, i) { + struct ice_ring *ring = vsi->rx_rings[i]; + u16 pf_q = vsi->rxq_map[ring->q_index]; + + ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); + } +} + +/** + * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode + * @pf: pointer to PF structure + */ +static int ice_eswitch_enable_switchdev(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi; + + pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); + if (!pf->switchdev.control_vsi) + return -ENODEV; + + ctrl_vsi = pf->switchdev.control_vsi; + pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); + if (!pf->switchdev.uplink_vsi) + goto err_vsi; + + if (ice_eswitch_setup_env(pf)) + goto err_vsi; + + if (ice_repr_add_for_all_vfs(pf)) + goto err_repr_add; + + if (ice_eswitch_setup_reprs(pf)) + goto err_setup_reprs; + + ice_eswitch_remap_rings_to_vectors(pf); + + if (ice_vsi_open(ctrl_vsi)) + goto err_setup_reprs; + + ice_eswitch_napi_enable(pf); + + ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); + + return 0; + +err_setup_reprs: + ice_repr_rem_from_all_vfs(pf); +err_repr_add: + ice_eswitch_release_env(pf); +err_vsi: + ice_vsi_release(ctrl_vsi); + return -ENODEV; +} + +/** + * ice_eswitch_disable_switchdev - disable switchdev resources + * @pf: pointer to PF structure + */ +static void ice_eswitch_disable_switchdev(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + + ice_eswitch_napi_disable(pf); + ice_eswitch_release_env(pf); + ice_eswitch_release_reprs(pf, ctrl_vsi); + ice_vsi_release(ctrl_vsi); + ice_repr_rem_from_all_vfs(pf); +} + +/** + * ice_eswitch_mode_set - set new eswitch mode + * @devlink: pointer to devlink structure + * @mode: eswitch mode to switch to + * @extack: pointer to extack structure + */ +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (pf->eswitch_mode == mode) + return 0; + + if (pf->num_alloc_vfs) { + dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created"); + NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created"); + return -EOPNOTSUPP; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", + pf->hw.pf_id); + NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + { + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", + pf->hw.pf_id); + NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); + break; + } + default: + NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode"); + return -EINVAL; + } + + pf->eswitch_mode = mode; + return 0; +} + +/** + * ice_eswitch_get_target_netdev - return port representor netdev + * @rx_ring: pointer to Rx ring + * @rx_desc: pointer to Rx descriptor + * + * When working in switchdev mode context (when control VSI is used), this + * function returns netdev of appropriate port representor. For non-switchdev + * context, regular netdev associated with Rx ring is returned. 
+ */ +struct net_device * +ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_vsi *vsi = rx_ring->vsi; + struct ice_vsi *control_vsi; + u16 target_vsi_id; + + control_vsi = vsi->back->switchdev.control_vsi; + if (vsi != control_vsi) + return rx_ring->netdev; + + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + target_vsi_id = le16_to_cpu(desc->src_vsi); + + return vsi->target_netdevs[target_vsi_id]; +} + +/** + * ice_eswitch_mode_get - get current eswitch mode + * @devlink: pointer to devlink structure + * @mode: output parameter for current eswitch mode + */ +int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct ice_pf *pf = devlink_priv(devlink); + + *mode = pf->eswitch_mode; + return 0; +} + +/** + * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev + * @pf: pointer to PF structure + * + * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV, + * false otherwise. + */ +bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) +{ + return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV; +} + +/** + * ice_eswitch_release - cleanup eswitch + * @pf: pointer to PF structure + */ +void ice_eswitch_release(struct ice_pf *pf) +{ + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + return; + + ice_eswitch_disable_switchdev(pf); + pf->switchdev.is_running = false; +} + +/** + * ice_eswitch_configure - configure eswitch + * @pf: pointer to PF structure + */ +int ice_eswitch_configure(struct ice_pf *pf) +{ + int status; + + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) + return 0; + + status = ice_eswitch_enable_switchdev(pf); + if (status) + return status; + + pf->switchdev.is_running = true; + return 0; +} + +/** + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors + * @pf: pointer to PF structure + */ +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + int i; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + ice_for_each_vf(pf, i) { + repr = pf->vf[i].repr; + if (repr) + ice_repr_start_tx_queues(repr); + } +} + +/** + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors + * @pf: pointer to PF structure + */ +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +{ + struct ice_repr *repr; + int i; + + if (test_bit(ICE_DOWN, pf->state)) + return; + + ice_for_each_vf(pf, i) { + repr = pf->vf[i].repr; + if (repr) + ice_repr_stop_tx_queues(repr); + } +} + +/** + * ice_eswitch_rebuild - rebuild eswitch + * @pf: pointer to PF structure + */ +int ice_eswitch_rebuild(struct ice_pf *pf) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + int status; + + ice_eswitch_napi_disable(pf); + ice_eswitch_napi_del(pf); + + status = ice_eswitch_setup_env(pf); + if (status) + return status; + + status = ice_eswitch_setup_reprs(pf); + if (status) + return status; + + ice_eswitch_remap_rings_to_vectors(pf); + + status = ice_vsi_open(ctrl_vsi); + if (status) + return status; + + ice_eswitch_napi_enable(pf); + ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); + ice_eswitch_start_all_tx_queues(pf); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h new file mode 100644 index 000000000000..23df0d400847 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 
2019-2021, Intel Corporation. */ + +#ifndef _ICE_ESWITCH_H_ +#define _ICE_ESWITCH_H_ + +#include <net/devlink.h> + +#ifdef CONFIG_ICE_SWITCHDEV +void ice_eswitch_release(struct ice_pf *pf); +int ice_eswitch_configure(struct ice_pf *pf); +int ice_eswitch_rebuild(struct ice_pf *pf); + +int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); +int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); +bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); + +void ice_eswitch_update_repr(struct ice_vsi *vsi); + +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); + +struct net_device * +ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc); + +void ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off); +netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); +#else /* CONFIG_ICE_SWITCHDEV */ +static inline void ice_eswitch_release(struct ice_pf *pf) { } + +static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } + +static inline void +ice_eswitch_set_target_vsi(struct sk_buff *skb, + struct ice_tx_offload_params *off) { } + +static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } + +static inline int ice_eswitch_configure(struct ice_pf *pf) +{ + return -EOPNOTSUPP; +} + +static inline int ice_eswitch_rebuild(struct ice_pf *pf) +{ + return -EOPNOTSUPP; +} + +static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + return DEVLINK_ESWITCH_MODE_LEGACY; +} + +static inline int +ice_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) +{ + return false; +} + +static inline struct net_device * +ice_eswitch_get_target_netdev(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + return rx_ring->netdev; +} + +static inline netdev_tx_t +ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + return NETDEV_TX_BUSY; +} +#endif /* CONFIG_ICE_SWITCHDEV */ +#endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 6f0a29be3ee5..201979cc47fb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) static void -ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, + struct ice_vsi *vsi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct ice_orom_info *orom; @@ -196,6 +195,26 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } +static void +ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + __ice_get_drvinfo(netdev, drvinfo, np->vsi); +} + +static void +ice_repr_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + if (ice_check_vf_ready_for_cfg(repr->vf)) + return; + + __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); 
+} + static int ice_get_regs_len(struct net_device __always_unused *netdev) { return sizeof(ice_regs_dump_list); @@ -869,7 +888,7 @@ skip_ol_tests: static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; + struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np); unsigned int i; u8 *p = data; @@ -879,6 +898,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ethtool_sprintf(&p, ice_gstrings_vsi_stats[i].stat_string); + if (ice_is_port_repr_netdev(netdev)) + return; + ice_for_each_alloc_txq(vsi, i) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -1308,6 +1330,9 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) * order of strings will suffer from race conditions and are * not safe. */ + if (ice_is_port_repr_netdev(netdev)) + return ICE_VSI_STATS_LEN; + return ICE_ALL_STATS_LEN(netdev); case ETH_SS_TEST: return ICE_TEST_LEN; @@ -1323,7 +1348,7 @@ ice_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; + struct ice_vsi *vsi = ice_get_netdev_priv_vsi(np); struct ice_pf *pf = vsi->back; struct ice_ring *ring; unsigned int j; @@ -1339,6 +1364,9 @@ ice_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + if (ice_is_port_repr_netdev(netdev)) + return; + /* populate per queue stats */ rcu_read_lock(); @@ -4062,6 +4090,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; } +static const struct ethtool_ops ice_ethtool_repr_ops = { + .get_drvinfo = ice_repr_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = ice_get_strings, + .get_ethtool_stats = ice_get_ethtool_stats, + .get_sset_count = ice_get_sset_count, +}; + +/** + * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops + * @netdev: network interface device structure + */ +void ice_set_ethtool_repr_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_repr_ops; +} + /** * ice_set_ethtool_ops - setup netdev ethtool ops * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index 2418d4fff037..c2e78eaf4ccb 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -395,3 +395,83 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); } + +/** + * ice_fltr_update_rule_flags - update lan_en/lb_en flags + * @hw: pointer to hw + * @rule_id: id of rule being updated + * @recipe_id: recipe id of rule + * @act: current action field + * @type: Rx or Tx + * @src: source VSI + * @new_flags: combinations of lb_en and lan_en + */ +static enum ice_status +ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, + u32 act, u16 type, u16 src, u32 new_flags) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status err; + u32 flags_mask; + + s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; + act &= ~flags_mask; + act |= (flags_mask & new_flags); + + s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id); + 
s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); + + if (type & ICE_FLTR_RX) { + s_rule->pdata.lkup_tx_rx.src = + cpu_to_le16(hw->port_info->lport); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + + } else { + s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src); + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + } + + err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_update_sw_rules, NULL); + + kfree(s_rule); + return err; +} + +/** + * ice_fltr_build_action - build action for rule + * @vsi_id: id of VSI which is use to build action + */ +static u32 ice_fltr_build_action(u16 vsi_id) +{ + return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) | + ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; +} + +/** + * ice_fltr_update_flags_dflt_rule - update flags on default rule + * @vsi: pointer to VSI + * @rule_id: id of rule + * @direction: Tx or Rx + * @new_flags: flags to update + * + * Function updates flags on default rule with ICE_SW_LKUP_DFLT. + * + * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and + * ICE_SINGLE_ACT_LAN_ENABLE. + */ +enum ice_status +ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, + u32 new_flags) +{ + u32 action = ice_fltr_build_action(vsi->vsi_num); + struct ice_hw *hw = &vsi->back->hw; + + return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action, + direction, vsi->vsi_num, new_flags); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h index 361cb4da9b43..949e38ce016c 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.h +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -36,4 +36,11 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); + +enum ice_status +ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, + u32 new_flags); +enum ice_status +ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, + u32 new_flags); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 80736e0ec0dc..d981dc6f2323 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic { } flex_ts; }; +/* Rx Flex Descriptor NIC Profile + * RxDID Profile ID 6 + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Source VSI + * Flex-field 4: reserved, VLAN ID taken from L2Tag + */ +struct ice_32b_rx_flex_desc_nic_2 { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flow_id; + __le16 src_vsi; + union { + struct { + __le16 rsvd; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed @@ -529,6 +569,9 @@ struct ice_tx_ctx_desc { #define ICE_TXD_CTX_QW1_MSS_S 50 +#define 
ICE_TXD_CTX_QW1_VSI_S 50 +#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S) + enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_TSO = 0x01, ICE_TX_CTX_DESC_TSYN = 0x02, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 3adbd9a179a7..93565f597266 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -24,6 +24,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_CTRL"; case ICE_VSI_LB: return "ICE_VSI_LB"; + case ICE_VSI_SWITCHDEV_CTRL: + return "ICE_VSI_SWITCHDEV_CTRL"; default: return "unknown"; } @@ -132,6 +134,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -200,6 +203,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) max_t(int, vsi->alloc_rxq, vsi->alloc_txq)); break; + case ICE_VSI_SWITCHDEV_CTRL: + /* The number of queues for ctrl VSI is equal to number of VFs. + * Each ring is associated to the corresponding VF_PR netdev. + */ + vsi->alloc_txq = pf->num_alloc_vfs; + vsi->alloc_rxq = pf->num_alloc_vfs; + vsi->num_q_vectors = 1; + break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; if (vf->num_req_qs) @@ -408,6 +419,21 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } +static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ice_q_vector *q_vector = (struct ice_q_vector *)data; + struct ice_pf *pf = q_vector->vsi->back; + int i; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + ice_for_each_vf(pf, i) + napi_schedule(&pf->vf[i].repr->q_vector->napi); + + return IRQ_HANDLED; +} + /** * ice_vsi_alloc - Allocates the next available struct VSI in the PF * @pf: board private structure @@ -448,6 +474,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id) ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); switch (vsi->type) { + case ICE_VSI_SWITCHDEV_CTRL: + if (ice_vsi_alloc_arrays(vsi)) + goto err_rings; + + /* Setup eswitch MSIX irq handler for VSI */ + vsi->irq_handler = ice_eswitch_msix_clean_rings; + break; case ICE_VSI_PF: if (ice_vsi_alloc_arrays(vsi)) goto err_rings; @@ -707,6 +740,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) BIT(cap->rss_table_entry_width)); vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; + case ICE_VSI_SWITCHDEV_CTRL: + vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vsi->rss_size = min_t(u16, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes. 
@@ -980,6 +1019,9 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; + case ICE_VSI_SWITCHDEV_CTRL: + ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; + break; case ICE_VSI_VF: ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */ @@ -2297,6 +2339,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) case ICE_VSI_CTRL: case ICE_VSI_LB: case ICE_VSI_PF: + case ICE_VSI_SWITCHDEV_CTRL: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2448,6 +2491,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, switch (vsi->type) { case ICE_VSI_CTRL: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2757,7 +2801,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } else { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL || + vsi->type == ICE_VSI_SWITCHDEV_CTRL) { ice_vsi_close(vsi); } } @@ -2859,7 +2904,8 @@ int ice_vsi_release(struct ice_vsi *vsi) clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } - ice_devlink_destroy_port(vsi); + if (vsi->type == ICE_VSI_PF) + ice_devlink_destroy_pf_port(pf); if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); @@ -3135,6 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) switch (vtype) { case ICE_VSI_CTRL: + case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -3620,3 +3667,64 @@ void ice_init_feature_support(struct ice_pf *pf) break; } } + +/** + * ice_vsi_update_security - update security block in VSI + * @vsi: pointer to VSI structure + * @fill: function pointer to fill ctx + */ +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) +{ + struct ice_vsi_ctx ctx = { 0 }; + + ctx.info = vsi->info; + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + fill(&ctx); + + if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) + return -ENODEV; + + vsi->info = ctx.info; + return 0; +} + +/** + * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | + (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); +} + +/** + * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF & + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); +} + +/** + * ice_vsi_ctx_set_allow_override - allow destination override on VSI + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; +} + +/** + * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI + * @ctx: pointer to VSI ctx structure + */ +void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) +{ + ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 4512c8513178..3f3fef6551c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -116,6 +116,18 @@ bool 
ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_clear_dflt_vsi(struct ice_sw *sw); + +int +ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)); + +void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); + +void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); + bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); void ice_init_feature_support(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index cabe84bb29fe..b3066cfca6b7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -19,6 +19,7 @@ */ #define CREATE_TRACE_POINTS #include "ice_trace.h" +#include "ice_eswitch.h" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" static const char ice_driver_string[] = DRV_SUMMARY; @@ -46,7 +47,6 @@ static DEFINE_IDA(ice_aux_ida); static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static int ice_vsi_open(struct ice_vsi *vsi); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); @@ -3542,6 +3542,13 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_left -= needed; } + /* reserve for switchdev */ + needed = ICE_ESWITCH_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + v_budget += needed; + v_left -= needed; + /* total used for non-traffic vectors */ v_other = v_budget; @@ -4174,11 +4181,11 @@ static int ice_register_netdev(struct ice_pf *pf) set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); - err = ice_devlink_create_port(vsi); + err = ice_devlink_create_pf_port(pf); if (err) goto err_devlink_create; - devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); + devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); return 0; err_devlink_create: @@ -5986,9 +5993,11 @@ int ice_down(struct ice_vsi *vsi) /* Caller of this function is expected to set the * vsi->state ICE_DOWN bit */ - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); + } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + ice_eswitch_stop_all_tx_queues(vsi->back); } ice_vsi_dis_irq(vsi); @@ -6055,7 +6064,8 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) if (!ring) return -EINVAL; - ring->netdev = vsi->netdev; + if (vsi->netdev) + ring->netdev = vsi->netdev; err = ice_setup_tx_ring(ring); if (err) break; @@ -6086,7 +6096,8 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) if (!ring) return -EINVAL; - ring->netdev = vsi->netdev; + if (vsi->netdev) + ring->netdev = vsi->netdev; err = ice_setup_rx_ring(ring); if (err) break; @@ -6159,7 +6170,7 @@ err_setup_tx: * * Returns 0 on success, negative value on error */ -static int ice_vsi_open(struct ice_vsi *vsi) +int ice_vsi_open(struct ice_vsi *vsi) { char int_name[ICE_INT_NAME_STR_LEN]; struct ice_pf *pf = vsi->back; @@ -6184,14 +6195,16 @@ static int ice_vsi_open(struct ice_vsi *vsi) if (err) goto err_setup_rx; - /* Notify the stack of the actual queue counts. 
*/ - err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); - if (err) - goto err_set_qs; + if (vsi->type == ICE_VSI_PF) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); + if (err) + goto err_set_qs; - err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); - if (err) - goto err_set_qs; + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); + if (err) + goto err_set_qs; + } err = ice_up_complete(vsi); if (err) @@ -6430,6 +6443,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); + if (err) { + dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + goto err_vsi_rebuild; + } + /* If Flow Director is active */ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c new file mode 100644 index 000000000000..cb83f58d7c71 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#include "ice.h" +#include "ice_eswitch.h" +#include "ice_devlink.h" +#include "ice_virtchnl_pf.h" + +/** + * ice_repr_get_sw_port_id - get port ID associated with representor + * @repr: pointer to port representor + */ +static int ice_repr_get_sw_port_id(struct ice_repr *repr) +{ + return repr->vf->pf->hw.port_info->lport; +} + +/** + * ice_repr_get_phys_port_name - get phys port name + * @netdev: pointer to port representor netdev + * @buf: write here port name + * @len: max length of buf + */ +static int +ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_repr *repr = np->repr; + int res; + + /* Devlink port is registered and devlink core is taking care of name formatting. 
*/ + if (repr->vf->devlink_port.devlink) + return -EOPNOTSUPP; + + res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), + repr->vf->vf_id); + if (res <= 0) + return -EOPNOTSUPP; + return 0; +} + +/** + * ice_repr_get_stats64 - get VF stats for VFPR use + * @netdev: pointer to port representor netdev + * @stats: pointer to struct where stats can be stored + */ +static void +ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_eth_stats *eth_stats; + struct ice_vsi *vsi; + + if (ice_is_vf_disabled(np->repr->vf)) + return; + vsi = np->repr->src_vsi; + + ice_update_vsi_stats(vsi); + eth_stats = &vsi->eth_stats; + + stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast + + eth_stats->tx_multicast; + stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast + + eth_stats->rx_multicast; + stats->tx_bytes = eth_stats->tx_bytes; + stats->rx_bytes = eth_stats->rx_bytes; + stats->multicast = eth_stats->rx_multicast; + stats->tx_errors = eth_stats->tx_errors; + stats->tx_dropped = eth_stats->tx_discards; + stats->rx_dropped = eth_stats->rx_discards; +} + +/** + * ice_netdev_to_repr - Get port representor for given netdevice + * @netdev: pointer to port representor netdev + */ +struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + + return np->repr; +} + +/** + * ice_repr_open - Enable port representor's network interface + * @netdev: network interface device structure + * + * The open entry point is called when a port representor's network + * interface is made active by the system (IFF_UP). Corresponding + * VF is notified about link status change. + * + * Returns 0 on success + */ +static int ice_repr_open(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_vf *vf; + + vf = repr->vf; + vf->link_forced = true; + vf->link_up = true; + ice_vc_notify_vf_link_state(vf); + + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +} + +/** + * ice_repr_stop - Disable port representor's network interface + * @netdev: network interface device structure + * + * The stop entry point is called when a port representor's network + * interface is de-activated by the system. Corresponding + * VF is notified about link status change. 
+ * + * Returns 0 on success + */ +static int ice_repr_stop(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_vf *vf; + + vf = repr->vf; + vf->link_forced = true; + vf->link_up = false; + ice_vc_notify_vf_link_state(vf); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + +static struct devlink_port * +ice_repr_get_devlink_port(struct net_device *netdev) +{ + struct ice_repr *repr = ice_netdev_to_repr(netdev); + + return &repr->vf->devlink_port; +} + +static const struct net_device_ops ice_repr_netdev_ops = { + .ndo_get_phys_port_name = ice_repr_get_phys_port_name, + .ndo_get_stats64 = ice_repr_get_stats64, + .ndo_open = ice_repr_open, + .ndo_stop = ice_repr_stop, + .ndo_start_xmit = ice_eswitch_port_start_xmit, + .ndo_get_devlink_port = ice_repr_get_devlink_port, +}; + +/** + * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev + * @netdev: pointer to netdev + */ +bool ice_is_port_repr_netdev(struct net_device *netdev) +{ + return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); +} + +/** + * ice_repr_reg_netdev - register port representor netdev + * @netdev: pointer to port representor netdev + */ +static int +ice_repr_reg_netdev(struct net_device *netdev) +{ + eth_hw_addr_random(netdev); + netdev->netdev_ops = &ice_repr_netdev_ops; + ice_set_ethtool_repr_ops(netdev); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return register_netdev(netdev); +} + +/** + * ice_repr_add - add representor for VF + * @vf: pointer to VF structure + */ +static int ice_repr_add(struct ice_vf *vf) +{ + struct ice_q_vector *q_vector; + struct ice_netdev_priv *np; + struct ice_repr *repr; + int err; + + repr = kzalloc(sizeof(*repr), GFP_KERNEL); + if (!repr) + return -ENOMEM; + + repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); + if (!repr->netdev) { + err = -ENOMEM; + goto err_alloc; + } + + repr->src_vsi = ice_get_vf_vsi(vf); + repr->vf = vf; + vf->repr = repr; + np = netdev_priv(repr->netdev); + np->repr = repr; + + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) { + err = -ENOMEM; + goto err_alloc_q_vector; + } + repr->q_vector = q_vector; + + err = ice_devlink_create_vf_port(vf); + if (err) + goto err_devlink; + + err = ice_repr_reg_netdev(repr->netdev); + if (err) + goto err_netdev; + + devlink_port_type_eth_set(&vf->devlink_port, repr->netdev); + + return 0; + +err_netdev: + ice_devlink_destroy_vf_port(vf); +err_devlink: + kfree(repr->q_vector); + vf->repr->q_vector = NULL; +err_alloc_q_vector: + free_netdev(repr->netdev); + repr->netdev = NULL; +err_alloc: + kfree(repr); + vf->repr = NULL; + return err; +} + +/** + * ice_repr_rem - remove representor from VF + * @vf: pointer to VF structure + */ +static void ice_repr_rem(struct ice_vf *vf) +{ + ice_devlink_destroy_vf_port(vf); + kfree(vf->repr->q_vector); + vf->repr->q_vector = NULL; + unregister_netdev(vf->repr->netdev); + free_netdev(vf->repr->netdev); + vf->repr->netdev = NULL; + kfree(vf->repr); + vf->repr = NULL; +} + +/** + * ice_repr_add_for_all_vfs - add port representor for all VFs + * @pf: pointer to PF structure + */ +int ice_repr_add_for_all_vfs(struct ice_pf *pf) +{ + int err; + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + err = ice_repr_add(vf); + if (err) + goto err; + + ice_vc_change_ops_to_repr(&vf->vc_ops); + } + + return 0; + +err: + for (i = i - 1; i >= 0; i--) { + struct ice_vf *vf = &pf->vf[i]; + + ice_repr_rem(vf); + 
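/* restore the default virtchnl handlers for the VF whose representor was just removed */ +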
ice_vc_set_dflt_vf_ops(&vf->vc_ops); + } + + return err; +} + +/** + * ice_repr_rem_from_all_vfs - remove port representor for all VFs + * @pf: pointer to PF structure + */ +void ice_repr_rem_from_all_vfs(struct ice_pf *pf) +{ + int i; + + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + + ice_repr_rem(vf); + ice_vc_set_dflt_vf_ops(&vf->vc_ops); + } +} + +/** + * ice_repr_start_tx_queues - start Tx queues of port representor + * @repr: pointer to repr structure + */ +void ice_repr_start_tx_queues(struct ice_repr *repr) +{ + netif_carrier_on(repr->netdev); + netif_tx_start_all_queues(repr->netdev); +} + +/** + * ice_repr_stop_tx_queues - stop Tx queues of port representor + * @repr: pointer to repr structure + */ +void ice_repr_stop_tx_queues(struct ice_repr *repr) +{ + netif_carrier_off(repr->netdev); + netif_tx_stop_all_queues(repr->netdev); +} + +/** + * ice_repr_set_traffic_vsi - set traffic VSI for port representor + * @repr: repr on with VSI will be set + * @vsi: pointer to VSI that will be used by port representor to pass traffic + */ +void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi) +{ + struct ice_netdev_priv *np = netdev_priv(repr->netdev); + + np->vsi = vsi; +} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h new file mode 100644 index 000000000000..806de22933c6 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2021, Intel Corporation. */ + +#ifndef _ICE_REPR_H_ +#define _ICE_REPR_H_ + +#include <net/dst_metadata.h> +#include "ice.h" + +struct ice_repr { + struct ice_vsi *src_vsi; + struct ice_vf *vf; + struct ice_q_vector *q_vector; + struct net_device *netdev; + struct metadata_dst *dst; +}; + +int ice_repr_add_for_all_vfs(struct ice_pf *pf); +void ice_repr_rem_from_all_vfs(struct ice_pf *pf); + +void ice_repr_start_tx_queues(struct ice_repr *repr); +void ice_repr_stop_tx_queues(struct ice_repr *repr); + +void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); + +struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); +bool ice_is_port_repr_netdev(struct net_device *netdev); +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 3b6c1420aa7b..1e86a6dba454 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -518,7 +518,7 @@ ice_aq_alloc_free_vsi_list_exit: * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -static enum ice_status +enum ice_status ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index c5db8d56133f..e6eeffb3dde9 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -14,6 +14,9 @@ #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ + (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) + /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; @@ -251,4 +254,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); +enum ice_status 
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, + u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 13b2bdc25b0d..4da9420df1b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -14,6 +14,7 @@ #include "ice_trace.h" #include "ice_dcb_lib.h" #include "ice_xsk.h" +#include "ice_eswitch.h" #define ICE_RX_HDR_SIZE 256 @@ -2246,6 +2247,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); + if (ice_is_switchdev_running(vsi->back)) + ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 171397dcf00a..e314a1aee0ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_txrx_lib.h" +#include "ice_eswitch.h" /** * ice_release_rx_desc - Store the new tail and head values @@ -185,7 +186,8 @@ ice_process_skb_fields(struct ice_ring *rx_ring, ice_rx_hash(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev + (rx_ring, rx_desc)); ice_rx_csum(rx_ring, skb, rx_desc, ptype); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 6705f56be020..e064439fc1a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -139,6 +139,7 @@ enum ice_vsi_type { ICE_VSI_VF = 1, ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_LB = 6, + ICE_VSI_SWITCHDEV_CTRL = 7, }; struct ice_link_status { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e93430ab37f1..4d0b643906ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -6,6 +6,7 @@ #include "ice_lib.h" #include "ice_fltr.h" #include "ice_flow.h" +#include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" #define FIELD_SELECTOR(proto_hdr_field) \ @@ -251,7 +252,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { * ice_get_vf_vsi - get VF's VSI based on the stored index * @vf: VF used to get VSI */ -static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) { return vf->pf->vsi[vf->lan_vsi_idx]; } @@ -412,7 +413,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf) * * send a link status message to a single VF */ -static void ice_vc_notify_vf_link_state(struct ice_vf *vf) +void ice_vc_notify_vf_link_state(struct ice_vf *vf) { struct virtchnl_pf_event pfe = { 0 }; struct ice_hw *hw = &vf->pf->hw; @@ -620,6 +621,8 @@ void ice_free_vfs(struct ice_pf *pf) if (!pf->vf) return; + ice_eswitch_release(pf); + while (test_and_set_bit(ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); @@ -932,6 +935,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) enum ice_status status; u8 broadcast[ETH_ALEN]; + if (ice_is_eswitch_mode_switchdev(vf->pf)) + return 0; + eth_broadcast_addr(broadcast); status = 
ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); if (status) { @@ -1581,6 +1587,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vf_post_vsi_rebuild(vf); } + if (ice_is_eswitch_mode_switchdev(pf)) + if (ice_eswitch_rebuild(pf)) + dev_warn(dev, "eswitch rebuild failed\n"); + ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); @@ -1593,7 +1603,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * * Returns true if the PF or VF is disabled, false otherwise. */ -static bool ice_is_vf_disabled(struct ice_vf *vf) +bool ice_is_vf_disabled(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; @@ -1711,6 +1721,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) } ice_vf_post_vsi_rebuild(vf); + vsi = ice_get_vf_vsi(vf); + ice_eswitch_update_repr(vsi); /* if the VF has been reset allow it to come up again */ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) @@ -1894,6 +1906,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) */ ice_vf_ctrl_invalidate_vsi(vf); ice_vf_fdir_init(vf); + + ice_vc_set_dflt_vf_ops(&vf->vc_ops); } } @@ -1960,6 +1974,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) } clear_bit(ICE_VF_DIS, pf->state); + + if (ice_eswitch_configure(pf)) + goto err_unroll_sriov; + return 0; err_unroll_sriov: @@ -2823,7 +2841,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf) * disabled, and initialized so it can be configured and/or queried by a host * administrator. */ -static int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +int ice_check_vf_ready_for_cfg(struct ice_vf *vf) { struct ice_pf *pf; @@ -3802,6 +3820,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) } /** + * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF + * @vf: VF to update + * @vc_ether_addr: structure from VIRTCHNL with MAC to check + * + * only update cached hardware MAC for legacy VF drivers on delete + * because we cannot guarantee order/type of MAC from the VF driver + */ +static void +ice_update_legacy_cached_mac(struct ice_vf *vf, + struct virtchnl_ether_addr *vc_ether_addr) +{ + if (!ice_is_vc_addr_legacy(vc_ether_addr) || + ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) + return; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); + ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); +} + +/** * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed * @vf: VF to update * @vc_ether_addr: structure from VIRTCHNL with MAC to delete @@ -3822,16 +3860,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) */ eth_zero_addr(vf->dev_lan_addr.addr); - /* only update cached hardware MAC for legacy VF drivers on delete - * because we cannot guarantee order/type of MAC from the VF driver - */ - if (ice_is_vc_addr_legacy(vc_ether_addr) && - !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) { - ether_addr_copy(vf->dev_lan_addr.addr, - vf->legacy_last_added_umac.addr); - ether_addr_copy(vf->hw_lan_addr.addr, - vf->legacy_last_added_umac.addr); - } + ice_update_legacy_cached_mac(vf, vc_ether_addr); } /** @@ -4400,6 +4429,133 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) return ice_vsi_manage_vlan_stripping(vsi, false); } +static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = { + .get_ver_msg = ice_vc_get_ver_msg, + .get_vf_res_msg = ice_vc_get_vf_res_msg, + .reset_vf = ice_vc_reset_vf_msg, + .add_mac_addr_msg = ice_vc_add_mac_addr_msg, + .del_mac_addr_msg = ice_vc_del_mac_addr_msg, + 
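/* remaining defaults: queue and IRQ setup, RSS, stats, promiscuous mode, VLAN and Flow Director handling */ +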
.cfg_qs_msg = ice_vc_cfg_qs_msg, + .ena_qs_msg = ice_vc_ena_qs_msg, + .dis_qs_msg = ice_vc_dis_qs_msg, + .request_qs_msg = ice_vc_request_qs_msg, + .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, + .config_rss_key = ice_vc_config_rss_key, + .config_rss_lut = ice_vc_config_rss_lut, + .get_stats_msg = ice_vc_get_stats_msg, + .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, + .add_vlan_msg = ice_vc_add_vlan_msg, + .remove_vlan_msg = ice_vc_remove_vlan_msg, + .ena_vlan_stripping = ice_vc_ena_vlan_stripping, + .dis_vlan_stripping = ice_vc_dis_vlan_stripping, + .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, + .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, + .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, +}; + +void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) +{ + *ops = ice_vc_vf_dflt_ops; +} + +static int +ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf, + u8 __always_unused *msg) +{ + return 0; +} + +/** + * ice_vc_repr_add_mac + * @vf: pointer to VF + * @msg: virtchannel message + * + * When port representors are created, we do not add MAC rule + * to firmware, we store it so that PF could report same + * MAC as VF. + */ +static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + struct ice_vsi *vsi; + struct ice_pf *pf; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + pf = vf->pf; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + for (i = 0; i < al->num_elements; i++) { + u8 *mac_addr = al->list[i].addr; + + if (!is_unicast_ether_addr(mac_addr) || + ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) + continue; + + if (vf->pf_set_mac) { + dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto handle_mac_exit; + } + + ice_vfhw_mac_add(vf, &al->list[i]); + vf->num_mac++; + break; + } + +handle_mac_exit: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + v_ret, NULL, 0); +} + +/** + * ice_vc_repr_del_mac - response with success for deleting MAC + * @vf: pointer to VF + * @msg: virtchannel message + * + * Respond with success to not break normal VF flow. + * For legacy VF driver try to update cached MAC address. 
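+ * No MAC filter is removed from hardware here, mirroring ice_vc_repr_add_mac(), which does not program one either.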
+ */ +static int +ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg) +{ + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + + ice_update_legacy_cached_mac(vf, &al->list[0]); + + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf) +{ + return 0; +} + +void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) +{ + ops->add_mac_addr_msg = ice_vc_repr_add_mac; + ops->del_mac_addr_msg = ice_vc_repr_del_mac; + ops->add_vlan_msg = ice_vc_repr_no_action_msg; + ops->remove_vlan_msg = ice_vc_repr_no_action_msg; + ops->ena_vlan_stripping = ice_vc_repr_no_action; + ops->dis_vlan_stripping = ice_vc_repr_no_action; + ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg; +} + /** * ice_vc_process_vf_msg - Process request from VF * @pf: pointer to the PF structure @@ -4413,6 +4569,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) u32 v_opcode = le32_to_cpu(event->desc.cookie_high); s16 vf_id = le16_to_cpu(event->desc.retval); u16 msglen = event->msg_len; + struct ice_vc_vf_ops *ops; u8 *msg = event->msg_buf; struct ice_vf *vf = NULL; struct device *dev; @@ -4436,6 +4593,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) goto error_handler; } + ops = &vf->vc_ops; + /* Perform basic checks on the msg */ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (err) { @@ -4463,75 +4622,75 @@ error_handler: switch (v_opcode) { case VIRTCHNL_OP_VERSION: - err = ice_vc_get_ver_msg(vf, msg); + err = ops->get_ver_msg(vf, msg); break; case VIRTCHNL_OP_GET_VF_RESOURCES: - err = ice_vc_get_vf_res_msg(vf, msg); + err = ops->get_vf_res_msg(vf, msg); if (ice_vf_init_vlan_stripping(vf)) dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n", vf->vf_id); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - ice_vc_reset_vf_msg(vf); + ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: - err = ice_vc_add_mac_addr_msg(vf, msg); + err = ops->add_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_ETH_ADDR: - err = ice_vc_del_mac_addr_msg(vf, msg); + err = ops->del_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - err = ice_vc_cfg_qs_msg(vf, msg); + err = ops->cfg_qs_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_QUEUES: - err = ice_vc_ena_qs_msg(vf, msg); + err = ops->ena_qs_msg(vf, msg); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: - err = ice_vc_dis_qs_msg(vf, msg); + err = ops->dis_qs_msg(vf, msg); break; case VIRTCHNL_OP_REQUEST_QUEUES: - err = ice_vc_request_qs_msg(vf, msg); + err = ops->request_qs_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: - err = ice_vc_cfg_irq_map_msg(vf, msg); + err = ops->cfg_irq_map_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: - err = ice_vc_config_rss_key(vf, msg); + err = ops->config_rss_key(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: - err = ice_vc_config_rss_lut(vf, msg); + err = ops->config_rss_lut(vf, msg); break; case VIRTCHNL_OP_GET_STATS: - err = ice_vc_get_stats_msg(vf, msg); + err = ops->get_stats_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - err = ice_vc_cfg_promiscuous_mode_msg(vf, msg); + err = ops->cfg_promiscuous_mode_msg(vf, msg); break; case VIRTCHNL_OP_ADD_VLAN: - err = ice_vc_add_vlan_msg(vf, msg); + err = ops->add_vlan_msg(vf, msg); break; case VIRTCHNL_OP_DEL_VLAN: - err = 
ice_vc_remove_vlan_msg(vf, msg); + err = ops->remove_vlan_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: - err = ice_vc_ena_vlan_stripping(vf); + err = ops->ena_vlan_stripping(vf); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: - err = ice_vc_dis_vlan_stripping(vf); + err = ops->dis_vlan_stripping(vf); break; case VIRTCHNL_OP_ADD_FDIR_FILTER: - err = ice_vc_add_fdir_fltr(vf, msg); + err = ops->add_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_FDIR_FILTER: - err = ice_vc_del_fdir_fltr(vf, msg); + err = ops->del_fdir_fltr_msg(vf, msg); break; case VIRTCHNL_OP_ADD_RSS_CFG: - err = ice_vc_handle_rss_cfg(vf, msg, true); + err = ops->handle_rss_cfg_msg(vf, msg, true); break; case VIRTCHNL_OP_DEL_RSS_CFG: - err = ice_vc_handle_rss_cfg(vf, msg, false); + err = ops->handle_rss_cfg_msg(vf, msg, false); break; case VIRTCHNL_OP_UNKNOWN: default: @@ -4640,6 +4799,11 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct ice_vf *vf; int ret; + if (ice_is_eswitch_mode_switchdev(pf)) { + dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); + return -EOPNOTSUPP; + } + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 842cb077df86..3115284e5411 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -70,6 +70,32 @@ struct ice_mdd_vf_events { u16 last_printed; }; +struct ice_vf; + +struct ice_vc_vf_ops { + int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); + int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); + void (*reset_vf)(struct ice_vf *vf); + int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*request_qs_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); + int (*config_rss_key)(struct ice_vf *vf, u8 *msg); + int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); + int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); + int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); + int (*ena_vlan_stripping)(struct ice_vf *vf); + int (*dis_vlan_stripping)(struct ice_vf *vf); + int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); + int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); + int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg); +}; + /* VF information structure */ struct ice_vf { struct ice_pf *pf; @@ -111,9 +137,17 @@ struct ice_vf { struct ice_mdd_vf_events mdd_rx_events; struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + + struct ice_repr *repr; + + struct ice_vc_vf_ops vc_ops; + + /* devlink port data */ + struct devlink_port devlink_port; }; #ifdef CONFIG_PCI_IOV +struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); @@ -124,6 +158,9 @@ void ice_free_vfs(struct ice_pf *pf); void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); +void ice_vc_notify_vf_link_state(struct 
ice_vf *vf); +void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops); +void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); @@ -139,6 +176,10 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); +int ice_check_vf_ready_for_cfg(struct ice_vf *vf); + +bool ice_is_vf_disabled(struct ice_vf *vf); + int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); @@ -164,6 +205,9 @@ static inline void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_vc_notify_link_state(struct ice_pf *pf) { } static inline void ice_vc_notify_reset(struct ice_pf *pf) { } +static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { } +static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { } +static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { } static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { } static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } @@ -171,6 +215,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } +static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} + +static inline bool ice_is_vf_disabled(struct ice_vf *vf) +{ + return true; +} + +static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf) +{ + return NULL; +} + static inline bool ice_is_malicious_vf(struct ice_pf __always_unused *pf, struct ice_rq_event_info __always_unused *event, |
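For context, the per-VF ice_vc_vf_ops table introduced by this series is what allows virtchnl behaviour to differ per VF without touching the opcode dispatch in ice_vc_process_vf_msg(). A minimal sketch of how the two setters are meant to be combined is shown below; ice_example_apply_vf_mode() is a hypothetical helper written for illustration only (the series itself calls ice_vc_set_dflt_vf_ops() from ice_set_dflt_settings_vfs() and ice_vc_change_ops_to_repr() from ice_repr_add_for_all_vfs()), and it assumes the usual ice driver headers are in scope.

/* Sketch only: switch a VF between legacy and representor virtchnl
 * handling using the setters added by this series.
 */
static void ice_example_apply_vf_mode(struct ice_vf *vf, bool switchdev)
{
	/* start from the full legacy handler set */
	ice_vc_set_dflt_vf_ops(&vf->vc_ops);

	/* in switchdev mode, overlay the representor variants: MAC
	 * add/delete only update the cached address, while VLAN and
	 * promiscuous-mode requests become no-ops
	 */
	if (switchdev)
		ice_vc_change_ops_to_repr(&vf->vc_ops);
}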