 drivers/net/ethernet/intel/ice/ice.h               |   9
 drivers/net/ethernet/intel/ice/ice_adminq_cmd.h    |   2
 drivers/net/ethernet/intel/ice/ice_flex_pipe.c     |  30
 drivers/net/ethernet/intel/ice/ice_flex_type.h     |   4
 drivers/net/ethernet/intel/ice/ice_lib.c           |   1
 drivers/net/ethernet/intel/ice/ice_main.c          | 242
 drivers/net/ethernet/intel/ice/ice_protocol_type.h |  35
 drivers/net/ethernet/intel/ice/ice_repr.c          |   3
 drivers/net/ethernet/intel/ice/ice_switch.c        | 389
 drivers/net/ethernet/intel/ice/ice_switch.h        |   3
 drivers/net/ethernet/intel/ice/ice_tc_lib.c        | 401
 drivers/net/ethernet/intel/ice/ice_tc_lib.h        |  10
 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c   |  63
 13 files changed, 1110 insertions(+), 82 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 967a90efcb11..bf4ecd9a517c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
 #include <linux/if_bridge.h>
 #include <linux/ctype.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/auxiliary_bus.h>
 #include <linux/avf/virtchnl.h>
 #include <linux/cpu_rmap.h>
@@ -479,6 +480,7 @@ enum ice_pf_flags {
 	ICE_FLAG_NO_MEDIA,
 	ICE_FLAG_FW_LLDP_AGENT,
 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
+	ICE_FLAG_PHY_FW_LOAD_FAILED,
 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
 	ICE_FLAG_LEGACY_RX,
 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
@@ -610,6 +612,13 @@ struct ice_pf {
 struct ice_netdev_priv {
 	struct ice_vsi *vsi;
 	struct ice_repr *repr;
+	/* indirect block callbacks on registered higher level devices
+	 * (e.g. tunnel devices)
+	 *
+	 * tc_indr_block_cb_priv_list is used to look up indirect callback
+	 * private data
+	 */
+	struct list_head tc_indr_block_priv_list;
 };
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index a5425f0dce3f..4eef3488d86f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1185,6 +1185,7 @@ struct ice_aqc_get_link_status_data {
 #define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA	BIT(7)
 	u8 link_cfg_err;
 #define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED	BIT(5)
+#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE	BIT(6)
 #define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT	BIT(7)
 	u8 link_info;
 #define ICE_AQ_LINK_UP			BIT(0)	/* Link Status */
@@ -1268,6 +1269,7 @@ struct ice_aqc_set_event_mask {
 #define ICE_AQ_LINK_EVENT_AN_COMPLETED		BIT(7)
 #define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL	BIT(8)
 #define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED	BIT(9)
+#define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL	BIT(12)
 	u8	reserved1[6];
 };
 
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index e731b46270c3..23cfcceb1536 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1566,6 +1566,30 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
 }
 
 /**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+	u16 i;
+
+	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+		    fv->ew[i].off == ICE_VNI_OFFSET)
+			return ICE_PROF_TUN_UDP;
+
+		/* GRE tunnel will have GRE protocol */
+		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+			return ICE_PROF_TUN_GRE;
+	}
+
+	return ICE_PROF_NON_TUN;
+}
+
+/**
 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
 * @hw: pointer to hardware structure
 * @req_profs: type of profiles requested
@@ -1588,6 +1612,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
 	ice_seg = hw->seg;
 	do {
+		enum ice_prof_type prof_type;
 		u32 offset;
 
 		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
@@ -1595,7 +1620,10 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
 		ice_seg = NULL;
 
 		if (fv) {
-			if (req_profs & ICE_PROF_NON_TUN)
+			/* Determine field vector type */
+			prof_type = ice_get_sw_prof_type(hw, fv);
+
+			if (req_profs & prof_type)
 				set_bit((u16)offset, bm);
 		}
 	} while (fv);
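The new ice_get_sw_prof_type() classifies a switch field vector by scanning its extraction words: a word extracted through the UDP-of-tunnel protocol ID at the VNI offset marks a UDP-tunnel profile, and any GRE word marks a GRE profile. A standalone sketch of the same scan, with simplified stand-in types (the constants mirror ICE_PROT_UDP_OF, ICE_PROT_GRE_OF and ICE_VNI_OFFSET from this series; the struct layout is illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* values mirrored from the patch; everything else is illustrative */
    #define PROT_UDP_OF 52
    #define PROT_GRE_OF 64
    #define VNI_OFFSET  12

    enum prof_type { PROF_NON_TUN = 0x1, PROF_TUN_UDP = 0x2, PROF_TUN_GRE = 0x4 };

    struct fv_word { uint8_t prot_id; uint16_t off; };

    static enum prof_type classify(const struct fv_word *ew, int nwords)
    {
        for (int i = 0; i < nwords; i++) {
            /* a UDP tunnel profile extracts the VNI via UDP_OF */
            if (ew[i].prot_id == PROT_UDP_OF && ew[i].off == VNI_OFFSET)
                return PROF_TUN_UDP;
            /* any GRE_OF word marks a GRE profile */
            if (ew[i].prot_id == PROT_GRE_OF)
                return PROF_TUN_GRE;
        }
        return PROF_NON_TUN;
    }

    int main(void)
    {
        struct fv_word udp_tun[] = { { 1, 0 }, { PROT_UDP_OF, VNI_OFFSET } };

        printf("%#x\n", classify(udp_tun, 2)); /* prints 0x2 (PROF_TUN_UDP) */
        return 0;
    }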
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 120bcebaa080..0f572a36d021 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -373,6 +373,7 @@ struct ice_pkg_enum {
 enum ice_tunnel_type {
 	TNL_VXLAN = 0,
 	TNL_GENEVE,
+	TNL_GRETAP,
 	__TNL_TYPE_CNT,
 	TNL_LAST = 0xFF,
 	TNL_ALL = 0xFF,
@@ -614,6 +615,9 @@ struct ice_chs_chg {
 
 enum ice_prof_type {
 	ICE_PROF_NON_TUN = 0x1,
+	ICE_PROF_TUN_UDP = 0x2,
+	ICE_PROF_TUN_GRE = 0x4,
+	ICE_PROF_TUN_ALL = 0x6,
 	ICE_PROF_ALL = 0xFF,
 };
 #endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 77dceab9fbbe..159c52b9b9d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1983,6 +1983,7 @@ static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
 	case ICE_TX_CONTAINER:
 		if (rc->tx_ring)
 			return rc->tx_ring->q_vector;
+		break;
 	default:
 		break;
 	}
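ICE_PROF_TUN_ALL (0x6) is deliberately the OR of the UDP (0x2) and GRE (0x4) tunnel bits, so one ice_get_sw_fv_bitmap() pass can select both tunnel families, while ICE_PROF_ALL (0xFF) matches everything. A compile-time restatement of that relationship (hypothetical test code, not part of the patch):

    #include <assert.h>

    enum ice_prof_type {
        ICE_PROF_NON_TUN = 0x1,
        ICE_PROF_TUN_UDP = 0x2,
        ICE_PROF_TUN_GRE = 0x4,
        ICE_PROF_TUN_ALL = 0x6,
        ICE_PROF_ALL = 0xFF,
    };

    int main(void)
    {
        /* the "all tunnels" request covers exactly the UDP and GRE bits */
        static_assert(ICE_PROF_TUN_ALL == (ICE_PROF_TUN_UDP | ICE_PROF_TUN_GRE),
                      "tunnel mask mismatch");
        /* ICE_PROF_ALL matches every profile type, tunneled or not */
        static_assert((ICE_PROF_ALL & ICE_PROF_NON_TUN) != 0, "non-tun not covered");
        return 0;
    }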
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9ba22778011d..66112addfb9a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -58,6 +58,12 @@ static void ice_vsi_release_all(struct ice_pf *pf);
 static int ice_rebuild_channels(struct ice_pf *pf);
 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
 
+static int
+ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+		     void *cb_priv, enum tc_setup_type type, void *type_data,
+		     void *data,
+		     void (*cleanup)(struct flow_block_cb *block_cb));
+
 bool netif_is_ice(struct net_device *dev)
 {
 	return dev && (dev->netdev_ops == &ice_netdev_ops);
@@ -931,6 +937,29 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
 }
 
 /**
+ * ice_check_phy_fw_load - check if PHY FW load failed
+ * @pf: pointer to PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * check if external PHY FW load failed and print an error message if it did
+ */
+static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
+{
+	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
+		return;
+	}
+
+	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
+		return;
+
+	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
+	}
+}
+
+/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
@@ -963,6 +992,20 @@ static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
 }
 
 /**
+ * ice_check_link_cfg_err - check if link configuration failed
+ * @pf: pointer to the PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * print if any link configuration failure happens due to the value in the
+ * link_cfg_err parameter in the link info structure
+ */
+static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
+{
+	ice_check_module_power(pf, link_cfg_err);
+	ice_check_phy_fw_load(pf, link_cfg_err);
+}
+
+/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
@@ -997,7 +1040,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
 			pi->lport, ice_stat_str(status),
 			ice_aq_str(pi->hw->adminq.sq_last_status));
 
-	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
 
 	/* Check if the link state is up after updating link info, and treat
 	 * this event as an UP event since the link is actually UP now.
@@ -1075,7 +1118,8 @@ static int ice_init_link_events(struct ice_port_info *pi)
 	u16 mask;
 
 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
-		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
+		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
 
 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
@@ -2146,7 +2190,7 @@ static void ice_check_media_subtask(struct ice_pf *pf)
 	if (err)
 		return;
 
-	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
 
 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
@@ -3394,6 +3438,63 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
 }
 
 /**
+ * ice_rep_indr_tc_block_unbind
+ * @cb_priv: indirection block private data
+ */
+static void ice_rep_indr_tc_block_unbind(void *cb_priv)
+{
+	struct ice_indr_block_priv *indr_priv = cb_priv;
+
+	list_del(&indr_priv->list);
+	kfree(indr_priv);
+}
+
+/**
+ * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
+ * @vsi: VSI struct which has the netdev
+ */
+static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
+{
+	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
+
+	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
+				 ice_rep_indr_tc_block_unbind);
+}
+
+/**
+ * ice_tc_indir_block_remove - clean indirect TC block notifications
+ * @pf: PF structure
+ */
+static void ice_tc_indir_block_remove(struct ice_pf *pf)
+{
+	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+	if (!pf_vsi)
+		return;
+
+	ice_tc_indir_block_unregister(pf_vsi);
+}
+
+/**
+ * ice_tc_indir_block_register - Register TC indirect block notifications
+ * @vsi: VSI struct which has the netdev
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_tc_indir_block_register(struct ice_vsi *vsi)
+{
+	struct ice_netdev_priv *np;
+
+	if (!vsi || !vsi->netdev)
+		return -EINVAL;
+
+	np = netdev_priv(vsi->netdev);
+
+	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
+	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
+}
+
+/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
@@ -3401,6 +3502,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
 */
 static int ice_setup_pf_sw(struct ice_pf *pf)
 {
+	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_vsi *vsi;
 	int status = 0;
 
@@ -3422,6 +3524,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 	/* netdev has to be configured before setting frame size */
 	ice_vsi_cfg_frame_size(vsi);
 
+	/* init indirect block notifications */
+	status = ice_tc_indir_block_register(vsi);
+	if (status) {
+		dev_err(dev, "Failed to register netdev notifier\n");
+		goto unroll_cfg_netdev;
+	}
+
 	/* Setup DCB netlink interface */
 	ice_dcbnl_setup(vsi);
 
@@ -3433,7 +3542,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 
 	status = ice_set_cpu_rx_rmap(vsi);
 	if (status) {
-		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+		dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
 			vsi->vsi_num, status);
 		status = -EINVAL;
 		goto unroll_napi_add;
@@ -3446,8 +3555,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 
 free_cpu_rx_map:
 	ice_free_cpu_rx_rmap(vsi);
-
 unroll_napi_add:
+	ice_tc_indir_block_unregister(vsi);
+unroll_cfg_netdev:
 	if (vsi) {
 		ice_napi_del(vsi);
 		if (vsi->netdev) {
@@ -4528,7 +4638,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 
 	ice_init_link_dflt_override(pf->hw.port_info);
 
-	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
+	ice_check_link_cfg_err(pf,
+			       pf->hw.port_info->phy.link_info.link_cfg_err);
 
 	/* if media available, initialize PHY settings */
 	if (pf->hw.port_info->phy.link_info.link_info &
@@ -4721,6 +4832,8 @@ static void ice_remove(struct pci_dev *pdev)
 		msleep(100);
 	}
 
+	ice_tc_indir_block_remove(pf);
+
 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
 		ice_free_vfs(pf);
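flow_indr_dev_register() hands the flow-offload core one callback plus a private pointer, and the core then replays block setup requests coming from netdevs the driver does not own (tunnel and VLAN uppers). A userspace-style mock of that contract, with simplified stand-ins for the kernel types (the names here are hypothetical, only the shape mirrors ice_indr_setup_tc_cb):

    #include <stdio.h>

    enum setup_type { SETUP_BLOCK, SETUP_OTHER };

    /* one entry point, discriminated by 'type', like ice_indr_setup_tc_cb */
    typedef int (*indr_cb)(const char *netdev, void *cb_priv, enum setup_type type);

    static indr_cb registered_cb;
    static void *registered_priv;

    static int mock_flow_indr_dev_register(indr_cb cb, void *priv)
    {
        registered_cb = cb;
        registered_priv = priv;
        return 0;
    }

    /* the core calls this when e.g. a vxlan device gets a clsact ingress block */
    static int mock_offload_request(const char *netdev, enum setup_type type)
    {
        return registered_cb ? registered_cb(netdev, registered_priv, type) : -95;
    }

    static int driver_cb(const char *netdev, void *cb_priv, enum setup_type type)
    {
        if (type != SETUP_BLOCK)
            return -95; /* -EOPNOTSUPP */
        printf("bind block for %s (priv=%p)\n", netdev, cb_priv);
        return 0;
    }

    int main(void)
    {
        int np = 42; /* stands in for struct ice_netdev_priv */

        mock_flow_indr_dev_register(driver_cb, &np);
        return mock_offload_request("vxlan0", SETUP_BLOCK);
    }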
@@ -8155,6 +8268,121 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 	return -EOPNOTSUPP;
 }
 
+static struct ice_indr_block_priv *
+ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
+			   struct net_device *netdev)
+{
+	struct ice_indr_block_priv *cb_priv;
+
+	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
+		if (!cb_priv->netdev)
+			return NULL;
+		if (cb_priv->netdev == netdev)
+			return cb_priv;
+	}
+	return NULL;
+}
+
+static int
+ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
+			void *indr_priv)
+{
+	struct ice_indr_block_priv *priv = indr_priv;
+	struct ice_netdev_priv *np = priv->np;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return ice_setup_tc_cls_flower(np, priv->netdev,
+					       (struct flow_cls_offload *)
+					       type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
+			struct ice_netdev_priv *np,
+			struct flow_block_offload *f, void *data,
+			void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	struct ice_indr_block_priv *indr_priv;
+	struct flow_block_cb *block_cb;
+
+	if (!ice_is_tunnel_supported(netdev) &&
+	    !(is_vlan_dev(netdev) &&
+	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
+		return -EOPNOTSUPP;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		indr_priv = ice_indr_block_priv_lookup(np, netdev);
+		if (indr_priv)
+			return -EEXIST;
+
+		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
+		if (!indr_priv)
+			return -ENOMEM;
+
+		indr_priv->netdev = netdev;
+		indr_priv->np = np;
+		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
+
+		block_cb =
+			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
+						 indr_priv, indr_priv,
+						 ice_rep_indr_tc_block_unbind,
+						 f, netdev, sch, data, np,
+						 cleanup);
+
+		if (IS_ERR(block_cb)) {
+			list_del(&indr_priv->list);
+			kfree(indr_priv);
+			return PTR_ERR(block_cb);
+		}
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
+		break;
+	case FLOW_BLOCK_UNBIND:
+		indr_priv = ice_indr_block_priv_lookup(np, netdev);
+		if (!indr_priv)
+			return -ENOENT;
+
+		block_cb = flow_block_cb_lookup(f->block,
+						ice_indr_setup_block_cb,
+						indr_priv);
+		if (!block_cb)
+			return -ENOENT;
+
+		flow_indr_block_cb_remove(block_cb, f);
+
+		list_del(&block_cb->driver_list);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int
+ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+		     void *cb_priv, enum tc_setup_type type, void *type_data,
+		     void *data,
+		     void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
+					       data, cleanup);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 /**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
@@ -8213,7 +8441,7 @@ int ice_open_internal(struct net_device *netdev)
 		return -EIO;
 	}
 
-	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
 
 	/* Set PHY if there is media, otherwise, turn off PHY */
 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 0b220dfa7457..dc1b0e9e6df5 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -37,10 +37,22 @@ enum ice_protocol_type {
 	ICE_TCP_IL,
 	ICE_UDP_OF,
 	ICE_UDP_ILOS,
+	ICE_VXLAN,
+	ICE_GENEVE,
+	ICE_NVGRE,
+	ICE_VXLAN_GPE,
 	ICE_SCTP_IL,
 	ICE_PROTOCOL_LAST
 };
 
+enum ice_sw_tunnel_type {
+	ICE_NON_TUN = 0,
+	ICE_SW_TUN_VXLAN,
+	ICE_SW_TUN_GENEVE,
+	ICE_SW_TUN_NVGRE,
+	ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
+};
+
 /* Decoders for ice_prot_id:
 * - F: First
 * - I: Inner
@@ -74,6 +86,8 @@ enum ice_prot_id {
 	ICE_PROT_INVALID	= 255  /* when offset == ICE_FV_OFFSET_INVAL */
 };
 
+#define ICE_VNI_OFFSET		12 /* offset of VNI from ICE_PROT_UDP_OF */
+
 #define ICE_MAC_OFOS_HW		1
 #define ICE_MAC_IL_HW		4
 #define ICE_ETYPE_OL_HW		9
@@ -85,8 +99,15 @@ enum ice_prot_id {
 #define ICE_IPV6_IL_HW		41
 #define ICE_TCP_IL_HW		49
 #define ICE_UDP_ILOS_HW		53
+#define ICE_GRE_OF_HW		64
 
 #define ICE_UDP_OF_HW	52 /* UDP Tunnels */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+
+#define ICE_MDID_SIZE 2
+#define ICE_TUN_FLAG_MDID 21
+#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
+#define ICE_TUN_FLAG_MASK 0xFF
 
 #define ICE_TUN_FLAG_FV_IND 2
 
@@ -152,6 +173,18 @@ struct ice_l4_hdr {
 	__be16 check;
 };
 
+struct ice_udp_tnl_hdr {
+	__be16 field;
+	__be16 proto_type;
+	__be32 vni;	/* only use lower 24-bits */
+};
+
+struct ice_nvgre_hdr {
+	__be16 flags;
+	__be16 protocol;
+	__be32 tni_flow;
+};
+
 union ice_prot_hdr {
 	struct ice_ether_hdr eth_hdr;
 	struct ice_ethtype_hdr ethertype;
@@ -160,6 +193,8 @@ union ice_prot_hdr {
 	struct ice_ipv6_hdr ipv6_hdr;
 	struct ice_l4_hdr l4_hdr;
 	struct ice_sctp_hdr sctp_hdr;
+	struct ice_udp_tnl_hdr tnl_hdr;
+	struct ice_nvgre_hdr nvgre_hdr;
 };
 
 /* This is mapping table entry that maps every word within a given protocol
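The tunnel flag the recipes match on lives in the packet metadata, which is addressed in 2-byte metadata words: word 21, hence byte offset 2 * 21 = 42, matched against the one-byte mask 0xFF. A compile-time restatement of that arithmetic (standalone check, not driver code):

    #include <assert.h>

    #define ICE_MDID_SIZE         2
    #define ICE_TUN_FLAG_MDID     21
    #define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
    #define ICE_TUN_FLAG_MASK     0xFF

    int main(void)
    {
        /* metadata word 21 starts 42 bytes into the metadata area */
        static_assert(ICE_TUN_FLAG_MDID_OFF == 42, "tunnel flag offset");
        /* ice_add_special_words() pairs this offset with a one-byte match mask */
        static_assert(ICE_TUN_FLAG_MASK == 0xFF, "low byte of the metadata word");
        return 0;
    }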
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index c49eeea7cb67..af8e6ef5f571 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -267,6 +267,9 @@ static int ice_repr_add(struct ice_vf *vf)
 	if (err)
 		goto err_devlink;
 
+	repr->netdev->min_mtu = ETH_MIN_MTU;
+	repr->netdev->max_mtu = ICE_MAX_MTU;
+
 	err = ice_repr_reg_netdev(repr->netdev);
 	if (err)
 		goto err_netdev;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 2742e1c1e337..793f4a9fc2cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -35,6 +35,192 @@ struct ice_dummy_pkt_offsets {
 	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
 };
 
+static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+	{ ICE_MAC_OFOS,		0 },
+	{ ICE_ETYPE_OL,		12 },
+	{ ICE_IPV4_OFOS,	14 },
+	{ ICE_NVGRE,		34 },
+	{ ICE_MAC_IL,		42 },
+	{ ICE_IPV4_IL,		56 },
+	{ ICE_TCP_IL,		76 },
+	{ ICE_PROTOCOL_LAST,	0 },
+};
+
+static const u8 dummy_gre_tcp_packet[] = {
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
+
+	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x2F, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00,
+
+	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x06, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x50, 0x02, 0x20, 0x00,
+	0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+	{ ICE_MAC_OFOS,		0 },
+	{ ICE_ETYPE_OL,		12 },
+	{ ICE_IPV4_OFOS,	14 },
+	{ ICE_NVGRE,		34 },
+	{ ICE_MAC_IL,		42 },
+	{ ICE_IPV4_IL,		56 },
+	{ ICE_UDP_ILOS,		76 },
+	{ ICE_PROTOCOL_LAST,	0 },
+};
+
+static const u8 dummy_gre_udp_packet[] = {
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
+
+	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x2F, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00,
+
+	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x11, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
+	0x00, 0x08, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+	{ ICE_MAC_OFOS,		0 },
+	{ ICE_ETYPE_OL,		12 },
+	{ ICE_IPV4_OFOS,	14 },
+	{ ICE_UDP_OF,		34 },
+	{ ICE_VXLAN,		42 },
+	{ ICE_GENEVE,		42 },
+	{ ICE_VXLAN_GPE,	42 },
+	{ ICE_MAC_IL,		50 },
+	{ ICE_IPV4_IL,		64 },
+	{ ICE_TCP_IL,		84 },
+	{ ICE_PROTOCOL_LAST,	0 },
+};
+
+static const u8 dummy_udp_tun_tcp_packet[] = {
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
+
+	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
+	0x00, 0x01, 0x00, 0x00,
+	0x40, 0x11, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
+	0x00, 0x46, 0x00, 0x00,
+
+	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00,
+
+	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
+	0x00, 0x01, 0x00, 0x00,
+	0x40, 0x06, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x50, 0x02, 0x20, 0x00,
+	0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+	{ ICE_MAC_OFOS,		0 },
+	{ ICE_ETYPE_OL,		12 },
+	{ ICE_IPV4_OFOS,	14 },
+	{ ICE_UDP_OF,		34 },
+	{ ICE_VXLAN,		42 },
+	{ ICE_GENEVE,		42 },
+	{ ICE_VXLAN_GPE,	42 },
+	{ ICE_MAC_IL,		50 },
+	{ ICE_IPV4_IL,		64 },
+	{ ICE_UDP_ILOS,		84 },
+	{ ICE_PROTOCOL_LAST,	0 },
+};
+
+static const u8 dummy_udp_tun_udp_packet[] = {
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
+
+	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
+	0x00, 0x01, 0x00, 0x00,
+	0x00, 0x11, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
+	0x00, 0x3a, 0x00, 0x00,
+
+	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00,
+
+	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
+	0x00, 0x01, 0x00, 0x00,
+	0x00, 0x11, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+
+	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
+	0x00, 0x08, 0x00, 0x00,
+};
+
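The offset tables above are plain byte positions in the dummy frames: 14 bytes of outer Ethernet (EtherType at 12), a 20-byte outer IPv4 header, then either the 8-byte NVGRE header at 34 or the 8-byte UDP plus 8-byte VXLAN/Geneve headers, with the inner stack following. A standalone check of that accounting (illustrative, using plain header sizes rather than the driver's structs):

    #include <assert.h>

    enum {
        ETH_LEN   = 14, /* outer MAC + EtherType */
        IPV4_LEN  = 20, /* no options in the dummy packets */
        GRE_LEN   = 8,  /* NVGRE: flags + protocol + TNI/flow */
        UDP_LEN   = 8,
        VXLAN_LEN = 8,  /* VXLAN/Geneve tunnel header */
    };

    int main(void)
    {
        /* GRE path: NVGRE at 34, inner MAC 42, inner IPv4 56, inner TCP 76 */
        static_assert(ETH_LEN + IPV4_LEN == 34, "NVGRE at 34");
        static_assert(34 + GRE_LEN == 42, "inner MAC at 42");
        static_assert(42 + ETH_LEN == 56, "inner IPv4 at 56");
        static_assert(56 + IPV4_LEN == 76, "inner L4 at 76");

        /* UDP tunnel path: UDP at 34, VXLAN 42, inner MAC 50, IPv4 64, L4 84 */
        static_assert(34 + UDP_LEN == 42, "VXLAN/Geneve at 42");
        static_assert(42 + VXLAN_LEN == 50, "inner MAC at 50");
        static_assert(50 + ETH_LEN == 64, "inner IPv4 at 64");
        static_assert(64 + IPV4_LEN == 84, "inner L4 at 84");
        return 0;
    }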
 /* offset info for MAC + IPv4 + UDP dummy packet */
 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
 	{ ICE_MAC_OFOS,		0 },
@@ -1177,8 +1363,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
 	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
 					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
 					   GFP_KERNEL);
-	if (!recps[rid].root_buf)
+	if (!recps[rid].root_buf) {
+		status = ICE_ERR_NO_MEMORY;
 		goto err_unroll;
+	}
 
 	/* Copy result indexes */
 	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
@@ -3582,6 +3770,9 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
 	{ ICE_TCP_IL,		{ 0, 2 } },
 	{ ICE_UDP_OF,		{ 0, 2 } },
 	{ ICE_UDP_ILOS,		{ 0, 2 } },
+	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
+	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
+	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
 };
 
 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
@@ -3596,6 +3787,9 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
 	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
 	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
 	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
+	{ ICE_VXLAN,		ICE_UDP_OF_HW },
+	{ ICE_GENEVE,		ICE_UDP_OF_HW },
+	{ ICE_NVGRE,		ICE_GRE_OF_HW },
 };
 
 /**
@@ -3915,12 +4109,11 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
- * @match_tun_mask: tunnel mask that needs to be programmed
 * @profiles: bitmap of profiles that will be associated.
 */
 static enum ice_status
 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
-		  u16 match_tun_mask, unsigned long *profiles)
+		  unsigned long *profiles)
 {
 	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
 	struct ice_aqc_recipe_data_elem *tmp;
@@ -4128,15 +4321,6 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
 		}
 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
 
-		/* To differentiate among different UDP tunnels, a meta data ID
-		 * flag is used.
-		 */
-		if (match_tun_mask) {
-			buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
-			buf[recps].content.mask[i] =
-				cpu_to_le16(match_tun_mask);
-		}
-
 		recps++;
 		rm->root_rid = (u8)rid;
 	}
@@ -4199,6 +4383,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
 		recp->chain_idx = entry->chain_idx;
 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
 		recp->n_grp_count = rm->n_grp_count;
+		recp->tun_type = rm->tun_type;
 		recp->recp_created = true;
 	}
 	rm->root_buf = buf;
@@ -4279,6 +4464,55 @@ free_mem:
 	return status;
 }
 
+/**
+ * ice_tun_type_match_word - determine if tun type needs a match mask
+ * @tun_type: tunnel type
+ * @mask: mask to be used for the tunnel
+ */
+static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
+{
+	switch (tun_type) {
+	case ICE_SW_TUN_GENEVE:
+	case ICE_SW_TUN_VXLAN:
+	case ICE_SW_TUN_NVGRE:
+		*mask = ICE_TUN_FLAG_MASK;
+		return true;
+
+	default:
+		*mask = 0;
+		return false;
+	}
+}
+
+/**
+ * ice_add_special_words - Add words that are not protocols, such as metadata
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @lkup_exts: lookup word structure
+ */
+static enum ice_status
+ice_add_special_words(struct ice_adv_rule_info *rinfo,
+		      struct ice_prot_lkup_ext *lkup_exts)
+{
+	u16 mask;
+
+	/* If this is a tunneled packet, then add recipe index to match the
+	 * tunnel bit in the packet metadata flags.
+	 */
+	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
+		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+			u8 word = lkup_exts->n_val_words++;
+
+			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
+			lkup_exts->field_mask[word] = mask;
+		} else {
+			return ICE_ERR_MAX_LIMIT;
+		}
+	}
+
+	return 0;
+}
+
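ice_add_special_words() spends one of the rule's ICE_MAX_CHAIN_WORDS lookup slots on the metadata match and fails with ICE_ERR_MAX_LIMIT when none is free. A reduced model of that append-with-capacity check (sizes and error codes are stand-ins, same shape as the driver logic):

    #include <stdio.h>

    #define MAX_CHAIN_WORDS 16  /* stand-in for ICE_MAX_CHAIN_WORDS */
    #define META_PROT_ID    255 /* ICE_META_DATA_ID_HW */
    #define TUN_FLAG_OFF    42  /* ICE_TUN_FLAG_MDID_OFF */
    #define TUN_FLAG_MASK   0xFF

    struct lkup_ext {
        int n_val_words;
        struct { int prot_id; int off; int mask; } fv_words[MAX_CHAIN_WORDS];
    };

    static int add_tunnel_word(struct lkup_ext *e)
    {
        if (e->n_val_words >= MAX_CHAIN_WORDS)
            return -1; /* ICE_ERR_MAX_LIMIT in the driver */

        int w = e->n_val_words++;

        e->fv_words[w].prot_id = META_PROT_ID;
        e->fv_words[w].off = TUN_FLAG_OFF;
        e->fv_words[w].mask = TUN_FLAG_MASK;
        return 0;
    }

    int main(void)
    {
        struct lkup_ext e = { 0 };

        add_tunnel_word(&e);
        printf("words=%d prot=%d off=%d\n", e.n_val_words,
               e.fv_words[0].prot_id, e.fv_words[0].off);
        return 0;
    }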
 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
@@ -4288,9 +4522,30 @@ static void
 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 			 unsigned long *bm)
 {
+	enum ice_prof_type prof_type;
+
 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
 
-	ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
+	switch (rinfo->tun_type) {
+	case ICE_NON_TUN:
+		prof_type = ICE_PROF_NON_TUN;
+		break;
+	case ICE_ALL_TUNNELS:
+		prof_type = ICE_PROF_TUN_ALL;
+		break;
+	case ICE_SW_TUN_GENEVE:
+	case ICE_SW_TUN_VXLAN:
+		prof_type = ICE_PROF_TUN_UDP;
+		break;
+	case ICE_SW_TUN_NVGRE:
+		prof_type = ICE_PROF_TUN_GRE;
+		break;
+	default:
+		prof_type = ICE_PROF_ALL;
+		break;
+	}
+
+	ice_get_sw_fv_bitmap(hw, prof_type, bm);
 }
 
 /**
@@ -4315,7 +4570,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 	struct ice_sw_fv_list_entry *tmp;
 	enum ice_status status = 0;
 	struct ice_sw_recipe *rm;
-	u16 match_tun_mask = 0;
 	u8 i;
 
 	if (!lkups_cnt)
@@ -4365,6 +4619,13 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 	if (status)
 		goto err_unroll;
 
+	/* Create any special protocol/offset pairs, such as looking at tunnel
+	 * bits by extracting metadata
+	 */
+	status = ice_add_special_words(rinfo, lkup_exts);
+	if (status)
+		goto err_free_lkup_exts;
+
 	/* Group match words into recipes using preferred recipe grouping
 	 * criteria.
 	 */
@@ -4396,7 +4657,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 		goto err_unroll;
 
 	/* Recipe we need does not exist, add a recipe */
-	status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
+	status = ice_add_sw_recipe(hw, rm, profiles);
 	if (status)
 		goto err_unroll;
 
@@ -4466,12 +4727,14 @@ err_free_lkup_exts:
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
+ * @tun_type: tunnel type
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: pointer to receive the pointer to the offsets for the packet
 */
 static void
 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+		      enum ice_sw_tunnel_type tun_type,
 		      const u8 **pkt, u16 *pkt_len,
 		      const struct ice_dummy_pkt_offsets **offsets)
 {
@@ -4495,6 +4758,35 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 			ipv6 = true;
 	}
 
+	if (tun_type == ICE_SW_TUN_NVGRE) {
+		if (tcp) {
+			*pkt = dummy_gre_tcp_packet;
+			*pkt_len = sizeof(dummy_gre_tcp_packet);
+			*offsets = dummy_gre_tcp_packet_offsets;
+			return;
+		}
+
+		*pkt = dummy_gre_udp_packet;
+		*pkt_len = sizeof(dummy_gre_udp_packet);
+		*offsets = dummy_gre_udp_packet_offsets;
+		return;
+	}
+
+	if (tun_type == ICE_SW_TUN_VXLAN ||
+	    tun_type == ICE_SW_TUN_GENEVE) {
+		if (tcp) {
+			*pkt = dummy_udp_tun_tcp_packet;
+			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
+			*offsets = dummy_udp_tun_tcp_packet_offsets;
+			return;
+		}
+
+		*pkt = dummy_udp_tun_udp_packet;
+		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
+		*offsets = dummy_udp_tun_udp_packet_offsets;
+		return;
+	}
+
 	if (udp && !ipv6) {
 		if (vlan) {
 			*pkt = dummy_vlan_udp_packet;
@@ -4615,6 +4907,13 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 		case ICE_SCTP_IL:
 			len = sizeof(struct ice_sctp_hdr);
 			break;
+		case ICE_NVGRE:
+			len = sizeof(struct ice_nvgre_hdr);
+			break;
+		case ICE_VXLAN:
+		case ICE_GENEVE:
+			len = sizeof(struct ice_udp_tnl_hdr);
+			break;
 		default:
 			return ICE_ERR_PARAM;
 		}
@@ -4645,6 +4944,48 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 }
 
 /**
+ * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
+ * @hw: pointer to the hardware structure
+ * @tun_type: tunnel type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
+{
+	u16 open_port, i;
+
+	switch (tun_type) {
+	case ICE_SW_TUN_VXLAN:
+	case ICE_SW_TUN_GENEVE:
+		if (!ice_get_open_tunnel_port(hw, &open_port))
+			return ICE_ERR_CFG;
+		break;
+
+	default:
+		/* Nothing needs to be done for this tunnel type */
+		return 0;
+	}
+
+	/* Find the outer UDP protocol header and insert the port number */
+	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+		if (offsets[i].type == ICE_UDP_OF) {
+			struct ice_l4_hdr *hdr;
+			u16 offset;
+
+			offset = offsets[i].offset;
+			hdr = (struct ice_l4_hdr *)&pkt[offset];
+			hdr->dst_port = cpu_to_be16(open_port);
+
+			return 0;
+		}
+	}
+
+	return ICE_ERR_CFG;
+}
+
+/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -4678,6 +5019,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 				break;
 		}
 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
+		    rinfo->tun_type == list_itr->rule_info.tun_type &&
 		    lkups_matched)
 			return list_itr;
 	}
@@ -4852,7 +5194,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 		return ICE_ERR_PARAM;
 
 	/* make sure that we can locate a dummy packet */
-	ice_find_dummy_packet(lkups, lkups_cnt, &pkt, &pkt_len,
+	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
 			      &pkt_offsets);
 	if (!pkt) {
 		status = ICE_ERR_PARAM;
@@ -4963,6 +5305,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 	if (status)
 		goto err_ice_add_adv_rule;
 
+	if (rinfo->tun_type != ICE_NON_TUN) {
+		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
+						 s_rule->pdata.lkup_tx_rx.hdr,
+						 pkt_offsets);
+		if (status)
+			goto err_ice_add_adv_rule;
+	}
+
 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
 				 NULL);
@@ -5198,6 +5548,13 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 		return ICE_ERR_CFG;
 	}
 
+	/* Create any special protocol/offset pairs, such as looking at tunnel
+	 * bits by extracting metadata
+	 */
+	status = ice_add_special_words(rinfo, &lkup_exts);
+	if (status)
+		return status;
+
 	rid = ice_find_recp(hw, &lkup_exts);
 	/* If did not find a recipe that match the existing criteria */
 	if (rid == ICE_MAX_NUM_RECIPES)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c4dd2062c469..d8a38906f16f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -171,6 +171,7 @@ struct ice_adv_rule_flags_info {
 };
 
 struct ice_adv_rule_info {
+	enum ice_sw_tunnel_type tun_type;
 	struct ice_sw_act_ctrl sw_act;
 	u32 priority;
 	u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
@@ -211,6 +212,8 @@ struct ice_sw_recipe {
 	/* Bit map specifying the IDs associated with this group of recipe */
 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
 
+	enum ice_sw_tunnel_type tun_type;
+
 	/* List of type ice_fltr_mgmt_list_entry or adv_rule */
 	u8 adv_rule;
 	struct list_head filt_rules;
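ice_fill_adv_packet_tun() patches the currently open VXLAN/Geneve UDP port into the dummy packet by walking the offset table until it finds the outer UDP entry. A userspace sketch of the same walk over mock data (types and names simplified; 4789 is simply the conventional VXLAN port already present in the dummy packets as 0x12b5):

    #include <stdint.h>
    #include <stdio.h>

    enum proto { MAC_OFOS, IPV4_OFOS, UDP_OF, VXLAN, PROTOCOL_LAST };

    struct pkt_offset { enum proto type; uint16_t offset; };

    /* write dst_port (bytes 2..3 of the UDP header) in network byte order */
    static int set_tunnel_port(uint8_t *pkt, const struct pkt_offset *off,
                               uint16_t port)
    {
        for (int i = 0; off[i].type != PROTOCOL_LAST; i++) {
            if (off[i].type == UDP_OF) {
                pkt[off[i].offset + 2] = port >> 8;
                pkt[off[i].offset + 3] = port & 0xFF;
                return 0;
            }
        }
        return -1; /* no outer UDP header in this template */
    }

    int main(void)
    {
        const struct pkt_offset offsets[] = {
            { MAC_OFOS, 0 }, { IPV4_OFOS, 14 }, { UDP_OF, 34 },
            { VXLAN, 42 }, { PROTOCOL_LAST, 0 },
        };
        uint8_t pkt[64] = { 0 };

        set_tunnel_port(pkt, offsets, 4789);
        printf("%02x %02x\n", pkt[36], pkt[37]); /* prints "12 b5" */
        return 0;
    }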
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 725caa160b13..e5d23feb6701 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -3,8 +3,9 @@
 
 #include "ice.h"
 #include "ice_tc_lib.h"
-#include "ice_lib.h"
 #include "ice_fltr.h"
+#include "ice_lib.h"
+#include "ice_protocol_type.h"
 
 /**
 * ice_tc_count_lkups - determine lookup count for switch filter
@@ -20,7 +21,21 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 {
 	int lkups_cnt = 0;
 
-	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
+	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
+		lkups_cnt++;
+
+	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
+		lkups_cnt++;
+
+	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
+		lkups_cnt++;
+
+	/* currently inner etype filter isn't supported */
+	if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
+	    fltr->tunnel_type == TNL_LAST)
 		lkups_cnt++;
 
 	/* are MAC fields specified? */
@@ -32,10 +47,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 		lkups_cnt++;
 
 	/* are IPv[4|6] fields specified? */
-	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4))
-		lkups_cnt++;
-	else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
-			  ICE_TC_FLWR_FIELD_SRC_IPV6))
+	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
+		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
 		lkups_cnt++;
 
 	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
@@ -46,6 +59,148 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
 	return lkups_cnt;
 }
 
+static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
+{
+	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
+}
+
+static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
+{
+	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
+}
+
+static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
+{
+	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
+}
+
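ice_tc_count_lkups() sizes the lookup array by charging one element per matched slice: one for the tunnel key, one per outer (ENC) L3 block, one for the outer L4 port, and likewise for the inner fields. A reduced model of that counting (the bit values here are illustrative, not the driver's):

    #include <stdio.h>

    #define F_TENANT_ID     (1u << 0)
    #define F_ENC_DEST_IPV4 (1u << 1)
    #define F_ENC_DEST_PORT (1u << 2)
    #define F_DEST_IPV4     (1u << 3)
    #define F_DEST_PORT     (1u << 4)

    static int count_lkups(unsigned int flags)
    {
        int n = 0;

        if (flags & F_TENANT_ID)     /* VNI/TNI word */
            n++;
        if (flags & F_ENC_DEST_IPV4) /* one outer IPv4 element */
            n++;
        if (flags & F_ENC_DEST_PORT) /* outer UDP dst port */
            n++;
        if (flags & F_DEST_IPV4)     /* one inner IPv4 element */
            n++;
        if (flags & F_DEST_PORT)     /* inner L4 port */
            n++;
        return n;
    }

    int main(void)
    {
        unsigned int f = F_TENANT_ID | F_ENC_DEST_IPV4 | F_DEST_IPV4 | F_DEST_PORT;

        printf("%d lookup elements\n", count_lkups(f)); /* prints 4 */
        return 0;
    }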
+static enum ice_protocol_type
+ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+{
+	if (inner) {
+		switch (ip_proto) {
+		case IPPROTO_UDP:
+			return ICE_UDP_ILOS;
+		}
+	} else {
+		switch (ip_proto) {
+		case IPPROTO_TCP:
+			return ICE_TCP_IL;
+		case IPPROTO_UDP:
+			return ICE_UDP_OF;
+		}
+	}
+
+	return 0;
+}
+
+static enum ice_protocol_type
+ice_proto_type_from_tunnel(enum ice_tunnel_type type)
+{
+	switch (type) {
+	case TNL_VXLAN:
+		return ICE_VXLAN;
+	case TNL_GENEVE:
+		return ICE_GENEVE;
+	case TNL_GRETAP:
+		return ICE_NVGRE;
+	default:
+		return 0;
+	}
+}
+
+static enum ice_sw_tunnel_type
+ice_sw_type_from_tunnel(enum ice_tunnel_type type)
+{
+	switch (type) {
+	case TNL_VXLAN:
+		return ICE_SW_TUN_VXLAN;
+	case TNL_GENEVE:
+		return ICE_SW_TUN_GENEVE;
+	case TNL_GRETAP:
+		return ICE_SW_TUN_NVGRE;
+	default:
+		return ICE_NON_TUN;
+	}
+}
+
+static int
+ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
+			 struct ice_adv_lkup_elem *list)
+{
+	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
+	int i = 0;
+
+	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
+		u32 tenant_id;
+
+		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+		switch (fltr->tunnel_type) {
+		case TNL_VXLAN:
+		case TNL_GENEVE:
+			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
+			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
+			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
+			i++;
+			break;
+		case TNL_GRETAP:
+			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
+			memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+			i++;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
+		list[i].type = ice_proto_type_from_ipv4(false);
+
+		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
+			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
+			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
+		}
+		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
+			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
+			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
+		}
+		i++;
+	}
+
+	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
+		list[i].type = ice_proto_type_from_ipv6(false);
+
+		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
+			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
+			       &hdr->l3_key.src_ipv6_addr,
+			       sizeof(hdr->l3_key.src_ipv6_addr));
+			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
+			       &hdr->l3_mask.src_ipv6_addr,
+			       sizeof(hdr->l3_mask.src_ipv6_addr));
+		}
+		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
+			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
+			       &hdr->l3_key.dst_ipv6_addr,
+			       sizeof(hdr->l3_key.dst_ipv6_addr));
+			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
+			       &hdr->l3_mask.dst_ipv6_addr,
+			       sizeof(hdr->l3_mask.dst_ipv6_addr));
+		}
+		i++;
+	}
+
+	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
+		list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
+		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
+		i++;
+	}
+
+	return i;
+}
+
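For VXLAN and Geneve the 24-bit key occupies the upper three bytes of the 32-bit VNI field, which is why ice_tc_fill_tunnel_outer() shifts the key left by 8 and masks with ff:ff:ff:00. A worked example of the resulting byte layout (host-side code, assuming the usual htonl semantics):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t keyid = 42;                    /* tc "key_id 42" */
        uint32_t vni_field = htonl(keyid << 8); /* 24-bit VNI in the top 3 bytes */
        uint8_t *b = (uint8_t *)&vni_field;

        /* on the wire: 00 00 2a 00 -- the low byte is reserved and masked out */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
    }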
 /**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
@@ -67,9 +222,16 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		  u16 *l4_proto)
 {
 	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+	bool inner = false;
 	int i = 0;
 
-	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
+	if (tc_fltr->tunnel_type != TNL_LAST) {
+		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+
+		headers = &tc_fltr->inner_headers;
+		inner = true;
+	} else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
 		list[i].type = ICE_ETYPE_OL;
 		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
 		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
@@ -83,7 +245,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		l2_key = &headers->l2_key;
 		l2_mask = &headers->l2_mask;
 
-		list[i].type = ICE_MAC_OFOS;
+		list[i].type = ice_proto_type_from_mac(inner);
 		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
 			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
 					l2_key->dst_mac);
@@ -112,7 +274,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
 		struct ice_tc_l3_hdr *l3_key, *l3_mask;
 
-		list[i].type = ICE_IPV4_OFOS;
+		list[i].type = ice_proto_type_from_ipv4(inner);
 		l3_key = &headers->l3_key;
 		l3_mask = &headers->l3_mask;
 		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
@@ -129,7 +291,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
 		struct ice_tc_l3_hdr *l3_key, *l3_mask;
 
-		list[i].type = ICE_IPV6_OFOS;
+		list[i].type = ice_proto_type_from_ipv6(inner);
 		ipv6_hdr = &list[i].h_u.ipv6_hdr;
 		ipv6_mask = &list[i].m_u.ipv6_hdr;
 		l3_key = &headers->l3_key;
@@ -155,19 +317,10 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
 		struct ice_tc_l4_hdr *l4_key, *l4_mask;
 
+		list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
 		l4_key = &headers->l4_key;
 		l4_mask = &headers->l4_mask;
-		if (headers->l3_key.ip_proto == IPPROTO_TCP) {
-			list[i].type = ICE_TCP_IL;
-			/* detected L4 proto is TCP */
-			if (l4_proto)
-				*l4_proto = IPPROTO_TCP;
-		} else if (headers->l3_key.ip_proto == IPPROTO_UDP) {
-			list[i].type = ICE_UDP_ILOS;
-			/* detected L4 proto is UDP */
-			if (l4_proto)
-				*l4_proto = IPPROTO_UDP;
-		}
+
 		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
 			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
 			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
@@ -182,6 +335,30 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
 	return i;
 }
 
+/**
+ * ice_tc_tun_get_type - get the tunnel type
+ * @tunnel_dev: ptr to tunnel device
+ *
+ * This function detects appropriate tunnel_type if specified device is
+ * tunnel device such as VXLAN/Geneve
+ */
+static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
+{
+	if (netif_is_vxlan(tunnel_dev))
+		return TNL_VXLAN;
+	if (netif_is_geneve(tunnel_dev))
+		return TNL_GENEVE;
+	if (netif_is_gretap(tunnel_dev) ||
+	    netif_is_ip6gretap(tunnel_dev))
+		return TNL_GRETAP;
+	return TNL_LAST;
+}
+
+bool ice_is_tunnel_supported(struct net_device *dev)
+{
+	return ice_tc_tun_get_type(dev) != TNL_LAST;
+}
+
 static int
 ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
 			    struct flow_action_entry *act)
@@ -201,10 +378,8 @@ ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
 
 		fltr->dest_vsi = repr->src_vsi;
 		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-	} else if (netif_is_ice(act->dev)) {
-		struct ice_netdev_priv *np = netdev_priv(act->dev);
-
-		fltr->dest_vsi = np->vsi;
+	} else if (netif_is_ice(act->dev) ||
+		   ice_is_tunnel_supported(act->dev)) {
 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
 	} else {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
@@ -235,11 +410,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	int ret = 0;
 	int i;
 
-	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
-				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
-				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
-				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
-				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
 		return -EOPNOTSUPP;
 	}
@@ -255,6 +426,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		goto exit;
 	}
 
+	/* egress traffic is always redirect to uplink */
+	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
+
 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -438,19 +613,26 @@ exit:
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel IPv4 address
 */
 static int
 ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
 		struct ice_tc_flower_fltr *fltr,
-		struct ice_tc_flower_lyr_2_4_hdrs *headers)
+		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
 {
 	if (match->key->dst) {
-		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
+		if (is_encap)
+			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
+		else
+			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
 		headers->l3_key.dst_ipv4 = match->key->dst;
 		headers->l3_mask.dst_ipv4 = match->mask->dst;
 	}
 	if (match->key->src) {
-		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
+		if (is_encap)
+			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
+		else
+			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
 		headers->l3_key.src_ipv4 = match->key->src;
 		headers->l3_mask.src_ipv4 = match->mask->src;
 	}
@@ -462,11 +644,12 @@ ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel IPv6 address
 */
 static int
 ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
 		struct ice_tc_flower_fltr *fltr,
-		struct ice_tc_flower_lyr_2_4_hdrs *headers)
+		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
 {
 	struct ice_tc_l3_hdr *l3_key, *l3_mask;
 
@@ -484,21 +667,31 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
 		return -EINVAL;
 	}
-	if (!ipv6_addr_any(&match->mask->dst))
-		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
-	if (!ipv6_addr_any(&match->mask->src))
-		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
+	if (!ipv6_addr_any(&match->mask->dst)) {
+		if (is_encap)
+			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
+		else
+			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
+	}
+	if (!ipv6_addr_any(&match->mask->src)) {
+		if (is_encap)
+			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
+		else
+			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
+	}
 
 	l3_key = &headers->l3_key;
 	l3_mask = &headers->l3_mask;
 
-	if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
 		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
 		       sizeof(match->key->src.s6_addr));
 		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
 		       sizeof(match->mask->src.s6_addr));
 	}
-	if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
 		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
 		       sizeof(match->key->dst.s6_addr));
 		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
@@ -513,18 +706,27 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel port
 */
 static int
 ice_tc_set_port(struct flow_match_ports match,
 		struct ice_tc_flower_fltr *fltr,
-		struct ice_tc_flower_lyr_2_4_hdrs *headers)
+		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
 {
 	if (match.key->dst) {
-		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+		if (is_encap)
+			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
+		else
+			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
 		headers->l4_key.dst_port = match.key->dst;
 		headers->l4_mask.dst_port = match.mask->dst;
 	}
 	if (match.key->src) {
-		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+		if (is_encap)
+			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
+		else
+			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
 		headers->l4_key.src_port = match.key->src;
 		headers->l4_mask.src_port = match.mask->src;
@@ -532,6 +734,85 @@ ice_tc_set_port(struct flow_match_ports match,
 	return 0;
 }
 
+static struct net_device *
+ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
+{
+	struct flow_action_entry *act;
+	int i;
+
+	if (ice_is_tunnel_supported(dev))
+		return dev;
+
+	flow_action_for_each(i, act, &rule->action) {
+		if (act->id == FLOW_ACTION_REDIRECT &&
+		    ice_is_tunnel_supported(act->dev))
+			return act->dev;
+	}
+
+	return NULL;
+}
+
+static int
+ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
+		      struct ice_tc_flower_fltr *fltr)
+{
+	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+	struct flow_match_control enc_control;
+
+	fltr->tunnel_type = ice_tc_tun_get_type(dev);
+	headers->l3_key.ip_proto = IPPROTO_UDP;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid enc_keyid;
+
+		flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+		if (!enc_keyid.mask->keyid ||
+		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
+			return -EINVAL;
+
+		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
+		fltr->tenant_id = enc_keyid.key->keyid;
+	}
+
+	flow_rule_match_enc_control(rule, &enc_control);
+
+	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_enc_ipv4_addrs(rule, &match);
+		if (ice_tc_set_ipv4(&match, fltr, headers, true))
+			return -EINVAL;
+	} else if (enc_control.key->addr_type ==
+					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_enc_ipv6_addrs(rule, &match);
+		if (ice_tc_set_ipv6(&match, fltr, headers, true))
+			return -EINVAL;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+		struct flow_match_ip match;
+
+		flow_rule_match_enc_ip(rule, &match);
+		headers->l3_key.tos = match.key->tos;
+		headers->l3_key.ttl = match.key->ttl;
+		headers->l3_mask.tos = match.mask->tos;
+		headers->l3_mask.ttl = match.mask->ttl;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+		struct flow_match_ports match;
+
+		flow_rule_match_enc_ports(rule, &match);
+		if (ice_tc_set_port(match, fltr, headers, true))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
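ice_parse_tunnel_attr() only accepts an exact tunnel key match: the keyid mask must be all ones (ICE_TC_FLOWER_MASK_32) or the filter is rejected with -EINVAL. A minimal restatement of that check (standalone, with htonl standing in for the kernel's cpu_to_be32):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MASK_32 0xFFFFFFFFu

    /* returns 0 when the key id is matched exactly, -22 (-EINVAL) otherwise */
    static int check_keyid_mask(uint32_t keyid_mask_be)
    {
        if (!keyid_mask_be || keyid_mask_be != htonl(MASK_32))
            return -22;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_keyid_mask(htonl(0xFFFFFFFF))); /* 0: exact match */
        printf("%d\n", check_keyid_mask(htonl(0x00FFFFFF))); /* -22: partial mask */
        return 0;
    }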
 /**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @vsi: Pointer to the VSI
@@ -548,6 +829,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
 	struct flow_dissector *dissector;
+	struct net_device *tunnel_dev;
 
 	dissector = rule->match.dissector;
 
@@ -559,12 +841,43 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
 		return -EOPNOTSUPP;
 	}
 
+	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
+	if (tunnel_dev) {
+		int err;
+
+		filter_dev = tunnel_dev;
+
+		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
+		if (err) {
+			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
+			return err;
+		}
+
+		/* header pointers should point to the inner headers, outer
+		 * header were already set by ice_parse_tunnel_attr
+		 */
+		headers = &fltr->inner_headers;
+	} else if (dissector->used_keys &
+		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
+		return -EOPNOTSUPP;
+	} else {
+		fltr->tunnel_type = TNL_LAST;
+	}
+
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 		struct flow_match_basic match;
 
@@ -651,7 +964,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		struct flow_match_ipv4_addrs match;
 
 		flow_rule_match_ipv4_addrs(rule, &match);
-		if (ice_tc_set_ipv4(&match, fltr, headers))
+		if (ice_tc_set_ipv4(&match, fltr, headers, false))
 			return -EINVAL;
 	}
 
@@ -659,7 +972,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		struct flow_match_ipv6_addrs match;
 
 		flow_rule_match_ipv6_addrs(rule, &match);
-		if (ice_tc_set_ipv6(&match, fltr, headers))
+		if (ice_tc_set_ipv6(&match, fltr, headers, false))
 			return -EINVAL;
 	}
 
@@ -667,7 +980,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		struct flow_match_ports match;
 
 		flow_rule_match_ports(rule, &match);
-		if (ice_tc_set_port(match, fltr, headers))
+		if (ice_tc_set_port(match, fltr, headers, false))
 			return -EINVAL;
 		switch (headers->l3_key.ip_proto) {
 		case IPPROTO_TCP:
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index ee9b284fcc02..319049477959 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -23,6 +23,14 @@
 #define ICE_TC_FLWR_FIELD_ENC_DST_MAC		BIT(16)
 #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID		BIT(17)
 
+#define ICE_TC_FLOWER_MASK_32   0xFFFFFFFF
+
+struct ice_indr_block_priv {
+	struct net_device *netdev;
+	struct ice_netdev_priv *np;
+	struct list_head list;
+};
+
 struct ice_tc_flower_action {
 	u32 tc_class;
 	enum ice_sw_fwd_act_type fltr_act;
@@ -112,6 +120,7 @@ struct ice_tc_flower_fltr {
 	struct ice_vsi *src_vsi;
 	__be32 tenant_id;
 	u32 flags;
+	u8 tunnel_type;
 	struct ice_tc_flower_action	action;
 
 	/* cache ptr which is used wherever needed to communicate netlink
@@ -148,5 +157,6 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
 int
 ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
 void ice_replay_tc_fltrs(struct ice_pf *pf);
+bool ice_is_tunnel_supported(struct net_device *dev);
 
 #endif /* _ICE_TC_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a42eaf6f942e..6a74344a3c21 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -4499,13 +4499,6 @@ void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
 	*ops = ice_vc_vf_dflt_ops;
 }
 
-static int
-ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf,
-			  u8 __always_unused *msg)
-{
-	return 0;
-}
-
 /**
 * ice_vc_repr_add_mac
 * @vf: pointer to VF
@@ -4581,20 +4574,62 @@ ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
 }
 
-static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf)
+static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
 {
-	return 0;
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
+				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
+				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't enable VLAN stripping in switchdev mode for VF %d\n",
+		vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+				     NULL, 0);
+}
+
+static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't disable VLAN stripping in switchdev mode for VF %d\n",
+		vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+				     NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+	dev_dbg(ice_pf_to_dev(vf->pf),
+		"Can't config promiscuous mode in switchdev mode for VF %d\n",
+		vf->vf_id);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+				     VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+				     NULL, 0);
 }
 
 void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
 {
 	ops->add_mac_addr_msg = ice_vc_repr_add_mac;
 	ops->del_mac_addr_msg = ice_vc_repr_del_mac;
-	ops->add_vlan_msg = ice_vc_repr_no_action_msg;
-	ops->remove_vlan_msg = ice_vc_repr_no_action_msg;
-	ops->ena_vlan_stripping = ice_vc_repr_no_action;
-	ops->dis_vlan_stripping = ice_vc_repr_no_action;
-	ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg;
+	ops->add_vlan_msg = ice_vc_repr_add_vlan;
+	ops->remove_vlan_msg = ice_vc_repr_del_vlan;
+	ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
+	ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
+	ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
 }
 
 /**