author      Jakub Kicinski    2019-11-23 16:36:49 -0800
committer   Jakub Kicinski    2019-11-23 16:36:49 -0800
commit      9520aea75b88f4df4f9cae6b770626a143364ddc (patch)
tree        b3d7433d522ff391fe5b812a412fcbdb9612d4ea /drivers
parent      d46b7e4fb06037a61415f5b6964fcf632ee1dc34 (diff)
parent      90ac245814abc30d2423474310654d31e3908b2f (diff)
Merge tag 'mlx5-updates-2019-11-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
mlx5-updates-2019-11-22
1) Misc Cleanups
2) Software steering support for Geneve
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Diffstat (limited to 'drivers')
7 files changed, 186 insertions, 93 deletions
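
The headline item in this series is Geneve matching in the software-steering code (dr_matcher.c and dr_ste.c below): a flex-parser tunnel STE builder is used only when the rule mask carries the tunnel fields and the device reports the matching flex-parser capability. Below is a minimal standalone sketch of that selection rule; the toy_* types and TOY_FLEX_PARSER_* bit values are simplified stand-ins for the real mlx5dr structures and mlx5 capability bits, not kernel definitions.

/* flex_parser_select.c - standalone sketch, not kernel code.
 * Mirrors the shape of dr_mask_is_misc_geneve_set() and
 * dr_mask_is_flex_parser_tnl_geneve_set() from the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_caps {
        uint32_t flex_protocols;          /* bitmask of enabled flex parsers */
};

struct toy_misc {                         /* Geneve fields of the match mask */
        uint32_t geneve_vni;
        uint8_t  geneve_oam;
        uint16_t geneve_protocol_type;
        uint8_t  geneve_opt_len;
};

/* Illustrative bit values; the real ones live in the mlx5 headers. */
#define TOY_FLEX_PARSER_GENEVE_ENABLED    (1u << 3)
#define TOY_FLEX_PARSER_VXLAN_GPE_ENABLED (1u << 7)

/* Any Geneve field present in the mask counts. */
static bool mask_has_geneve(const struct toy_misc *misc)
{
        return misc->geneve_vni || misc->geneve_oam ||
               misc->geneve_protocol_type || misc->geneve_opt_len;
}

/* Builder is selected only when mask bits AND device capability agree. */
static bool use_geneve_builder(const struct toy_misc *misc,
                               const struct toy_caps *caps)
{
        return mask_has_geneve(misc) &&
               (caps->flex_protocols & TOY_FLEX_PARSER_GENEVE_ENABLED);
}

int main(void)
{
        struct toy_caps caps = { .flex_protocols = TOY_FLEX_PARSER_GENEVE_ENABLED };
        struct toy_misc mask = { .geneve_vni = 0xffffff };   /* match on VNI only */

        printf("geneve builder selected: %s\n",
               use_geneve_builder(&mask, &caps) ? "yes" : "no");
        return 0;
}

The VXLAN-GPE check in the patch has the same shape, and the two are chained with if/else if in dr_matcher_set_ste_builders() so that at most one flex-parser tunnel builder is placed per matcher.
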
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 5316cedd78bf..784b1e26f414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -141,7 +141,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	struct dst_entry *dst;
 	struct neighbour *n;
 
-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 	int ret;
 
 	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
@@ -157,9 +156,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 		dst_release(dst);
 		return ret;
 	}
-#else
-	return -EOPNOTSUPP;
-#endif
 
 	n = dst_neigh_lookup(dst, &fl6->daddr);
 	dst_release(dst);
@@ -240,13 +236,13 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv4_encap_size, max_encap_size);
 		err = -EOPNOTSUPP;
-		goto out;
+		goto release_neigh;
 	}
 
 	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header) {
 		err = -ENOMEM;
-		goto out;
+		goto release_neigh;
 	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
@@ -298,7 +294,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto out;
+		goto release_neigh;
 	}
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
 						     e->reformat_type,
@@ -318,9 +314,8 @@ destroy_neigh_entry:
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
 	kfree(encap_header);
-out:
-	if (n)
-		neigh_release(n);
+release_neigh:
+	neigh_release(n);
 	return err;
 }
 
@@ -359,13 +354,13 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv6_encap_size, max_encap_size);
 		err = -EOPNOTSUPP;
-		goto out;
+		goto release_neigh;
 	}
 
 	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header) {
 		err = -ENOMEM;
-		goto out;
+		goto release_neigh;
 	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
@@ -416,7 +411,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto out;
+		goto release_neigh;
 	}
 
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
@@ -437,9 +432,8 @@ destroy_neigh_entry:
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
 	kfree(encap_header);
-out:
-	if (n)
-		neigh_release(n);
+release_neigh:
+	neigh_release(n);
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index c362b9225dc2..6f9a78c85ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -58,9 +58,16 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 				    struct net_device *mirred_dev,
 				    struct mlx5e_encap_entry *e);
 
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 				    struct net_device *mirred_dev,
 				    struct mlx5e_encap_entry *e);
+#else
+static inline int
+mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+				struct net_device *mirred_dev,
+				struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
+#endif
 
 bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
 				    struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index c6548980daf0..c6dbd856df94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -102,13 +102,52 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
 	 DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
 	 DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
 
-static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3)
+static bool
+dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
 {
 	return (misc3->outer_vxlan_gpe_vni ||
 		misc3->outer_vxlan_gpe_next_protocol ||
 		misc3->outer_vxlan_gpe_flags);
 }
 
+static bool
+dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+{
+	return caps->flex_protocols &
+	       MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask,
+					 struct mlx5dr_domain *dmn)
+{
+	return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) &&
+	       dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps);
+}
+
+static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
+{
+	return misc->geneve_vni ||
+	       misc->geneve_oam ||
+	       misc->geneve_protocol_type ||
+	       misc->geneve_opt_len;
+}
+
+static bool
+dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps)
+{
+	return caps->flex_protocols &
+	       MLX5_FLEX_PARSER_GENEVE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask,
+				      struct mlx5dr_domain *dmn)
+{
+	return dr_mask_is_misc_geneve_set(&mask->misc) &&
+	       dr_matcher_supp_flex_parser_geneve(&dmn->info.caps);
+}
+
 static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
 {
 	return (misc3->icmpv6_type || misc3->icmpv6_code ||
@@ -137,13 +176,6 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
 	return (misc->source_sqn || misc->source_port);
 }
 
-static bool
-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
-{
-	return dmn->info.caps.flex_protocols &
-	       MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
-}
-
 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
 				   enum mlx5dr_ipv outer_ipv,
@@ -262,10 +294,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 						    inner, rx);
 	}
 
-	if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) &&
-	    dr_matcher_supp_flex_parser_vxlan_gpe(dmn))
-		mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask,
-						 inner, rx);
+	if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn))
+		mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++],
+							   &mask,
+							   inner, rx);
+	else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn))
+		mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++],
+							&mask,
+							inner, rx);
 
 	if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
 		mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index dc7e41566b2c..a5a266983dd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -2083,68 +2083,110 @@ void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
 	sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
 }
 
-static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
-						  bool inner, u8 *bit_mask)
+static void
+dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
+						bool inner, u8 *bit_mask)
 {
 	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
 
-	if (misc_3_mask->outer_vxlan_gpe_flags ||
-	    misc_3_mask->outer_vxlan_gpe_next_protocol) {
-		MLX5_SET(ste_flex_parser_tnl, bit_mask,
-			 flex_parser_tunneling_header_63_32,
-			 (misc_3_mask->outer_vxlan_gpe_flags << 24) |
-			 (misc_3_mask->outer_vxlan_gpe_next_protocol));
-		misc_3_mask->outer_vxlan_gpe_flags = 0;
-		misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
-	}
-
-	if (misc_3_mask->outer_vxlan_gpe_vni) {
-		MLX5_SET(ste_flex_parser_tnl, bit_mask,
-			 flex_parser_tunneling_header_31_0,
-			 misc_3_mask->outer_vxlan_gpe_vni << 8);
-		misc_3_mask->outer_vxlan_gpe_vni = 0;
-	}
+	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+			  outer_vxlan_gpe_flags,
+			  misc_3_mask, outer_vxlan_gpe_flags);
+	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+			  outer_vxlan_gpe_next_protocol,
+			  misc_3_mask, outer_vxlan_gpe_next_protocol);
+	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+			  outer_vxlan_gpe_vni,
+			  misc_3_mask, outer_vxlan_gpe_vni);
 }
 
-static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
-					    struct mlx5dr_ste_build *sb,
-					    u8 *hw_ste_p)
+static int
+dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+					   struct mlx5dr_ste_build *sb,
+					   u8 *hw_ste_p)
 {
 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
 	u8 *tag = hw_ste->tag;
 
-	if (misc3->outer_vxlan_gpe_flags ||
-	    misc3->outer_vxlan_gpe_next_protocol) {
-		MLX5_SET(ste_flex_parser_tnl, tag,
-			 flex_parser_tunneling_header_63_32,
-			 (misc3->outer_vxlan_gpe_flags << 24) |
-			 (misc3->outer_vxlan_gpe_next_protocol));
-		misc3->outer_vxlan_gpe_flags = 0;
-		misc3->outer_vxlan_gpe_next_protocol = 0;
-	}
-
-	if (misc3->outer_vxlan_gpe_vni) {
-		MLX5_SET(ste_flex_parser_tnl, tag,
-			 flex_parser_tunneling_header_31_0,
-			 misc3->outer_vxlan_gpe_vni << 8);
-		misc3->outer_vxlan_gpe_vni = 0;
-	}
+	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+		       outer_vxlan_gpe_flags, misc3,
+		       outer_vxlan_gpe_flags);
+	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+		       outer_vxlan_gpe_next_protocol, misc3,
+		       outer_vxlan_gpe_next_protocol);
+	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+		       outer_vxlan_gpe_vni, misc3,
+		       outer_vxlan_gpe_vni);
 
 	return 0;
 }
 
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
-				      struct mlx5dr_match_param *mask,
-				      bool inner, bool rx)
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+						struct mlx5dr_match_param *mask,
+						bool inner, bool rx)
+{
+	dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
+							sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static void
+dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
+					     u8 *bit_mask)
 {
-	dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+	struct mlx5dr_match_misc *misc_mask = &value->misc;
 
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_protocol_type,
+			  misc_mask, geneve_protocol_type);
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_oam,
+			  misc_mask, geneve_oam);
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_opt_len,
+			  misc_mask, geneve_opt_len);
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_vni,
+			  misc_mask, geneve_vni);
+}
+
+static int
+dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+					struct mlx5dr_ste_build *sb,
+					u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc *misc = &value->misc;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_protocol_type, misc, geneve_protocol_type);
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_oam, misc, geneve_oam);
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_opt_len, misc, geneve_opt_len);
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_vni, misc, geneve_vni);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+					     struct mlx5dr_match_param *mask,
+					     bool inner, bool rx)
+{
+	dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
+
 	sb->rx = rx;
 	sb->inner = inner;
 	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
 	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
 }
 
 static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index c1f45a60ee6b..290fe61c33d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -325,9 +325,12 @@ int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
 				   struct mlx5dr_match_param *mask,
 				   struct mlx5dr_cmd_caps *caps,
 				   bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
-				      struct mlx5dr_match_param *mask,
-				      bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+						struct mlx5dr_match_param *mask,
+						bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+					     struct mlx5dr_match_param *mask,
+					     bool inner, bool rx);
 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
 				      struct mlx5dr_match_param *mask,
 				      bool inner, bool rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 596c927220d9..1722f4668269 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -548,6 +548,30 @@ struct mlx5_ifc_ste_flex_parser_tnl_bits {
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits {
+	u8         outer_vxlan_gpe_flags[0x8];
+	u8         reserved_at_8[0x10];
+	u8         outer_vxlan_gpe_next_protocol[0x8];
+
+	u8         outer_vxlan_gpe_vni[0x18];
+	u8         reserved_at_38[0x8];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
+	u8         reserved_at_0[0x2];
+	u8         geneve_opt_len[0x6];
+	u8         geneve_oam[0x1];
+	u8         reserved_at_9[0x7];
+	u8         geneve_protocol_type[0x10];
+
+	u8         geneve_vni[0x18];
+	u8         reserved_at_38[0x8];
+
+	u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_ste_general_purpose_bits {
 	u8         general_purpose_lookup_field[0x20];
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 30f7848a6f88..1faac31f74d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1064,26 +1064,13 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
 	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
 	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
-	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
-	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
-	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
-	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
-	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
-	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
-	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
-	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
-	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
-	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
-	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
-	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
-	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
-	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
-	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
-	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
-	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
-	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
-	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
-	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
+		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
+			 req->policy);
+	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
+		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
+		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
 
 	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
 ex:
 	kfree(in);
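
The tc_tun hunks above drop the in-function CONFIG_INET/CONFIG_IPV6 guard from mlx5e_route_lookup_ipv6() and instead stub out mlx5e_tc_tun_create_header_ipv6() in tc_tun.h with a static inline that returns -EOPNOTSUPP when IPv6 support is not built in; the "out" label is also renamed to "release_neigh" and the NULL check on the neighbour is dropped. A self-contained sketch of the header-stub pattern, using made-up names (TOY_IPV6_ENABLED, toy_create_header_ipv6), not kernel symbols:

/* header_stub.c - standalone sketch of the stub-in-header pattern. */
#include <errno.h>
#include <stdio.h>

#define TOY_IPV6_ENABLED 0   /* emulate CONFIG_INET && CONFIG_IPV6 being off */

#if TOY_IPV6_ENABLED
int toy_create_header_ipv6(void *priv, void *mirred_dev, void *encap); /* real implementation built elsewhere */
#else
/* Stub keeps every call site identical whether or not IPv6 is compiled in. */
static inline int toy_create_header_ipv6(void *priv, void *mirred_dev, void *encap)
{
        return -EOPNOTSUPP;  /* same result the removed in-function #else branch produced */
}
#endif

int main(void)
{
        /* Caller needs no #ifdef of its own. */
        printf("toy_create_header_ipv6() -> %d\n",
               toy_create_header_ipv6(NULL, NULL, NULL));
        return 0;
}
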
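
The vport.c hunk makes mlx5_core_modify_hca_vport_context() program only the fields the caller flagged in req->field_select instead of unconditionally writing every hca_vport_context field. A standalone sketch of that guard pattern; the toy_* types and SEL_* flags are stand-ins for the kernel's structures and MLX5_HCA_VPORT_SEL_* bits:

/* vport_field_select.c - standalone sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define SEL_STATE_POLICY (1u << 0)   /* stand-ins for the field_select bits */
#define SEL_PORT_GUID    (1u << 1)
#define SEL_NODE_GUID    (1u << 2)

struct toy_req {
        uint32_t field_select;       /* which fields the caller wants applied */
        uint8_t  policy;
        uint64_t port_guid;
        uint64_t node_guid;
};

struct toy_ctx {                     /* stand-in for the vport context being built */
        uint8_t  vport_state_policy;
        uint64_t port_guid;
        uint64_t node_guid;
};

/* Only fields flagged in field_select are written; everything else is left untouched. */
static void apply_req(struct toy_ctx *ctx, const struct toy_req *req)
{
        if (req->field_select & SEL_STATE_POLICY)
                ctx->vport_state_policy = req->policy;
        if (req->field_select & SEL_PORT_GUID)
                ctx->port_guid = req->port_guid;
        if (req->field_select & SEL_NODE_GUID)
                ctx->node_guid = req->node_guid;
}

int main(void)
{
        struct toy_ctx ctx = { .vport_state_policy = 2, .port_guid = 0x1111 };
        struct toy_req req = { .field_select = SEL_NODE_GUID, .node_guid = 0xabcd };

        apply_req(&ctx, &req);       /* only node_guid changes */
        printf("policy=%u port_guid=%#llx node_guid=%#llx\n",
               (unsigned)ctx.vport_state_policy,
               (unsigned long long)ctx.port_guid,
               (unsigned long long)ctx.node_guid);
        return 0;
}

Only the selected fields are written into the command context, so anything the caller did not ask to change is left alone.
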