author     David S. Miller    2021-08-27 09:53:31 +0100
committer  David S. Miller    2021-08-27 09:53:31 +0100
commit     a550409378d2aea4d2104a551c192e7a65ddd6c0 (patch)
tree       88c2077986e6141786a15c003c876afcafb50865
parent     5ab54e5792a44dc7431cef0d57755d3c0aa9740b (diff)
parent     a2ebfbb7b181774570224faee570f717ae11b6d8 (diff)
Merge tag 'mlx5-updates-2021-08-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
This patch series contains various fixes, additions and improvements to mlx5 software steering.

Patch 1: adds support for REMOVE_HEADER packet reformat - a new reformat type that is supported starting with ConnectX-6 DX, and allows removing an arbitrary size packet segment at a selected position.
Patches 2 and 3: add support for VLAN pop on TX and VLAN push on RX flows.
Patch 4: enables retransmission mechanism for the SW Steering RC QP.
Patch 5: does some improvements to error flow in building STE array and adds a more informative printout of an invalid actions sequence.
Patch 6: improves error flow on SW Steering QP error.
Patch 7: reduces the log level of a message that is printed when a table is connected to a lower/same level destination table, as this case proves to be not as rare as it was in the past.
Patch 8: adds missing support for matching on IPv6 flow label for devices older than ConnectX-6 DX.
Patch 9: replaces uintN_t types with kernel-style types.
Patch 10: allows for using the right API for updating flow tables - if it is a FW-owned table, then FW API will be used.
Patch 11: adds support for 'ignore_flow_level' on multi-destination flow tables that are created by SW Steering.
Patch 12: optimizes FDB RX steering rule by skipping matching on source port, as the source port for all incoming packets equals to wire.
Patch 13: is a small code refactoring - it merges several DR_STE_SIZE enums into a single enum.
Patch 14: does some additional refactoring and removes HW-specific STE type from NIC domain.
Patch 15: removes rehash ctrl struct from dr_htbl struct and saves some memory.
Patch 16: does a more significant improvement in terms of memory consumption and was able to save about 1.6 Gb for 8M rules.
Patch 17: adds support for update FTE, which is needed for cases where there are multiple rules with the same match.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c   | 271
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c      |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c   |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c       |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c  |  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c     | 150
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c     |  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c      |  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h      |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c   |  57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c   | 101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h    |  68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c       |  51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h      |   4
15 files changed, 556 insertions, 236 deletions
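
Several of the dr_action.c hunks below extend the per-domain next_action_state tables that decide which action type may legally follow which. The following is a minimal, self-contained sketch of that table-driven validation idea; the states, action types and transitions are simplified for illustration and are not the kernel's tables or names:

#include <stdbool.h>
#include <stdio.h>

/* A sequence of actions is valid iff every transition is defined in the
 * table and the sequence ends in a terminating state.
 */
enum state { ST_NO_ACTION, ST_POP_VLAN, ST_PUSH_VLAN, ST_TERM, ST_MAX };
enum action { ACT_POP_VLAN, ACT_PUSH_VLAN, ACT_FT, ACT_MAX };

/* 0 (ST_NO_ACTION) doubles as "transition not allowed": missing entries
 * are zero-initialized and no legal transition leads back to it.
 */
static const enum state next_state[ST_MAX][ACT_MAX] = {
	[ST_NO_ACTION] = {
		[ACT_POP_VLAN]  = ST_POP_VLAN,
		[ACT_PUSH_VLAN] = ST_PUSH_VLAN,
		[ACT_FT]        = ST_TERM,
	},
	[ST_POP_VLAN] = {
		[ACT_POP_VLAN]  = ST_POP_VLAN,
		[ACT_PUSH_VLAN] = ST_PUSH_VLAN,
		[ACT_FT]        = ST_TERM,
	},
	[ST_PUSH_VLAN] = {
		[ACT_PUSH_VLAN] = ST_PUSH_VLAN,
		[ACT_FT]        = ST_TERM,
	},
};

static bool sequence_is_valid(const enum action *seq, int n)
{
	enum state s = ST_NO_ACTION;
	int i;

	for (i = 0; i < n; i++) {
		enum state next = next_state[s][seq[i]];

		if (next == ST_NO_ACTION) /* undefined entry -> invalid order */
			return false;
		s = next;
	}
	return s == ST_TERM; /* rule must end with a terminating action */
}

int main(void)
{
	enum action ok[]  = { ACT_POP_VLAN, ACT_PUSH_VLAN, ACT_FT };
	enum action bad[] = { ACT_PUSH_VLAN, ACT_POP_VLAN, ACT_FT };

	printf("ok:  %d\n", sequence_is_valid(ok, 3));  /* 1 */
	printf("bad: %d\n", sequence_is_valid(bad, 3)); /* 0 */
	return 0;
}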
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6475ba35cf6b..a5b9f65db23c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -18,12 +18,39 @@ enum dr_action_valid_state {
DR_ACTION_STATE_ENCAP,
DR_ACTION_STATE_DECAP,
DR_ACTION_STATE_MODIFY_HDR,
- DR_ACTION_STATE_MODIFY_VLAN,
+ DR_ACTION_STATE_POP_VLAN,
+ DR_ACTION_STATE_PUSH_VLAN,
DR_ACTION_STATE_NON_TERM,
DR_ACTION_STATE_TERM,
DR_ACTION_STATE_MAX,
};
+static const char * const action_type_to_str[] = {
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = "DR_ACTION_TYP_TNL_L2_TO_L2",
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = "DR_ACTION_TYP_L2_TO_TNL_L2",
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = "DR_ACTION_TYP_TNL_L3_TO_L2",
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = "DR_ACTION_TYP_L2_TO_TNL_L3",
+ [DR_ACTION_TYP_DROP] = "DR_ACTION_TYP_DROP",
+ [DR_ACTION_TYP_QP] = "DR_ACTION_TYP_QP",
+ [DR_ACTION_TYP_FT] = "DR_ACTION_TYP_FT",
+ [DR_ACTION_TYP_CTR] = "DR_ACTION_TYP_CTR",
+ [DR_ACTION_TYP_TAG] = "DR_ACTION_TYP_TAG",
+ [DR_ACTION_TYP_MODIFY_HDR] = "DR_ACTION_TYP_MODIFY_HDR",
+ [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
+ [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
+ [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+ [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
+ [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+ [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
+};
+
+static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
+{
+ if (action_id > DR_ACTION_TYP_MAX)
+ action_id = DR_ACTION_TYP_MAX;
+ return action_type_to_str[action_id];
+}
+
static const enum dr_action_valid_state
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
[DR_ACTION_DOMAIN_NIC_INGRESS] = {
@@ -39,8 +66,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -53,7 +82,8 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -73,20 +103,31 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
},
- [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
+ [DR_ACTION_STATE_PUSH_VLAN] = {
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ },
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
@@ -99,8 +140,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -115,8 +158,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ },
+ [DR_ACTION_STATE_DECAP] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -132,14 +183,25 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ },
+ [DR_ACTION_STATE_POP_VLAN] = {
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
- [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
@@ -152,8 +214,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -170,8 +234,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_DECAP] = {
@@ -180,11 +246,12 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -203,13 +270,26 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
},
- [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ },
+ [DR_ACTION_STATE_PUSH_VLAN] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -226,8 +306,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
@@ -244,8 +326,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_DECAP] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -262,15 +353,27 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ },
+ [DR_ACTION_STATE_POP_VLAN] = {
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
- [DR_ACTION_STATE_MODIFY_VLAN] = {
+ [DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
@@ -285,7 +388,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
- [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
+ [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
@@ -314,6 +419,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
case DR_ACTION_REFORMAT_TYP_INSERT_HDR:
*action_type = DR_ACTION_TYP_INSERT_HDR;
break;
+ case DR_ACTION_REFORMAT_TYP_REMOVE_HDR:
+ *action_type = DR_ACTION_TYP_REMOVE_HDR;
+ break;
default:
return -EINVAL;
}
@@ -326,7 +434,7 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
* the new size of the STEs array, rule with actions.
*/
static void dr_actions_apply(struct mlx5dr_domain *dmn,
- enum mlx5dr_ste_entry_type ste_type,
+ enum mlx5dr_domain_nic_type nic_type,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
@@ -335,7 +443,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
u32 added_stes = 0;
- if (ste_type == MLX5DR_STE_TYPE_RX)
+ if (nic_type == DR_DOMAIN_NIC_TYPE_RX)
mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
last_ste, attr, &added_stes);
else
@@ -347,7 +455,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
static enum dr_action_domain
dr_action_get_action_domain(enum mlx5dr_domain_type domain,
- enum mlx5dr_ste_entry_type ste_type)
+ enum mlx5dr_domain_nic_type nic_type)
{
switch (domain) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -355,7 +463,7 @@ dr_action_get_action_domain(enum mlx5dr_domain_type domain,
case MLX5DR_DOMAIN_TYPE_NIC_TX:
return DR_ACTION_DOMAIN_NIC_EGRESS;
case MLX5DR_DOMAIN_TYPE_FDB:
- if (ste_type == MLX5DR_STE_TYPE_RX)
+ if (nic_type == DR_DOMAIN_NIC_TYPE_RX)
return DR_ACTION_DOMAIN_FDB_INGRESS;
return DR_ACTION_DOMAIN_FDB_EGRESS;
default:
@@ -421,6 +529,18 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
return 0;
}
+static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *actions[],
+ int last_idx)
+{
+ int i;
+
+ for (i = 0; i <= last_idx; i++)
+ mlx5dr_err(dmn, "< %s (%d) > ",
+ dr_action_id_to_str(actions[i]->action_type),
+ actions[i]->action_type);
+}
+
#define WITH_VLAN_NUM_HW_ACTIONS 6
int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
@@ -431,7 +551,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
u32 *new_hw_ste_arr_sz)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
- bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
+ bool rx_rule = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
struct mlx5dr_ste_actions_attr attr = {};
@@ -445,7 +565,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.gvmi = dmn->info.caps.gvmi;
attr.hit_gvmi = dmn->info.caps.gvmi;
attr.final_icm_addr = nic_dmn->default_icm_addr;
- action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type);
+ action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type);
for (i = 0; i < num_actions; i++) {
struct mlx5dr_action_dest_tbl *dest_tbl;
@@ -467,11 +587,11 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (dest_tbl->tbl->dmn != dmn) {
mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
- goto out_invalid_arg;
+ return -EINVAL;
}
if (dest_tbl->tbl->level <= matcher->tbl->level) {
- mlx5_core_warn_once(dmn->mdev,
- "Connecting table to a lower/same level destination table\n");
+ mlx5_core_dbg_once(dmn->mdev,
+ "Connecting table to a lower/same level destination table\n");
mlx5dr_dbg(dmn,
"Connecting table at level %d to a destination table at level %d\n",
matcher->tbl->level,
@@ -509,7 +629,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_QP:
mlx5dr_info(dmn, "Domain doesn't support QP\n");
- goto out_invalid_arg;
+ return -EOPNOTSUPP;
case DR_ACTION_TYP_CTR:
attr.ctr_id = action->ctr->ctr_id +
action->ctr->offeset;
@@ -536,7 +656,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (rx_rule &&
!(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) {
mlx5dr_info(dmn, "Device doesn't support Encap on RX\n");
- goto out_invalid_arg;
+ return -EOPNOTSUPP;
}
attr.reformat.size = action->reformat->size;
attr.reformat.id = action->reformat->id;
@@ -549,48 +669,66 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
- /* Loopback on WIRE vport is not supported */
- if (action->vport->caps->num == WIRE_PORT)
- goto out_invalid_arg;
-
+ if (action->vport->caps->num == WIRE_PORT) {
+ mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
+ return -EOPNOTSUPP;
+ }
attr.final_icm_addr = action->vport->caps->icm_address_rx;
} else {
attr.final_icm_addr = action->vport->caps->icm_address_tx;
}
break;
case DR_ACTION_TYP_POP_VLAN:
+ if (!rx_rule && !(dmn->ste_ctx->actions_caps &
+ DR_STE_CTX_ACTION_CAP_TX_POP)) {
+ mlx5dr_dbg(dmn, "Device doesn't support POP VLAN action on TX\n");
+ return -EOPNOTSUPP;
+ }
+
max_actions_type = MLX5DR_MAX_VLANS;
attr.vlans.count++;
break;
case DR_ACTION_TYP_PUSH_VLAN:
+ if (rx_rule && !(dmn->ste_ctx->actions_caps &
+ DR_STE_CTX_ACTION_CAP_RX_PUSH)) {
+ mlx5dr_dbg(dmn, "Device doesn't support PUSH VLAN action on RX\n");
+ return -EOPNOTSUPP;
+ }
+
max_actions_type = MLX5DR_MAX_VLANS;
- if (attr.vlans.count == MLX5DR_MAX_VLANS)
+ if (attr.vlans.count == MLX5DR_MAX_VLANS) {
+ mlx5dr_dbg(dmn, "Max VLAN push/pop count exceeded\n");
return -EINVAL;
+ }
attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
break;
case DR_ACTION_TYP_INSERT_HDR:
+ case DR_ACTION_TYP_REMOVE_HDR:
attr.reformat.size = action->reformat->size;
attr.reformat.id = action->reformat->id;
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
default:
- goto out_invalid_arg;
+ mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
+ return -EINVAL;
}
/* Check action duplication */
if (++action_type_set[action_type] > max_actions_type) {
mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
action_type, max_actions_type);
- goto out_invalid_arg;
+ return -EINVAL;
}
/* Check action state machine is valid */
if (dr_action_validate_and_get_next_state(action_domain,
action_type,
&state)) {
- mlx5dr_err(dmn, "Invalid action sequence provided\n");
+ mlx5dr_err(dmn, "Invalid action (gvmi: %d, is_rx: %d) sequence provided:",
+ attr.gvmi, rx_rule);
+ dr_action_print_sequence(dmn, actions, i);
return -EOPNOTSUPP;
}
}
@@ -614,16 +752,13 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
dr_actions_apply(dmn,
- nic_dmn->ste_type,
+ nic_dmn->type,
action_type_set,
last_ste,
&attr,
new_hw_ste_arr_sz);
return 0;
-
-out_invalid_arg:
- return -EINVAL;
}
static unsigned int action_size[DR_ACTION_TYP_MAX] = {
@@ -638,6 +773,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport),
[DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan),
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
+ [DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
};
@@ -709,7 +845,8 @@ dec_ref:
struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
- u32 num_of_dests)
+ u32 num_of_dests,
+ bool ignore_flow_level)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
@@ -776,7 +913,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
num_of_dests,
reformat_req,
&action->dest_tbl->fw_tbl.id,
- &action->dest_tbl->fw_tbl.group_id);
+ &action->dest_tbl->fw_tbl.group_id,
+ ignore_flow_level);
if (ret)
goto free_action;
@@ -884,11 +1022,23 @@ dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
size_t data_sz,
void *data)
{
- if ((!data && data_sz) || (data && !data_sz) ||
- ((reformat_param_0 || reformat_param_1) &&
- reformat_type != DR_ACTION_TYP_INSERT_HDR) ||
- reformat_type > DR_ACTION_TYP_INSERT_HDR) {
- mlx5dr_dbg(dmn, "Invalid reformat parameter!\n");
+ if (reformat_type == DR_ACTION_TYP_INSERT_HDR) {
+ if ((!data && data_sz) || (data && !data_sz) ||
+ MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_size) < data_sz ||
+ MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_offset) < reformat_param_1) {
+ mlx5dr_dbg(dmn, "Invalid reformat parameters for INSERT_HDR\n");
+ goto out_err;
+ }
+ } else if (reformat_type == DR_ACTION_TYP_REMOVE_HDR) {
+ if (data ||
+ MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_size) < data_sz ||
+ MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_offset) < reformat_param_1) {
+ mlx5dr_dbg(dmn, "Invalid reformat parameters for REMOVE_HDR\n");
+ goto out_err;
+ }
+ } else if (reformat_param_0 || reformat_param_1 ||
+ reformat_type > DR_ACTION_TYP_REMOVE_HDR) {
+ mlx5dr_dbg(dmn, "Invalid reformat parameters\n");
goto out_err;
}
@@ -987,7 +1137,6 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
return 0;
}
case DR_ACTION_TYP_INSERT_HDR:
- {
ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev,
MLX5_REFORMAT_TYPE_INSERT_HDR,
reformat_param_0,
@@ -1002,7 +1151,12 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
action->reformat->param_0 = reformat_param_0;
action->reformat->param_1 = reformat_param_1;
return 0;
- }
+ case DR_ACTION_TYP_REMOVE_HDR:
+ action->reformat->id = 0;
+ action->reformat->size = data_sz;
+ action->reformat->param_0 = reformat_param_0;
+ action->reformat->param_1 = reformat_param_1;
+ return 0;
default:
mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
return -EINVAL;
@@ -1658,6 +1812,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
}
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
+ case DR_ACTION_TYP_REMOVE_HDR:
refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
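
As a side note on the new REMOVE_HEADER handling above, here is a hedged, standalone model of the parameter checks added to dr_action_verify_reformat_params(): the action carries no inline data, and the removed size/offset must fit the device caps. The caps struct and helper below are illustrative stand-ins for the MLX5_CAP_GEN_2() max_reformat_remove_size/offset queries, not mlx5 structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct remove_hdr_caps {
	size_t max_remove_size;   /* stand-in for max_reformat_remove_size */
	size_t max_remove_offset; /* stand-in for max_reformat_remove_offset */
};

static bool remove_hdr_params_ok(const struct remove_hdr_caps *caps,
				 const void *data, size_t data_sz,
				 size_t start_offset)
{
	if (data) /* REMOVE_HDR takes only size/anchor/offset, no data buffer */
		return false;
	return data_sz <= caps->max_remove_size &&
	       start_offset <= caps->max_remove_offset;
}

int main(void)
{
	struct remove_hdr_caps caps = { .max_remove_size = 128,
					.max_remove_offset = 128 };

	/* Strip 8 bytes at offset 4: valid as long as both fit the caps. */
	printf("%d\n", remove_hdr_params_ok(&caps, NULL, 8, 4)); /* 1 */
	return 0;
}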
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 54e1f5438bbe..56307283bf9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -655,6 +655,7 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 7091b1be84ef..0fe159809ba1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -245,7 +245,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
- dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
+ dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
break;
@@ -254,7 +254,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
- dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
+ dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
break;
@@ -265,8 +265,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
return -ENOTSUPP;
- dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
- dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
+ dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
+ dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
if (!vport_cap) {
mlx5dr_err(dmn, "Failed to get esw manager vport\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 7ccfd40586ce..0d6f86eb248b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -103,7 +103,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int num_dest,
bool reformat_req,
u32 *tbl_id,
- u32 *group_id)
+ u32 *group_id,
+ bool ignore_flow_level)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
@@ -137,6 +138,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.dests_size = num_dest;
fte_info.val = val;
fte_info.dest_arr = dest;
+ fte_info.ignore_flow_level = ignore_flow_level;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 6f6191d1d5a6..b5409cc021d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -396,13 +396,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
+ bool allow_empty_match = false;
struct mlx5dr_ste_build *sb;
bool inner, rx;
int idx = 0;
int ret, i;
sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
- rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
+ rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
/* Create a temporary mask to track and clear used mask fields */
if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
@@ -428,6 +429,16 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (ret)
return ret;
+ /* Optimize RX pipe by reducing source port match, since
+ * the FDB RX part is connected only to the wire.
+ */
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
+ rx && mask.misc.source_port) {
+ mask.misc.source_port = 0;
+ mask.misc.source_eswitch_owner_vhca_id = 0;
+ allow_empty_match = true;
+ }
+
/* Outer */
if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
DR_MATCHER_CRITERIA_MISC |
@@ -619,7 +630,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
}
/* Empty matcher, takes all */
- if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
+ if ((!idx && allow_empty_match) ||
+ matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
if (idx == 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 43356fad53de..a1c8ac0ecc23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -81,6 +81,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
}
ste->ste_chain_location = orig_ste->ste_chain_location;
+ ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
@@ -185,6 +186,9 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
return NULL;
+ /* Update collision pointing STE */
+ new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
+
/* In collision entry, all members share the same miss_list_head */
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
@@ -212,7 +216,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
new_ste->next_htbl = cur_ste->next_htbl;
new_ste->ste_chain_location = cur_ste->ste_chain_location;
- if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
+ if (new_ste->next_htbl)
new_ste->next_htbl->pointing_ste = new_ste;
/* We need to copy the refcount since this ste
@@ -220,10 +224,8 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
*/
new_ste->refcount = cur_ste->refcount;
- /* Link old STEs rule_mem list to the new ste */
- mlx5dr_rule_update_rule_member(cur_ste, new_ste);
- INIT_LIST_HEAD(&new_ste->rule_list);
- list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
+ /* Link old STEs rule to the new ste */
+ mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}
static struct mlx5dr_ste *
@@ -404,7 +406,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
- nic_dmn,
+ nic_dmn->type,
new_htbl,
formatted_ste,
&info);
@@ -581,34 +583,66 @@ free_action_members:
return -ENOMEM;
}
-/* While the pointer of ste is no longer valid, like while moving ste to be
- * the first in the miss_list, and to be in the origin table,
- * all rule-members that are attached to this ste should update their ste member
- * to the new pointer
- */
-void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
- struct mlx5dr_ste *new_ste)
+void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste *ste,
+ bool force)
+{
+ /* Update rule member is usually done for the last STE or during rule
+ * creation to recover from mid-creation failure (for this purpose the
+ * force flag is used)
+ */
+ if (ste->next_htbl && !force)
+ return;
+
+ /* Update is required since each rule keeps track of its last STE */
+ ste->rule_rx_tx = nic_rule;
+ nic_rule->last_rule_ste = ste;
+}
+
+static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
+{
+ struct mlx5dr_ste *first_ste;
+
+ first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
+ struct mlx5dr_ste, miss_list_node);
+
+ return first_ste->htbl->pointing_ste;
+}
+
+int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
+ struct mlx5dr_ste *curr_ste,
+ int *num_of_stes)
{
- struct mlx5dr_rule_member *rule_mem;
+ bool first = false;
+
+ *num_of_stes = 0;
+
+ if (!curr_ste)
+ return -ENOENT;
+
+ /* Iterate from last to first */
+ while (!first) {
+ first = curr_ste->ste_chain_location == 1;
+ ste_arr[*num_of_stes] = curr_ste;
+ *num_of_stes += 1;
+ curr_ste = dr_rule_get_pointed_ste(curr_ste);
+ }
- list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
- rule_mem->ste = new_ste;
+ return 0;
}
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
- struct mlx5dr_rule_member *rule_mem;
- struct mlx5dr_rule_member *tmp_mem;
+ struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
+ struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
+ int i;
- if (list_empty(&nic_rule->rule_members_list))
+ if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
return;
- list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
- list_del(&rule_mem->list);
- list_del(&rule_mem->use_ste_list);
- mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
- kvfree(rule_mem);
- }
+
+ while (i--)
+ mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}
static u16 dr_get_bits_per_mask(u16 byte_mask)
@@ -628,43 +662,25 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_domain_rx_tx *nic_dmn)
{
struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
+ int threshold;
if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
return false;
- if (!ctrl->may_grow)
+ if (!mlx5dr_ste_htbl_may_grow(htbl))
return false;
if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
return false;
- if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
- (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
+ threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
+ if (ctrl->num_of_collisions >= threshold &&
+ (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
return true;
return false;
}
-static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
- struct mlx5dr_ste *ste)
-{
- struct mlx5dr_rule_member *rule_mem;
-
- rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
- if (!rule_mem)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&rule_mem->list);
- INIT_LIST_HEAD(&rule_mem->use_ste_list);
-
- rule_mem->ste = ste;
- list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
-
- list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
-
- return 0;
-}
-
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule,
struct list_head *send_ste_list,
@@ -679,15 +695,13 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
- int i, k, ret;
+ int i, k;
/* Two cases:
* 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
* 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
* to support the action.
*/
- if (num_of_builders == new_hw_ste_arr_sz)
- return 0;
for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
@@ -700,6 +714,10 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
mlx5dr_ste_get(action_ste);
+ action_ste->htbl->pointing_ste = last_ste;
+ last_ste->next_htbl = action_ste->htbl;
+ last_ste = action_ste;
+
/* While free ste we go over the miss list, so add this ste to the list */
list_add_tail(&action_ste->miss_list_node,
mlx5dr_ste_get_miss_list(action_ste));
@@ -713,21 +731,19 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
prev_hw_ste,
action_ste->htbl);
- ret = dr_rule_add_member(nic_rule, action_ste);
- if (ret) {
- mlx5dr_dbg(dmn, "Failed adding rule member\n");
- goto free_ste_info;
- }
+
+ mlx5dr_rule_set_last_member(nic_rule, action_ste, true);
+
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
curr_hw_ste,
ste_info_arr[k],
send_ste_list, false);
}
+ last_ste->next_htbl = NULL;
+
return 0;
-free_ste_info:
- kfree(ste_info_arr[k]);
err_exit:
mlx5dr_ste_put(action_ste, matcher, nic_matcher);
return -ENOMEM;
@@ -1015,12 +1031,12 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
}
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
- enum mlx5dr_ste_entry_type ste_type,
+ enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value,
u32 flow_source)
{
- bool rx = ste_type == MLX5DR_STE_TYPE_RX;
+ bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
if (domain != MLX5DR_DOMAIN_TYPE_FDB)
return false;
@@ -1065,9 +1081,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
nic_matcher = nic_rule->nic_matcher;
nic_dmn = nic_matcher->nic_tbl->nic_dmn;
- INIT_LIST_HEAD(&nic_rule->rule_members_list);
-
- if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
+ if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
rule->flow_source))
return 0;
@@ -1121,14 +1135,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
cur_htbl = ste->next_htbl;
- /* Keep all STEs in the rule struct */
- ret = dr_rule_add_member(nic_rule, ste);
- if (ret) {
- mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
- goto free_ste;
- }
-
mlx5dr_ste_get(ste);
+ mlx5dr_rule_set_last_member(nic_rule, ste, true);
}
/* Connect actions */
@@ -1153,8 +1161,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
return 0;
-free_ste:
- mlx5dr_ste_put(ste, matcher, nic_matcher);
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
/* Clean all ste_info's */
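
The dr_rule.c changes above drop the per-STE rule_member allocations and keep only a single last-STE pointer per rule; the full chain is recovered on demand by walking each hash table's pointing_ste backwards, which is where the memory saving described in the commit message comes from. A simplified, self-contained sketch of that reverse walk follows - the structs are stand-ins and the collision/miss-list handling of the real dr_rule_get_pointed_ste() is omitted:

#include <stdio.h>

struct htbl;

struct ste {
	struct htbl *htbl;       /* table this STE lives in */
	int ste_chain_location;  /* 1-based position in the rule */
};

struct htbl {
	struct ste *pointing_ste; /* the STE whose hit address targets this table */
};

static int reverse_rule_members(struct ste **arr, struct ste *curr)
{
	int n = 0;

	/* Iterate from last to first, mirroring the idea of
	 * mlx5dr_rule_get_reverse_rule_members() above.
	 */
	while (curr) {
		int first = curr->ste_chain_location == 1;

		arr[n++] = curr;
		if (first)
			break;
		curr = curr->htbl->pointing_ste;
	}
	return n;
}

int main(void)
{
	struct htbl t1 = { 0 }, t2 = { 0 }, t3 = { 0 };
	struct ste s1 = { &t1, 1 }, s2 = { &t2, 2 }, s3 = { &t3, 3 };
	struct ste *arr[8];
	int i, n;

	t2.pointing_ste = &s1; /* s1's hit address targets s2's table */
	t3.pointing_ste = &s2; /* s2's hit address targets s3's table */

	n = reverse_rule_members(arr, &s3); /* start from the rule's last STE */
	for (i = 0; i < n; i++)
		printf("chain location %d\n", arr[i]->ste_chain_location); /* 3 2 1 */
	return 0;
}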
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 8a1623a4d8bc..bfb14b4b1906 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
do {
ne = dr_poll_cq(send_ring->cq, 1);
- if (ne < 0)
+ if (unlikely(ne < 0)) {
+ mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
+ send_ring->qp->qpn);
+ send_ring->err_state = true;
return ne;
- else if (ne == 1)
+ } else if (ne == 1) {
send_ring->pending_wqe -= send_ring->signal_th;
+ }
} while (is_drain && send_ring->pending_wqe);
return 0;
@@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
u32 buff_offset;
int ret;
+ if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+ send_ring->err_state)) {
+ mlx5_core_dbg_once(dmn->mdev,
+ "Skipping post send: QP err state: %d, device state: %d\n",
+ send_ring->err_state, dmn->mdev->state);
+ return 0;
+ }
+
spin_lock(&send_ring->lock);
ret = dr_handle_pending_wc(dmn, send_ring);
@@ -620,6 +632,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
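
The ack_timeout value set above is annotated as roughly 1 ms; assuming the usual InfiniBand local-ACK-timeout encoding of 4.096 us * 2^value (an assumption - the patch itself does not spell the encoding out), the arithmetic checks out:

#include <stdio.h>

int main(void)
{
	/* Assumed encoding: local ACK timeout = 4.096 us * 2^ack_timeout */
	double us = 4.096 * (1 << 0x8);

	printf("ack_timeout 0x8 -> %.3f us\n", us); /* 1048.576 us, ~1 ms */
	return 0;
}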
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 9b1529137cba..1cdfe4fccc7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -172,9 +172,6 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
dst->next_htbl->pointing_ste = dst;
dst->refcount = src->refcount;
-
- INIT_LIST_HEAD(&dst->rule_list);
- list_splice_tail_init(&src->rule_list, &dst->rule_list);
}
/* Free ste which is the head and the only one in miss_list */
@@ -233,12 +230,12 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
/* Remove from the miss_list the next_ste before copy */
list_del_init(&next_ste->miss_list_node);
- /* All rule-members that use next_ste should know about that */
- mlx5dr_rule_update_rule_member(next_ste, ste);
-
/* Move data from next into ste */
dr_ste_replace(ste, next_ste);
+ /* Update the rule on STE change */
+ mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
+
/* Copy all 64 hw_ste bytes */
memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
sb_idx = ste->ste_chain_location - 1;
@@ -382,14 +379,15 @@ void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
- struct mlx5dr_domain_rx_tx *nic_dmn,
+ enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
struct mlx5dr_htbl_connect_info *connect_info)
{
+ bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
struct mlx5dr_ste ste = {};
- ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
@@ -408,7 +406,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
- nic_dmn,
+ nic_dmn->type,
htbl,
formatted_ste,
connect_info);
@@ -466,21 +464,6 @@ free_table:
return -ENOENT;
}
-static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
-{
- struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
- int num_of_entries;
-
- htbl->ctrl.may_grow = true;
-
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
- htbl->ctrl.may_grow = false;
-
- /* Threshold is 50%, one is added to table of size 1 */
- num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
- ctrl->increase_threshold = (num_of_entries + 1) / 2;
-}
-
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
u16 lu_type, u16 byte_mask)
@@ -513,11 +496,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
INIT_LIST_HEAD(&htbl->miss_list[i]);
- INIT_LIST_HEAD(&ste->rule_list);
}
htbl->chunk_size = chunk_size;
- dr_ste_set_ctrl(htbl);
return htbl;
out_free_htbl:
@@ -649,6 +630,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
u8 *ste_arr)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+ bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
@@ -663,7 +645,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
for (i = 0; i < nic_matcher->num_of_builders; i++) {
ste_ctx->ste_init(ste_arr,
sb->lu_type,
- nic_dmn->ste_type,
+ is_rx,
dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
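
dr_ste_set_ctrl() and the cached may_grow/increase_threshold fields are removed above in favor of mlx5dr_ste_htbl_may_grow() and mlx5dr_ste_htbl_increase_threshold(). Based on the deleted code, those helpers presumably compute the same values on demand; the standalone sketch below keeps the 50% threshold and the grow condition from the removed function, while the constant and entry-count helper are illustrative:

#include <stdbool.h>

#define CHUNK_SIZE_MAX 20 /* illustrative stand-in for DR_CHUNK_SIZE_MAX */

/* Chunk size is a log2 of the number of entries, as in the ICM pool. */
static int chunk_size_to_entries(int chunk_size)
{
	return 1 << chunk_size;
}

/* A table may grow unless it is already one below the maximum chunk size
 * or matches on nothing (byte_mask == 0).
 */
static bool htbl_may_grow(int chunk_size, unsigned short byte_mask)
{
	if (chunk_size == CHUNK_SIZE_MAX - 1 || !byte_mask)
		return false;
	return true;
}

/* Threshold is 50% of the entries; the +1 keeps a table of size 1 usable. */
static int htbl_increase_threshold(int chunk_size)
{
	return (chunk_size_to_entries(chunk_size) + 1) / 2;
}

int main(void)
{
	/* A 16-entry table with a non-empty mask may grow once 8 entries collide. */
	return htbl_may_grow(4, 0xffff) && htbl_increase_threshold(4) == 8 ? 0 : 1;
}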
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 12a8bbbf944b..2d52d065dc8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -146,7 +146,7 @@ struct mlx5dr_ste_ctx {
/* Getters and Setters */
void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
- u8 entry_type, u16 gvmi);
+ bool is_rx, u16 gvmi);
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
u16 (*get_next_lu_type)(u8 *hw_ste_p);
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index e4dd4eed5aee..9c704bce3c12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -8,6 +8,12 @@
#define SVLAN_ETHERTYPE 0x88a8
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+enum dr_ste_v0_entry_type {
+ DR_STE_TYPE_TX = 1,
+ DR_STE_TYPE_RX = 2,
+ DR_STE_TYPE_MODIFY_PKT = 6,
+};
+
enum dr_ste_v0_action_tunl {
DR_STE_TUNL_ACTION_NONE = 0,
DR_STE_TUNL_ACTION_ENABLE = 1,
@@ -292,8 +298,8 @@ static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
}
-static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
- u8 entry_type, u16 gvmi)
+static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
+ enum dr_ste_v0_entry_type entry_type, u16 gvmi)
{
dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
@@ -307,6 +313,15 @@ static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
+static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
+ bool is_rx, u16 gvmi)
+{
+ enum dr_ste_v0_entry_type entry_type;
+
+ entry_type = is_rx ? DR_STE_TYPE_RX : DR_STE_TYPE_TX;
+ dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi);
+}
+
static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
@@ -380,13 +395,13 @@ static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
static void dr_ste_v0_arr_init_next(u8 **last_ste,
u32 *added_stes,
- enum mlx5dr_ste_entry_type entry_type,
+ enum dr_ste_v0_entry_type entry_type,
u16 gvmi)
{
(*added_stes)++;
*last_ste += DR_STE_SIZE;
- dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
- entry_type, gvmi);
+ dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
+ entry_type, gvmi);
}
static void
@@ -404,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
* modify headers for outer headers only
*/
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
attr->modify_index);
@@ -417,7 +432,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
- MLX5DR_STE_TYPE_TX,
+ DR_STE_TYPE_TX,
attr->gvmi);
dr_ste_v0_set_tx_push_vlan(last_ste,
@@ -435,7 +450,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
action_type_set[DR_ACTION_TYP_PUSH_VLAN])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
- MLX5DR_STE_TYPE_TX,
+ DR_STE_TYPE_TX,
attr->gvmi);
dr_ste_v0_set_tx_encap(last_ste,
@@ -469,7 +484,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->decap_actions,
@@ -488,7 +503,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
- MLX5DR_STE_TYPE_RX,
+ DR_STE_TYPE_RX,
attr->gvmi);
dr_ste_v0_set_rx_pop_vlan(last_ste);
@@ -496,13 +511,13 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
- MLX5DR_STE_TYPE_MODIFY_PKT,
+ DR_STE_TYPE_MODIFY_PKT,
attr->gvmi);
else
- dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
@@ -510,10 +525,10 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_TAG]) {
- if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
- MLX5DR_STE_TYPE_RX,
+ DR_STE_TYPE_RX,
attr->gvmi);
dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
@@ -1157,6 +1172,7 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
@@ -1168,6 +1184,11 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+ if (sb->inner)
+ DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
+ else
+ DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);
+
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
spec->tcp_flags = 0;
@@ -1772,7 +1793,7 @@ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
@@ -1802,7 +1823,7 @@ static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
@@ -1829,7 +1850,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 4aaca8eb7597..b2481c99da79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -322,7 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
}
static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
- u8 entry_type, u16 gvmi)
+ bool is_rx, u16 gvmi)
{
dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
@@ -402,8 +402,23 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in words (2-byte units) */
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
+ u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -416,7 +431,7 @@ static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -503,13 +518,28 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+ bool allow_modify_hdr = true;
bool allow_encap = true;
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
+ attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+ last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
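+ /* Any later modify-header action must go into a new STE */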
+ allow_modify_hdr = false;
+ }
+
if (action_type_set[DR_ACTION_TYP_CTR])
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1,
@@ -534,7 +564,8 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]);
+ dr_ste_v1_set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -579,6 +610,18 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
+ } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
@@ -635,7 +678,7 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_ctr = false;
}
- dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count);
+ dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -656,6 +699,26 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
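+ /* Push the requested VLAN headers; each push is a double action and may spill over into a new STE */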
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
+ !allow_modify_hdr) {
+ dr_ste_v1_arr_init_next_match(&last_ste,
+ added_stes,
+ attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+ last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+ }
+
if (action_type_set[DR_ACTION_TYP_CTR]) {
/* The counter action is set after decap and before insert_hdr
* to exclude the decapsulated / encapsulated header respectively.
@@ -714,6 +777,20 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
+ } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
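+ /* A freshly started STE can host modify-header and counter actions again */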
+ allow_modify_hdr = true;
+ allow_ctr = true;
+ }
+ dr_ste_v1_set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
@@ -1844,7 +1921,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
@@ -1868,7 +1945,7 @@ static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
@@ -1895,7 +1972,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
@@ -1960,7 +2037,9 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask,
/* Actions */
- .actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index f5e93fa87aff..b20e8aabb861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -83,15 +83,14 @@ enum {
DR_STE_SIZE_CTRL = 32,
DR_STE_SIZE_TAG = 16,
DR_STE_SIZE_MASK = 16,
-};
-
-enum {
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
enum mlx5dr_ste_ctx_action_cap {
DR_STE_CTX_ACTION_CAP_NONE = 0,
- DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0,
+ DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0,
+ DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1,
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
};
enum {
@@ -124,6 +123,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_POP_VLAN,
DR_ACTION_TYP_PUSH_VLAN,
DR_ACTION_TYP_INSERT_HDR,
+ DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_MAX,
};
@@ -140,6 +140,7 @@ struct mlx5dr_icm_buddy_mem;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
+struct mlx5dr_rule_rx_tx;
struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
@@ -151,14 +152,14 @@ struct mlx5dr_ste {
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
- /* each rule member that uses this ste attached here */
- struct list_head rule_list;
-
/* this ste is member of htbl */
struct mlx5dr_ste_htbl *htbl;
struct mlx5dr_ste_htbl *next_htbl;
+ /* The rule this STE belongs to */
+ struct mlx5dr_rule_rx_tx *rule_rx_tx;
+
/* this ste is part of a rule, located in ste's chain */
u8 ste_chain_location;
};
@@ -171,8 +172,6 @@ struct mlx5dr_ste_htbl_ctrl {
/* total number of collisions entries attached to this table */
unsigned int num_of_collisions;
- unsigned int increase_threshold;
- u8 may_grow:1;
};
struct mlx5dr_ste_htbl {
@@ -804,10 +803,15 @@ struct mlx5dr_cmd_caps {
u8 isolate_vl_tc:1;
};
+enum mlx5dr_domain_nic_type {
+ DR_DOMAIN_NIC_TYPE_RX,
+ DR_DOMAIN_NIC_TYPE_TX,
+};
+
struct mlx5dr_domain_rx_tx {
u64 drop_icm_addr;
u64 default_icm_addr;
- enum mlx5dr_ste_entry_type ste_type;
+ enum mlx5dr_domain_nic_type type;
struct mutex mutex; /* protect rx/tx domain */
};
@@ -885,14 +889,6 @@ struct mlx5dr_matcher {
struct mlx5dv_flow_matcher *dv_matcher;
};
-struct mlx5dr_rule_member {
- struct mlx5dr_ste *ste;
- /* attached to mlx5dr_rule via this */
- struct list_head list;
- /* attached to mlx5dr_ste via this */
- struct list_head use_ste_list;
-};
-
struct mlx5dr_ste_action_modify_field {
u16 hw_field;
u8 start;
@@ -993,8 +989,8 @@ struct mlx5dr_htbl_connect_info {
};
struct mlx5dr_rule_rx_tx {
- struct list_head rule_members_list;
struct mlx5dr_matcher_rx_tx *nic_matcher;
+ struct mlx5dr_ste *last_rule_ste;
};
struct mlx5dr_rule {
@@ -1005,8 +1001,12 @@ struct mlx5dr_rule {
u32 flow_source;
};
-void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
- struct mlx5dr_ste *ste);
+void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
+ struct mlx5dr_ste *ste,
+ bool force);
+int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
+ struct mlx5dr_ste *curr_ste,
+ int *num_of_stes);
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
@@ -1083,6 +1083,25 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
return entry_size * num_of_entries;
}
+static inline int
+mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
+{
+ int num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+
+ /* Threshold is 50% of the table size, rounded up so that a table of size 1 gets a threshold of 1 */
+ return (num_of_entries + 1) / 2;
+}
+
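+/* A hash table can grow only if it has a byte mask and has not reached the maximum chunk size */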
+static inline bool
+mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
+{
+ if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+ return false;
+
+ return true;
+}
+
static inline struct mlx5dr_cmd_vport_cap *
mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
{
@@ -1216,7 +1235,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
- struct mlx5dr_domain_rx_tx *nic_dmn,
+ enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
struct mlx5dr_htbl_connect_info *connect_info);
@@ -1282,6 +1301,7 @@ struct mlx5dr_send_ring {
u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
spinlock_t lock; /* Protect the data path of the send ring */
+ bool err_state; /* the send ring is not usable while in error state */
};
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
@@ -1333,6 +1353,7 @@ struct mlx5dr_cmd_fte_info {
u32 *val;
struct mlx5_flow_act action;
struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
+ bool ignore_flow_level;
};
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -1362,7 +1383,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int num_dest,
bool reformat_req,
u32 *tbl_id,
- u32 *group_id);
+ u32 *group_id,
+ bool ignore_flow_level);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index d5926dd7e972..7e58f4e594b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -133,6 +133,9 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
return set_miss_action(ns, ft, next_ft);
}
@@ -487,9 +490,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
+ bool ignore_flow_level =
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
- num_term_actions);
+ num_term_actions,
+ ignore_flow_level);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
@@ -557,6 +564,9 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
case MLX5_REFORMAT_TYPE_INSERT_HDR:
dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR;
+ break;
default:
mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
params->type);
@@ -615,15 +625,6 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
-static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *group,
- int modify_mask,
- struct fs_fte *fte)
-{
- return -EOPNOTSUPP;
-}
-
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
@@ -648,6 +649,36 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
return 0;
}
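+/* Update an FTE by creating the new rule first and only then deleting the
+ * old one; FW-owned tables are updated through the FW command interface.
+ */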
+static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ struct fs_fte fte_tmp = {};
+ int ret;
+
+ if (mlx5_dr_is_fw_table(ft->flags))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
+
+ /* Back up the current DR rule details */
+ fte_tmp.fs_dr_rule = fte->fs_dr_rule;
+ memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));
+
+ /* First add the new updated rule, then delete the old rule */
+ ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);
+ if (ret)
+ goto restore_fte;
+
+ ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp);
+ WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n");
+ return ret;
+
+restore_fte:
+ fte->fs_dr_rule = fte_tmp.fs_dr_rule;
+ return ret;
+}
+
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 9643ee647f57..d2a937f69784 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -8,12 +8,6 @@ enum {
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
-enum mlx5dr_ste_entry_type {
- MLX5DR_STE_TYPE_TX = 1,
- MLX5DR_STE_TYPE_RX = 2,
- MLX5DR_STE_TYPE_MODIFY_PKT = 6,
-};
-
struct mlx5_ifc_ste_general_bits {
u8 entry_type[0x4];
u8 reserved_at_4[0x4];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index bbfe101d4e57..c5a8b1601999 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -27,6 +27,7 @@ enum mlx5dr_action_reformat_type {
DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
DR_ACTION_REFORMAT_TYP_INSERT_HDR,
+ DR_ACTION_REFORMAT_TYP_REMOVE_HDR,
};
struct mlx5dr_match_parameters {
@@ -94,7 +95,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
- u32 num_of_dests);
+ u32 num_of_dests,
+ bool ignore_flow_level);
struct mlx5dr_action *mlx5dr_action_create_drop(void);