author     David S. Miller  2021-06-16 12:42:53 -0700
committer  David S. Miller  2021-06-16 12:42:53 -0700
commit     1d24b6b4b092a510c1ade459ea814902954f404b (patch)
tree       d477e1bdfcea7242fe11e0cf3c1d4d77538bd531
parent     63e96bc4e32811a2bc1e9172691e263e074a32ae (diff)
parent     30c4a9f4fe3f47ffa5783329fa5553f8baef3a76 (diff)
Merge branch 'nfp-ct-part-two'

Simon Horman says:

====================
Next set of conntrack patches for the nfp driver

Louis Peens says:

This follows on from the previous series of a similar nature. Looking
at the diagram as explained in the previous series, this implements
changes up to the point where the merged nft entries are saved. There
are still bits of stubbed-out code where offloading of the flows will
be implemented.

+-------------+                      +----------+
| pre_ct flow +--------+             | nft flow |
+-------------+        v             +------+---+
                 +----------+               |
                 | tc_merge +--------+      |
                 +----------+        v      v
+--------------+       ^        +-------------+
| post_ct flow +-------+     +---+nft_tc merge |
+--------------+             |   +-------------+
                             |
                             |
                             v
                      Offload to nfp
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c  726
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.h   76
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c    28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c      9
4 files changed, 819 insertions(+), 20 deletions(-)
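
A note for orientation before the diff: each merged nft_tc entry is keyed
on three cookies, the two tc cookies of its tc_merge parent plus the cookie
of the nft flow. A simplified sketch of that key follows (illustrative names
only, not driver code; the real layout is the cookie[3] member of struct
nfp_fl_nft_tc_merge in conntrack.h, matched by key_len =
sizeof(unsigned long) * 3 in nfp_nft_ct_merge_params):

    /* Illustrative sketch of the nft_tc merge key assembled in
     * nfp_ct_do_nft_merge() below; the struct and field names here
     * are hypothetical.
     */
    struct sketch_nft_tc_merge_key {
            unsigned long pre_ct_cookie;    /* tc_m_entry->cookie[0] */
            unsigned long post_ct_cookie;   /* tc_m_entry->cookie[1] */
            unsigned long nft_cookie;       /* nft_entry->cookie */
    };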
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index b1709affb52d..9ea77bb3b69c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -11,6 +11,17 @@ const struct rhashtable_params nfp_tc_ct_merge_params = {
.automatic_shrinking = true,
};
+const struct rhashtable_params nfp_nft_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_nft_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 3,
+ .key_offset = offsetof(struct nfp_fl_nft_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id);
+
/**
* get_hashentry() - Wrapper around hashtable lookup.
* @ht: hashtable where entry could be found
@@ -67,7 +78,430 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
struct nfp_fl_ct_flow_entry *entry2)
{
+ unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
+ entry2->rule->match.dissector->used_keys;
+ bool out;
+
+ /* Check the overlapping fields one by one; the unmasked parts
+ * must not conflict with each other.
+ */
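+ /* A byte conflicts only where both masks overlap and the masked
+ * keys differ there, i.e. (k1 & m1 & m2) != (k2 & m1 & m2); see
+ * COMPARE_UNMASKED_FIELDS in conntrack.h.
+ */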
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_control(entry1->rule, &match1);
+ flow_rule_match_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match1, match2;
+
+ flow_rule_match_basic(entry1->rule, &match1);
+ flow_rule_match_basic(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match1, match2;
+
+ flow_rule_match_ports(entry1->rule, &match1);
+ flow_rule_match_ports(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match1, match2;
+
+ flow_rule_match_eth_addrs(entry1->rule, &match1);
+ flow_rule_match_eth_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match1, match2;
+
+ flow_rule_match_vlan(entry1->rule, &match1);
+ flow_rule_match_vlan(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match1, match2;
+
+ flow_rule_match_mpls(entry1->rule, &match1);
+ flow_rule_match_mpls(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match1, match2;
+
+ flow_rule_match_tcp(entry1->rule, &match1);
+ flow_rule_match_tcp(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_ip(entry1->rule, &match1);
+ flow_rule_match_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match1, match2;
+
+ flow_rule_match_enc_keyid(entry1->rule, &match1);
+ flow_rule_match_enc_keyid(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_enc_control(entry1->rule, &match1);
+ flow_rule_match_enc_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_enc_ip(entry1->rule, &match1);
+ flow_rule_match_enc_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match1, match2;
+
+ flow_rule_match_enc_opts(entry1->rule, &match1);
+ flow_rule_match_enc_opts(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
return 0;
+
+check_failed:
+ return -EINVAL;
+}
+
+static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
+{
+ enum flow_action_mangle_base htype = a_in->mangle.htype;
+ u32 offset = a_in->mangle.offset;
+
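+ /* Pedit mangle offsets are 32-bit word aligned, so single-byte
+ * fields such as tos and ttl are checked against the offset of
+ * their containing word (hence the round_down(..., 4) calls below).
+ */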
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
+ return -EOPNOTSUPP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == offsetof(struct iphdr, ttl) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (offset == offsetof(struct iphdr, saddr) &&
+ match.mask->src)
+ return -EOPNOTSUPP;
+ if (offset == offsetof(struct iphdr, daddr) &&
+ match.mask->dst)
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ /* for ipv6, tos and flow_lbl are in the same word */
+ if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offsetof(struct ipv6hdr, daddr) &&
+ memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
+ return -EOPNOTSUPP;
+ if (offset >= offsetof(struct ipv6hdr, daddr) &&
+ offset < sizeof(struct ipv6hdr) &&
+ memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* currently can only modify ports */
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
+ struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_action_entry *act;
+ int err, i;
+
+ /* Check for pre_ct->action conflicts */
+ flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
+ if (err)
+ return err;
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+
+ /* Check for nft->action conflicts */
+ flow_action_for_each(i, act, &nft_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
+ struct flow_action_entry *ct_met;
+ struct flow_match_ct ct;
+ int i;
+
+ ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
+ if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
+ u32 *act_lbl;
+
+ act_lbl = ct_met->ct_metadata.labels;
+ flow_rule_match_ct(post_ct_entry->rule, &ct);
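+ /* ct_labels are 128 bits wide, carried as four u32 words; compare
+ * each word under the post_ct mask.
+ */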
+ for (i = 0; i < 4; i++) {
+ if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
+ (act_lbl[i] & ct.mask->ct_labels[i]))
+ return -EINVAL;
+ }
+
+ if ((ct.key->ct_mark & ct.mask->ct_mark) ^
+ (ct_met->ct_metadata.mark & ct.mask->ct_mark))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
+{
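+ /* Stub: offloading the merged flow to the nfp (and the matching
+ * delete in nfp_fl_ct_del_offload() below) will be implemented in
+ * a follow-up series.
+ */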
+ return 0;
+}
+
+static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
+ struct net_device *netdev)
+{
+ return 0;
+}
+
+static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_tc_merge *tc_m_entry)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_nft_tc_merge *nft_m_entry;
+ unsigned long new_cookie[3];
+ int err;
+
+ pre_ct_entry = tc_m_entry->pre_ct_parent;
+ post_ct_entry = tc_m_entry->post_ct_parent;
+
+ err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Check that the two tc flows are also compatible with
+ * the nft entry. No need to check the pre_ct and post_ct
+ * entries against each other, as that was already done
+ * during the tc merge. The nft entry does not have a netdev
+ * or chain populated, so those fields are skipped here.
+ */
+ err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_merge_check(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_check_meta(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Combine the tc_merge and nft cookies to key this merged entry. */
+ new_cookie[0] = tc_m_entry->cookie[0];
+ new_cookie[1] = tc_m_entry->cookie[1];
+ new_cookie[2] = nft_entry->cookie;
+ nft_m_entry = get_hashentry(&zt->nft_merge_tb,
+ &new_cookie,
+ nfp_nft_ct_merge_params,
+ sizeof(*nft_m_entry));
+
+ if (IS_ERR(nft_m_entry))
+ return PTR_ERR(nft_m_entry);
+
+ /* nft_m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ nft_m_entry->zt = zt;
+ nft_m_entry->tc_m_parent = tc_m_entry;
+ nft_m_entry->nft_parent = nft_entry;
+ nft_m_entry->tc_flower_cookie = 0;
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
+ * it only combined them if the netdevs were the same, so can use any of them.
+ */
+ nft_m_entry->netdev = pre_ct_entry->netdev;
+
+ /* Add this entry to the tc_merge and nft_flow lists */
+ list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
+ list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
+
+ /* Generate offload structure and send to nfp */
+ err = nfp_fl_ct_add_offload(nft_m_entry);
+ if (err)
+ goto err_nft_ct_offload;
+
+ err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
+ nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_ct_merge_insert;
+
+ zt->nft_merge_count++;
+
+ return err;
+
+err_nft_ct_merge_insert:
+ nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
+ nft_m_entry->netdev);
+err_nft_ct_offload:
+ list_del(&nft_m_entry->tc_merge_list);
+ list_del(&nft_m_entry->nft_flow_list);
+ kfree(nft_m_entry);
+ return err;
}
static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
@@ -75,6 +509,7 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
struct nfp_fl_ct_flow_entry *ct_entry2)
{
struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
struct nfp_fl_ct_tc_merge *m_entry;
unsigned long new_cookie[2];
int err;
@@ -126,6 +561,12 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
goto err_ct_tc_merge_insert;
zt->tc_merge_count++;
+ /* Merge with existing nft flows */
+ list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
+ list_node) {
+ nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
+ }
+
return 0;
err_ct_tc_merge_insert:
@@ -165,11 +606,16 @@ nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
/* init the various hash tables and lists */
INIT_LIST_HEAD(&zt->pre_ct_list);
INIT_LIST_HEAD(&zt->post_ct_list);
+ INIT_LIST_HEAD(&zt->nft_flows_list);
err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
if (err)
goto err_tc_merge_tb_init;
+ err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_merge_tb_init;
+
if (wildcarded) {
priv->ct_zone_wc = zt;
} else {
@@ -183,6 +629,8 @@ nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
return zt;
err_zone_insert:
+ rhashtable_destroy(&zt->nft_merge_tb);
+err_nft_merge_tb_init:
rhashtable_destroy(&zt->tc_merge_tb);
err_tc_merge_tb_init:
kfree(zt);
@@ -193,8 +641,9 @@ static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
struct net_device *netdev,
struct flow_cls_offload *flow,
- struct netlink_ext_ack *extack)
+ bool is_nft, struct netlink_ext_ack *extack)
{
+ struct nf_flow_match *nft_match = NULL;
struct nfp_fl_ct_flow_entry *entry;
struct nfp_fl_ct_map_entry *map;
struct flow_action_entry *act;
@@ -204,17 +653,39 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
if (!entry)
return ERR_PTR(-ENOMEM);
- entry->zt = zt;
- entry->netdev = netdev;
- entry->cookie = flow->cookie;
entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
if (!entry->rule) {
err = -ENOMEM;
- goto err_pre_ct_act;
+ goto err_pre_ct_rule;
}
- entry->rule->match.dissector = flow->rule->match.dissector;
- entry->rule->match.mask = flow->rule->match.mask;
- entry->rule->match.key = flow->rule->match.key;
+
+ /* nft flows get destroyed after the callback returns, so we need
+ * to do a full copy instead of just a reference.
+ */
+ if (is_nft) {
+ nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
+ if (!nft_match) {
+ err = -ENOMEM;
+ goto err_pre_ct_act;
+ }
+ memcpy(&nft_match->dissector, flow->rule->match.dissector,
+ sizeof(nft_match->dissector));
+ memcpy(&nft_match->mask, flow->rule->match.mask,
+ sizeof(nft_match->mask));
+ memcpy(&nft_match->key, flow->rule->match.key,
+ sizeof(nft_match->key));
+ entry->rule->match.dissector = &nft_match->dissector;
+ entry->rule->match.mask = &nft_match->mask;
+ entry->rule->match.key = &nft_match->key;
+ } else {
+ entry->rule->match.dissector = flow->rule->match.dissector;
+ entry->rule->match.mask = flow->rule->match.mask;
+ entry->rule->match.key = flow->rule->match.key;
+ }
+
+ entry->zt = zt;
+ entry->netdev = netdev;
+ entry->cookie = flow->cookie;
entry->chain_index = flow->common.chain_index;
entry->tun_offset = NFP_FL_CT_NO_TUN;
@@ -275,14 +746,65 @@ err_ct_flow_insert:
if (entry->tun_offset != NFP_FL_CT_NO_TUN)
kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
err_pre_ct_tun_cp:
- kfree(entry->rule);
+ kfree(nft_match);
err_pre_ct_act:
+ kfree(entry->rule);
+err_pre_ct_rule:
kfree(entry);
return ERR_PTR(err);
}
+static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_entry->zt;
+
+ /* Flow is in HW, need to delete */
+ if (m_entry->tc_flower_cookie) {
+ err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
+ m_entry->netdev);
+ if (err)
+ return;
+ }
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
+ &m_entry->hash_node,
+ nfp_nft_ct_merge_params));
+ zt->nft_merge_count--;
+ list_del(&m_entry->tc_merge_list);
+ list_del(&m_entry->nft_flow_list);
+
+ kfree(m_entry);
+}
+
static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
{
+ struct nfp_fl_nft_tc_merge *m_entry, *tmp;
+
+ /* These merged entries are part of two lists, one a list of nft flow
+ * entries and the other a list of tc_merge structures. Iterate
+ * through the relevant list and clean up the entries.
+ */
+
+ if (is_nft_flow) {
+ /* Need to iterate through the list of nft_flow entries */
+ struct nfp_fl_ct_flow_entry *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ nft_flow_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ } else {
+ /* Need to iterate through the list of tc_merged_flow entries */
+ struct nfp_fl_ct_tc_merge *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ tc_merge_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ }
}
static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
@@ -338,17 +860,26 @@ void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
if (entry->tun_offset != NFP_FL_CT_NO_TUN)
kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+
+ if (entry->type == CT_TYPE_NFT) {
+ struct nf_flow_match *nft_match;
+
+ nft_match = container_of(entry->rule->match.dissector,
+ struct nf_flow_match, dissector);
+ kfree(nft_match);
+ }
+
kfree(entry->rule);
kfree(entry);
}
-static struct flow_action_entry *get_flow_act(struct flow_cls_offload *flow,
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
enum flow_action_id act_id)
{
struct flow_action_entry *act = NULL;
int i;
- flow_action_for_each(i, act, &flow->rule->action) {
+ flow_action_for_each(i, act, &rule->action) {
if (act->id == act_id)
return act;
}
@@ -376,6 +907,26 @@ nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
}
}
+static void
+nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_tc_merge *tc_merge_entry;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
+ rhashtable_walk_start(&iter);
+ while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(tc_merge_entry))
+ continue;
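+ /* Pause the walk across the merge: nfp_ct_do_nft_merge() can
+ * sleep (GFP_KERNEL allocations, hashtable inserts), which is
+ * not allowed inside an rhashtable walk section.
+ */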
+ rhashtable_walk_stop(&iter);
+ nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
struct net_device *netdev,
struct flow_cls_offload *flow,
@@ -384,15 +935,16 @@ int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
struct flow_action_entry *ct_act, *ct_goto;
struct nfp_fl_ct_flow_entry *ct_entry;
struct nfp_fl_ct_zone_entry *zt;
+ int err;
- ct_act = get_flow_act(flow, FLOW_ACTION_CT);
+ ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
if (!ct_act) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: Conntrack action empty in conntrack offload");
return -EOPNOTSUPP;
}
- ct_goto = get_flow_act(flow, FLOW_ACTION_GOTO);
+ ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
if (!ct_goto) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: Conntrack requires ACTION_GOTO");
@@ -406,11 +958,18 @@ int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
return PTR_ERR(zt);
}
- if (!zt->nft)
+ if (!zt->nft) {
zt->nft = ct_act->ct.flow_table;
+ err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not register nft_callback");
+ return err;
+ }
+ }
/* Add entry to pre_ct_list */
- ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, extack);
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
if (IS_ERR(ct_entry))
return PTR_ERR(ct_entry);
ct_entry->type = CT_TYPE_PRE_CT;
@@ -424,8 +983,7 @@ int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
if (priv->ct_zone_wc)
nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack action not supported");
- return -EOPNOTSUPP;
+ return 0;
}
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
@@ -456,7 +1014,7 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
}
/* Add entry to post_ct_list */
- ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, extack);
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
if (IS_ERR(ct_entry))
return PTR_ERR(ct_entry);
@@ -487,6 +1045,134 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
nfp_ct_merge_tc_entries(ct_entry, zt, zt);
}
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack match not supported");
- return -EOPNOTSUPP;
+ return 0;
+}
+
+static int
+nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
+{
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct netlink_ext_ack *extack = NULL;
+
+ ASSERT_RTNL();
+
+ extack = flow->common.extack;
+ switch (flow->command) {
+ case FLOW_CLS_REPLACE:
+ /* Netfilter can request offload multiple times for the same
+ * flow - protect against adding duplicates.
+ */
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (!ct_map_ent) {
+ ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_NFT;
+ list_add(&ct_entry->list_node, &zt->nft_flows_list);
+ zt->nft_flows_count++;
+ nfp_ct_merge_nft_with_tc(ct_entry, zt);
+ }
+ return 0;
+ case FLOW_CLS_DESTROY:
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ return nfp_fl_ct_del_flow(ct_map_ent);
+ case FLOW_CLS_STATS:
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *flow = type_data;
+ struct nfp_fl_ct_zone_entry *zt = cb_priv;
+ int err = -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ rtnl_lock();
+ err = nfp_fl_ct_offload_nft_flow(zt, flow);
+ rtnl_unlock();
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static void
+nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+
+ list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
+ list_node) {
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
+ &nft_entry->cookie,
+ nfp_ct_map_params);
+ nfp_fl_ct_del_flow(ct_map_ent);
+ }
+}
+
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct rhashtable *m_table;
+
+ if (!ct_map_ent)
+ return -ENOENT;
+
+ zt = ct_map_ent->ct_entry->zt;
+ ct_entry = ct_map_ent->ct_entry;
+ m_table = &zt->priv->ct_map_table;
+
+ switch (ct_entry->type) {
+ case CT_TYPE_PRE_CT:
+ zt->pre_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+
+ /* If this is the last pre_ct_rule it means that it is
+ * very likely that the nft table will be cleaned up next,
+ * as this happens on the removal of the last act_ct flow.
+ * However we cannot deregister the callback on the removal
+ * of the last nft flow as this runs into a deadlock situation.
+ * So deregister the callback on removal of the last pre_ct flow
+ * and remove any remaining nft flow entries. We also cannot
+ * save this state and delete the callback later since the
+ * nft table would already have been freed at that time.
+ */
+ if (!zt->pre_ct_count) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ nfp_fl_ct_clean_nft_entries(zt);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ zt->post_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+ break;
+ case CT_TYPE_NFT:
+ zt->nft_flows_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
+ kfree(ct_map_ent);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
}
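
A note on get_hashentry(), used by nfp_ct_do_nft_merge() above: only the
start of its kernel-doc is visible in this hunk. Judging from its call
sites, the contract is lookup-or-allocate; a hedged sketch of that assumed
behaviour (not the driver's implementation):

    static void *get_hashentry_sketch(struct rhashtable *ht, void *key,
                                      const struct rhashtable_params params,
                                      size_t size)
    {
            void *result;

            result = rhashtable_lookup_fast(ht, key, params);
            if (result)
                    return result;

            /* Miss: hand back a zeroed entry. Callers such as
             * nfp_ct_do_nft_merge() detect a fresh allocation by its
             * still-zero cookie field.
             */
            result = kzalloc(size, GFP_KERNEL);
            if (!result)
                    return ERR_PTR(-ENOMEM);

            return result;
    }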
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index 3d7d260c6e5c..170b6cdb8cd0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -4,13 +4,35 @@
#ifndef __NFP_FLOWER_CONNTRACK_H__
#define __NFP_FLOWER_CONNTRACK_H__ 1
+#include <net/netfilter/nf_flow_table.h>
#include "main.h"
#define NFP_FL_CT_NO_TUN 0xff
+#define COMPARE_UNMASKED_FIELDS(__match1, __match2, __out) \
+ do { \
+ typeof(__match1) _match1 = (__match1); \
+ typeof(__match2) _match2 = (__match2); \
+ bool *_out = (__out); \
+ int i, size = sizeof(*(_match1).key); \
+ char *k1, *m1, *k2, *m2; \
+ *_out = false; \
+ k1 = (char *)_match1.key; \
+ m1 = (char *)_match1.mask; \
+ k2 = (char *)_match2.key; \
+ m2 = (char *)_match2.mask; \
+ for (i = 0; i < size; i++) \
+ if ((k1[i] & m1[i] & m2[i]) ^ \
+ (k2[i] & m1[i] & m2[i])) { \
+ *_out = true; \
+ break; \
+ } \
+ } while (0)
+
extern const struct rhashtable_params nfp_zone_table_params;
extern const struct rhashtable_params nfp_ct_map_params;
extern const struct rhashtable_params nfp_tc_ct_merge_params;
+extern const struct rhashtable_params nfp_nft_ct_merge_params;
/**
* struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
@@ -27,6 +49,12 @@ extern const struct rhashtable_params nfp_tc_ct_merge_params;
*
* @tc_merge_tb: The table of merged tc flows
* @tc_merge_count: Keep count of the number of merged tc entries
+ *
+ * @nft_flows_list: The list of nft related nfp_fl_ct_flow_entry entries
+ * @nft_flows_count: Keep count of the number of nft_flow entries
+ *
+ * @nft_merge_tb: The table of merged tc+nft flows
+ * @nft_merge_count: Keep count of the number of merged tc+nft entries
*/
struct nfp_fl_ct_zone_entry {
u16 zone;
@@ -43,6 +71,12 @@ struct nfp_fl_ct_zone_entry {
struct rhashtable tc_merge_tb;
unsigned int tc_merge_count;
+
+ struct list_head nft_flows_list;
+ unsigned int nft_flows_count;
+
+ struct rhashtable nft_merge_tb;
+ unsigned int nft_merge_count;
};
enum ct_entry_type {
@@ -100,6 +134,32 @@ struct nfp_fl_ct_tc_merge {
};
/**
+ * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
+ * @netdev: Ingress netdev name
+ * @cookie: Flow cookie, combination of tc_merge and nft cookies
+ * @hash_node: Used by the hashtable
+ * @zt: Reference to the zone table this belongs to
+ * @nft_flow_list: This entry is part of a nft_flows_list
+ * @tc_merge_list: This entry is part of a ct_merge_list
+ * @tc_m_parent: The tc_merge parent
+ * @nft_parent: The nft_entry parent
+ * @tc_flower_cookie: The cookie of the flow offloaded to the nfp
+ * @flow_pay: Reference to the offloaded flow struct
+ */
+struct nfp_fl_nft_tc_merge {
+ struct net_device *netdev;
+ unsigned long cookie[3];
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head nft_flow_list;
+ struct list_head tc_merge_list;
+ struct nfp_fl_ct_tc_merge *tc_m_parent;
+ struct nfp_fl_ct_flow_entry *nft_parent;
+ unsigned long tc_flower_cookie;
+ struct nfp_fl_payload *flow_pay;
+};
+
+/**
* struct nfp_fl_ct_map_entry - Map between flow cookie and specific ct_flow
* @cookie: Flow cookie, same as original TC flow, used as key
* @hash_node: Used by the hashtable
@@ -152,4 +212,20 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
* @entry: Flow entry to cleanup
*/
void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry);
+
+/**
+ * nfp_fl_ct_del_flow() - Handle flow_del callbacks for conntrack
+ * @ct_map_ent: ct map entry for the flow that needs deleting
+ */
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
+
+/**
+ * nfp_fl_ct_handle_nft_flow() - Handle flower flow callbacks for nft table
+ * @type: Type provided by callback
+ * @type_data: Callback data
+ * @cb_priv: Pointer to data provided when registering the callback, in this
+ * case it's the zone table.
+ */
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
#endif
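
For reference, COMPARE_UNMASKED_FIELDS is applied pairwise to flow_match_*
structs throughout conntrack.c above; a minimal usage sketch, with rule1 and
rule2 standing in for the two struct flow_rule pointers being merged:

    struct flow_match_basic match1, match2;
    bool conflict;

    flow_rule_match_basic(rule1, &match1);
    flow_rule_match_basic(rule2, &match2);
    COMPARE_UNMASKED_FIELDS(match1, match2, &conflict);
    if (conflict)
            return -EINVAL; /* unmasked overlap disagrees, cannot merge */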
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 8658c5cedf91..621113650a9b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -639,8 +639,36 @@ static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
}
}
+ if (zt->nft) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ }
+
+ if (!list_empty(&zt->nft_flows_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "nft_flows_list unexpectedly not empty, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
rhashtable_free_and_destroy(&zt->tc_merge_tb,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&zt->nft_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
kfree(zt);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 7e4ad5d58859..2406d33356ad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1505,6 +1505,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
@@ -1514,6 +1515,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ /* Check ct_map_table */
+ ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent) {
+ err = nfp_fl_ct_del_flow(ct_map_ent);
+ return err;
+ }
+
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");