author:    David S. Miller  2022-09-07 14:02:09 +0100
committer: David S. Miller  2022-09-07 14:02:09 +0100
commit:    016eb59012b576f5a7b7b415d757717dc8cb3c6b (patch)
tree:      6106a627186301ace18900121d75f752da601354
parent:    da7d8e65b3fcebce0a2f606669cabce64fca0475 (diff)
parent:    99d4dc66c82379b1e077f4e139b2048b207ff07c (diff)
Merge branch 'macsec-offload-mlx5'
Saeed Mahameed says:
====================
Introduce MACsec skb_metadata_dst and mlx5 macsec offload
v1->v2:
- attach mlx5 implementation patches.
This patchset introduces a MACsec skb_metadata_dst to lay the groundwork
for MACsec HW offload.
MACsec is an IEEE standard (IEEE 802.1AE) for MAC security.
It defines a way to establish a protocol independent connection
between two hosts with data confidentiality, authenticity and/or
integrity, using GCM-AES. MACsec operates on the Ethernet layer and
as such is a layer 2 protocol, which means it’s designed to secure
traffic within a layer 2 network, including DHCP or ARP requests.
Linux has a software implementation of the MACsec standard as well as
support for HW offloading.
The offload reuses the logic, netlink API and data structures of the
existing MACsec software implementation; a driver plugs into it through
the macsec_ops callbacks, as the sketch below shows.
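The ops table from the mlx5 patches (shown in full in the diff below)
illustrates this reuse: an offloading driver only fills in the
macsec_ops callbacks, and the core's existing netlink handlers invoke
them alongside the software path. A subset of the ops is shown here:

static const struct macsec_ops macsec_offload_ops = {
	/* SecY, Rx SC and SA add callbacks, invoked from the same
	 * netlink code that drives the software implementation.
	 */
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	/* ...the upd_/del_ counterparts are elided here... */
};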
For Tx:
In the current MACsec offload implementation, MACsec interfaces share
the same MAC address by default.
Therefore, the HW can't distinguish which MACsec interface the traffic
originated from.
The MACsec stack uses skb_metadata_dst to store the SCI value, which is
unique per MACsec interface. The skb_metadata_dst is later used by the
offloading device driver to associate the SKB with the corresponding
offloaded interface (SCI) and thereby facilitate HW MACsec offload.
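A minimal sketch of the stack side (patches 1-2 touch net/macsec.c,
which is not part of the mlx5 diff below). The two helper names here
are illustrative; metadata_dst_alloc(), METADATA_MACSEC and the
macsec_info.sci field are what the series actually adds:

#include <net/dst_metadata.h>

/* Allocate, once per MACsec interface, a metadata_dst carrying its SCI. */
static struct metadata_dst *macsec_alloc_md_dst(sci_t sci)
{
	struct metadata_dst *md_dst;

	md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!md_dst)
		return NULL;

	md_dst->u.macsec_info.sci = sci; /* unique per MACsec interface */
	return md_dst;
}

/* On transmit, attach it before the skb reaches the offloading driver. */
static void macsec_attach_md_dst(struct sk_buff *skb,
				 struct metadata_dst *md_dst)
{
	skb_dst_set(skb, dst_clone(&md_dst->dst));
}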
For Rx:
As in the Tx case, if more than one MACsec device has the same MAC
address as the packet's destination MAC, the packet will be forwarded
to only one of the devices, and not necessarily to the desired one.
The offloading device driver sets the MACsec skb_metadata_dst sci
field with the appropriate Rx SCI for each SKB, so the MACsec rx handler
knows to which port to divert those skbs, instead of relying solely on
a destination MAC address comparison.
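This is the Rx handler the mlx5 patches add, condensed from
mlx5e_macsec_offload_handle_rx_skb() in the diff below (a NULL check on
the xarray lookup is added here for clarity):

void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
					struct sk_buff *skb,
					struct mlx5_cqe64 *cqe)
{
	u32 metadata = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_macsec_rx_sc_xarray_element *elem;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_macsec_rx_sc *rx_sc;

	if (!priv->macsec)
		return;

	rcu_read_lock();
	/* fs_id recovered from the CQE's flow-steering metadata */
	elem = xa_load(&priv->macsec->sc_xarray,
		       MLX5_MACSEC_METADATA_HANDLE(metadata));
	rx_sc = elem ? elem->rx_sc : NULL;
	if (rx_sc) {
		/* md_dst carries the Rx SCI the MACsec rx handler needs */
		dst_hold(&rx_sc->md_dst->dst);
		skb_dst_set(skb, &rx_sc->md_dst->dst);
	}
	rcu_read_unlock();
}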
1) Patches 1-2: add skb_metadata_dst support to the MACsec code:
net/macsec: Add MACsec skb_metadata_dst Tx Data path support
net/macsec: Add MACsec skb_metadata_dst Rx Data path support
2) Patch 3: move some MACsec driver code for sharing with the various
drivers that implement offload:
net/macsec: Move some code for sharing with various drivers that
implements offload
3) The rest of the patches introduce the mlx5 implementation of MACsec
TX and RX offload via steering tables:
a) TX: intercept skbs carrying the MACsec offload mark in their
skb_metadata_dst and mark the send descriptor for offload (see the
sketch after this list).
b) RX: intercept offloaded frames and prepare the proper
skb_metadata_dst to mark offloaded rx frames.
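Item 3a in practice, taken from mlx5e_macsec_tx_build_eseg() in the
diff below: the SCI stored in the skb_metadata_dst is mapped to a
flow-steering id, which is written into the send WQE's Ethernet segment
so the TX steering tables can match the packet to its MACsec object:

void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
				struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	u32 fs_id;

	/* sci -> fs_id lookup in the driver's SCI hash table */
	fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash,
						   &md_dst->u.macsec_info.sci);
	if (!fs_id)
		return;

	eseg->flow_table_metadata =
		cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
}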
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
28 files changed, 3180 insertions, 53 deletions
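One wiring detail worth pulling out before the diff itself, taken from
mlx5e_macsec_build_netdev() below: the driver advertises offload by
installing the macsec_ops and NETIF_F_HW_MACSEC, and calls
netif_keep_dst() so the skb_metadata_dst set by the MACsec stack is not
released before the driver's Tx path can read it:

void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	if (!mlx5e_is_macsec_device(priv->mdev))
		return;

	/* Enable MACsec offload */
	netdev->macsec_ops = &macsec_offload_ops;
	netdev->features |= NETIF_F_HW_MACSEC;
	netif_keep_dst(netdev);
}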
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index bfc0cd5ec423..26685fd0fdaa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -139,6 +139,14 @@ config MLX5_CORE_IPOIB help MLX5 IPoIB offloads & acceleration support. +config MLX5_EN_MACSEC + bool "Connect-X support for MACSec offload" + depends on MLX5_CORE_EN + depends on MACSEC + default n + help + Build support for MACsec cryptography-offload acceleration in the NIC. + config MLX5_EN_IPSEC bool "Mellanox Technologies IPsec Connect-X support" depends on MLX5_CORE_EN diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index a3773a8177ed..a22c32aabf11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -92,6 +92,9 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o +mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \ + en_accel/macsec_stats.o + mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \ en_accel/ipsec_offload.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e464024481b4..13aac5131ff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -954,6 +954,9 @@ struct mlx5e_priv { const struct mlx5e_profile *profile; void *ppriv; +#ifdef CONFIG_MLX5_EN_MACSEC + struct mlx5e_macsec *macsec; +#endif #ifdef CONFIG_MLX5_EN_IPSEC struct mlx5e_ipsec *ipsec; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 1839f1ab1ddd..07187028f0d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -39,6 +39,7 @@ #include "en_accel/ipsec_rxtx.h" #include "en_accel/ktls.h" #include "en_accel/ktls_txrx.h" +#include <en_accel/macsec.h> #include "en.h" #include "en/txrx.h" @@ -137,6 +138,15 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, } #endif +#ifdef CONFIG_MLX5_EN_MACSEC + if (unlikely(mlx5e_macsec_skb_is_offload(skb))) { + struct mlx5e_priv *priv = netdev_priv(dev); + + if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb))) + return false; + } +#endif + return true; } @@ -163,6 +173,11 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv, mlx5e_ipsec_tx_build_eseg(priv, skb, eseg); #endif +#ifdef CONFIG_MLX5_EN_MACSEC + if (unlikely(mlx5e_macsec_skb_is_offload(skb))) + mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg); +#endif + #if IS_ENABLED(CONFIG_GENEVE) if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) mlx5e_tx_tunnel_accel(skb, eseg, ihs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index e776b9f2da06..b859e4a4c744 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -388,7 +388,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs, 0xff, 16); } - flow_act->ipsec_obj_id = ipsec_obj_id; + flow_act->crypto.type = 
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC; + flow_act->crypto.obj_id = ipsec_obj_id; flow_act->flags |= FLOW_ACT_NO_APPEND; } @@ -444,7 +445,7 @@ static int rx_add_rule(struct mlx5e_priv *priv, } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; flow_act.modify_hdr = modify_hdr; @@ -500,7 +501,7 @@ static int tx_add_rule(struct mlx5e_priv *priv, MLX5_ETH_WQE_FT_META_IPSEC); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | - MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT; + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT; rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -576,7 +577,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) int err = -ENOMEM; ns = mlx5_get_flow_namespace(ipsec->mdev, - MLX5_FLOW_NAMESPACE_EGRESS_KERNEL); + MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); if (!ns) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 0ae4e12ce528..c72b62f52574 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -39,9 +39,9 @@ #include "en.h" #include "en/txrx.h" -/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */ +/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */ #define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1) -#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0)) +#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0)) #define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0)) struct mlx5e_accel_tx_ipsec_state { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c new file mode 100644 index 000000000000..d9d18b039d8c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -0,0 +1,1332 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include <linux/mlx5/device.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/xarray.h> + +#include "en.h" +#include "lib/mlx5.h" +#include "en_accel/macsec.h" +#include "en_accel/macsec_fs.h" + +#define MLX5_MACSEC_ASO_INC_SN 0x2 +#define MLX5_MACSEC_ASO_REG_C_4_5 0x2 + +struct mlx5e_macsec_sa { + bool active; + u8 assoc_num; + u32 macsec_obj_id; + u32 enc_key_id; + u32 next_pn; + sci_t sci; + + struct rhash_head hash; + u32 fs_id; + union mlx5e_macsec_rule *macsec_rule; + struct rcu_head rcu_head; +}; + +struct mlx5e_macsec_rx_sc; +struct mlx5e_macsec_rx_sc_xarray_element { + u32 fs_id; + struct mlx5e_macsec_rx_sc *rx_sc; +}; + +struct mlx5e_macsec_rx_sc { + bool active; + sci_t sci; + struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN]; + struct list_head rx_sc_list_element; + struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element; + struct metadata_dst *md_dst; + struct rcu_head rcu_head; +}; + +static const struct rhashtable_params rhash_sci = { + .key_len = sizeof_field(struct mlx5e_macsec_sa, sci), + .key_offset = offsetof(struct mlx5e_macsec_sa, sci), + .head_offset = offsetof(struct mlx5e_macsec_sa, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + +struct mlx5e_macsec_device { + const struct net_device *netdev; + struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN]; + struct list_head macsec_rx_sc_list_head; + unsigned char *dev_addr; + struct list_head macsec_device_list_element; +}; + +struct mlx5e_macsec { + struct list_head macsec_device_list_head; + int num_of_devices; + struct mlx5e_macsec_fs *macsec_fs; + struct mutex lock; /* Protects mlx5e_macsec internal contexts */ + + /* Global PD for MACsec object ASO context */ + u32 aso_pdn; + + /* Tx sci -> fs id mapping handling */ + struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */ + + /* Rx fs_id -> rx_sc mapping */ + struct xarray sc_xarray; + + struct mlx5_core_dev *mdev; + + /* Stats manage */ + struct mlx5e_macsec_stats stats; +}; + +struct mlx5_macsec_obj_attrs { + u32 aso_pdn; + u32 next_pn; + __be64 sci; + u32 enc_key_id; + bool encrypt; +}; + +static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev, + struct mlx5_macsec_obj_attrs *attrs, + bool is_tx, + u32 *macsec_obj_id) +{ + u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + void *aso_ctx; + void *obj; + int err; + + obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object); + aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso); + + MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt); + MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id); + MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci)); + MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5); + MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn); + + MLX5_SET(macsec_aso, aso_ctx, valid, 0x1); + if (is_tx) { + MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN); + MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn); + } + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) { + mlx5_core_err(mdev, + "MACsec offload: Failed to create MACsec object (err = %d)\n", + err); + return err; + } + + *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return err; +} + +static void 
mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id); + + mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, + struct mlx5e_macsec_sa *sa, + bool is_tx) +{ + int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : + MLX5_ACCEL_MACSEC_ACTION_DECRYPT; + + if ((is_tx) && sa->fs_id) { + /* Make sure ongoing datapath readers sees a valid SA */ + rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci); + sa->fs_id = 0; + } + + if (!sa->macsec_rule) + return; + + mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action); + mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); + sa->macsec_rule = NULL; +} + +static int mlx5e_macsec_init_sa(struct macsec_context *ctx, + struct mlx5e_macsec_sa *sa, + bool encrypt, + bool is_tx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec *macsec = priv->macsec; + struct mlx5_macsec_rule_attrs rule_attrs; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_macsec_obj_attrs obj_attrs; + union mlx5e_macsec_rule *macsec_rule; + int err; + + obj_attrs.next_pn = sa->next_pn; + obj_attrs.sci = cpu_to_be64((__force u64)sa->sci); + obj_attrs.enc_key_id = sa->enc_key_id; + obj_attrs.encrypt = encrypt; + obj_attrs.aso_pdn = macsec->aso_pdn; + + err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id); + if (err) + return err; + + rule_attrs.macsec_obj_id = sa->macsec_obj_id; + rule_attrs.sci = sa->sci; + rule_attrs.assoc_num = sa->assoc_num; + rule_attrs.action = (is_tx) ? 
MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : + MLX5_ACCEL_MACSEC_ACTION_DECRYPT; + + macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id); + if (IS_ERR_OR_NULL(macsec_rule)) + goto destroy_macsec_object; + + sa->macsec_rule = macsec_rule; + + if (is_tx) { + err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci); + if (err) + goto destroy_macsec_object_and_rule; + } + + return 0; + +destroy_macsec_object_and_rule: + mlx5e_macsec_cleanup_sa(macsec, sa, is_tx); +destroy_macsec_object: + mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id); + + return err; +} + +static struct mlx5e_macsec_rx_sc * +mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci) +{ + struct mlx5e_macsec_rx_sc *iter; + + list_for_each_entry_rcu(iter, list, rx_sc_list_element) { + if (iter->sci == sci) + return iter; + } + + return NULL; +} + +static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec, + struct mlx5e_macsec_sa *rx_sa, + bool active) +{ + struct mlx5_core_dev *mdev = macsec->mdev; + struct mlx5_macsec_obj_attrs attrs; + int err = 0; + + if (rx_sa->active != active) + return 0; + + rx_sa->active = active; + if (!active) { + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + return 0; + } + + attrs.sci = rx_sa->sci; + attrs.enc_key_id = rx_sa->enc_key_id; + err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id); + if (err) + return err; + + return 0; +} + +static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx) +{ + const struct net_device *netdev = ctx->netdev; + const struct macsec_secy *secy = ctx->secy; + + if (secy->validate_frames != MACSEC_VALIDATE_STRICT) { + netdev_err(netdev, + "MACsec offload is supported only when validate_frame is in strict mode\n"); + return false; + } + + if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) { + netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n", + MACSEC_DEFAULT_ICV_LEN); + return false; + } + + if (!secy->protect_frames) { + netdev_err(netdev, + "MACsec offload is supported only when protect_frames is set\n"); + return false; + } + + if (secy->xpn) { + netdev_err(netdev, "MACsec offload: xpn is not supported\n"); + return false; + } + + if (secy->replay_protect) { + netdev_err(netdev, "MACsec offload: replay protection is not supported\n"); + return false; + } + + return true; +} + +static struct mlx5e_macsec_device * +mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec, + const struct macsec_context *ctx) +{ + struct mlx5e_macsec_device *iter; + const struct list_head *list; + + list = &macsec->macsec_device_list_head; + list_for_each_entry_rcu(iter, list, macsec_device_list_element) { + if (iter->netdev == ctx->secy->netdev) + return iter; + } + + return NULL; +} + +static int mlx5e_macsec_add_txsa(struct macsec_context *ctx) +{ + const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc; + const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa; + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + const struct macsec_secy *secy = ctx->secy; + struct mlx5e_macsec_device *macsec_device; + struct mlx5_core_dev *mdev = priv->mdev; + u8 assoc_num = ctx->sa.assoc_num; + struct mlx5e_macsec_sa *tx_sa; + struct mlx5e_macsec *macsec; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = 
-EEXIST; + goto out; + } + + if (macsec_device->tx_sa[assoc_num]) { + netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num); + err = -EEXIST; + goto out; + } + + tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL); + if (!tx_sa) { + err = -ENOMEM; + goto out; + } + + tx_sa->active = ctx_tx_sa->active; + tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower; + tx_sa->sci = secy->sci; + tx_sa->assoc_num = assoc_num; + err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len, + MLX5_ACCEL_OBJ_MACSEC_KEY, + &tx_sa->enc_key_id); + if (err) + goto destroy_sa; + + macsec_device->tx_sa[assoc_num] = tx_sa; + if (!secy->operational || + assoc_num != tx_sc->encoding_sa || + !tx_sa->active) + goto out; + + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + if (err) + goto destroy_encryption_key; + + mutex_unlock(&macsec->lock); + + return 0; + +destroy_encryption_key: + macsec_device->tx_sa[assoc_num] = NULL; + mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id); +destroy_sa: + kfree(tx_sa); +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) +{ + const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc; + const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa; + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + u8 assoc_num = ctx->sa.assoc_num; + struct mlx5e_macsec_sa *tx_sa; + struct mlx5e_macsec *macsec; + struct net_device *netdev; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + netdev = ctx->netdev; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + tx_sa = macsec_device->tx_sa[assoc_num]; + if (!tx_sa) { + netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num); + err = -EEXIST; + goto out; + } + + if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) { + netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n", + assoc_num); + err = -EINVAL; + goto out; + } + + if (tx_sa->active == ctx_tx_sa->active) + goto out; + + if (tx_sa->assoc_num != tx_sc->encoding_sa) + goto out; + + if (ctx_tx_sa->active) { + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + if (err) + goto out; + } else { + if (!tx_sa->macsec_rule) { + err = -EINVAL; + goto out; + } + + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + } + + tx_sa->active = ctx_tx_sa->active; +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_del_txsa(struct macsec_context *ctx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + u8 assoc_num = ctx->sa.assoc_num; + struct mlx5e_macsec_sa *tx_sa; + struct mlx5e_macsec *macsec; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + tx_sa = macsec_device->tx_sa[assoc_num]; + if (!tx_sa) { + netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num); + err = -EEXIST; + goto out; + } + + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id); + kfree_rcu(tx_sa); + 
macsec_device->tx_sa[assoc_num] = NULL; + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci) +{ + struct mlx5e_macsec_sa *macsec_sa; + u32 fs_id = 0; + + rcu_read_lock(); + macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci); + if (macsec_sa) + fs_id = macsec_sa->fs_id; + rcu_read_unlock(); + + return fs_id; +} + +static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx) +{ + struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element; + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc; + struct mlx5e_macsec_device *macsec_device; + struct mlx5e_macsec_rx_sc *rx_sc; + struct list_head *rx_sc_list; + struct mlx5e_macsec *macsec; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + rx_sc_list = &macsec_device->macsec_rx_sc_list_head; + rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci); + if (rx_sc) { + netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n", + ctx_rx_sc->sci); + err = -EEXIST; + goto out; + } + + rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); + if (!rx_sc) { + err = -ENOMEM; + goto out; + } + + sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL); + if (!sc_xarray_element) { + err = -ENOMEM; + goto destroy_rx_sc; + } + + sc_xarray_element->rx_sc = rx_sc; + err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element, + XA_LIMIT(1, USHRT_MAX), GFP_KERNEL); + if (err) + goto destroy_sc_xarray_elemenet; + + rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); + if (!rx_sc->md_dst) { + err = -ENOMEM; + goto erase_xa_alloc; + } + + rx_sc->sci = ctx_rx_sc->sci; + rx_sc->active = ctx_rx_sc->active; + list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list); + + rx_sc->sc_xarray_element = sc_xarray_element; + rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci; + mutex_unlock(&macsec->lock); + + return 0; + +erase_xa_alloc: + xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id); +destroy_sc_xarray_elemenet: + kfree(sc_xarray_element); +destroy_rx_sc: + kfree(rx_sc); + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc; + struct mlx5e_macsec_device *macsec_device; + struct mlx5e_macsec_rx_sc *rx_sc; + struct mlx5e_macsec_sa *rx_sa; + struct mlx5e_macsec *macsec; + struct list_head *list; + int i; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + list = &macsec_device->macsec_rx_sc_list_head; + rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci); + if (!rx_sc) { + err = -EINVAL; + goto out; + } + + rx_sc->active = ctx_rx_sc->active; + if (rx_sc->active == ctx_rx_sc->active) + goto out; + + for (i = 0; i < MACSEC_NUM_AN; ++i) { + rx_sa = rx_sc->rx_sa[i]; + if (!rx_sa) + continue; + + err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, 
rx_sa->active && ctx_rx_sc->active); + if (err) + goto out; + } + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + struct mlx5e_macsec_rx_sc *rx_sc; + struct mlx5e_macsec_sa *rx_sa; + struct mlx5e_macsec *macsec; + struct list_head *list; + int err = 0; + int i; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + list = &macsec_device->macsec_rx_sc_list_head; + rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci); + if (!rx_sc) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld doesn't exist\n", + ctx->sa.rx_sa->sc->sci); + err = -EINVAL; + goto out; + } + + for (i = 0; i < MACSEC_NUM_AN; ++i) { + rx_sa = rx_sc->rx_sa[i]; + if (!rx_sa) + continue; + + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); + + kfree(rx_sa); + rx_sc->rx_sa[i] = NULL; + } + +/* + * At this point the relevant MACsec offload Rx rule already removed at + * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current + * Rx related data propagating using xa_erase which uses rcu to sync, + * once fs_id is erased then this rx_sc is hidden from datapath. + */ + list_del_rcu(&rx_sc->rx_sc_list_element); + xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id); + metadata_dst_free(rx_sc->md_dst); + kfree(rx_sc->sc_xarray_element); + + kfree_rcu(rx_sc); + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx) +{ + const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa; + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + struct mlx5_core_dev *mdev = priv->mdev; + u8 assoc_num = ctx->sa.assoc_num; + struct mlx5e_macsec_rx_sc *rx_sc; + sci_t sci = ctx_rx_sa->sc->sci; + struct mlx5e_macsec_sa *rx_sa; + struct mlx5e_macsec *macsec; + struct list_head *list; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + list = &macsec_device->macsec_rx_sc_list_head; + rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci); + if (!rx_sc) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld doesn't exist\n", + ctx->sa.rx_sa->sc->sci); + err = -EINVAL; + goto out; + } + + if (rx_sc->rx_sa[assoc_num]) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", + sci, assoc_num); + err = -EEXIST; + goto out; + } + + rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL); + if (!rx_sa) { + err = -ENOMEM; + goto out; + } + + rx_sa->active = ctx_rx_sa->active; + rx_sa->next_pn = ctx_rx_sa->next_pn; + rx_sa->sci = sci; + rx_sa->assoc_num = assoc_num; + rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id; + + err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len, + MLX5_ACCEL_OBJ_MACSEC_KEY, + &rx_sa->enc_key_id); + if (err) + goto destroy_sa; + + rx_sc->rx_sa[assoc_num] = rx_sa; + if (!rx_sa->active) + goto out; + + 
//TODO - add support for both authentication and encryption flows + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + if (err) + goto destroy_encryption_key; + + goto out; + +destroy_encryption_key: + rx_sc->rx_sa[assoc_num] = NULL; + mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id); +destroy_sa: + kfree(rx_sa); +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) +{ + const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa; + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + u8 assoc_num = ctx->sa.assoc_num; + struct mlx5e_macsec_rx_sc *rx_sc; + sci_t sci = ctx_rx_sa->sc->sci; + struct mlx5e_macsec_sa *rx_sa; + struct mlx5e_macsec *macsec; + struct list_head *list; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + list = &macsec_device->macsec_rx_sc_list_head; + rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci); + if (!rx_sc) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld doesn't exist\n", + ctx->sa.rx_sa->sc->sci); + err = -EINVAL; + goto out; + } + + rx_sa = rx_sc->rx_sa[assoc_num]; + if (rx_sa) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", + sci, assoc_num); + err = -EEXIST; + goto out; + } + + if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) { + netdev_err(ctx->netdev, + "MACsec offload update RX sa %d PN isn't supported\n", + assoc_num); + err = -EINVAL; + goto out; + } + + err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active); +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + sci_t sci = ctx->sa.rx_sa->sc->sci; + struct mlx5e_macsec_rx_sc *rx_sc; + u8 assoc_num = ctx->sa.assoc_num; + struct mlx5e_macsec_sa *rx_sa; + struct mlx5e_macsec *macsec; + struct list_head *list; + int err = 0; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + list = &macsec_device->macsec_rx_sc_list_head; + rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci); + if (!rx_sc) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld doesn't exist\n", + ctx->sa.rx_sa->sc->sci); + err = -EINVAL; + goto out; + } + + rx_sa = rx_sc->rx_sa[assoc_num]; + if (rx_sa) { + netdev_err(ctx->netdev, + "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", + sci, assoc_num); + err = -EEXIST; + goto out; + } + + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); + kfree(rx_sa); + rx_sc->rx_sa[assoc_num] = NULL; + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_add_secy(struct macsec_context *ctx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + const struct net_device *dev = ctx->secy->netdev; + const struct net_device *netdev = ctx->netdev; + struct mlx5e_macsec_device *macsec_device; + struct mlx5e_macsec *macsec; + int err 
= 0; + + if (ctx->prepare) + return 0; + + if (!mlx5e_macsec_secy_features_validate(ctx)) + return -EINVAL; + + mutex_lock(&priv->macsec->lock); + macsec = priv->macsec; + if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) { + netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n"); + goto out; + } + + if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) { + netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n", + MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES); + err = -EBUSY; + goto out; + } + + macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL); + if (!macsec_device) { + err = -ENOMEM; + goto out; + } + + macsec_device->dev_addr = kzalloc(dev->addr_len, GFP_KERNEL); + if (!macsec_device->dev_addr) { + kfree(macsec_device); + err = -ENOMEM; + goto out; + } + + memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len); + macsec_device->netdev = dev; + + INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head); + list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head); + + ++macsec->num_of_devices; +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int macsec_upd_secy_hw_address(struct macsec_context *ctx, + struct mlx5e_macsec_device *macsec_device) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + const struct net_device *dev = ctx->secy->netdev; + struct mlx5e_macsec *macsec = priv->macsec; + struct mlx5e_macsec_rx_sc *rx_sc, *tmp; + struct mlx5e_macsec_sa *rx_sa; + struct list_head *list; + int i, err = 0; + + + list = &macsec_device->macsec_rx_sc_list_head; + list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) { + for (i = 0; i < MACSEC_NUM_AN; ++i) { + rx_sa = rx_sc->rx_sa[i]; + if (!rx_sa || !rx_sa->macsec_rule) + continue; + + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + } + } + + list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) { + for (i = 0; i < MACSEC_NUM_AN; ++i) { + rx_sa = rx_sc->rx_sa[i]; + if (!rx_sa) + continue; + + if (rx_sa->active) { + err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false); + if (err) + goto out; + } + } + } + + memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len); +out: + return err; +} + +/* this function is called from 2 macsec ops functions: + * macsec_set_mac_address – MAC address was changed, therefore we need to destroy + * and create new Tx contexts(macsec object + steering). 
+ * macsec_changelink – in this case the tx SC or SecY may be changed, therefore need to + * destroy Tx and Rx contexts(macsec object + steering) + */ +static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) +{ + const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc; + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + const struct net_device *dev = ctx->secy->netdev; + struct mlx5e_macsec_device *macsec_device; + struct mlx5e_macsec_sa *tx_sa; + struct mlx5e_macsec *macsec; + int i, err = 0; + + if (ctx->prepare) + return 0; + + if (!mlx5e_macsec_secy_features_validate(ctx)) + return -EINVAL; + + mutex_lock(&priv->macsec->lock); + + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + goto out; + } + + /* if the dev_addr hasn't change, it mean the callback is from macsec_changelink */ + if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) { + err = macsec_upd_secy_hw_address(ctx, macsec_device); + if (err) + goto out; + } + + for (i = 0; i < MACSEC_NUM_AN; ++i) { + tx_sa = macsec_device->tx_sa[i]; + if (!tx_sa) + continue; + + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + } + + for (i = 0; i < MACSEC_NUM_AN; ++i) { + tx_sa = macsec_device->tx_sa[i]; + if (!tx_sa) + continue; + + if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) { + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + if (err) + goto out; + } + } + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +static int mlx5e_macsec_del_secy(struct macsec_context *ctx) +{ + struct mlx5e_priv *priv = netdev_priv(ctx->netdev); + struct mlx5e_macsec_device *macsec_device; + struct mlx5e_macsec_rx_sc *rx_sc, *tmp; + struct mlx5e_macsec_sa *rx_sa; + struct mlx5e_macsec_sa *tx_sa; + struct mlx5e_macsec *macsec; + struct list_head *list; + int err = 0; + int i; + + if (ctx->prepare) + return 0; + + mutex_lock(&priv->macsec->lock); + macsec = priv->macsec; + macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx); + if (!macsec_device) { + netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n"); + err = -EINVAL; + + goto out; + } + + for (i = 0; i < MACSEC_NUM_AN; ++i) { + tx_sa = macsec_device->tx_sa[i]; + if (!tx_sa) + continue; + + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id); + kfree(tx_sa); + macsec_device->tx_sa[i] = NULL; + } + + list = &macsec_device->macsec_rx_sc_list_head; + list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) { + for (i = 0; i < MACSEC_NUM_AN; ++i) { + rx_sa = rx_sc->rx_sa[i]; + if (!rx_sa) + continue; + + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); + kfree(rx_sa); + rx_sc->rx_sa[i] = NULL; + } + + list_del_rcu(&rx_sc->rx_sc_list_element); + + kfree_rcu(rx_sc); + } + + kfree(macsec_device->dev_addr); + macsec_device->dev_addr = NULL; + + list_del_rcu(&macsec_device->macsec_device_list_element); + --macsec->num_of_devices; + +out: + mutex_unlock(&macsec->lock); + + return err; +} + +bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) +{ + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 
macsec_decrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) + return false; + + return true; +} + +void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats) +{ + mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats); +} + +struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec) +{ + if (!macsec) + return NULL; + + return &macsec->stats; +} + +static const struct macsec_ops macsec_offload_ops = { + .mdo_add_txsa = mlx5e_macsec_add_txsa, + .mdo_upd_txsa = mlx5e_macsec_upd_txsa, + .mdo_del_txsa = mlx5e_macsec_del_txsa, + .mdo_add_rxsc = mlx5e_macsec_add_rxsc, + .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc, + .mdo_del_rxsc = mlx5e_macsec_del_rxsc, + .mdo_add_rxsa = mlx5e_macsec_add_rxsa, + .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa, + .mdo_del_rxsa = mlx5e_macsec_del_rxsa, + .mdo_add_secy = mlx5e_macsec_add_secy, + .mdo_upd_secy = mlx5e_macsec_upd_secy, + .mdo_del_secy = mlx5e_macsec_del_secy, +}; + +bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb) +{ + struct metadata_dst *md_dst = skb_metadata_dst(skb); + u32 fs_id; + + fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci); + if (!fs_id) + goto err_out; + + return true; + +err_out: + dev_kfree_skb_any(skb); + return false; +} + +void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec, + struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg) +{ + struct metadata_dst *md_dst = skb_metadata_dst(skb); + u32 fs_id; + + fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci); + if (!fs_id) + return; + + eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2); +} + +void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe) +{ + struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element; + u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata); + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_macsec_rx_sc *rx_sc; + struct mlx5e_macsec *macsec; + u32 fs_id; + + macsec = priv->macsec; + if (!macsec) + return; + + fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data); + + rcu_read_lock(); + sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id); + rx_sc = sc_xarray_element->rx_sc; + if (rx_sc) { + dst_hold(&rx_sc->md_dst->dst); + skb_dst_set(skb, &rx_sc->md_dst->dst); + } + + rcu_read_unlock(); +} + +void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + + if (!mlx5e_is_macsec_device(priv->mdev)) + return; + + /* Enable MACsec */ + mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n"); + netdev->macsec_ops = &macsec_offload_ops; + netdev->features |= NETIF_F_HW_MACSEC; + netif_keep_dst(netdev); +} + +int mlx5e_macsec_init(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_macsec *macsec = NULL; + struct mlx5e_macsec_fs *macsec_fs; + int err; + + if (!mlx5e_is_macsec_device(priv->mdev)) { + mlx5_core_dbg(mdev, "Not a MACsec offload device\n"); + return 0; + } + + 
macsec = kzalloc(sizeof(*macsec), GFP_KERNEL); + if (!macsec) + return -ENOMEM; + + INIT_LIST_HEAD(&macsec->macsec_device_list_head); + mutex_init(&macsec->lock); + + err = mlx5_core_alloc_pd(mdev, &macsec->aso_pdn); + if (err) { + mlx5_core_err(mdev, + "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n", + err); + goto err_pd; + } + + err = rhashtable_init(&macsec->sci_hash, &rhash_sci); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n", + err); + goto err_out; + } + + xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1); + + priv->macsec = macsec; + + macsec->mdev = mdev; + + macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev); + if (IS_ERR_OR_NULL(macsec_fs)) + goto err_out; + + macsec->macsec_fs = macsec_fs; + + mlx5_core_dbg(mdev, "MACsec attached to netdevice\n"); + + return 0; + +err_out: + mlx5_core_dealloc_pd(priv->mdev, macsec->aso_pdn); +err_pd: + kfree(macsec); + priv->macsec = NULL; + return err; +} + +void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_macsec *macsec = priv->macsec; + + if (!macsec) + return; + + mlx5e_macsec_fs_cleanup(macsec->macsec_fs); + + priv->macsec = NULL; + + mlx5_core_dealloc_pd(priv->mdev, macsec->aso_pdn); + + rhashtable_destroy(&macsec->sci_hash); + + mutex_destroy(&macsec->lock); + + kfree(macsec); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h new file mode 100644 index 000000000000..ada557fc042d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_ACCEL_MACSEC_H__ +#define __MLX5_EN_ACCEL_MACSEC_H__ + +#ifdef CONFIG_MLX5_EN_MACSEC + +#include <linux/mlx5/driver.h> +#include <net/macsec.h> +#include <net/dst_metadata.h> + +/* Bit31 - 30: MACsec marker, Bit3-0: MACsec id */ +#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1) +#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0)) + +struct mlx5e_priv; +struct mlx5e_macsec; + +struct mlx5e_macsec_stats { + u64 macsec_rx_pkts; + u64 macsec_rx_bytes; + u64 macsec_rx_pkts_drop; + u64 macsec_rx_bytes_drop; + u64 macsec_tx_pkts; + u64 macsec_tx_bytes; + u64 macsec_tx_pkts_drop; + u64 macsec_tx_bytes_drop; +}; + +void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv); +int mlx5e_macsec_init(struct mlx5e_priv *priv); +void mlx5e_macsec_cleanup(struct mlx5e_priv *priv); +bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb); +void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec, + struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg); + +static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) +{ + struct metadata_dst *md_dst = skb_metadata_dst(skb); + + return md_dst && (md_dst->type == METADATA_MACSEC); +} + +static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) +{ + return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); +} + +void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + struct mlx5_cqe64 *cqe); +bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev); +void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats); +struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec); + +#else + +static inline void mlx5e_macsec_build_netdev(struct 
mlx5e_priv *priv) {} +static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {} +static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; } +static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } +static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe) +{} +static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; } + +#endif /* CONFIG_MLX5_EN_MACSEC */ + +#endif /* __MLX5_ACCEL_EN_MACSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c new file mode 100644 index 000000000000..608fbbaa5a58 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c @@ -0,0 +1,1384 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include <net/macsec.h> +#include <linux/netdevice.h> +#include <linux/mlx5/qp.h> +#include "fs_core.h" +#include "en/fs.h" +#include "en_accel/macsec_fs.h" +#include "mlx5_core.h" + +/* MACsec TX flow steering */ +#define CRYPTO_NUM_MAXSEC_FTE BIT(15) +#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1 + +#define TX_CRYPTO_TABLE_LEVEL 0 +#define TX_CRYPTO_TABLE_NUM_GROUPS 3 +#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1 +#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \ + (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \ + CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE)) +#define TX_CHECK_TABLE_LEVEL 1 +#define TX_CHECK_TABLE_NUM_FTE 2 +#define RX_CRYPTO_TABLE_LEVEL 0 +#define RX_CHECK_TABLE_LEVEL 1 +#define RX_CHECK_TABLE_NUM_FTE 3 +#define RX_CRYPTO_TABLE_NUM_GROUPS 3 +#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \ + ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2) +#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \ + (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE) +#define RX_NUM_OF_RULES_PER_SA 2 + +#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */ +#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23 +#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8 +#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5 +#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) +#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8 +#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN) + +/* MACsec RX flow steering */ +#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E + +struct mlx5_sectag_header { + __be16 ethertype; + u8 tci_an; + u8 sl; + u32 pn; + u8 sci[MACSEC_SCI_LEN]; /* optional */ +} __packed; + +struct mlx5e_macsec_tx_rule { + struct mlx5_flow_handle *rule; + struct mlx5_pkt_reformat *pkt_reformat; + u32 fs_id; +}; + +struct mlx5e_macsec_tables { + struct mlx5e_flow_table ft_crypto; + struct mlx5_flow_handle *crypto_miss_rule; + + struct mlx5_flow_table *ft_check; + struct mlx5_flow_group *ft_check_group; + struct mlx5_fc *check_miss_rule_counter; + struct mlx5_flow_handle *check_miss_rule; + struct mlx5_fc *check_rule_counter; + + u32 refcnt; +}; + +struct mlx5e_macsec_tx { + struct mlx5_flow_handle *crypto_mke_rule; + struct mlx5_flow_handle *check_rule; + + struct ida tx_halloc; + + struct mlx5e_macsec_tables tables; +}; + +struct mlx5e_macsec_rx_rule { + struct mlx5_flow_handle 
*rule[RX_NUM_OF_RULES_PER_SA]; + struct mlx5_modify_hdr *meta_modhdr; +}; + +struct mlx5e_macsec_rx { + struct mlx5_flow_handle *check_rule[2]; + struct mlx5_pkt_reformat *check_rule_pkt_reformat[2]; + + struct mlx5e_macsec_tables tables; +}; + +union mlx5e_macsec_rule { + struct mlx5e_macsec_tx_rule tx_rule; + struct mlx5e_macsec_rx_rule rx_rule; +}; + +struct mlx5e_macsec_fs { + struct mlx5_core_dev *mdev; + struct net_device *netdev; + struct mlx5e_macsec_tx *tx_fs; + struct mlx5e_macsec_rx *rx_fs; +}; + +static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5e_macsec_tables *tx_tables; + + tx_tables = &tx_fs->tables; + + /* Tx check table */ + if (tx_fs->check_rule) { + mlx5_del_flow_rules(tx_fs->check_rule); + tx_fs->check_rule = NULL; + } + + if (tx_tables->check_miss_rule) { + mlx5_del_flow_rules(tx_tables->check_miss_rule); + tx_tables->check_miss_rule = NULL; + } + + if (tx_tables->ft_check_group) { + mlx5_destroy_flow_group(tx_tables->ft_check_group); + tx_tables->ft_check_group = NULL; + } + + if (tx_tables->ft_check) { + mlx5_destroy_flow_table(tx_tables->ft_check); + tx_tables->ft_check = NULL; + } + + /* Tx crypto table */ + if (tx_fs->crypto_mke_rule) { + mlx5_del_flow_rules(tx_fs->crypto_mke_rule); + tx_fs->crypto_mke_rule = NULL; + } + + if (tx_tables->crypto_miss_rule) { + mlx5_del_flow_rules(tx_tables->crypto_miss_rule); + tx_tables->crypto_miss_rule = NULL; + } + + mlx5e_destroy_flow_table(&tx_tables->ft_crypto); +} + +static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int mclen = MLX5_ST_SZ_BYTES(fte_match_param); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + /* Flow Group for MKE match */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for SA rules */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); + MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC_MASK); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for l2 traps */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static struct mlx5_flow_table + *macsec_fs_auto_group_table_create(struct 
mlx5_flow_namespace *ns, int flags, + int level, int max_fte) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *fdb = NULL; + + /* reserve entry for the match all miss group and rule */ + ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.prio = 0; + ft_attr.flags = flags; + ft_attr.level = level; + ft_attr.max_fte = max_fte; + + fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + + return fdb; +} + +static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct net_device *netdev = macsec_fs->netdev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5e_macsec_tables *tx_tables; + struct mlx5_flow_act flow_act = {}; + struct mlx5e_flow_table *ft_crypto; + struct mlx5_flow_table *flow_table; + struct mlx5_flow_group *flow_group; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + u32 *flow_group_in; + int err = 0; + + ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); + if (!ns) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + goto out_spec; + + tx_tables = &tx_fs->tables; + ft_crypto = &tx_tables->ft_crypto; + + /* Tx crypto table */ + ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; + ft_attr.level = TX_CRYPTO_TABLE_LEVEL; + ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; + + flow_table = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err); + goto out_flow_group; + } + ft_crypto->t = flow_table; + + /* Tx crypto table groups */ + err = macsec_fs_tx_create_crypto_table_groups(ft_crypto); + if (err) { + netdev_err(netdev, + "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", + err); + goto err; + } + + /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */ + memset(&flow_act, 0, sizeof(flow_act)); + memset(spec, 0, sizeof(*spec)); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err); + goto err; + } + tx_fs->crypto_mke_rule = rule; + + /* Tx crypto table Default miss rule */ + memset(&flow_act, 0, sizeof(flow_act)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err); + goto err; + } + tx_tables->crypto_miss_rule = rule; + + /* Tx check table */ + flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL, + TX_CHECK_TABLE_NUM_FTE); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err); + goto err; + } + tx_tables->ft_check = flow_table; + + /* Tx check table Default miss group/rule */ + memset(flow_group_in, 
0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); + flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + netdev_err(netdev, + "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", + err); + goto err; + } + tx_tables->ft_check_group = flow_group; + + /* Tx check table default drop rule */ + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err); + goto err; + } + tx_tables->check_miss_rule = rule; + + /* Tx check table rule */ + memset(spec, 0, sizeof(struct mlx5_flow_spec)); + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + flow_act.flags = FLOW_ACT_NO_APPEND; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); + rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err); + goto err; + } + tx_fs->check_rule = rule; + + goto out_flow_group; + +err: + macsec_fs_tx_destroy(macsec_fs); +out_flow_group: + kvfree(flow_group_in); +out_spec: + kvfree(spec); + return err; +} + +static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5e_macsec_tables *tx_tables; + int err = 0; + + tx_tables = &tx_fs->tables; + if (tx_tables->refcnt) + goto out; + + err = macsec_fs_tx_create(macsec_fs); + if (err) + return err; + +out: + tx_tables->refcnt++; + return err; +} + +static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; + + if (--tx_tables->refcnt) + return; + + macsec_fs_tx_destroy(macsec_fs); +} + +static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + u32 macsec_obj_id, + u32 *fs_id) +{ + struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; + int err = 0; + u32 id; + + err = ida_alloc_range(&tx_fs->tx_halloc, 1, + MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES, + GFP_KERNEL); + if (err < 0) + return err; + + id = err; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + + /* Metadata match */ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC_MASK); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC | id << 2); + + *fs_id = id; + flow_act->crypto.type = 
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; + flow_act->crypto.obj_id = macsec_obj_id; + + mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id); + return 0; +} + +static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx, + char *reformatbf, + size_t *reformat_size) +{ + const struct macsec_secy *secy = ctx->secy; + bool sci_present = macsec_send_sci(secy); + struct mlx5_sectag_header sectag = {}; + const struct macsec_tx_sc *tx_sc; + + tx_sc = &secy->tx_sc; + sectag.ethertype = htons(ETH_P_MACSEC); + + if (sci_present) { + sectag.tci_an |= MACSEC_TCI_SC; + memcpy(&sectag.sci, &secy->sci, + sizeof(sectag.sci)); + } else { + if (tx_sc->end_station) + sectag.tci_an |= MACSEC_TCI_ES; + if (tx_sc->scb) + sectag.tci_an |= MACSEC_TCI_SCB; + } + + /* With GCM, C/E clear for !encrypt, both set for encrypt */ + if (tx_sc->encrypt) + sectag.tci_an |= MACSEC_TCI_CONFID; + else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) + sectag.tci_an |= MACSEC_TCI_C; + + sectag.tci_an |= tx_sc->encoding_sa; + + *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); + + memcpy(reformatbf, &sectag, *reformat_size); +} + +static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs, + struct mlx5e_macsec_tx_rule *tx_rule) +{ + if (tx_rule->rule) { + mlx5_del_flow_rules(tx_rule->rule); + tx_rule->rule = NULL; + } + + if (tx_rule->pkt_reformat) { + mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat); + tx_rule->pkt_reformat = NULL; + } + + if (tx_rule->fs_id) { + ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id); + tx_rule->fs_id = 0; + } + + kfree(tx_rule); + + macsec_fs_tx_ft_put(macsec_fs); +} + +static union mlx5e_macsec_rule * +macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id) +{ + char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN]; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct net_device *netdev = macsec_fs->netdev; + struct mlx5_flow_destination dest = {}; + struct mlx5e_macsec_tables *tx_tables; + union mlx5e_macsec_rule *macsec_rule = NULL; + struct mlx5e_macsec_tx_rule *tx_rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + size_t reformat_size; + int err = 0; + u32 fs_id; + + tx_tables = &tx_fs->tables; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return NULL; + + err = macsec_fs_tx_ft_get(macsec_fs); + if (err) + goto out_spec; + + macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); + if (!macsec_rule) { + macsec_fs_tx_ft_put(macsec_fs); + goto out_spec; + } + + tx_rule = &macsec_rule->tx_rule; + + /* Tx crypto table crypto rule */ + macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size); + + reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC; + reformat_params.size = reformat_size; + reformat_params.data = reformatbf; + flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev, + &reformat_params, + MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); + if (IS_ERR(flow_act.pkt_reformat)) { + err = PTR_ERR(flow_act.pkt_reformat); + netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err); + goto err; + } + tx_rule->pkt_reformat = flow_act.pkt_reformat; + + err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id); + if (err) { + netdev_err(netdev, + "Failed to set up FTE for MACsec 
TX crypto rule, err=%d\n", + err); + goto err; + } + + tx_rule->fs_id = fs_id; + *sa_fs_id = fs_id; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = tx_tables->ft_check; + rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err); + goto err; + } + tx_rule->rule = rule; + + goto out_spec; + +err: + macsec_fs_tx_del_rule(macsec_fs, tx_rule); + macsec_rule = NULL; +out_spec: + kvfree(spec); + + return macsec_rule; +} + +static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5e_macsec_tables *tx_tables; + + if (!tx_fs) + return; + + tx_tables = &tx_fs->tables; + if (tx_tables->refcnt) { + netdev_err(macsec_fs->netdev, + "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n", + tx_tables->refcnt); + return; + } + + ida_destroy(&tx_fs->tx_halloc); + + if (tx_tables->check_miss_rule_counter) { + mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter); + tx_tables->check_miss_rule_counter = NULL; + } + + if (tx_tables->check_rule_counter) { + mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); + tx_tables->check_rule_counter = NULL; + } + + kfree(tx_fs); + macsec_fs->tx_fs = NULL; +} + +static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs) +{ + struct net_device *netdev = macsec_fs->netdev; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5e_macsec_tables *tx_tables; + struct mlx5e_macsec_tx *tx_fs; + struct mlx5_fc *flow_counter; + int err; + + tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL); + if (!tx_fs) + return -ENOMEM; + + tx_tables = &tx_fs->tables; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + netdev_err(netdev, + "Failed to create MACsec Tx encrypt flow counter, err(%d)\n", + err); + goto err_encrypt_counter; + } + tx_tables->check_rule_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + netdev_err(netdev, + "Failed to create MACsec Tx drop flow counter, err(%d)\n", + err); + goto err_drop_counter; + } + tx_tables->check_miss_rule_counter = flow_counter; + + ida_init(&tx_fs->tx_halloc); + + macsec_fs->tx_fs = tx_fs; + + return 0; + +err_drop_counter: + mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); + tx_tables->check_rule_counter = NULL; + +err_encrypt_counter: + kfree(tx_fs); + macsec_fs->tx_fs = NULL; + + return err; +} + +static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5e_macsec_tables *rx_tables; + int i; + + /* Rx check table */ + for (i = 1; i >= 0; --i) { + if (rx_fs->check_rule[i]) { + mlx5_del_flow_rules(rx_fs->check_rule[i]); + rx_fs->check_rule[i] = NULL; + } + + if (rx_fs->check_rule_pkt_reformat[i]) { + mlx5_packet_reformat_dealloc(macsec_fs->mdev, + rx_fs->check_rule_pkt_reformat[i]); + rx_fs->check_rule_pkt_reformat[i] = NULL; + } + } + + rx_tables = &rx_fs->tables; + + if (rx_tables->check_miss_rule) { + mlx5_del_flow_rules(rx_tables->check_miss_rule); + rx_tables->check_miss_rule = NULL; + } + + if (rx_tables->ft_check_group) { + mlx5_destroy_flow_group(rx_tables->ft_check_group); + 
rx_tables->ft_check_group = NULL; + } + + if (rx_tables->ft_check) { + mlx5_destroy_flow_table(rx_tables->ft_check); + rx_tables->ft_check = NULL; + } + + /* Rx crypto table */ + if (rx_tables->crypto_miss_rule) { + mlx5_del_flow_rules(rx_tables->crypto_miss_rule); + rx_tables->crypto_miss_rule = NULL; + } + + mlx5e_destroy_flow_table(&rx_tables->ft_crypto); +} + +static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int mclen = MLX5_ST_SZ_BYTES(fte_match_param); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + /* Flow group for SA rule with SCI */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS_5); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow group for SA rule without SCI */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS_5); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for l2 traps */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs, + struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_spec *spec, + int reformat_param_size) +{ + int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 
0 : 1; + u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI]; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct net_device *netdev = macsec_fs->netdev; + struct mlx5e_macsec_tables *rx_tables; + struct mlx5_flow_handle *rule; + int err = 0; + + rx_tables = &rx_fs->tables; + + /* Rx check table decap rule */ + memset(dest, 0, sizeof(*dest)); + memset(flow_act, 0, sizeof(*flow_act)); + memset(spec, 0, sizeof(*spec)); + + reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC; + reformat_params.size = reformat_param_size; + reformat_params.data = mlx5_reformat_buf; + flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev, + &reformat_params, + MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (IS_ERR(flow_act->pkt_reformat)) { + err = PTR_ERR(flow_act->pkt_reformat); + netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err); + return err; + } + rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + /* MACsec syndrome match */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0); + /* ASO return reg syndrome match */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + /* SecTAG TCI SC present bit */ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + flow_act->flags = FLOW_ACT_NO_APPEND; + flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter); + rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err); + return err; + } + + rx_fs->check_rule[rule_index] = rule; + + return 0; +} + +static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct net_device *netdev = macsec_fs->netdev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5e_macsec_tables *rx_tables; + struct mlx5e_flow_table *ft_crypto; + struct mlx5_flow_table *flow_table; + struct mlx5_flow_group *flow_group; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + u32 *flow_group_in; + int err = 0; + + ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (!ns) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + err = -ENOMEM; + goto free_spec; + } + + rx_tables = &rx_fs->tables; + ft_crypto 
= &rx_tables->ft_crypto; + + /* Rx crypto table */ + ft_attr.level = RX_CRYPTO_TABLE_LEVEL; + ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; + + flow_table = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err); + goto out_flow_group; + } + ft_crypto->t = flow_table; + + /* Rx crypto table groups */ + err = macsec_fs_rx_create_crypto_table_groups(ft_crypto); + if (err) { + netdev_err(netdev, + "Failed to create default flow group for MACsec Rx crypto table err(%d)\n", + err); + goto err; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, + "Failed to add MACsec Rx crypto table default miss rule %d\n", + err); + goto err; + } + rx_tables->crypto_miss_rule = rule; + + /* Rx check table */ + flow_table = macsec_fs_auto_group_table_create(ns, + MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT, + RX_CHECK_TABLE_LEVEL, + RX_CHECK_TABLE_NUM_FTE); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + netdev_err(netdev, "Failed to create MACsec Rx check table, err(%d)\n", err); + goto err; + } + rx_tables->ft_check = flow_table; + + /* Rx check table default miss group/rule */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); + flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + netdev_err(netdev, + "Failed to create default flow group for MACsec Rx check table err(%d)\n", + err); + goto err; + } + rx_tables->ft_check_group = flow_group; + + /* Rx check table default drop rule */ + memset(&flow_act, 0, sizeof(flow_act)); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, "Failed to add MACsec Rx check drop rule, err(%d)\n", err); + goto err; + } + rx_tables->check_miss_rule = rule; + + /* Rx check table decap rules */ + err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, + MLX5_SECTAG_HEADER_SIZE_WITH_SCI); + if (err) + goto err; + + err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, + MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI); + if (err) + goto err; + + goto out_flow_group; + +err: + macsec_fs_rx_destroy(macsec_fs); +out_flow_group: + kvfree(flow_group_in); +free_spec: + kvfree(spec); + return err; +} + +static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + int err = 0; + + if (rx_tables->refcnt) + goto out; + + err = macsec_fs_rx_create(macsec_fs); + if (err) + return err; + +out: + rx_tables->refcnt++; + return err; +} + +static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + + if (--rx_tables->refcnt) + return; + + macsec_fs_rx_destroy(macsec_fs); +} + +static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs, + struct mlx5e_macsec_rx_rule *rx_rule) +{ + int i; + + for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) { + if 
(rx_rule->rule[i]) { + mlx5_del_flow_rules(rx_rule->rule[i]); + rx_rule->rule[i] = NULL; + } + } + + if (rx_rule->meta_modhdr) { + mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr); + rx_rule->meta_modhdr = NULL; + } + + kfree(rx_rule); + + macsec_fs_rx_ft_put(macsec_fs); +} + +static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_macsec_rule_attrs *attrs, + bool sci_present) +{ + u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num; + struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto; + __be32 *sci_p = (__be32 *)(&attrs->sci); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + /* MACsec ethertype */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + + /* SecTAG AN + TCI SC present bit */ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, + tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + if (sci_present) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_5.macsec_tag_2); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2, + be32_to_cpu(sci_p[0])); + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_5.macsec_tag_3); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3, + be32_to_cpu(sci_p[1])); + } else { + /* When the SCI isn't present in the SecTAG, match on the source MAC */ + /* address instead; the SCI is then assumed to carry the default MACsec port */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16), + sci_p, ETH_ALEN); + } + + crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; + crypto_params->obj_id = attrs->macsec_obj_id; +} + +static union mlx5e_macsec_rule * +macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 fs_id) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct net_device *netdev = macsec_fs->netdev; + struct mlx5_modify_hdr *modify_hdr = NULL; + struct mlx5_flow_destination dest = {}; + struct mlx5e_macsec_tables *rx_tables; + union mlx5e_macsec_rule *macsec_rule = NULL; + struct mlx5e_macsec_rx_rule *rx_rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5e_flow_table *ft_crypto; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return NULL; + + err = macsec_fs_rx_ft_get(macsec_fs); + if (err) + goto out_spec; + + macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); + if (!macsec_rule) { + macsec_fs_rx_ft_put(macsec_fs); + goto out_spec; + } + + rx_rule = &macsec_rule->rx_rule; + rx_tables = &rx_fs->tables; + ft_crypto = &rx_tables->ft_crypto; + + /* Set bits[31:30] to the MACsec marker - 0x01 */ + /* Set bits[3:0] to the fs id */ + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(set_action_in, action, data, fs_id | BIT(30)); + MLX5_SET(set_action_in, action, offset, 0); + MLX5_SET(set_action_in, action, length, 32); + + modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + netdev_err(netdev, "Failed to allocate MACsec Rx modify header, err=%d\n", err); + modify_hdr = NULL; + goto err; + } + rx_rule->meta_modhdr = modify_hdr; + + /* Rx crypto table with SCI rule */ + macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true); + + flow_act.modify_hdr = modify_hdr; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx_tables->ft_check; + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, + "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n", + err); + goto err; + } + rx_rule->rule[0] = rule; + + /* Rx crypto table without SCI rule */ + if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) { + memset(spec, 0, sizeof(struct mlx5_flow_spec)); + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + + macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false); + + flow_act.modify_hdr = modify_hdr; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx_tables->ft_check; + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(netdev, + "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n", + err); + goto err; + } + rx_rule->rule[1] = rule; + } + + return macsec_rule; + +err: + macsec_fs_rx_del_rule(macsec_fs, rx_rule); + macsec_rule = NULL; +out_spec: + kvfree(spec); + return macsec_rule; +} + +static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs) +{ + struct net_device *netdev = macsec_fs->netdev; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5e_macsec_tables *rx_tables; + struct mlx5e_macsec_rx *rx_fs; + struct mlx5_fc *flow_counter; + int err; + + rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL); + if (!rx_fs) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + netdev_err(netdev, + "Failed to create MACsec Rx decrypt flow counter, err(%d)\n", + err); + goto err_encrypt_counter; + } + + rx_tables = &rx_fs->tables; + rx_tables->check_rule_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + netdev_err(netdev, + "Failed to create MACsec Rx drop flow counter, err(%d)\n", + err); + goto err_drop_counter; + } + rx_tables->check_miss_rule_counter = flow_counter; + + macsec_fs->rx_fs = rx_fs; + + return 0; + +err_drop_counter: + mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); + rx_tables->check_rule_counter = NULL; + +err_encrypt_counter: + kfree(rx_fs); + macsec_fs->rx_fs = NULL; + + return err; +} + +static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs) +{ + struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct 
mlx5e_macsec_tables *rx_tables; + + if (!rx_fs) + return; + + rx_tables = &rx_fs->tables; + + if (rx_tables->refcnt) { + netdev_err(macsec_fs->netdev, + "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n", + rx_tables->refcnt); + return; + } + + if (rx_tables->check_miss_rule_counter) { + mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter); + rx_tables->check_miss_rule_counter = NULL; + } + + if (rx_tables->check_rule_counter) { + mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); + rx_tables->check_rule_counter = NULL; + } + + kfree(rx_fs); + macsec_fs->rx_fs = NULL; +} + +void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats) +{ + struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats; + struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; + struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + + if (tx_tables->check_rule_counter) + mlx5_fc_query(mdev, tx_tables->check_rule_counter, + &stats->macsec_tx_pkts, &stats->macsec_tx_bytes); + + if (tx_tables->check_miss_rule_counter) + mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter, + &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop); + + if (rx_tables->check_rule_counter) + mlx5_fc_query(mdev, rx_tables->check_rule_counter, + &stats->macsec_rx_pkts, &stats->macsec_rx_bytes); + + if (rx_tables->check_miss_rule_counter) + mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter, + &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop); +} + +union mlx5e_macsec_rule * +mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id) +{ + return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? + macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) : + macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id); +} + +void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs, + union mlx5e_macsec_rule *macsec_rule, + int action) +{ + (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? + macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) : + macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule); +} + +void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs) +{ + macsec_fs_rx_cleanup(macsec_fs); + macsec_fs_tx_cleanup(macsec_fs); + kfree(macsec_fs); +} + +struct mlx5e_macsec_fs * +mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, + struct net_device *netdev) +{ + struct mlx5e_macsec_fs *macsec_fs; + int err; + + macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL); + if (!macsec_fs) + return NULL; + + macsec_fs->mdev = mdev; + macsec_fs->netdev = netdev; + + err = macsec_fs_tx_init(macsec_fs); + if (err) { + netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err); + goto err; + } + + err = macsec_fs_rx_init(macsec_fs); + if (err) { + netdev_err(netdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err); + goto tx_cleanup; + } + + return macsec_fs; + +tx_cleanup: + macsec_fs_tx_cleanup(macsec_fs); +err: + kfree(macsec_fs); + return NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h new file mode 100644 index 000000000000..b429648d4ee7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. */ + +#ifndef __MLX5_MACSEC_STEERING_H__ +#define __MLX5_MACSEC_STEERING_H__ + +#ifdef CONFIG_MLX5_EN_MACSEC + +#include "en_accel/macsec.h" + +#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16 + +struct mlx5e_macsec_fs; +union mlx5e_macsec_rule; + +struct mlx5_macsec_rule_attrs { + sci_t sci; + u32 macsec_obj_id; + u8 assoc_num; + int action; +}; + +enum mlx5_macsec_action { + MLX5_ACCEL_MACSEC_ACTION_ENCRYPT, + MLX5_ACCEL_MACSEC_ACTION_DECRYPT, +}; + +void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs); + +struct mlx5e_macsec_fs * +mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev); + +union mlx5e_macsec_rule * +mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs, + const struct macsec_context *ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id); + +void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs, + union mlx5e_macsec_rule *macsec_rule, + int action); + +void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats); + +#endif + +#endif /* __MLX5_MACSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c new file mode 100644 index 000000000000..e50a2e3f3d18 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include <linux/ethtool.h> +#include <net/sock.h> + +#include "en.h" +#include "en_accel/macsec.h" + +static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) }, +}; + +#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc) + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(macsec_hw) +{ + if (!priv->macsec) + return 0; + + if (mlx5e_is_macsec_device(priv->mdev)) + return NUM_MACSEC_HW_COUNTERS; + + return 0; +} + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(macsec_hw) {} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw) +{ + unsigned int i; + + if (!priv->macsec) + return idx; + + if (!mlx5e_is_macsec_device(priv->mdev)) + return idx; + + for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + mlx5e_macsec_hw_stats_desc[i].format); + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw) +{ + int i; + + if (!priv->macsec) + return idx; + + if (!mlx5e_is_macsec_device(priv->mdev)) + return idx; + + mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec)); + for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec), + mlx5e_macsec_hw_stats_desc, + i); + + return idx; +} + +MLX5E_DEFINE_STATS_GRP(macsec_hw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 
7c1a13738a58..905025a10a8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -45,6 +45,7 @@ #include "en_tc.h" #include "en_rep.h" #include "en_accel/ipsec.h" +#include "en_accel/macsec.h" #include "en_accel/en_accel.h" #include "en_accel/ktls.h" #include "lib/vxlan.h" @@ -4990,6 +4991,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netif_set_tso_max_size(netdev, GSO_MAX_SIZE); mlx5e_set_netdev_dev_addr(netdev); + mlx5e_macsec_build_netdev(priv); mlx5e_ipsec_build_netdev(priv); mlx5e_ktls_build_netdev(priv); } @@ -5053,6 +5055,10 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, } priv->fs = fs; + err = mlx5e_macsec_init(priv); + if (err) + mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err); + err = mlx5e_ipsec_init(priv); if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); @@ -5070,6 +5076,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) mlx5e_health_destroy_reporters(priv); mlx5e_ktls_cleanup(priv); mlx5e_ipsec_cleanup(priv); + mlx5e_macsec_cleanup(priv); mlx5e_fs_cleanup(priv->fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 24de37b79f5a..4d3e7897b51b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -49,6 +49,7 @@ #include "en/rep/tc.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec.h" +#include "en_accel/macsec.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ktls_txrx.h" #include "en/xdp.h" @@ -1421,6 +1422,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); + if (unlikely(mlx5e_macsec_is_rx_flow(cqe))) + mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe); + if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 7409829d1201..575717186912 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -2451,6 +2451,9 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(per_port_buff_congest), &MLX5E_STATS_GRP(ptp), &MLX5E_STATS_GRP(qos), +#ifdef CONFIG_MLX5_EN_MACSEC + &MLX5E_STATS_GRP(macsec_hw), +#endif }; unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index ed4fc940e4ef..99e321bfb744 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -486,5 +486,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels); extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); extern MLX5E_DECLARE_STATS_GRP(ipsec_sw); extern MLX5E_DECLARE_STATS_GRP(ptp); +extern MLX5E_DECLARE_STATS_GRP(macsec_hw); #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 27f791feb517..bf2232a2a836 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -39,6 +39,7 @@ #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/macsec.h" #include "en/ptp.h" #include 
<net/ipv6.h> @@ -485,7 +486,7 @@ err_drop: static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr) { return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs && - !attr->insz; + !attr->insz && !mlx5e_macsec_skb_is_offload(skb); } static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e735e19461ba..32d4c967469c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -577,7 +577,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_hdr->id); - MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id); + MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type, + fte->action.crypto.type); + MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id, + fte->action.crypto.obj_id); vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); @@ -919,13 +922,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions); table_type = FS_FT_FDB; break; + case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC: case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_BYPASS: max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions); table_type = FS_FT_NIC_RX; break; case MLX5_FLOW_NAMESPACE_EGRESS: - case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: + case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC: + case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC: max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions); table_type = FS_FT_NIC_TX; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e3960cdf5131..d53749248fa0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -104,6 +104,10 @@ #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) +#define KERNEL_RX_MACSEC_NUM_PRIOS 1 +#define KERNEL_RX_MACSEC_NUM_LEVELS 2 +#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS) + #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) @@ -126,11 +130,15 @@ #define LAG_PRIO_NUM_LEVELS 1 #define LAG_NUM_PRIOS 1 -#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1) +#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define KERNEL_TX_IPSEC_NUM_PRIOS 1 #define KERNEL_TX_IPSEC_NUM_LEVELS 1 -#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) +#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) + +#define KERNEL_TX_MACSEC_NUM_PRIOS 1 +#define KERNEL_TX_MACSEC_NUM_LEVELS 2 +#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS) struct node_caps { size_t arr_sz; @@ -149,12 +157,16 @@ static struct init_tree_node { enum mlx5_flow_table_miss_action def_miss_action; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 7, + .ar_size = 8, .children = (struct init_tree_node[]){ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS, + 
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS, + KERNEL_RX_MACSEC_NUM_LEVELS))), ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, @@ -186,18 +198,23 @@ static struct init_tree_node { static struct init_tree_node egress_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 2, + .ar_size = 3, .children = (struct init_tree_node[]) { ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, FS_CHAINING_CAPS_EGRESS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), - ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0, + ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS_EGRESS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS, KERNEL_TX_IPSEC_NUM_LEVELS))), + ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0, + FS_CHAINING_CAPS_EGRESS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS, + KERNEL_TX_MACSEC_NUM_LEVELS))), } }; @@ -2269,6 +2286,7 @@ static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type) { switch (type) { case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC: case MLX5_FLOW_NAMESPACE_LAG: case MLX5_FLOW_NAMESPACE_OFFLOADS: case MLX5_FLOW_NAMESPACE_ETHTOOL: @@ -2315,7 +2333,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, prio = FDB_BYPASS_PATH; break; case MLX5_FLOW_NAMESPACE_EGRESS: - case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: + case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC: + case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC: root_ns = steering->egress_root_ns; prio = type - MLX5_FLOW_NAMESPACE_EGRESS; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 079fa44ada71..c63ce03e79e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -273,6 +273,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN_64(dev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) { + err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC); + if (err) + return err; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index 2f536c5d30b1..032adb21ad4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -83,6 +83,7 @@ int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, voi enum { MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS, MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC, + MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC, }; int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c085b031abfc..1986f1c715b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1488,6 +1488,7 @@ static const int types[] = { MLX5_CAP_IPSEC, MLX5_CAP_PORT_SELECTION, MLX5_CAP_DEV_SHAMPO, + MLX5_CAP_MACSEC, }; static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index adf448a8162b..830fed3914b6 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -18,14 +18,13 @@ #include <net/sock.h> #include 
<net/gro_cells.h> #include <net/macsec.h> +#include <net/dst_metadata.h> #include <linux/phy.h> #include <linux/byteorder/generic.h> #include <linux/if_arp.h> #include <uapi/linux/if_macsec.h> -#define MACSEC_SCI_LEN 8 - /* SecTAG length = macsec_eth_header without the optional SCI */ #define MACSEC_TAG_LEN 6 @@ -46,20 +45,10 @@ struct macsec_eth_header { u8 secure_channel_id[8]; /* optional */ } __packed; -#define MACSEC_TCI_VERSION 0x80 -#define MACSEC_TCI_ES 0x40 /* end station */ -#define MACSEC_TCI_SC 0x20 /* SCI present */ -#define MACSEC_TCI_SCB 0x10 /* epon */ -#define MACSEC_TCI_E 0x08 /* encryption */ -#define MACSEC_TCI_C 0x04 /* changed text */ -#define MACSEC_AN_MASK 0x03 /* association number */ -#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C) - /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ #define MIN_NON_SHORT_LEN 48 #define GCM_AES_IV_LEN 12 -#define DEFAULT_ICV_LEN 16 #define for_each_rxsc(secy, sc) \ for (sc = rcu_dereference_bh(secy->rx_sc); \ @@ -243,7 +232,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) return (struct macsec_cb *)skb->cb; } -#define MACSEC_PORT_ES (htons(0x0001)) #define MACSEC_PORT_SCB (0x0000) #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) #define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff) @@ -258,14 +246,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) #define DEFAULT_ENCODING_SA 0 #define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) -static bool send_sci(const struct macsec_secy *secy) -{ - const struct macsec_tx_sc *tx_sc = &secy->tx_sc; - - return tx_sc->send_sci || - (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); -} - static sci_t make_sci(const u8 *addr, __be16 port) { sci_t sci; @@ -330,7 +310,7 @@ static void macsec_fill_sectag(struct macsec_eth_header *h, /* with GCM, C/E clear for !encrypt, both set for encrypt */ if (tx_sc->encrypt) h->tci_an |= MACSEC_TCI_CONFID; - else if (secy->icv_len != DEFAULT_ICV_LEN) + else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) h->tci_an |= MACSEC_TCI_C; h->tci_an |= tx_sc->encoding_sa; @@ -654,7 +634,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, unprotected_len = skb->len; eth = eth_hdr(skb); - sci_present = send_sci(secy); + sci_present = macsec_send_sci(secy); hh = skb_push(skb, macsec_extra_len(sci_present)); memmove(hh, eth, 2 * ETH_ALEN); @@ -1024,11 +1004,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) /* Deliver to the uncontrolled port by default */ enum rx_handler_result ret = RX_HANDLER_PASS; struct ethhdr *hdr = eth_hdr(skb); + struct metadata_dst *md_dst; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; rcu_read_lock(); rxd = macsec_data_rcu(skb->dev); + md_dst = skb_metadata_dst(skb); list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct sk_buff *nskb; @@ -1039,6 +1021,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) * the SecTAG, so we have to deduce which port to deliver to. 
*/ if (macsec_is_offloaded(macsec) && netif_running(ndev)) { + if (md_dst && md_dst->type == METADATA_MACSEC && + (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci))) + continue; + if (ether_addr_equal_64bits(hdr->h_dest, ndev->dev_addr)) { /* exact match, divert skb to this port */ @@ -1296,7 +1282,7 @@ nosci: /* 10.6.1 if the SC is not found */ cbit = !!(hdr->tci_an & MACSEC_TCI_C); if (!cbit) - macsec_finalize_skb(skb, DEFAULT_ICV_LEN, + macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); list_for_each_entry_rcu(macsec, &rxd->secys, secys) { @@ -3416,6 +3402,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, int ret, len; if (macsec_is_offloaded(netdev_priv(dev))) { + struct metadata_dst *md_dst = secy->tx_sc.md_dst; + + skb_dst_drop(skb); + dst_hold(&md_dst->dst); + skb_dst_set(skb, &md_dst->dst); skb->dev = macsec->real_dev; return dev_queue_xmit(skb); } @@ -3743,6 +3734,7 @@ static void macsec_free_netdev(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); + metadata_dst_free(macsec->secy.tx_sc.md_dst); free_percpu(macsec->stats); free_percpu(macsec->secy.tx_sc.stats); @@ -4015,6 +4007,13 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) return -ENOMEM; } + secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); + if (!secy->tx_sc.md_dst) { + free_percpu(secy->tx_sc.stats); + free_percpu(macsec->stats); + return -ENOMEM; + } + if (sci == MACSEC_UNDEF_SCI) sci = dev_to_sci(dev, MACSEC_PORT_ES); @@ -4028,6 +4027,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) secy->xpn = DEFAULT_XPN; secy->sci = sci; + secy->tx_sc.md_dst->u.macsec_info.sci = sci; secy->tx_sc.active = true; secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; secy->tx_sc.encrypt = DEFAULT_ENCRYPT; @@ -4046,7 +4046,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, { struct macsec_dev *macsec = macsec_priv(dev); rx_handler_func_t *rx_handler; - u8 icv_len = DEFAULT_ICV_LEN; + u8 icv_len = MACSEC_DEFAULT_ICV_LEN; struct net_device *real_dev; int err, mtu; sci_t sci; @@ -4170,7 +4170,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { u64 csid = MACSEC_DEFAULT_CIPHER_ID; - u8 icv_len = DEFAULT_ICV_LEN; + u8 icv_len = MACSEC_DEFAULT_ICV_LEN; int flag; bool es, scb, sci; @@ -4182,7 +4182,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_MACSEC_ICV_LEN]) { icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); - if (icv_len != DEFAULT_ICV_LEN) { + if (icv_len != MACSEC_DEFAULT_ICV_LEN) { char dummy_key[DEFAULT_SAK_LEN] = { 0 }; struct crypto_aead *dummy_tfm; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b5f58fd37a0f..2927810f172b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1198,6 +1198,7 @@ enum mlx5_cap_type { MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, MLX5_CAP_DEV_SHAMPO = 0x1d, + MLX5_CAP_MACSEC = 0x1f, MLX5_CAP_GENERAL_2 = 0x20, MLX5_CAP_PORT_SELECTION = 0x25, /* NUM OF CAP Types */ @@ -1446,6 +1447,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_DEV_SHAMPO(mdev, cap)\ MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap) +#define MLX5_CAP_MACSEC(mdev, cap)\ + MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 
8e73c377da2c..c7a91981cd5a 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -79,6 +79,7 @@ static inline void build_leftovers_ft_param(int *priority, enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, MLX5_FLOW_NAMESPACE_LAG, MLX5_FLOW_NAMESPACE_OFFLOADS, MLX5_FLOW_NAMESPACE_ETHTOOL, @@ -92,7 +93,8 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_TX, MLX5_FLOW_NAMESPACE_EGRESS, - MLX5_FLOW_NAMESPACE_EGRESS_KERNEL, + MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, + MLX5_FLOW_NAMESPACE_EGRESS_MACSEC, MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, MLX5_FLOW_NAMESPACE_RDMA_TX, @@ -243,10 +245,10 @@ struct mlx5_flow_act { u32 action; struct mlx5_modify_hdr *modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; - union { - u32 ipsec_obj_id; - uintptr_t esp_id; - }; + struct mlx5_flow_act_crypto_params { + u8 type; + u32 obj_id; + } crypto; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 4acd5610e96b..8decbf9a7bdd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -82,6 +82,7 @@ enum { MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13), + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39), }; enum { @@ -449,7 +450,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_60[0x2]; u8 reformat_insert[0x1]; u8 reformat_remove[0x1]; - u8 reserver_at_64[0x14]; + u8 macsec_encrypt[0x1]; + u8 macsec_decrypt[0x1]; + u8 reserved_at_66[0x2]; + u8 reformat_add_macsec[0x1]; + u8 reformat_remove_macsec[0x1]; + u8 reserved_at_6a[0xe]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; @@ -611,7 +617,11 @@ struct mlx5_ifc_fte_match_set_misc2_bits { u8 metadata_reg_a[0x20]; - u8 reserved_at_1a0[0x60]; + u8 reserved_at_1a0[0x8]; + + u8 macsec_syndrome[0x8]; + + u8 reserved_at_1b0[0x50]; }; struct mlx5_ifc_fte_match_set_misc3_bits { @@ -1276,6 +1286,24 @@ struct mlx5_ifc_ipsec_cap_bits { u8 reserved_at_30[0x7d0]; }; +struct mlx5_ifc_macsec_cap_bits { + u8 macsec_epn[0x1]; + u8 reserved_at_1[0x2]; + u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1]; + u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1]; + u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1]; + u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1]; + u8 reserved_at_7[0x4]; + u8 log_max_macsec_offload[0x5]; + u8 reserved_at_10[0x10]; + + u8 min_log_macsec_full_replay_window[0x8]; + u8 max_log_macsec_full_replay_window[0x8]; + u8 reserved_at_30[0x10]; + + u8 reserved_at_40[0x7c0]; +}; + enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, @@ -3295,6 +3323,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_device_mem_cap_bits device_mem_cap; struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; struct mlx5_ifc_shampo_cap_bits shampo_cap; + struct mlx5_ifc_macsec_cap_bits macsec_cap; u8 reserved_at_0[0x8000]; }; @@ -3310,8 +3339,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, - MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000, - MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000, + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000, + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000, MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000, }; @@ -3321,6 +3350,11 
@@ enum { MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2, }; +enum { + MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0, + MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1, +}; + struct mlx5_ifc_vlan_bits { u8 ethtype[0x10]; u8 prio[0x3]; @@ -3374,7 +3408,7 @@ struct mlx5_ifc_flow_context_bits { u8 extended_destination[0x1]; u8 reserved_at_81[0x1]; u8 flow_source[0x2]; - u8 reserved_at_84[0x4]; + u8 encrypt_decrypt_type[0x4]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; @@ -3386,7 +3420,7 @@ struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_vlan_bits push_vlan_2; - u8 ipsec_obj_id[0x20]; + u8 encrypt_decrypt_obj_id[0x20]; u8 reserved_at_140[0xc0]; struct mlx5_ifc_fte_match_param_bits match_value; @@ -6316,6 +6350,8 @@ enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf, MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10, + MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11, + MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12, }; struct mlx5_ifc_alloc_packet_reformat_context_in_bits { @@ -11471,6 +11507,7 @@ enum { MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20, MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, + MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, }; enum { @@ -11521,6 +11558,67 @@ struct mlx5_ifc_modify_ipsec_obj_in_bits { struct mlx5_ifc_ipsec_obj_bits ipsec_object; }; +struct mlx5_ifc_macsec_aso_bits { + u8 valid[0x1]; + u8 reserved_at_1[0x1]; + u8 mode[0x2]; + u8 window_size[0x2]; + u8 soft_lifetime_arm[0x1]; + u8 hard_lifetime_arm[0x1]; + u8 remove_flow_enable[0x1]; + u8 epn_event_arm[0x1]; + u8 reserved_at_a[0x16]; + + u8 remove_flow_packet_count[0x20]; + + u8 remove_flow_soft_lifetime[0x20]; + + u8 reserved_at_60[0x80]; + + u8 mode_parameter[0x20]; + + u8 replay_protection_window[8][0x20]; +}; + +struct mlx5_ifc_macsec_offload_obj_bits { + u8 modify_field_select[0x40]; + + u8 confidentiality_en[0x1]; + u8 reserved_at_41[0x1]; + u8 esn_en[0x1]; + u8 esn_overlap[0x1]; + u8 reserved_at_44[0x2]; + u8 confidentiality_offset[0x2]; + u8 reserved_at_48[0x4]; + u8 aso_return_reg[0x4]; + u8 reserved_at_50[0x10]; + + u8 esn_msb[0x20]; + + u8 reserved_at_80[0x8]; + u8 dekn[0x18]; + + u8 reserved_at_a0[0x20]; + + u8 sci[0x40]; + + u8 reserved_at_100[0x8]; + u8 macsec_aso_access_pd[0x18]; + + u8 reserved_at_120[0x60]; + + u8 salt[3][0x20]; + + u8 reserved_at_1e0[0x20]; + + struct mlx5_ifc_macsec_aso_bits macsec_aso; +}; + +struct mlx5_ifc_create_macsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_macsec_offload_obj_bits macsec_object; +}; + struct mlx5_ifc_encryption_key_obj_bits { u8 modify_field_select[0x40]; @@ -11638,6 +11736,7 @@ enum { enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2, + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC = 0x4, }; struct mlx5_ifc_tls_static_params_bits { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 8bda3ba5b109..be640c749d5e 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -252,6 +252,7 @@ enum { enum { MLX5_ETH_WQE_FT_META_IPSEC = BIT(0), + MLX5_ETH_WQE_FT_META_MACSEC = BIT(1), }; struct mlx5_wqe_eth_seg { diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index adab27ba1ecb..22a6924bf6da 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -4,11 +4,13 @@ #include <linux/skbuff.h> #include <net/ip_tunnels.h> +#include <net/macsec.h> #include <net/dst.h> enum metadata_type { 
METADATA_IP_TUNNEL, METADATA_HW_PORT_MUX, + METADATA_MACSEC, }; struct hw_port_info { @@ -16,12 +18,17 @@ struct hw_port_info { u32 port_id; }; +struct macsec_info { + sci_t sci; +}; + struct metadata_dst { struct dst_entry dst; enum metadata_type type; union { struct ip_tunnel_info tun_info; struct hw_port_info port_info; + struct macsec_info macsec_info; } u; }; @@ -82,6 +89,9 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a, return memcmp(&a->u.tun_info, &b->u.tun_info, sizeof(a->u.tun_info) + a->u.tun_info.options_len); + case METADATA_MACSEC: + return memcmp(&a->u.macsec_info, &b->u.macsec_info, + sizeof(a->u.macsec_info)); default: return 1; } diff --git a/include/net/macsec.h b/include/net/macsec.h index 73780aa73644..871599b11707 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -16,9 +16,25 @@ #define MACSEC_NUM_AN 4 /* 2 bits for the association number */ +#define MACSEC_SCI_LEN 8 +#define MACSEC_PORT_ES (htons(0x0001)) + +#define MACSEC_TCI_VERSION 0x80 +#define MACSEC_TCI_ES 0x40 /* end station */ +#define MACSEC_TCI_SC 0x20 /* SCI present */ +#define MACSEC_TCI_SCB 0x10 /* epon */ +#define MACSEC_TCI_E 0x08 /* encryption */ +#define MACSEC_TCI_C 0x04 /* changed text */ +#define MACSEC_AN_MASK 0x03 /* association number */ +#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C) + +#define MACSEC_DEFAULT_ICV_LEN 16 + typedef u64 __bitwise sci_t; typedef u32 __bitwise ssci_t; +struct metadata_dst; + typedef union salt { struct { u32 ssci; @@ -182,6 +198,7 @@ struct macsec_tx_sa { * @scb: single copy broadcast flag * @sa: array of secure associations * @stats: stats for this TXSC + * @md_dst: MACsec offload metadata dst */ struct macsec_tx_sc { bool active; @@ -192,6 +209,7 @@ struct macsec_tx_sc { bool scb; struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN]; struct pcpu_tx_sc_stats __percpu *stats; + struct metadata_dst *md_dst; }; /** @@ -288,5 +306,12 @@ struct macsec_ops { }; void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa); +static inline bool macsec_send_sci(const struct macsec_secy *secy) +{ + const struct macsec_tx_sc *tx_sc = &secy->tx_sc; + + return tx_sc->send_sci || + (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); +} #endif /* _NET_MACSEC_H_ */ |
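
For reference, the data-path contract these changes establish can be made concrete with a few sketches. On transmit, macsec_start_xmit() attaches a metadata dst carrying the interface's SCI to every skb it hands to the real device. A minimal sketch of how an offloading driver could recover it follows; the example_* name is hypothetical, while skb_metadata_dst(), METADATA_MACSEC and macsec_info are the interfaces this series introduces:

#include <net/dst_metadata.h>
#include <net/macsec.h>

/* Minimal sketch: recover the per-interface SCI from the TX metadata dst
 * attached by macsec_start_xmit(). Returns false for non-MACsec traffic.
 */
static bool example_macsec_skb_get_sci(struct sk_buff *skb, sci_t *sci)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_MACSEC)
		return false;

	*sci = md_dst->u.macsec_info.sci;	/* unique per MACsec interface */
	return true;
}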
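
On the mlx5 egress side, the steering rule installed by macsec_fs_tx_setup_fte() matches metadata register A against MLX5_ETH_WQE_FT_META_MACSEC | id << 2. Assuming register A is sourced from the send WQE's Ethernet segment, as it is for the existing MLX5_ETH_WQE_FT_META_IPSEC bit, marking an offloaded packet's descriptor would reduce to roughly the following (a sketch, not the driver's actual helper):

#include <linux/mlx5/qp.h>

/* Sketch: stamp the WQE Ethernet segment so the egress steering rule
 * (reg_a match on MLX5_ETH_WQE_FT_META_MACSEC | id << 2) catches the
 * packet. fs_id is the ID handed back by macsec_fs_tx_setup_fte().
 */
static void example_macsec_tx_build_eseg(u32 fs_id, struct mlx5_wqe_eth_seg *eseg)
{
	eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
}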
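
On receive, the modify-header action built in macsec_fs_rx_add_rule() writes fs_id | BIT(30) into metadata register B: bits [31:30] carry the MACsec marker (0x01) and bits [3:0] carry the fs_id. How that register reaches the driver is device-specific (the mlx5e_macsec_is_rx_flow(cqe) check added to mlx5e_build_rx_skb() suggests the CQE); assuming the raw value is at hand, the decode is simply:

/* Sketch of decoding the register-B value set by the Rx steering rule:
 * bits [31:30] must hold the MACsec marker, bits [3:0] hold the fs_id.
 * Obtaining reg_b from the completion is device-specific and assumed here.
 */
static bool example_macsec_rx_is_offloaded(u32 reg_b, u32 *fs_id)
{
	if ((reg_b >> 30) != 0x01)
		return false;

	*fs_id = reg_b & 0xf;
	return true;
}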
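
Once fs_id is resolved back to an Rx SCI, the driver tags the skb with a METADATA_MACSEC dst so that handle_not_macsec() can divert it via find_rx_sc() rather than by destination MAC alone. A per-packet allocation is shown only for brevity; a real driver would more likely preallocate one metadata_dst per Rx SC and take a dst_hold() per packet:

#include <net/dst_metadata.h>

/* Sketch: hand the stack an Rx skb tagged with the SCI of the secure
 * channel it was decrypted for. On allocation failure the skb is left
 * untagged and the MACsec rx handler falls back to dst-MAC matching.
 */
static void example_macsec_rx_tag_skb(struct sk_buff *skb, sci_t sci)
{
	struct metadata_dst *md_dst;

	md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_ATOMIC);
	if (unlikely(!md_dst))
		return;

	md_dst->u.macsec_info.sci = sci;
	skb_dst_set(skb, &md_dst->dst);
}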