author		Linus Torvalds	2019-07-11 10:55:49 -0700
committer	Linus Torvalds	2019-07-11 10:55:49 -0700
commit		237f83dfbe668443b5e31c3c7576125871cca674 (patch)
tree		11848a8d0aa414a1d3ce2024e181071b1d9dea08 /drivers/infiniband
parent		8f6ccf6159aed1f04c6d179f61f6fb2691261e84 (diff)
parent		1ff2f0fa450ea4e4f87793d9ed513098ec6e12be (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Some highlights from this development cycle:
1) Big refactoring of ipv6 route and neigh handling to support
nexthop objects configurable as units from userspace. From David
Ahern.
2) Convert explored_states in BPF verifier into a hash table,
significantly decreasing the state held for programs with bpf2bpf
calls, from Alexei Starovoitov.
3) Implement bpf_send_signal() helper, from Yonghong Song.
4) Various classifier enhancements to mvpp2 driver, from Maxime
Chevallier.
5) Add aRFS support to hns3 driver, from Jian Shen.
6) Fix use after free in inet frags by allocating fqdirs dynamically
and reworking how rhashtable dismantle occurs, from Eric Dumazet.
7) Add act_ctinfo packet classifier action, from Kevin
Darbyshire-Bryant.
8) Add TFO key backup infrastructure, from Jason Baron.
9) Remove several old and unused ISDN drivers, from Arnd Bergmann.
10) Add devlink notifications for flash update status to mlxsw driver,
from Jiri Pirko.
11) Lots of kTLS offload infrastructure fixes, from Jakub Kicinski.
12) Add support for mv88e6250 DSA chips, from Rasmus Villemoes.
13) Various enhancements to ipv6 flow label handling, from Eric
Dumazet and Willem de Bruijn.
14) Support TLS offload in nfp driver, from Jakub Kicinski, Dirk van
der Merwe, and others.
15) Various improvements to axienet driver including converting it to
phylink, from Robert Hancock.
16) Add PTP support to sja1105 DSA driver, from Vladimir Oltean.
17) Add mqprio qdisc offload support to dpaa2-eth, from Ioana
Radulescu.
18) Add devlink health reporting to mlx5, from Moshe Shemesh.
19) Convert stmmac over to phylink, from Jose Abreu.
20) Add PTP hardware clock (PHC) support to mlxsw, from Shalom
Toledo.
21) Add nftables SYNPROXY support, from Fernando Fernandez Mancera.
22) Convert tcp_fastopen over to use SipHash, from Ard Biesheuvel.
23) Track spill/fill of constants in BPF verifier, from Alexei
Starovoitov.
24) Support bounded loops in BPF, from Alexei Starovoitov.
25) Various page_pool API fixes and improvements, from Jesper Dangaard
Brouer.
26) Just like ipv4, support ref-countless ipv6 route handling. From
Wei Wang.
27) Support VLAN offloading in aquantia driver, from Igor Russkikh.
28) Add AF_XDP zero-copy support to mlx5, from Maxim Mikityanskiy.
29) Add flower GRE encap/decap support to nfp driver, from Pieter
Jansen van Vuuren.
30) Protect against stack overflow when using act_mirred, from John
Hurley.
31) Allow devmap map lookups from eBPF, from Toke Høiland-Jørgensen.
32) Use page_pool API in netsec driver, from Ilias Apalodimas.
33) Add Google gve network driver, from Catherine Sullivan.
34) More indirect call avoidance, from Paolo Abeni.
35) Add kTLS TX HW offload support to mlx5, from Tariq Toukan.
36) Add XDP_REDIRECT support to bnxt_en, from Andy Gospodarek.
37) Add MPLS manipulation actions to TC, from John Hurley.
38) Add sending a packet to connection tracking from TC actions, and
then allow flower classifier matching on conntrack state. From
Paul Blakey.
39) Netfilter hw offload support, from Pablo Neira Ayuso"
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2080 commits)
net/mlx5e: Return in default case statement in tx_post_resync_params
mlx5: Return -EINVAL when WARN_ON_ONCE triggers in mlx5e_tls_resync().
net: dsa: add support for BRIDGE_MROUTER attribute
pkt_sched: Include const.h
net: netsec: remove static declaration for netsec_set_tx_de()
net: netsec: remove superfluous if statement
netfilter: nf_tables: add hardware offload support
net: flow_offload: rename tc_cls_flower_offload to flow_cls_offload
net: flow_offload: add flow_block_cb_is_busy() and use it
net: sched: remove tcf block API
drivers: net: use flow block API
net: sched: use flow block API
net: flow_offload: add flow_block_cb_{priv, incref, decref}()
net: flow_offload: add list handling functions
net: flow_offload: add flow_block_cb_alloc() and flow_block_cb_free()
net: flow_offload: rename TCF_BLOCK_BINDER_TYPE_* to FLOW_BLOCK_BINDER_TYPE_*
net: flow_offload: rename TC_BLOCK_{UN}BIND to FLOW_BLOCK_{UN}BIND
net: flow_offload: add flow_block_cb_setup_simple()
net: hisilicon: Add an tx_desc to adapt HI13X1_GMAC
net: hisilicon: Add an rx_desc to adapt HI13X1_GMAC
...
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c      |  5
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c             |  9
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c       |  7
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c     |  6
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c    | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c              | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c            | 18
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c            | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c          | 39
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.h          |  4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c            | 79
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h         |  3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c              | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c             | 33
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c              |  2
-rw-r--r--  drivers/infiniband/hw/nes/nes.c              |  8
-rw-r--r--  drivers/infiniband/hw/qedr/main.c            | 25
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h            |  2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c  | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c    |  1
20 files changed, 198 insertions(+), 106 deletions(-)
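Most of the hunks below are fallout from replacing the removed for_ifa()/endfor_ifa()
macros with the per-address iterators added this cycle. A minimal sketch of the
conversion pattern, assuming a hypothetical my_handle_addr() consumer:

    #include <linux/inetdevice.h>
    #include <linux/rcupdate.h>

    static void my_handle_addr(__be32 addr) { /* hypothetical consumer */ }

    static void my_walk_v4_addrs(struct in_device *in_dev)
    {
            const struct in_ifaddr *ifa;

            /* old style: for_ifa(in_dev) { ... } endfor_ifa(in_dev); */
            rcu_read_lock();
            in_dev_for_each_ifa_rcu(ifa, in_dev) {
                    if (ifa->ifa_flags & IFA_F_SECONDARY)
                            continue;       /* this test replaces for_primary_ifa() */
                    my_handle_addr(ifa->ifa_address);
            }
            rcu_read_unlock();

            /* in_dev_for_each_ifa_rtnl(ifa, in_dev) is the variant for callers
             * that hold the RTNL lock instead of rcu_read_lock().
             */
    }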
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 558de0b9895c..2860def84f4d 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -330,6 +330,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
 static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 				 u8 port, struct net_device *ndev)
 {
+	const struct in_ifaddr *ifa;
 	struct in_device *in_dev;
 	struct sin_list {
 		struct list_head	list;
@@ -349,7 +350,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 		return;
 	}
 
-	for_ifa(in_dev) {
+	in_dev_for_each_ifa_rcu(ifa, in_dev) {
 		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
 		if (!entry)
@@ -359,7 +360,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 		entry->ip.sin_addr.s_addr = ifa->ifa_address;
 		list_add_tail(&entry->list, &sin_list);
 	}
-	endfor_ifa(in_dev);
+	rcu_read_unlock();
 
 	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0f3b1193d5f8..09fcfc9e052d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3230,17 +3230,22 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
 	int found = 0;
 	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
 	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+	const struct in_ifaddr *ifa;
 
 	ind = in_dev_get(dev->rdev.lldi.ports[0]);
 	if (!ind)
 		return -EADDRNOTAVAIL;
-	for_primary_ifa(ind) {
+	rcu_read_lock();
+	in_dev_for_each_ifa_rcu(ifa, ind) {
+		if (ifa->ifa_flags & IFA_F_SECONDARY)
+			continue;
 		laddr->sin_addr.s_addr = ifa->ifa_address;
 		raddr->sin_addr.s_addr = ifa->ifa_address;
 		found = 1;
 		break;
 	}
-	endfor_ifa(ind);
+	rcu_read_unlock();
+
 	in_dev_put(ind);
 	return found ? 0 : -EADDRNOTAVAIL;
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 8233f5a4e623..700a5d06b60c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1773,8 +1773,11 @@ static enum i40iw_status_code i40iw_add_mqh_4(
 		if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
 		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
 		      (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+			const struct in_ifaddr *ifa;
+
 			idev = in_dev_get(dev);
-			for_ifa(idev) {
+
+			in_dev_for_each_ifa_rtnl(ifa, idev) {
 				i40iw_debug(&iwdev->sc_dev,
 					    I40IW_DEBUG_CM,
 					    "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
@@ -1819,7 +1822,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
 					cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
 				}
 			}
-			endfor_ifa(idev);
+			in_dev_put(idev);
 		}
 	}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 10932baee279..d44cf33df81a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1222,8 +1222,10 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
 		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
 		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
 		      (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+			const struct in_ifaddr *ifa;
+
 			idev = in_dev_get(dev);
-			for_ifa(idev) {
+			in_dev_for_each_ifa_rtnl(ifa, idev) {
 				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
 					    "IP=%pI4, vlan_id=%d, MAC=%pM\n",
 					    &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
 					    dev->dev_addr);
@@ -1235,7 +1237,7 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
 							    true,
 							    I40IW_ARP_ADD);
 			}
-			endfor_ifa(idev);
+			in_dev_put(idev);
 		}
 	}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 337410f40860..016524683e17 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -174,10 +174,14 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 		rcu_read_lock();
 		in = __in_dev_get_rcu(upper_dev);
-		if (!in->ifa_list)
-			local_ipaddr = 0;
-		else
-			local_ipaddr = ntohl(in->ifa_list->ifa_address);
+		local_ipaddr = 0;
+		if (in) {
+			struct in_ifaddr *ifa;
+
+			ifa = rcu_dereference(in->ifa_list);
+			if (ifa)
+				local_ipaddr = ntohl(ifa->ifa_address);
+		}
 		rcu_read_unlock();
 	} else {
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2e2e65f00257..4efbbd2fce0c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -37,7 +37,7 @@
 #include "mlx5_ib.h"
 #include "srq.h"
 
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 {
 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
 
@@ -522,9 +522,9 @@ repoll:
 	case MLX5_CQE_SIG_ERR:
 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-		read_lock(&dev->mdev->priv.mkey_table.lock);
-		mmkey = __mlx5_mr_lookup(dev->mdev,
-					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+		xa_lock(&dev->mdev->priv.mkey_table);
+		mmkey = xa_load(&dev->mdev->priv.mkey_table,
+				mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
 		mr = to_mibmr(mmkey);
 		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
 		mr->sig->sig_err_exists = true;
@@ -537,7 +537,7 @@ repoll:
 			     mr->sig->err_item.expected,
 			     mr->sig->err_item.actual);
 
-		read_unlock(&dev->mdev->priv.mkey_table.lock);
+		xa_unlock(&dev->mdev->priv.mkey_table);
 		goto repoll;
 	}
 
@@ -891,6 +891,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	int entries = attr->cqe;
 	int vector = attr->comp_vector;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	struct mlx5_ib_cq *cq;
 	int uninitialized_var(index);
 	int uninitialized_var(inlen);
@@ -958,7 +959,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
 		MLX5_SET(cqc, cqc, oi, 1);
 
-	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
 	if (err)
 		goto err_cqb;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 80b42d069328..931f587dfb8f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1043,13 +1043,10 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 				     struct mlx5_ib_dev *dev,
 				     void *in, void *out)
 {
-	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
 	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
-	unsigned long flags;
 	struct mlx5_core_mkey *mkey;
 	void *mkc;
 	u8 key;
-	int err;
 
 	mkey = &devx_mr->mmkey;
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1062,11 +1059,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 	mkey->pd = MLX5_GET(mkc, mkc, pd);
 	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
 
-	write_lock_irqsave(&table->lock, flags);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
-				mkey);
-	write_unlock_irqrestore(&table->lock, flags);
-	return err;
+	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
+			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
 }
 
 static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1117,12 +1111,8 @@ static void devx_free_indirect_mkey(struct rcu_head *rcu)
  */
 static void devx_cleanup_mkey(struct devx_obj *obj)
 {
-	struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
-	unsigned long flags;
-
-	write_lock_irqsave(&table->lock, flags);
-	radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
-	write_unlock_irqrestore(&table->lock, flags);
+	xa_erase(&obj->mdev->priv.mkey_table,
+		 mlx5_base_mkey(obj->devx_mr.mmkey.key));
 }
 
 static int devx_obj_cleanup(struct ib_uobject *uobject,
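The devx.c hunk above (and the mr.c and odp.c hunks below) swap the driver's
radix-tree mkey table for an XArray, which carries its own locking. A sketch of
that conversion pattern with illustrative names (my_table, struct my_obj), not
taken from the patch:

    #include <linux/xarray.h>

    struct my_obj;

    static DEFINE_XARRAY(my_table);         /* replaces radix tree + rwlock */

    static int my_insert(unsigned long id, struct my_obj *obj)
    {
            /* xa_store() locks internally; no rwlock in the caller */
            return xa_err(xa_store(&my_table, id, obj, GFP_KERNEL));
    }

    static struct my_obj *my_lookup(unsigned long id)
    {
            return xa_load(&my_table, id);  /* RCU-safe, lockless lookup */
    }

    static void my_remove(unsigned long id)
    {
            xa_erase(&my_table, id);
    }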
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 1fc302d41a53..b8841355fcd5 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -65,11 +65,12 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
 static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 	struct uverbs_attr_bundle *attrs)
 {
-	struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+	struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
 	struct mlx5_ib_flow_handler *flow_handler;
 	struct mlx5_ib_flow_matcher *fs_matcher;
 	struct ib_uobject **arr_flow_actions;
 	struct ib_uflow_resources *uflow_res;
+	struct mlx5_flow_act flow_act = {};
 	void *devx_obj;
 	int dest_id, dest_type;
 	void *cmd_in;
@@ -172,17 +173,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 					       arr_flow_actions[i]->object);
 	}
 
-	ret = uverbs_copy_from(&flow_act.flow_tag, attrs,
+	ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
 			       MLX5_IB_ATTR_CREATE_FLOW_TAG);
 	if (!ret) {
-		if (flow_act.flow_tag >= BIT(24)) {
+		if (flow_context.flow_tag >= BIT(24)) {
 			ret = -EINVAL;
 			goto err_out;
 		}
-		flow_act.flags |= FLOW_ACT_HAS_TAG;
+		flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
 	}
 
-	flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, &flow_act,
+	flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher,
+					       &flow_context,
+					       &flow_act,
 					       counter_id, cmd_in, inlen,
 					       dest_id, dest_type);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 269b24a3baa1..74ce9249e75a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -14,9 +14,10 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	int vport_index;
 
 	ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
-	vport_index = ibdev->free_port++;
+	vport_index = rep->vport_index;
 
 	ibdev->port[vport_index].rep = rep;
+	rep->rep_data[REP_IB].priv = ibdev;
 	write_lock(&ibdev->port[vport_index].roce.netdev_lock);
 	ibdev->port[vport_index].roce.netdev =
 		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -28,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	int num_ports = MLX5_TOTAL_VPORTS(dev);
+	int num_ports = mlx5_eswitch_get_total_vports(dev);
 	const struct mlx5_ib_profile *profile;
 	struct mlx5_ib_dev *ibdev;
 	int vport_index;
@@ -50,7 +51,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	}
 
 	ibdev->is_rep = true;
-	vport_index = ibdev->free_port++;
+	vport_index = rep->vport_index;
 	ibdev->port[vport_index].rep = rep;
 	ibdev->port[vport_index].roce.netdev =
 		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -60,7 +61,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	if (!__mlx5_ib_add(ibdev, profile))
 		return -EINVAL;
 
-	rep->rep_if[REP_IB].priv = ibdev;
+	rep->rep_data[REP_IB].priv = ibdev;
 
 	return 0;
 }
@@ -68,15 +69,18 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 static void
 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5_ib_dev *dev;
+	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+	struct mlx5_ib_port *port;
 
-	if (!rep->rep_if[REP_IB].priv ||
-	    rep->vport != MLX5_VPORT_UPLINK)
-		return;
+	port = &dev->port[rep->vport_index];
+	write_lock(&port->roce.netdev_lock);
+	port->roce.netdev = NULL;
+	write_unlock(&port->roce.netdev_lock);
+	rep->rep_data[REP_IB].priv = NULL;
+	port->rep = NULL;
 
-	dev = mlx5_ib_rep_to_dev(rep);
-	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-	rep->rep_if[REP_IB].priv = NULL;
+	if (rep->vport == MLX5_VPORT_UPLINK)
+		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -84,16 +88,17 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return mlx5_ib_rep_to_dev(rep);
 }
 
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5_ib_vport_rep_load,
+	.unload = mlx5_ib_vport_rep_unload,
+	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
+
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-
-	rep_if.load = mlx5_ib_vport_rep_load;
-	rep_if.unload = mlx5_ib_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
 
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
 }
 
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 8336e0517a5c..de43b423bafc 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -28,7 +28,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 #else /* CONFIG_MLX5_ESWITCH */
 static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 {
-	return SRIOV_NONE;
+	return MLX5_ESWITCH_NONE;
 }
 
 static inline
@@ -72,6 +72,6 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 static inline struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 {
-	return (struct mlx5_ib_dev *)rep->rep_if[REP_IB].priv;
+	return rep->rep_data[REP_IB].priv;
 }
 #endif /* __MLX5_IB_REP_H__ */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 340290b883fe..ba312bf59c7a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2666,11 +2666,15 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
 	}
 }
 
-static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
-			   u32 *match_v, const union ib_flow_spec *ib_spec,
+static int parse_flow_attr(struct mlx5_core_dev *mdev,
+			   struct mlx5_flow_spec *spec,
+			   const union ib_flow_spec *ib_spec,
 			   const struct ib_flow_attr *flow_attr,
 			   struct mlx5_flow_act *action, u32 prev_type)
 {
+	struct mlx5_flow_context *flow_context = &spec->flow_context;
+	u32 *match_c = spec->match_criteria;
+	u32 *match_v = spec->match_value;
 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					   misc_parameters);
 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
@@ -2989,8 +2993,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 		if (ib_spec->flow_tag.tag_id >= BIT(24))
 			return -EINVAL;
 
-		action->flow_tag = ib_spec->flow_tag.tag_id;
-		action->flags |= FLOW_ACT_HAS_TAG;
+		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
+		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
 		break;
 	case IB_FLOW_SPEC_ACTION_DROP:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
@@ -3084,7 +3088,8 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
 		return VALID_SPEC_NA;
 
 	return is_crypto && is_ipsec &&
-		(!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
+		(!egress || (!is_drop &&
+			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
 		VALID_SPEC_VALID : VALID_SPEC_INVALID;
 }
@@ -3464,6 +3469,37 @@ free:
 	return ret;
 }
 
+static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
+					 struct mlx5_flow_spec *spec,
+					 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+	void *misc;
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    misc_parameters_2);
+
+		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+			 mlx5_eswitch_get_vport_metadata_for_match(esw,
+								   rep->vport));
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    misc_parameters_2);
+
+		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+	} else {
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    misc_parameters);
+
+		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
+
+		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    misc_parameters);
+
+		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+	}
+}
+
 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 						      struct mlx5_ib_flow_prio *ft_prio,
 						      const struct ib_flow_attr *flow_attr,
@@ -3473,7 +3509,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 {
 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
-	struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_destination dest_arr[2] = {};
 	struct mlx5_flow_destination *rule_dst = dest_arr;
@@ -3504,8 +3540,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 	}
 
 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
-		err = parse_flow_attr(dev->mdev, spec->match_criteria,
-				      spec->match_value,
+		err = parse_flow_attr(dev->mdev, spec,
 				      ib_flow, flow_attr, &flow_act,
 				      prev_type);
 		if (err < 0)
@@ -3519,19 +3554,15 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 		set_underlay_qp(dev, spec, underlay_qpn);
 
 	if (dev->is_rep) {
-		void *misc;
+		struct mlx5_eswitch_rep *rep;
 
-		if (!dev->port[flow_attr->port - 1].rep) {
+		rep = dev->port[flow_attr->port - 1].rep;
+		if (!rep) {
 			err = -EINVAL;
 			goto free;
 		}
-		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    misc_parameters);
-		MLX5_SET(fte_match_set_misc, misc, source_port,
-			 dev->port[flow_attr->port - 1].rep->vport);
-		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    misc_parameters);
-		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+		mlx5_ib_set_rule_source_port(dev, spec, rep);
 	}
 
 	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
@@ -3572,11 +3603,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
 			MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
 	}
 
-	if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
+	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
 	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
 		mlx5_ib_warn(dev,
 			     "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
-			     flow_act.flow_tag, flow_attr->type);
+			     spec->flow_context.flow_tag, flow_attr->type);
 		err = -EINVAL;
 		goto free;
 	}
@@ -3947,6 +3978,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
 		      struct mlx5_ib_flow_prio *ft_prio,
 		      struct mlx5_flow_destination *dst,
 		      struct mlx5_ib_flow_matcher  *fs_matcher,
+		      struct mlx5_flow_context *flow_context,
 		      struct mlx5_flow_act *flow_act,
 		      void *cmd_in, int inlen,
 		      int dst_num)
@@ -3969,6 +4001,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
 	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
 	       fs_matcher->mask_len);
 	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+	spec->flow_context = *flow_context;
 
 	handler->rule = mlx5_add_flow_rules(ft, spec,
 					    flow_act, dst, dst_num);
@@ -4033,6 +4066,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
 struct mlx5_ib_flow_handler *
 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
 			struct mlx5_ib_flow_matcher *fs_matcher,
+			struct mlx5_flow_context *flow_context,
 			struct mlx5_flow_act *flow_act,
 			u32 counter_id,
 			void *cmd_in, int inlen, int dest_id,
@@ -4085,7 +4119,8 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
 		dst_num++;
 	}
 
-	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
+					flow_context, flow_act,
 					cmd_in, inlen, dst_num);
 
 	if (IS_ERR(handler)) {
@@ -4457,7 +4492,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
 	 * lock/unlock above locks Now need to arm all involved CQs.
 	 */
 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
-		mcq->comp(mcq);
+		mcq->comp(mcq, NULL);
 	}
 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
 }
@@ -6779,7 +6814,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	printk_once(KERN_INFO "%s", mlx5_version);
 
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
-	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
 		if (!mlx5_core_mp_enabled(mdev))
 			mlx5_ib_register_vport_reps(mdev);
 		return mdev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 40eb8be482e4..ee73dc122d28 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -920,6 +920,7 @@ struct mlx5_ib_lb_state {
 };
 
 struct mlx5_ib_pf_eq {
+	struct notifier_block irq_nb;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_eq *core;
 	struct work_struct work;
@@ -977,7 +978,6 @@ struct mlx5_ib_dev {
 	u16			devx_whitelist_uid;
 	struct mlx5_srq_table   srq_table;
 	struct mlx5_async_ctx   async_ctx;
-	int			free_port;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1316,6 +1316,7 @@ extern const struct uapi_definition mlx5_ib_devx_defs[];
 extern const struct uapi_definition mlx5_ib_flow_defs[];
 struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
 	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+	struct mlx5_flow_context *flow_context,
 	struct mlx5_flow_act *flow_act, u32 counter_id,
 	void *cmd_in, int inlen, int dest_id, int dest_type);
 bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
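The flow.c, main.c and mlx5_ib.h hunks above split per-packet metadata (the flow
tag) out of struct mlx5_flow_act and into the new spec->flow_context. A sketch
of a rule created under the new layout, assuming a hypothetical ft/dst pair and
MY_FLOW_TAG value:

    static struct mlx5_flow_handle *add_tagged_rule(struct mlx5_flow_table *ft,
                                                    struct mlx5_flow_destination *dst)
    {
            struct mlx5_flow_act flow_act = {
                    .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
            };
            struct mlx5_flow_handle *rule;
            struct mlx5_flow_spec *spec;

            spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
            if (!spec)
                    return ERR_PTR(-ENOMEM);

            /* the tag now travels in the flow context, not the action */
            spec->flow_context.flow_tag = MY_FLOW_TAG;      /* hypothetical */
            spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;

            rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
            kvfree(spec);
            return rule;
    }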
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5f09699fab98..83b452d977d4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -130,7 +130,7 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u8 key;
 	unsigned long flags;
-	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
+	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
 	int err;
 
 	spin_lock_irqsave(&ent->lock, flags);
@@ -158,12 +158,12 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
 	ent->size++;
 	spin_unlock_irqrestore(&ent->lock, flags);
 
-	write_lock_irqsave(&table->lock, flags);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
-				&mr->mmkey);
+	xa_lock_irqsave(mkeys, flags);
+	err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
+				&mr->mmkey, GFP_ATOMIC));
+	xa_unlock_irqrestore(mkeys, flags);
 	if (err)
 		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-	write_unlock_irqrestore(&table->lock, flags);
 
 	if (!completion_done(&ent->compl))
 		complete(&ent->compl);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 91507a2e9290..831c450b271a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -768,7 +768,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 	bcnt -= *bytes_committed;
 
 next_mr:
-	mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
+	mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
 	if (!mkey_is_eq(mmkey, key)) {
 		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
 		ret = -EFAULT;
@@ -1488,9 +1488,11 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
 	mlx5_eq_update_ci(eq->core, cc, 1);
 }
 
-static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr)
+static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
+			     void *data)
 {
-	struct mlx5_ib_pf_eq *eq = eq_ptr;
+	struct mlx5_ib_pf_eq *eq =
+		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
 	unsigned long flags;
 
 	if (spin_trylock_irqsave(&eq->lock, flags)) {
@@ -1553,20 +1555,26 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 		goto err_mempool;
 	}
 
+	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_PFAULT_IDX,
-		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
+		.irq_index = 0,
 		.nent = MLX5_IB_NUM_PF_EQE,
-		.context = eq,
-		.handler = mlx5_ib_eq_pf_int
 	};
-	eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param);
+	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
 	if (IS_ERR(eq->core)) {
 		err = PTR_ERR(eq->core);
 		goto err_wq;
 	}
+	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
+	if (err) {
+		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
+		goto err_eq;
+	}
 
 	return 0;
+err_eq:
+	mlx5_eq_destroy_generic(dev->mdev, eq->core);
 err_wq:
 	destroy_workqueue(eq->wq);
 err_mempool:
@@ -1579,6 +1587,7 @@ mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 {
 	int err;
 
+	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
 	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
 	cancel_work_sync(&eq->work);
 	destroy_workqueue(eq->wq);
@@ -1677,8 +1686,8 @@ static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
 		struct mlx5_core_mkey *mmkey;
 		struct mlx5_ib_mr *mr;
 
-		mmkey = __mlx5_mr_lookup(dev->mdev,
-					 mlx5_base_mkey(sg_list[i].lkey));
+		mmkey = xa_load(&dev->mdev->priv.mkey_table,
+				mlx5_base_mkey(sg_list[i].lkey));
 		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 		atomic_dec(&mr->num_pending_prefetch);
 	}
@@ -1697,8 +1706,8 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
 		struct mlx5_core_mkey *mmkey;
 		struct mlx5_ib_mr *mr;
 
-		mmkey = __mlx5_mr_lookup(dev->mdev,
-					 mlx5_base_mkey(sg_list[i].lkey));
+		mmkey = xa_load(&dev->mdev->priv.mkey_table,
+				mlx5_base_mkey(sg_list[i].lkey));
 		if (!mmkey || mmkey->key != sg_list[i].lkey) {
 			ret = false;
 			break;
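The odp.c hunk above also converts the page-fault EQ from a raw IRQ handler to
a notifier_block embedded in the driver state and recovered via container_of().
A sketch of that pattern with illustrative names (struct my_eq, my_eq_handler),
not from the patch:

    #include <linux/notifier.h>
    #include <linux/kernel.h>

    struct my_eq {
            struct notifier_block irq_nb;   /* registered with the event source */
            /* ... driver-private state ... */
    };

    static int my_eq_handler(struct notifier_block *nb, unsigned long type,
                             void *data)
    {
            /* recover the enclosing structure from the embedded notifier */
            struct my_eq *eq = container_of(nb, struct my_eq, irq_nb);

            /* ... process events for eq ... */
            return NOTIFY_OK;
    }

    static void my_eq_init(struct my_eq *eq)
    {
            eq->irq_nb.notifier_call = my_eq_handler;
            /* then hand eq->irq_nb to the EQ layer, as mlx5_eq_enable() does */
    }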
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f6623c77443a..768c7e81f688 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -6297,7 +6297,7 @@ static void handle_drain_completion(struct ib_cq *cq,
 		/* Run the CQ handler - this makes sure that the drain WR will
 		 * be processed if wasn't processed yet.
 		 */
-		mcq->mcq.comp(&mcq->mcq);
+		mcq->mcq.comp(&mcq->mcq, NULL);
 	}
 
 	wait_for_completion(&sdrain->done);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index e00add6d78ec..29b324726ea6 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -183,7 +183,13 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
 			rcu_read_lock();
 			in = __in_dev_get_rcu(upper_dev);
-			nesvnic->local_ipaddr = in->ifa_list->ifa_address;
+			if (in) {
+				struct in_ifaddr *ifa;
+
+				ifa = rcu_dereference(in->ifa_list);
+				if (ifa)
+					nesvnic->local_ipaddr = ifa->ifa_address;
+			}
 			rcu_read_unlock();
 		} else {
 			nesvnic->local_ipaddr = ifa->ifa_address;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 083c2c00a8e9..5ebf3c53b3fb 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -312,7 +312,8 @@ static void qedr_free_mem_sb(struct qedr_dev *dev,
 			     struct qed_sb_info *sb_info, int sb_id)
 {
 	if (sb_info->sb_virt) {
-		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
+					     QED_SB_TYPE_CNQ);
 		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
 				  (void *)sb_info->sb_virt, sb_info->sb_phys);
 	}
@@ -504,11 +505,13 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
 static void qedr_sync_free_irqs(struct qedr_dev *dev)
 {
 	u32 vector;
+	u16 idx;
 	int i;
 
 	for (i = 0; i < dev->int_info.used_cnt; i++) {
 		if (dev->int_info.msix_cnt) {
-			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+			vector = dev->int_info.msix[idx].vector;
 			synchronize_irq(vector);
 			free_irq(vector, &dev->cnq_array[i]);
 		}
@@ -520,6 +523,7 @@ static void qedr_sync_free_irqs(struct qedr_dev *dev)
 static int qedr_req_msix_irqs(struct qedr_dev *dev)
 {
 	int i, rc = 0;
+	u16 idx;
 
 	if (dev->num_cnq > dev->int_info.msix_cnt) {
 		DP_ERR(dev,
@@ -529,7 +533,8 @@ static int qedr_req_msix_irqs(struct qedr_dev *dev)
 	}
 
 	for (i = 0; i < dev->num_cnq; i++) {
-		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+		rc = request_irq(dev->int_info.msix[idx].vector,
 				 qedr_irq_handler, 0, dev->cnq_array[i].name,
 				 &dev->cnq_array[i]);
 		if (rc) {
@@ -866,6 +871,16 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
 	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
 	dev->rdma_type = dev_info.rdma_type;
 	dev->num_hwfns = dev_info.common.num_hwfns;
+
+	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
+		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
+		if (rc) {
+			DP_ERR(dev, "iWARP is disabled over a 100g device Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
+			goto init_err;
+		}
+	}
+	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);
+
 	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
 
 	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
@@ -926,6 +941,10 @@ static void qedr_remove(struct qedr_dev *dev)
 	qedr_stop_hw(dev);
 	qedr_sync_free_irqs(dev);
 	qedr_free_resources(dev);
+
+	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
+		dev->ops->iwarp_set_engine_affin(dev->cdev, true);
+
 	ib_dealloc_device(&dev->ibdev);
 }
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 6175d1e98717..a92ca22e5de1 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -157,6 +157,8 @@ struct qedr_dev {
 	u32			dp_module;
 	u8			dp_level;
 	u8			num_hwfns;
+#define QEDR_IS_CMT(dev)	((dev)->num_hwfns > 1)
+	u8			affin_hwfn_idx;
 	u8			gsi_ll2_handle;
 
 	uint			wq_multiplier;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index d88d9f8a7f9a..34c1f9d6c915 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -427,11 +427,16 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 	if (netif_carrier_ok(us_ibdev->netdev))
 		usnic_fwd_carrier_up(us_ibdev->ufdev);
 
-	ind = in_dev_get(netdev);
-	if (ind->ifa_list)
-		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
-				     ind->ifa_list->ifa_address);
-	in_dev_put(ind);
+	rcu_read_lock();
+	ind = __in_dev_get_rcu(netdev);
+	if (ind) {
+		const struct in_ifaddr *ifa;
+
+		ifa = rcu_dereference(ind->ifa_list);
+		if (ifa)
+			usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
+	}
+	rcu_read_unlock();
 
 	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
 			    us_ibdev->ufdev->inaddr, &gid.raw[0]);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9b5e11d3fb85..04ea7db08e87 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1998,6 +1998,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
 		return err;
 
 	ivf->vf = vf;
+	memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
 
 	return 0;
 }
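Several hunks in this diff (i40iw_utils.c, nes.c, usnic_ib_main.c) replace a
bare in->ifa_list->ifa_address dereference with a NULL-checked rcu_dereference().
A sketch of the pattern they converge on, as a hypothetical helper:

    #include <linux/inetdevice.h>
    #include <linux/rcupdate.h>

    static __be32 my_first_v4_addr(struct net_device *ndev)
    {
            const struct in_ifaddr *ifa;
            struct in_device *in_dev;
            __be32 addr = 0;

            rcu_read_lock();
            in_dev = __in_dev_get_rcu(ndev);                /* may be NULL */
            if (in_dev) {
                    ifa = rcu_dereference(in_dev->ifa_list); /* may be NULL */
                    if (ifa)
                            addr = ifa->ifa_address;
            }
            rcu_read_unlock();

            return addr;
    }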