author    Dasaratharaman Chandramouli  2017-04-29 14:41:28 -0400
committer Doug Ledford                 2017-05-01 14:32:43 -0400
commit    d8966fcd4c25708c3a76ea7619644218373df639 (patch)
tree      bd059c78ae118d3a16cada6228ea3b7a7e7f4fbd /drivers/infiniband/hw
parent    2224c47ace2387610f8cff0c562db2c6e63df026 (diff)
IB/core: Use rdma_ah_attr accessor functions
Modify core and driver components to use accessor functions introduced to
access individual fields of rdma_ah_attr.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Don Hiatt <don.hiatt@intel.com>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Dasaratharaman Chandramouli <dasaratharaman.chandramouli@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
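The conversion is mechanical: direct field access on struct rdma_ah_attr is replaced by the rdma_ah_* accessor helpers that this series introduces. A minimal sketch of the before/after pattern is shown below; the example_* wrapper functions are hypothetical and only the rdma_ah_* helpers are taken from this series.

/* Illustrative sketch only -- example_* functions are hypothetical. */
#include <rdma/ib_verbs.h>

static void example_fill_ah_attr(struct rdma_ah_attr *ah_attr,
				 u16 dlid, u8 sl, u8 port_num)
{
	/* Previously: ah_attr->dlid = dlid; ah_attr->sl = sl; ... */
	rdma_ah_set_dlid(ah_attr, dlid);
	rdma_ah_set_sl(ah_attr, sl);
	rdma_ah_set_port_num(ah_attr, port_num);
}

static u32 example_read_flow_label(struct rdma_ah_attr *ah_attr)
{
	/* Previously: return ah_attr->grh.flow_label; */
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)
		return rdma_ah_read_grh(ah_attr)->flow_label;
	return 0;
}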
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c        |  77
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c             |   4
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.c                |   4
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c                 |  12
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c                 |  24
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c                |  82
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c                 |   6
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c                 |  51
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c              |  20
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h              |   2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c         |  57
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c      | 115
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c                 | 115
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c                |  43
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c                |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 |  92
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c                 |  54
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                 |  86
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c          |  64
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c         |   6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c          |  84
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h           |   2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c        |  54
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c        |  21
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c     |  38
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h               |  11
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c            |  15
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.h            |   2
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c              |  44
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c             |   5
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c              |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c              |  21
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c             |  72
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c              |   6
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c              |  53
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c           |  12
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h           |   2
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c  |  32
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c |  27
39 files changed, 777 insertions, 642 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a0670f445246..d2a710b0d8a2 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -530,13 +530,14 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_ah *ah;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
int rc;
u16 vlan_tag;
u8 nw_type;
struct ib_gid_attr sgid_attr;
- if (!(ah_attr->ah_flags & IB_AH_GRH)) {
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
return ERR_PTR(-EINVAL);
}
@@ -548,33 +549,33 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
ah->qplib_ah.pd = &pd->qplib_pd;
/* Supply the configuration for the HW */
- memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw,
+ memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
sizeof(union ib_gid));
/*
* If RoCE V2 is enabled, stack will have two entries for
* each GID entry. Avoiding this duplicte entry in HW. Dividing
* the GID index by 2 for RoCE V2
*/
- ah->qplib_ah.sgid_index = ah_attr->grh.sgid_index / 2;
- ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index;
- ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class;
- ah->qplib_ah.flow_label = ah_attr->grh.flow_label;
- ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit;
- ah->qplib_ah.sl = ah_attr->sl;
+ ah->qplib_ah.sgid_index = grh->sgid_index / 2;
+ ah->qplib_ah.host_sgid_index = grh->sgid_index;
+ ah->qplib_ah.traffic_class = grh->traffic_class;
+ ah->qplib_ah.flow_label = grh->flow_label;
+ ah->qplib_ah.hop_limit = grh->hop_limit;
+ ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
if (ib_pd->uobject &&
!rdma_is_multicast_addr((struct in6_addr *)
- ah_attr->grh.dgid.raw) &&
+ grh->dgid.raw) &&
!rdma_link_local_addr((struct in6_addr *)
- ah_attr->grh.dgid.raw)) {
+ grh->dgid.raw)) {
union ib_gid sgid;
rc = ib_get_cached_gid(&rdev->ibdev, 1,
- ah_attr->grh.sgid_index, &sgid,
+ grh->sgid_index, &sgid,
&sgid_attr);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to query gid at index %d",
- ah_attr->grh.sgid_index);
+ grh->sgid_index);
goto fail;
}
if (sgid_attr.ndev) {
@@ -595,7 +596,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
break;
}
- rc = rdma_addr_find_l2_eth_by_grh(&sgid, &ah_attr->grh.dgid,
+ rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
ah_attr->dmac, &vlan_tag,
&sgid_attr.ndev->ifindex,
NULL);
@@ -643,15 +644,14 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
- memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data,
- sizeof(union ib_gid));
- ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index;
- ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class;
- ah_attr->sl = ah->qplib_ah.sl;
+ rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
memcpy(ah_attr->dmac, ah->qplib_ah.dmac, ETH_ALEN);
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->port_num = 1;
- ah_attr->static_rate = 0;
+ rdma_ah_set_grh(ah_attr, NULL, 0,
+ ah->qplib_ah.host_sgid_index,
+ 0, ah->qplib_ah.traffic_class);
+ rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
+ rdma_ah_set_port_num(ah_attr, 1);
+ rdma_ah_set_static_rate(ah_attr, 0);
return 0;
}
@@ -1258,6 +1258,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.qkey = qp_attr->qkey;
}
if (qp_attr_mask & IB_QP_AV) {
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&qp_attr->ah_attr);
+
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
@@ -1265,25 +1268,22 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
- memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw,
+ memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
sizeof(qp->qplib_qp.ah.dgid.data));
- qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label;
+ qp->qplib_qp.ah.flow_label = grh->flow_label;
/* If RoCE V2 is enabled, stack will have two entries for
* each GID entry. Avoiding this duplicte entry in HW. Dividing
* the GID index by 2 for RoCE V2
*/
- qp->qplib_qp.ah.sgid_index =
- qp_attr->ah_attr.grh.sgid_index / 2;
- qp->qplib_qp.ah.host_sgid_index =
- qp_attr->ah_attr.grh.sgid_index;
- qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit;
- qp->qplib_qp.ah.traffic_class =
- qp_attr->ah_attr.grh.traffic_class;
- qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl;
+ qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
+ qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
+ qp->qplib_qp.ah.hop_limit = grh->hop_limit;
+ qp->qplib_qp.ah.traffic_class = grh->traffic_class;
+ qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
ether_addr_copy(qp->qplib_qp.ah.dmac, qp_attr->ah_attr.dmac);
status = ib_get_cached_gid(&rdev->ibdev, 1,
- qp_attr->ah_attr.grh.sgid_index,
+ grh->sgid_index,
&sgid, &sgid_attr);
if (!status && sgid_attr.ndev) {
memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
@@ -1423,13 +1423,12 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
qp_attr->pkey_index = qplib_qp.pkey_index;
qp_attr->qkey = qplib_qp.qkey;
- memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp.ah.dgid.data,
- sizeof(qplib_qp.ah.dgid.data));
- qp_attr->ah_attr.grh.flow_label = qplib_qp.ah.flow_label;
- qp_attr->ah_attr.grh.sgid_index = qplib_qp.ah.host_sgid_index;
- qp_attr->ah_attr.grh.hop_limit = qplib_qp.ah.hop_limit;
- qp_attr->ah_attr.grh.traffic_class = qplib_qp.ah.traffic_class;
- qp_attr->ah_attr.sl = qplib_qp.ah.sl;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
+ qplib_qp.ah.host_sgid_index,
+ qplib_qp.ah.hop_limit,
+ qplib_qp.ah.traffic_class);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
+ rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
ether_addr_copy(qp_attr->ah_attr.dmac, qplib_qp.ah.dmac);
qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
qp_attr->timeout = qplib_qp.timeout;
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 500b129ed565..527895487175 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -480,12 +480,12 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
(dlid != be16_to_cpu(IB_LID_PERMISSIVE));
break;
case IB_QPT_UC:
- rlid = qp->remote_ah_attr.dlid;
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
rqpn = qp->remote_qpn;
svc_type = IB_CC_SVCTYPE_UC;
break;
case IB_QPT_RC:
- rlid = qp->remote_ah_attr.dlid;
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
rqpn = qp->remote_qpn;
svc_type = IB_CC_SVCTYPE_RC;
break;
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 836d00b04547..5977673a52d4 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1166,9 +1166,9 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
spin_lock_irqsave(&ibp->rvp.lock, flags);
if (ibp->rvp.sm_ah) {
if (smlid != ibp->rvp.sm_lid)
- ibp->rvp.sm_ah->attr.dlid = smlid;
+ rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr, smlid);
if (msl != ibp->rvp.sm_sl)
- ibp->rvp.sm_ah->attr.sl = msl;
+ rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
}
spin_unlock_irqrestore(&ibp->rvp.lock, flags);
if (smlid != ibp->rvp.sm_lid)
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index c4ebd097b20f..4573e4c9f35c 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -294,7 +294,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
ah = ibah_to_rvtah(wqe->ud_wr.ah);
if (wqe->length > (1 << ah->log_pmtu))
return -EINVAL;
- if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
+ if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
return -EINVAL;
default:
break;
@@ -631,8 +631,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->s_tail, qp->s_head, qp->s_size,
qp->s_avail,
qp->remote_qpn,
- qp->remote_ah_attr.dlid,
- qp->remote_ah_attr.sl,
+ rdma_ah_get_dlid(&qp->remote_ah_attr),
+ rdma_ah_get_sl(&qp->remote_ah_attr),
qp->pmtu,
qp->s_retry,
qp->s_retry_cnt,
@@ -748,7 +748,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp)
qp->s_mig_state = IB_MIG_MIGRATED;
qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
+ qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
qp->s_pkey_index = qp->s_alt_pkey_index;
qp->s_flags |= RVT_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
@@ -778,7 +778,7 @@ u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
u8 sc, vl;
ibp = &dd->pport[qp->port_num - 1].ibport_data;
- sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
vl = sc_to_vlt(dd, sc);
mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
@@ -861,7 +861,7 @@ void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
if (qp->port_num == ppd->port &&
(qp->ibqp.qp_type == IB_QPT_UC ||
qp->ibqp.qp_type == IB_QPT_RC) &&
- qp->remote_ah_attr.sl == sl &&
+ rdma_ah_get_sl(&qp->remote_ah_attr) == sl &&
(ib_rvt_state_ops[qp->state] &
RVT_POST_SEND_OK)) {
spin_lock_irq(&qp->r_lock);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9b3333fd9dc0..75a729cd0c3d 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -274,7 +274,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail_no_tx;
ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
/* Sending responses has higher priority over sending requests. */
@@ -744,9 +744,10 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
/* Construct the header */
/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
hwords = 6;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
- &qp->remote_ah_attr.grh, hwords, 0);
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ hwords, 0);
ohdr = &hdr.u.l.oth;
lrh0 = HFI1_LRH_GRH;
} else {
@@ -763,14 +764,16 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
IB_AETH_CREDIT_SHIFT));
else
ohdr->u.aeth = rvt_compute_aeth(qp);
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
- lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
+ lrh0 |= (sc5 & 0xf) << 12 | (rdma_ah_get_sl(&qp->remote_ah_attr)
+ & 0xf) << 4;
hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid |
+ rdma_ah_get_path_bits(&qp->remote_ah_attr));
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << IB_BECN_SHIFT);
@@ -1100,10 +1103,11 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
*/
if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
struct sdma_engine *engine;
+ u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
u8 sc5;
/* For now use sc to find engine */
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ sc5 = ibp->sl_to_sc[sl];
engine = qp_to_sdma_engine(qp, sc5);
sdma_engine_progress_schedule(engine);
}
@@ -2098,7 +2102,7 @@ send_last:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section
@@ -2110,7 +2114,7 @@ send_last:
*
* See also OPA Vol. 1, section 9.7.6, and table 9-17.
*/
- wc.sl = qp->remote_ah_attr.sl;
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
/* zero fields that are N/A */
wc.vendor_err = 0;
wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index ccf8d8037355..891ba0a81bbd 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -219,23 +219,28 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
{
__be64 guid;
unsigned long flags;
- u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
if (!has_grh) {
- if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH)
goto err;
} else {
- if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+ const struct ib_global_route *grh;
+
+ if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH))
goto err;
- guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+ grh = rdma_ah_read_grh(&qp->alt_ah_attr);
+ guid = get_sguid(ibp, grh->sgid_index);
if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
guid))
goto err;
if (!gid_ok(
&hdr->u.l.grh.sgid,
- qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
- qp->alt_ah_attr.grh.dgid.global.interface_id))
+ grh->dgid.global.subnet_prefix,
+ grh->dgid.global.interface_id))
goto err;
}
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5,
@@ -249,28 +254,34 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if (ib_get_slid(hdr) != qp->alt_ah_attr.dlid ||
- ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+ if (ib_get_slid(hdr) !=
+ rdma_ah_get_dlid(&qp->alt_ah_attr) ||
+ ppd_from_ibp(ibp)->port !=
+ rdma_ah_get_port_num(&qp->alt_ah_attr))
goto err;
spin_lock_irqsave(&qp->s_lock, flags);
hfi1_migrate_qp(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else {
if (!has_grh) {
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH)
goto err;
} else {
- if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+ const struct ib_global_route *grh;
+
+ if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH))
goto err;
- guid = get_sguid(ibp,
- qp->remote_ah_attr.grh.sgid_index);
+ grh = rdma_ah_read_grh(&qp->remote_ah_attr);
+ guid = get_sguid(ibp, grh->sgid_index);
if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
guid))
goto err;
if (!gid_ok(
&hdr->u.l.grh.sgid,
- qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
- qp->remote_ah_attr.grh.dgid.global.interface_id))
+ grh->dgid.global.subnet_prefix,
+ grh->dgid.global.interface_id))
goto err;
}
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5,
@@ -284,7 +295,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 */
- if (ib_get_slid(hdr) != qp->remote_ah_attr.dlid ||
+ if (ib_get_slid(hdr) !=
+ rdma_ah_get_dlid(&qp->remote_ah_attr) ||
ppd_from_ibp(ibp)->port != qp->port_num)
goto err;
if (qp->s_mig_state == IB_MIG_REARM &&
@@ -542,8 +554,8 @@ do_write:
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
@@ -637,7 +649,7 @@ done:
* Return the size of the header in 32 bit words.
*/
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords)
+ const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
hdr->version_tclass_flow =
cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
@@ -731,15 +743,17 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
extra_bytes = -ps->s_txreq->s_cur_size & 3;
nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
lrh0 = HFI1_LRH_BTH;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += hfi1_make_grh(ibp,
- &ps->s_txreq->phdr.hdr.u.l.grh,
- &qp->remote_ah_attr.grh,
- qp->s_hdrwords, nwords);
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
+ qp->s_hdrwords +=
+ hfi1_make_grh(ibp,
+ &ps->s_txreq->phdr.hdr.u.l.grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ qp->s_hdrwords, nwords);
lrh0 = HFI1_LRH_GRH;
middle = 0;
}
- lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
+ lrh0 |= (priv->s_sc & 0xf) << 12 |
+ (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;
/*
* reset s_ahg/AHG fields
*
@@ -763,11 +777,13 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
else
qp->s_flags &= ~RVT_S_AHG_VALID;
ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
- ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ ps->s_txreq->phdr.hdr.lrh[1] =
+ cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
ps->s_txreq->phdr.hdr.lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
- qp->remote_ah_attr.src_path_bits);
+ ps->s_txreq->phdr.hdr.lrh[3] =
+ cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ rdma_ah_get_path_bits(&qp->remote_ah_attr));
bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0);
@@ -821,9 +837,9 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
- if (!loopback && ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc
- ) - 1)) ==
- ps.ppd->lid)) {
+ if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
+ ~((1 << ps.ppd->lmc) - 1)) ==
+ ps.ppd->lid)) {
ruc_loopback(qp);
return;
}
@@ -831,9 +847,9 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
timeout_int = (qp->timeout_jiffies);
break;
case IB_QPT_UC:
- if (!loopback && ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc
- ) - 1)) ==
- ps.ppd->lid)) {
+ if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
+ ~((1 << ps.ppd->lmc) - 1)) ==
+ ps.ppd->lid)) {
ruc_loopback(qp);
return;
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index f0bdb100e005..5da1e4546543 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -94,7 +94,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
/* Get the next send request. */
@@ -451,7 +451,7 @@ last_imm:
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section
@@ -463,7 +463,7 @@ last_imm:
*
* See also OPA Vol. 1, section 9.7.6, and table 9-17.
*/
- wc.sl = qp->remote_ah_attr.sl;
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
/* zero fields that are N/A */
wc.vendor_err = 0;
wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 7a438a582916..6a4e95cefae5 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -103,17 +103,17 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (qp->ibqp.qp_num > 1) {
u16 pkey;
u16 slid;
- u8 sc5 = ibp->sl_to_sc[ah_attr->sl];
+ u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
- slid = ppd->lid | (ah_attr->src_path_bits &
+ slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
qp->s_pkey_index, slid))) {
hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
- ah_attr->sl,
+ rdma_ah_get_sl(ah_attr),
sqp->ibqp.qp_num, qp->ibqp.qp_num,
- slid, ah_attr->dlid);
+ slid, rdma_ah_get_dlid(ah_attr));
goto drop;
}
}
@@ -131,13 +131,13 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (unlikely(qkey != qp->qkey)) {
u16 lid;
- lid = ppd->lid | (ah_attr->src_path_bits &
+ lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
- ah_attr->sl,
+ rdma_ah_get_sl(ah_attr),
sqp->ibqp.qp_num, qp->ibqp.qp_num,
lid,
- ah_attr->dlid);
+ rdma_ah_get_dlid(ah_attr));
goto drop;
}
}
@@ -183,11 +183,11 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
goto bail_unlock;
}
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
struct ib_grh grh;
- struct ib_global_route grd = ah_attr->grh;
+ const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
- hfi1_make_grh(ibp, &grh, &grd, 0, 0);
+ hfi1_make_grh(ibp, &grh, grd, 0, 0);
hfi1_copy_sge(&qp->r_sge, &grh,
sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
@@ -243,12 +243,13 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} else {
wc.pkey_index = 0;
}
- wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
+ wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1));
/* Check for loopback when the port lid is not set */
if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
- wc.sl = ah_attr->sl;
- wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+ wc.sl = rdma_ah_get_sl(ah_attr);
+ wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
@@ -319,9 +320,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
- ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
- lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+ if (rdma_ah_get_dlid(ah_attr) < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+ rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
if (unlikely(!loopback &&
(lid == ppd->lid ||
(lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
@@ -356,7 +357,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_hdrwords = 7;
ps->s_txreq->s_cur_size = wqe->length;
ps->s_txreq->ss = &qp->s_sge;
- qp->s_srate = ah_attr->static_rate;
+ qp->s_srate = rdma_ah_get_static_rate(ah_attr);
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
qp->s_wqe = wqe;
qp->s_sge.sge = wqe->sg_list[0];
@@ -364,11 +365,11 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_sge.num_sge = wqe->wr.num_sge;
qp->s_sge.total_len = wqe->length;
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
/* Header size in 32-bit words. */
qp->s_hdrwords += hfi1_make_grh(ibp,
&ps->s_txreq->phdr.hdr.u.l.grh,
- &ah_attr->grh,
+ rdma_ah_read_grh(ah_attr),
qp->s_hdrwords, nwords);
lrh0 = HFI1_LRH_GRH;
ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
@@ -388,8 +389,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
} else {
bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
}
- sc5 = ibp->sl_to_sc[ah_attr->sl];
- lrh0 |= (ah_attr->sl & 0xf) << 4;
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
if (qp->ibqp.qp_type == IB_QPT_SMI) {
lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
priv->s_sc = 0xf;
@@ -402,15 +403,17 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
ps->s_txreq->psc = priv->s_sendcontext;
ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
- ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);
+ ps->s_txreq->phdr.hdr.lrh[1] =
+ cpu_to_be16(rdma_ah_get_dlid(ah_attr));
ps->s_txreq->phdr.hdr.lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ if (rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) {
ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
} else {
lid = ppd->lid;
if (lid) {
- lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid);
} else {
ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 3cd11222456a..4c2a77e17a54 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1461,9 +1461,9 @@ static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
*/
u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
{
- struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);
+ struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));
- return ibp->sl_to_sc[ah->sl];
+ return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
}
static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
@@ -1474,9 +1474,9 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
u8 sc5;
/* test the mapping for validity */
- ibp = to_iport(ibdev, ah_attr->port_num);
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[ah_attr->sl];
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
dd = dd_from_ppd(ppd);
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL;
@@ -1497,9 +1497,9 @@ static void hfi1_notify_new_ah(struct ib_device *ibdev,
* done being setup. We can however modify things which we need to set.
*/
- ibp = to_iport(ibdev, ah_attr->port_num);
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[ah->attr.sl];
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
dd = dd_from_ppd(ppd);
ah->vl = sc_to_vlt(dd, sc5);
if (ah->vl < num_vls || ah->vl == 15)
@@ -1513,8 +1513,8 @@ struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
struct rvt_qp *qp0;
memset(&attr, 0, sizeof(attr));
- attr.dlid = dlid;
- attr.port_num = ppd_from_ibp(ibp)->port;
+ rdma_ah_set_dlid(&attr, dlid);
+ rdma_ah_set_port_num(&attr, ppd_from_ibp(ibp)->port);
rcu_read_lock();
qp0 = rcu_dereference(ibp->rvp.qp[0]);
if (qp0)
@@ -1913,12 +1913,12 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
switch (packet->qp->ibqp.qp_type) {
case IB_QPT_UC:
- rlid = qp->remote_ah_attr.dlid;
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
rqpn = qp->remote_qpn;
svc_type = IB_CC_SVCTYPE_UC;
break;
case IB_QPT_RC:
- rlid = qp->remote_ah_attr.dlid;
+ rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
rqpn = qp->remote_qpn;
svc_type = IB_CC_SVCTYPE_RC;
break;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index fb7f0a234fdd..52ff275caf54 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -347,7 +347,7 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords);
+ const struct ib_global_route *grh, u32 hwords, u32 nwords);
void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth0, u32 bth2, int middle,
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 214c9b4195f4..f78a733a63ec 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -49,6 +49,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
struct in6_addr in6;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
union ib_gid sgid;
int ret;
@@ -57,15 +58,20 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
return ERR_PTR(-ENOMEM);
/* Get mac address */
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(ah_attr->grh.dgid.raw));
- if (rdma_is_multicast_addr(&in6))
+ memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw));
+ if (rdma_is_multicast_addr(&in6)) {
rdma_get_mcast_mac(&in6, ah->av.mac);
- else
- memcpy(ah->av.mac, ah_attr->dmac, sizeof(ah_attr->dmac));
+ } else {
+ u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
+
+ if (!dmac)
+ return ERR_PTR(-EINVAL);
+ memcpy(ah->av.mac, dmac, ETH_ALEN);
+ }
/* Get source gid */
- ret = ib_get_cached_gid(ibpd->device, ah_attr->port_num,
- ah_attr->grh.sgid_index, &sgid, &gid_attr);
+ ret = ib_get_cached_gid(ibpd->device, rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index, &sgid, &gid_attr);
if (ret) {
dev_err(dev, "get sgid failed! ret = %d\n", ret);
kfree(ah);
@@ -79,21 +85,23 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
}
if (vlan_tag < 0x1000)
- vlan_tag |= (ah_attr->sl & HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ vlan_tag |= (rdma_ah_get_sl(ah_attr) &
+ HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | (ah_attr->port_num <<
+ ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) <<
HNS_ROCE_PORT_NUM_SHIFT));
- ah->av.gid_index = ah_attr->grh.sgid_index;
+ ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
- if (ah_attr->static_rate)
+ if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
- memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, HNS_ROCE_GID_SIZE);
- ah->av.sl_tclass_flowlabel = cpu_to_le32(ah_attr->sl <<
+ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
HNS_ROCE_SL_SHIFT);
return &ah->ibah;
@@ -105,19 +113,18 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
memset(ah_attr, 0, sizeof(*ah_attr));
- ah_attr->sl = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
- HNS_ROCE_SL_SHIFT;
- ah_attr->port_num = le32_to_cpu(ah->av.port_pd) >>
- HNS_ROCE_PORT_NUM_SHIFT;
- ah_attr->static_rate = ah->av.stat_rate;
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->grh.traffic_class = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
- HNS_ROCE_TCLASS_SHIFT;
- ah_attr->grh.flow_label = le32_to_cpu(ah->av.sl_tclass_flowlabel) &
- HNS_ROCE_FLOW_LABLE_MASK;
- ah_attr->grh.hop_limit = ah->av.hop_limit;
- ah_attr->grh.sgid_index = ah->av.gid_index;
- memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, HNS_ROCE_GID_SIZE);
+ rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_SL_SHIFT));
+ rdma_ah_set_port_num(ah_attr, (le32_to_cpu(ah->av.port_pd) >>
+ HNS_ROCE_PORT_NUM_SHIFT));
+ rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate);
+ rdma_ah_set_grh(ah_attr, NULL,
+ (le32_to_cpu(ah->av.sl_tclass_flowlabel) &
+ HNS_ROCE_FLOW_LABLE_MASK), ah->av.gid_index,
+ ah->av.hop_limit,
+ (le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_TCLASS_SHIFT));
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid);
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index e637beb209f7..f4ca9623ced4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -658,6 +658,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
struct hns_roce_qp *hr_qp;
struct ib_cq *cq;
struct ib_pd *pd;
+ union ib_gid dgid;
u64 subnet_prefix;
int attr_mask = 0;
int i;
@@ -708,12 +709,8 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
attr.rnr_retry = 7;
attr.timeout = 0x12;
attr.path_mtu = IB_MTU_256;
- attr.ah_attr.ah_flags = 1;
- attr.ah_attr.static_rate = 3;
- attr.ah_attr.grh.sgid_index = 0;
- attr.ah_attr.grh.hop_limit = 1;
- attr.ah_attr.grh.flow_label = 0;
- attr.ah_attr.grh.traffic_class = 0;
+ rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
+ rdma_ah_set_static_rate(&attr.ah_attr, 3);
subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
@@ -742,24 +739,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
hr_qp->ibqp.recv_cq = cq;
hr_qp->ibqp.send_cq = cq;
- attr.ah_attr.port_num = phy_port + 1;
- attr.ah_attr.sl = sl;
+ rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
+ rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
attr.port_num = phy_port + 1;
attr.dest_qp_num = hr_qp->qpn;
- memcpy(attr.ah_attr.dmac, hr_dev->dev_addr[phy_port],
+ memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
+ hr_dev->dev_addr[phy_port],
MAC_ADDR_OCTET_NUM);
- memcpy(attr.ah_attr.grh.dgid.raw,
- &subnet_prefix, sizeof(u64));
- memcpy(&attr.ah_attr.grh.dgid.raw[8],
- hr_dev->dev_addr[phy_port], 3);
- memcpy(&attr.ah_attr.grh.dgid.raw[13],
- hr_dev->dev_addr[phy_port] + 3, 3);
- attr.ah_attr.grh.dgid.raw[11] = 0xff;
- attr.ah_attr.grh.dgid.raw[12] = 0xfe;
- attr.ah_attr.grh.dgid.raw[8] ^= 2;
-
+ memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
+ memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
+ memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+ dgid.raw[11] = 0xff;
+ dgid.raw[12] = 0xfe;
+ dgid.raw[8] ^= 2;
+ rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
attr_mask |= IB_QP_PORT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
@@ -2567,6 +2562,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_qp_context *context;
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
uint32_t doorbell[2] = {0};
@@ -2575,6 +2571,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int ret = -EINVAL;
u64 *mtts = NULL;
int port;
+ u8 port_num;
u8 *dmac;
u8 *smac;
@@ -2782,7 +2779,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
- attr->ah_attr.ah_flags);
+ rdma_ah_get_ah_flags(&attr->ah_attr));
roce_set_field(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
@@ -2794,12 +2791,13 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
attr->dest_qp_num);
/* Configure GID index */
+ port_num = rdma_ah_get_port_num(&attr->ah_attr);
roce_set_field(context->qpc_bytes_36,
QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev,
- attr->ah_attr.port_num - 1,
- attr->ah_attr.grh.sgid_index));
+ hns_get_gid_index(hr_dev,
+ port_num - 1,
+ grh->sgid_index));
memcpy(&(context->dmac_l), dmac, 4);
@@ -2810,26 +2808,26 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_44,
QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
- attr->ah_attr.static_rate);
+ rdma_ah_get_static_rate(&attr->ah_attr));
roce_set_field(context->qpc_bytes_44,
QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
- attr->ah_attr.grh.hop_limit);
+ grh->hop_limit);
roce_set_field(context->qpc_bytes_48,
QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
- attr->ah_attr.grh.flow_label);
+ grh->flow_label);
roce_set_field(context->qpc_bytes_48,
QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
- attr->ah_attr.grh.traffic_class);
+ grh->traffic_class);
roce_set_field(context->qpc_bytes_48,
QP_CONTEXT_QPC_BYTES_48_MTU_M,
QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
- memcpy(context->dgid, attr->ah_attr.grh.dgid.raw,
- sizeof(attr->ah_attr.grh.dgid.raw));
+ memcpy(context->dgid, grh->dgid.raw,
+ sizeof(grh->dgid.raw));
dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
roce_get_field(context->qpc_bytes_44,
@@ -2909,8 +2907,9 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
- hr_qp->sl = attr->ah_attr.sl;
+ QP_CONTEXT_QPC_BYTES_156_SL_S,
+ rdma_ah_get_sl(&attr->ah_attr));
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
} else if (cur_state == IB_QPS_RTR &&
new_state == IB_QPS_RTS) {
/* If exist optional param, return error */
@@ -3021,8 +3020,9 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
- hr_qp->sl = attr->ah_attr.sl;
+ QP_CONTEXT_QPC_BYTES_156_SL_S,
+ rdma_ah_get_sl(&attr->ah_attr));
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
@@ -3357,28 +3357,33 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
hr_qp->ibqp.qp_type == IB_QPT_UC) {
- qp_attr->ah_attr.sl = roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S);
- qp_attr->ah_attr.grh.flow_label = roce_get_field(
- context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
- qp_attr->ah_attr.grh.sgid_index = roce_get_field(
- context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
- qp_attr->ah_attr.grh.hop_limit = roce_get_field(
- context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
- qp_attr->ah_attr.grh.traffic_class = roce_get_field(
- context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
-
- memcpy(qp_attr->ah_attr.grh.dgid.raw, context->dgid,
- sizeof(qp_attr->ah_attr.grh.dgid.raw));
+ struct ib_global_route *grh =
+ rdma_ah_retrieve_grh(&qp_attr->ah_attr);
+
+ rdma_ah_set_sl(&qp_attr->ah_attr,
+ roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S));
+ rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
+ grh->flow_label =
+ roce_get_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
+ grh->sgid_index =
+ roce_get_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
+ grh->hop_limit =
+ roce_get_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
+ grh->traffic_class =
+ roce_get_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
+
+ memcpy(grh->dgid.raw, context->dgid,
+ sizeof(grh->dgid.raw));
}
qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 17fcb0b49d0e..3cbac5f7b0f5 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -46,25 +46,32 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd,
{
struct mlx4_dev *dev = to_mdev(pd->device)->dev;
- ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.ib.g_slid = ah_attr->src_path_bits;
- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) << 24));
+ ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
+ ah->av.ib.sl_tclass_flowlabel =
+ cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
ah->av.ib.g_slid |= 0x80;
- ah->av.ib.gid_index = ah_attr->grh.sgid_index;
- ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.ib.gid_index = grh->sgid_index;
+ ah->av.ib.hop_limit = grh->hop_limit;
ah->av.ib.sl_tclass_flowlabel |=
- cpu_to_be32((ah_attr->grh.traffic_class << 20) |
- ah_attr->grh.flow_label);
- memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ grh->flow_label);
+ memcpy(ah->av.ib.dgid, grh->dgid.raw, 16);
}
- ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
- if (ah_attr->static_rate) {
- ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
- while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
- !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
- --ah->av.ib.stat_rate;
+ ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
+ if (rdma_ah_get_static_rate(ah_attr)) {
+ u8 static_rate = rdma_ah_get_static_rate(ah_attr) +
+ MLX4_STAT_RATE_OFFSET;
+
+ while (static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << static_rate & dev->caps.stat_rate_support))
+ --static_rate;
+ ah->av.ib.stat_rate = static_rate;
}
return &ah->ibah;
@@ -81,17 +88,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
u16 vlan_tag = 0xffff;
union ib_gid sgid;
struct ib_gid_attr gid_attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
int ret;
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ memcpy(&in6, grh->dgid.raw, sizeof(in6));
if (rdma_is_multicast_addr(&in6)) {
is_mcast = 1;
rdma_get_mcast_mac(&in6, ah->av.eth.mac);
} else {
memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
}
- ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
- ah_attr->grh.sgid_index, &sgid, &gid_attr);
+ ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index, &sgid, &gid_attr);
if (ret)
return ERR_PTR(ret);
eth_zero_addr(ah->av.eth.s_mac);
@@ -102,32 +110,36 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
dev_put(gid_attr.ndev);
}
if (vlan_tag < 0x1000)
- vlan_tag |= (ah_attr->sl & 7) << 13;
- ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) << 24));
+ ret = mlx4_ib_gid_index_to_real_index(ibdev,
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index);
if (ret < 0)
return ERR_PTR(ret);
ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
- ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
- if (ah_attr->static_rate) {
- ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ ah->av.eth.hop_limit = grh->hop_limit;
+ if (rdma_ah_get_static_rate(ah_attr)) {
+ ah->av.eth.stat_rate = rdma_ah_get_static_rate(ah_attr) +
+ MLX4_STAT_RATE_OFFSET;
while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
!(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
--ah->av.eth.stat_rate;
}
ah->av.eth.sl_tclass_flowlabel |=
- cpu_to_be32((ah_attr->grh.traffic_class << 20) |
- ah_attr->grh.flow_label);
+ cpu_to_be32((grh->traffic_class << 20) |
+ grh->flow_label);
/*
* HW requires multicast LID so we just choose one.
*/
if (is_mcast)
ah->av.ib.dlid = cpu_to_be16(0xc000);
- memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
-
+ memcpy(ah->av.eth.dgid, grh->dgid.raw, 16);
+ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr)
+ << 29);
return &ah->ibah;
}
@@ -142,8 +154,10 @@ struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
if (!ah)
return ERR_PTR(-ENOMEM);
- if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
- if (!(ah_attr->ah_flags & IB_AH_GRH)) {
+ if (rdma_port_get_link_layer(pd->device,
+ rdma_ah_get_port_num(ah_attr)) ==
+ IB_LINK_LAYER_ETHERNET) {
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
ret = ERR_PTR(-EINVAL);
} else {
/*
@@ -171,28 +185,35 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
enum rdma_link_layer ll;
memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
- ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+ rdma_ah_set_port_num(ah_attr,
+ be32_to_cpu(ah->av.ib.port_pd) >> 24);
+ ll = rdma_port_get_link_layer(ibah->device,
+ rdma_ah_get_port_num(ah_attr));
if (ll == IB_LINK_LAYER_ETHERNET)
- ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+ rdma_ah_set_sl(ah_attr,
+ be32_to_cpu(ah->av.eth.sl_tclass_flowlabel)
+ >> 29);
else
- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ rdma_ah_set_sl(ah_attr,
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel)
+ >> 28);
- ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ rdma_ah_set_dlid(ah_attr, (ll == IB_LINK_LAYER_INFINIBAND) ?
+ be16_to_cpu(ah->av.ib.dlid) : 0);
if (ah->av.ib.stat_rate)
- ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
- ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;
+ rdma_ah_set_static_rate(ah_attr,
+ ah->av.ib.stat_rate -
+ MLX4_STAT_RATE_OFFSET);
+ rdma_ah_set_path_bits(ah_attr, ah->av.ib.g_slid & 0x7F);
if (mlx4_ib_ah_grh_present(ah)) {
- ah_attr->ah_flags = IB_AH_GRH;
-
- ah_attr->grh.traffic_class =
- be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
- ah_attr->grh.flow_label =
- be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
- ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
- ah_attr->grh.sgid_index = ah->av.ib.gid_index;
- memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
+ u32 tc_fl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel);
+
+ rdma_ah_set_grh(ah_attr, NULL,
+ tc_fl & 0xfffff, ah->av.ib.gid_index,
+ ah->av.ib.hop_limit,
+ tc_fl >> 20);
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.ib.dgid);
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 90915c5e5338..425515eb01ea 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -196,9 +196,9 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
return;
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = lid;
- ah_attr.sl = sl;
- ah_attr.port_num = port_num;
+ rdma_ah_set_dlid(&ah_attr, lid);
+ rdma_ah_set_sl(&ah_attr, sl);
+ rdma_ah_set_port_num(&ah_attr, port_num);
new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
&ah_attr);
@@ -555,13 +555,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
/* create ah. Just need an empty one with the port num for the post send.
* The driver will set the force loopback bit in post_send */
memset(&attr, 0, sizeof attr);
- attr.port_num = port;
+
+ rdma_ah_set_port_num(&attr, port);
if (is_eth) {
union ib_gid sgid;
+ union ib_gid dgid;
- if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+ if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
return -EINVAL;
- attr.ah_flags = IB_AH_GRH;
+ rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
}
ah = rdma_create_ah(tun_ctx->pd, &attr);
if (IS_ERR(ah))
@@ -1363,6 +1365,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
struct mlx4_mad_snd_buf *sqp_mad;
struct ib_ah *ah;
struct ib_qp *send_qp = NULL;
+ struct ib_global_route *grh;
unsigned wire_tx_ix = 0;
int ret = 0;
u16 wire_pkey_ix;
@@ -1389,12 +1392,13 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
send_qp = sqp->qp;
/* create ah */
- sgid_index = attr->grh.sgid_index;
- attr->grh.sgid_index = 0;
+ grh = rdma_ah_retrieve_grh(attr);
+ sgid_index = grh->sgid_index;
+ grh->sgid_index = 0;
ah = rdma_create_ah(sqp_ctx->pd, attr);
if (IS_ERR(ah))
return -ENOMEM;
- attr->grh.sgid_index = sgid_index;
+ grh->sgid_index = sgid_index;
to_mah(ah)->av.ib.gid_index = sgid_index;
/* get rid of force-loopback bit */
to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
@@ -1442,7 +1446,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
if (s_mac)
memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
if (vlan_id < 0x1000)
- vlan_id |= (attr->sl & 7) << 13;
+ vlan_id |= (rdma_ah_get_sl(attr) & 7) << 13;
to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
@@ -1469,10 +1473,11 @@ static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
struct rdma_ah_attr *ah_attr)
{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
- ah_attr->grh.sgid_index = slave;
+ grh->sgid_index = slave;
else
- ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
+ grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
}
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
@@ -1487,6 +1492,8 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
int slave;
int port;
u16 vlan_id;
+ u8 qos;
+ u8 *dmac;
/* Get slave that sent this packet */
if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1571,14 +1578,16 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
- if (ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
-
- memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
+ dmac = rdma_ah_retrieve_dmac(&ah_attr);
+ if (dmac)
+ memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
vlan_id = be16_to_cpu(tunnel->hdr.vlan);
/* if slave have default vlan use it */
- mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
- &vlan_id, &ah_attr.sl);
+ if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
+ &vlan_id, &qos))
+ rdma_ah_set_sl(&ah_attr, qos);
mlx4_ib_send_to_wire(dev, slave, ctx->port,
is_proxy_qp0(dev, wc->src_qp, slave) ?
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index eb009045643d..3405e947dc1e 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -244,7 +244,7 @@ static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = ctx->port;
- wc.slid = ah_attr.dlid; /* opensm lid */
+ wc.slid = rdma_ah_get_dlid(&ah_attr); /* opensm lid */
wc.src_qp = 1;
return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c211902ea22b..ef4adf32da66 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1394,21 +1394,22 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
int smac_index;
int err;
-
- path->grh_mylmc = ah->src_path_bits & 0x7f;
- path->rlid = cpu_to_be16(ah->dlid);
- if (ah->static_rate) {
- path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
+ path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
+ path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
+ if (rdma_ah_get_static_rate(ah)) {
+ path->static_rate = rdma_ah_get_static_rate(ah) +
+ MLX4_STAT_RATE_OFFSET;
while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
!(1 << path->static_rate & dev->dev->caps.stat_rate_support))
--path->static_rate;
} else
path->static_rate = 0;
- if (ah->ah_flags & IB_AH_GRH) {
- int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
- port,
- ah->grh.sgid_index);
+ if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
+ int real_sgid_index =
+ mlx4_ib_gid_index_to_real_index(dev, port,
+ grh->sgid_index);
if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
@@ -1418,19 +1419,19 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
path->grh_mylmc |= 1 << 7;
path->mgid_index = real_sgid_index;
- path->hop_limit = ah->grh.hop_limit;
+ path->hop_limit = grh->hop_limit;
path->tclass_flowlabel =
- cpu_to_be32((ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ (grh->flow_label));
+ memcpy(path->rgid, grh->dgid.raw, 16);
}
if (is_eth) {
- if (!(ah->ah_flags & IB_AH_GRH))
+ if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
return -1;
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3);
+ ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);
path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) {
@@ -1489,14 +1490,13 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
} else {
smac_index = smac_info->smac_index;
}
-
memcpy(path->dmac, ah->dmac, 6);
path->ackto = MLX4_IB_LINK_TYPE_ETH;
/* put MAC table smac index for IBoE */
path->grh_mylmc = (u8) (smac_index) | 0x80;
} else {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+ ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
}
return 0;
@@ -1768,11 +1768,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
u16 vlan = 0xffff;
u8 smac[ETH_ALEN];
int status = 0;
- int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
- attr->ah_attr.ah_flags & IB_AH_GRH;
+ int is_eth =
+ rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
+ rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
- if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
- int index = attr->ah_attr.grh.sgid_index;
+ if (is_eth) {
+ int index =
+ rdma_ah_read_grh(&attr->ah_attr)->sgid_index;
status = ib_get_cached_gid(ibqp->device, port_num,
index, &gid, &gid_attr);
@@ -3396,39 +3398,40 @@ static int to_ib_qp_access_flags(int mlx4_flags)
}
static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
- struct rdma_ah_attr *ib_ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct mlx4_qp_path *path)
{
struct mlx4_dev *dev = ibdev->dev;
int is_eth;
+ u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
- memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
- ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ rdma_ah_set_port_num(ah_attr, port_num);
- if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+ if (port_num == 0 || port_num > dev->caps.num_ports)
return;
- is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
- IB_LINK_LAYER_ETHERNET;
+ is_eth = rdma_port_get_link_layer(&ibdev->ib_dev,
+ rdma_ah_get_port_num(ah_attr)) ==
+ IB_LINK_LAYER_ETHERNET;
if (is_eth)
- ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
- ((path->sched_queue & 4) << 1);
+ rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
+ ((path->sched_queue & 4) << 1));
else
- ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+ rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
- ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
- ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
- ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
- if (ib_ah_attr->ah_flags) {
- ib_ah_attr->grh.sgid_index = path->mgid_index;
- ib_ah_attr->grh.hop_limit = path->hop_limit;
- ib_ah_attr->grh.traffic_class =
- (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
- ib_ah_attr->grh.flow_label =
- be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
- memcpy(ib_ah_attr->grh.dgid.raw,
- path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
+ rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
+ rdma_ah_set_static_rate(ah_attr,
+ path->static_rate ? path->static_rate - 5 : 0);
+ if (path->grh_mylmc & (1 << 7)) {
+ rdma_ah_set_grh(ah_attr, NULL,
+ be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
+ path->mgid_index,
+ path->hop_limit,
+ (be32_to_cpu(path->tclass_flowlabel)
+ >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(ah_attr, path->rgid);
}
}
@@ -3472,7 +3475,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_port_num =
+ rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
}
qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
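The mlx4 path-building hunks above all follow the same read-side pattern: test IB_AH_GRH through rdma_ah_get_ah_flags() and only then dereference the const GRH returned by rdma_ah_read_grh(). Below is a minimal sketch of that pattern, not part of the patch, assuming only the rdma_ah_* helpers from <rdma/ib_verbs.h> added earlier in this series; the fields copied here are illustrative rather than any specific driver's path format.

/* Illustrative only -- not part of the patch. */
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_fill_path(const struct rdma_ah_attr *ah,
			     u8 *hop_limit, u8 *sgid_index)
{
	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
		/* rdma_ah_read_grh() returns a const pointer; callers
		 * no longer poke at the grh field directly.
		 */
		const struct ib_global_route *grh = rdma_ah_read_grh(ah);

		*hop_limit = grh->hop_limit;
		*sgid_index = grh->sgid_index;
		return 0;
	}
	return -EINVAL;
}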
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 47529abb9223..5455f3f6cd77 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -37,28 +37,30 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
struct rdma_ah_attr *ah_attr,
enum rdma_link_layer ll)
{
- if (ah_attr->ah_flags & IB_AH_GRH) {
- memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
- ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ memcpy(ah->av.rgid, &grh->dgid, 16);
+ ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label |
(1 << 30) |
- ah_attr->grh.sgid_index << 20);
- ah->av.hop_limit = ah_attr->grh.hop_limit;
- ah->av.tclass = ah_attr->grh.traffic_class;
+ grh->sgid_index << 20);
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl = (ah_attr->static_rate << 4);
+ ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ll == IB_LINK_LAYER_ETHERNET) {
memcpy(ah->av.rmac, ah_attr->dmac, sizeof(ah_attr->dmac));
ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev,
- ah_attr->port_num,
- ah_attr->grh.sgid_index);
- ah->av.stat_rate_sl |= (ah_attr->sl & 0x7) << 1;
+ mlx5_get_roce_udp_sport(dev,
+ rdma_ah_get_port_num(ah_attr),
+ rdma_ah_read_grh(ah_attr)->sgid_index);
+ ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
} else {
- ah->av.rlid = cpu_to_be16(ah_attr->dlid);
- ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
- ah->av.stat_rate_sl |= (ah_attr->sl & 0xf);
+ ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
+ ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
+ ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
}
return &ah->ibah;
@@ -72,9 +74,11 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct mlx5_ib_dev *dev = to_mdev(pd->device);
enum rdma_link_layer ll;
- ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);
+ ll = pd->device->get_link_layer(pd->device,
+ rdma_ah_get_port_num(ah_attr));
- if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
+ if (ll == IB_LINK_LAYER_ETHERNET &&
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return ERR_PTR(-EINVAL);
if (ll == IB_LINK_LAYER_ETHERNET && udata) {
@@ -114,16 +118,16 @@ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
tmp = be32_to_cpu(ah->av.grh_gid_fl);
if (tmp & (1 << 30)) {
- ah_attr->ah_flags = IB_AH_GRH;
- ah_attr->grh.sgid_index = (tmp >> 20) & 0xff;
- ah_attr->grh.flow_label = tmp & 0xfffff;
- memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16);
- ah_attr->grh.hop_limit = ah->av.hop_limit;
- ah_attr->grh.traffic_class = ah->av.tclass;
+ rdma_ah_set_grh(ah_attr, NULL,
+ tmp & 0xfffff,
+ (tmp >> 20) & 0xff,
+ ah->av.hop_limit,
+ ah->av.tclass);
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.rgid);
}
- ah_attr->dlid = be16_to_cpu(ah->av.rlid);
- ah_attr->static_rate = ah->av.stat_rate_sl >> 4;
- ah_attr->sl = ah->av.stat_rate_sl & 0xf;
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(ah->av.rlid));
+ rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate_sl >> 4);
+ rdma_ah_set_sl(ah_attr, ah->av.stat_rate_sl & 0xf);
return 0;
}
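The query paths converted above rebuild the GRH with the same two calls: rdma_ah_set_grh() with a NULL dgid pointer plus the decoded flow label, sgid index, hop limit and traffic class, followed by rdma_ah_set_dgid_raw() for the raw GID bytes. A hedged sketch of that argument order, mirroring the call sites in this patch (the packed tclass_flowlabel layout is the one used above, the helper name is made up):

/* Illustrative only -- not part of the patch. */
#include <rdma/ib_verbs.h>

static void example_rebuild_grh(struct rdma_ah_attr *attr,
				u32 tclass_flowlabel, u8 sgid_index,
				u8 hop_limit, const u8 *raw_dgid)
{
	/* dgid is passed as NULL and filled in separately below */
	rdma_ah_set_grh(attr, NULL,
			tclass_flowlabel & 0xfffff,	  /* flow label */
			sgid_index,
			hop_limit,
			(tclass_flowlabel >> 20) & 0xff); /* traffic class */
	rdma_ah_set_dgid_raw(attr, raw_dgid);
}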
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 22c6739d9311..21acb30c3e9a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2211,58 +2211,60 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 path_flags, const struct ib_qp_attr *attr,
bool alt)
{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
enum ib_gid_type gid_type;
+ u8 ah_flags = rdma_ah_get_ah_flags(ah);
+ u8 sl = rdma_ah_get_sl(ah);
if (attr_mask & IB_QP_PKEY_INDEX)
path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
attr->pkey_index);
- if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >=
+ if (ah_flags & IB_AH_GRH) {
+ if (grh->sgid_index >=
dev->mdev->port_caps[port - 1].gid_table_len) {
pr_err("sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index,
+ grh->sgid_index,
dev->mdev->port_caps[port - 1].gid_table_len);
return -EINVAL;
}
}
-
if (ll == IB_LINK_LAYER_ETHERNET) {
- if (!(ah->ah_flags & IB_AH_GRH))
+ if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
+ err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
&gid_type);
if (err)
return err;
memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- ah->grh.sgid_index);
- path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
+ grh->sgid_index);
+ path->dci_cfi_prio_sl = (sl & 0x7) << 4;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- path->ecn_dscp = (ah->grh.traffic_class >> 2) & 0x3f;
+ path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
} else {
path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->fl_free_ar |=
(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
- path->rlid = cpu_to_be16(ah->dlid);
- path->grh_mlid = ah->src_path_bits & 0x7f;
- if (ah->ah_flags & IB_AH_GRH)
+ path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
+ path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
+ if (ah_flags & IB_AH_GRH)
path->grh_mlid |= 1 << 7;
- path->dci_cfi_prio_sl = ah->sl & 0xf;
+ path->dci_cfi_prio_sl = sl & 0xf;
}
- if (ah->ah_flags & IB_AH_GRH) {
- path->mgid_index = ah->grh.sgid_index;
- path->hop_limit = ah->grh.hop_limit;
+ if (ah_flags & IB_AH_GRH) {
+ path->mgid_index = grh->sgid_index;
+ path->hop_limit = grh->hop_limit;
path->tclass_flowlabel =
- cpu_to_be32((ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ (grh->flow_label));
+ memcpy(path->rgid, grh->dgid.raw, 16);
}
- err = ib_rate_to_mlx5(dev, ah->static_rate);
+ err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
path->static_rate = err;
@@ -2274,7 +2276,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
&qp->raw_packet_qp.sq,
- ah->sl & 0xf);
+ sl & 0xf);
return 0;
}
@@ -4250,33 +4252,34 @@ static int to_ib_qp_access_flags(int mlx5_flags)
}
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
- struct rdma_ah_attr *ib_ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct mlx5_qp_path *path)
{
struct mlx5_core_dev *dev = ibdev->mdev;
- memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
- ib_ah_attr->port_num = path->port;
+ memset(ah_attr, 0, sizeof(*ah_attr));
- if (ib_ah_attr->port_num == 0 ||
- ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
+ rdma_ah_set_port_num(ah_attr, path->port);
+ if (rdma_ah_get_port_num(ah_attr) == 0 ||
+ rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
return;
- ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;
+ rdma_ah_set_port_num(ah_attr, path->port);
+ rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
+
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
+ rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
+ rdma_ah_set_static_rate(ah_attr,
+ path->static_rate ? path->static_rate - 5 : 0);
+ if (path->grh_mlid & (1 << 7)) {
+ u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
- ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
- ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
- ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
- if (ib_ah_attr->ah_flags) {
- ib_ah_attr->grh.sgid_index = path->mgid_index;
- ib_ah_attr->grh.hop_limit = path->hop_limit;
- ib_ah_attr->grh.traffic_class =
- (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
- ib_ah_attr->grh.flow_label =
- be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
- memcpy(ib_ah_attr->grh.dgid.raw,
- path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
+ rdma_ah_set_grh(ah_attr, NULL,
+ tc_fl & 0xfffff,
+ path->mgid_index,
+ path->hop_limit,
+ (tc_fl >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(ah_attr, path->rgid);
}
}
@@ -4445,7 +4448,8 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
qp_attr->alt_pkey_index =
be16_to_cpu(context->alt_path.pkey_index);
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_port_num =
+ rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
}
qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 0c95668fe7ad..d315f526fc48 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -196,21 +196,26 @@ on_hca_fail:
ah->key = pd->ntmr.ibmr.lkey;
- av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
- av->g_slid = ah_attr->src_path_bits;
- av->dlid = cpu_to_be16(ah_attr->dlid);
+ av->port_pd = cpu_to_be32(pd->pd_num |
+ (rdma_ah_get_port_num(ah_attr) << 24));
+ av->g_slid = rdma_ah_get_path_bits(ah_attr);
+ av->dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
av->msg_sr = (3 << 4) | /* 2K message */
- mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
- av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr),
+ rdma_ah_get_port_num(ah_attr));
+ av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
av->g_slid |= 0x80;
- av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
- ah_attr->grh.sgid_index;
- av->hop_limit = ah_attr->grh.hop_limit;
+ av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) *
+ dev->limits.gid_table_len +
+ grh->sgid_index;
+ av->hop_limit = grh->hop_limit;
av->sl_tclass_flowlabel |=
- cpu_to_be32((ah_attr->grh.traffic_class << 20) |
- ah_attr->grh.flow_label);
- memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
+ cpu_to_be32((grh->traffic_class << 20) |
+ grh->flow_label);
+ memcpy(av->dgid, grh->dgid.raw, 16);
} else {
/* Arbel workaround -- low byte of GID must be 2 */
av->dgid[3] = cpu_to_be32(2);
@@ -291,29 +296,30 @@ int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
struct mthca_ah *ah = to_mah(ibah);
struct mthca_dev *dev = to_mdev(ibah->device);
+ u8 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
/* Only implement for MAD and memfree ah for now. */
if (ah->type == MTHCA_AH_ON_HCA)
return -ENOSYS;
memset(attr, 0, sizeof *attr);
- attr->dlid = be16_to_cpu(ah->av->dlid);
- attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
- attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24;
- attr->static_rate = mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
- attr->port_num);
- attr->src_path_bits = ah->av->g_slid & 0x7F;
- attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0;
-
- if (attr->ah_flags) {
- attr->grh.traffic_class =
- be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20;
- attr->grh.flow_label =
- be32_to_cpu(ah->av->sl_tclass_flowlabel) & 0xfffff;
- attr->grh.hop_limit = ah->av->hop_limit;
- attr->grh.sgid_index = ah->av->gid_index &
- (dev->limits.gid_table_len - 1);
- memcpy(attr->grh.dgid.raw, ah->av->dgid, 16);
+ rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
+ rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
+ rdma_ah_set_port_num(attr, port_num);
+ rdma_ah_set_static_rate(attr,
+ mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
+ port_num));
+ rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
+ if (mthca_ah_grh_present(ah)) {
+ u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);
+
+ rdma_ah_set_grh(attr, NULL,
+ tc_fl & 0xfffff,
+ ah->av->gid_index &
+ (dev->limits.gid_table_len - 1),
+ ah->av->hop_limit,
+ (tc_fl >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(attr, ah->av->dgid);
}
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6f2e448b49a5..45fe1502499b 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -82,9 +82,9 @@ static void update_sm_ah(struct mthca_dev *dev,
return;
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = lid;
- ah_attr.sl = sl;
- ah_attr.port_num = port_num;
+ rdma_ah_set_dlid(&ah_attr, lid);
+ rdma_ah_set_sl(&ah_attr, sl);
+ rdma_ah_set_port_num(&ah_attr, port_num);
new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
&ah_attr);
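update_sm_ah() above is the simplest consumer of the new setters: an LRH-only attribute built from dlid, sl and port, then handed to rdma_create_ah(). A hedged sketch of that sequence (example_make_sm_ah is a hypothetical helper, not part of the patch):

/* Illustrative only -- not part of the patch. */
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static struct ib_ah *example_make_sm_ah(struct ib_pd *pd, u16 dlid,
					u8 sl, u8 port_num)
{
	struct rdma_ah_attr ah_attr;

	memset(&ah_attr, 0, sizeof(ah_attr));
	rdma_ah_set_dlid(&ah_attr, dlid);
	rdma_ah_set_sl(&ah_attr, sl);
	rdma_ah_set_port_num(&ah_attr, port_num);

	return rdma_create_ah(pd, &ah_attr);
}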
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index baf65fe5286e..6ef9b6ac8904 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -394,31 +394,34 @@ static int to_ib_qp_access_flags(int mthca_flags)
}
static void to_rdma_ah_attr(struct mthca_dev *dev,
- struct rdma_ah_attr *ib_ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct mthca_qp_path *path)
{
- memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
- ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+ u8 port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
- if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
- return;
+ memset(ah_attr, 0, sizeof(*ah_attr));
- ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
- ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
- ib_ah_attr->static_rate = mthca_rate_to_ib(dev,
- path->static_rate & 0xf,
- ib_ah_attr->port_num);
- ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
- if (ib_ah_attr->ah_flags) {
- ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
- ib_ah_attr->grh.hop_limit = path->hop_limit;
- ib_ah_attr->grh.traffic_class =
- (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
- ib_ah_attr->grh.flow_label =
- be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
- memcpy(ib_ah_attr->grh.dgid.raw,
- path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ if (port_num == 0 || port_num > dev->limits.num_ports)
+ return;
+ rdma_ah_set_port_num(ah_attr, port_num);
+
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
+ rdma_ah_set_sl(ah_attr, be32_to_cpu(path->sl_tclass_flowlabel) >> 28);
+ rdma_ah_set_path_bits(ah_attr, path->g_mylmc & 0x7f);
+ rdma_ah_set_static_rate(ah_attr,
+ mthca_rate_to_ib(dev,
+ path->static_rate & 0xf,
+ port_num));
+ if (path->g_mylmc & (1 << 7)) {
+ u32 tc_fl = be32_to_cpu(path->sl_tclass_flowlabel);
+
+ rdma_ah_set_grh(ah_attr, NULL,
+ tc_fl & 0xfffff,
+ path->mgid_index &
+ (dev->limits.gid_table_len - 1),
+ path->hop_limit,
+ (tc_fl >> 20) & 0xff);
+ rdma_ah_set_dgid_raw(ah_attr, path->rgid);
}
}
@@ -473,7 +476,8 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
qp_attr->alt_pkey_index =
be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_port_num =
+ rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
}
qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
@@ -516,27 +520,33 @@ out:
static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah,
struct mthca_qp_path *path, u8 port)
{
- path->g_mylmc = ah->src_path_bits & 0x7f;
- path->rlid = cpu_to_be16(ah->dlid);
- path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
+ path->g_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
+ path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
+ path->static_rate = mthca_get_rate(dev, rdma_ah_get_static_rate(ah),
+ port);
+
+ if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
- if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+ if (grh->sgid_index >= dev->limits.gid_table_len) {
mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index, dev->limits.gid_table_len-1);
+ grh->sgid_index,
+ dev->limits.gid_table_len - 1);
return -1;
}
path->g_mylmc |= 1 << 7;
- path->mgid_index = ah->grh.sgid_index;
- path->hop_limit = ah->grh.hop_limit;
+ path->mgid_index = grh->sgid_index;
+ path->hop_limit = grh->hop_limit;
path->sl_tclass_flowlabel =
- cpu_to_be32((ah->sl << 28) |
- (ah->grh.traffic_class << 20) |
- (ah->grh.flow_label));
- memcpy(path->rgid, ah->grh.dgid.raw, 16);
- } else
- path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+ cpu_to_be32((rdma_ah_get_sl(ah) << 28) |
+ (grh->traffic_class << 20) |
+ (grh->flow_label));
+ memcpy(path->rgid, grh->dgid.raw, 16);
+ } else {
+ path->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah) <<
+ 28);
+ }
return 0;
}
@@ -681,7 +691,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
}
if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
- attr->alt_ah_attr.port_num))
+ rdma_ah_get_port_num(&attr->alt_ah_attr)))
goto out_mailbox;
qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 49ea7b6cbebe..afcbd2acf835 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -531,7 +531,7 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
{
struct in6_addr in6;
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ memcpy(&in6, rdma_ah_read_grh(ah_attr)->dgid.raw, sizeof(in6));
if (rdma_is_multicast_addr(&in6))
rdma_get_mcast_mac(&in6, mac_addr);
else if (rdma_link_local_addr(&in6))
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 71723db83e9b..97a829d98ffe 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -81,6 +81,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
u16 proto_num = 0;
u8 nxthdr = 0x11;
struct iphdr ipv4;
+ const struct ib_global_route *ib_grh;
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
@@ -120,32 +121,33 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
- ah->sgid_index = attr->grh.sgid_index;
+ ib_grh = rdma_ah_read_grh(attr);
+ ah->sgid_index = ib_grh->sgid_index;
/* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
if (ah->hdr_type == RDMA_NETWORK_IPV4) {
*((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
- attr->grh.traffic_class);
+ ib_grh->traffic_class);
ipv4.id = cpu_to_be16(pdid);
ipv4.frag_off = htons(IP_DF);
ipv4.tot_len = htons(0);
- ipv4.ttl = attr->grh.hop_limit;
+ ipv4.ttl = ib_grh->hop_limit;
ipv4.protocol = nxthdr;
rdma_gid2ip(&sgid_addr._sockaddr, sgid);
ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
- rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
+ rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
} else {
memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
grh.tclass_flow = cpu_to_be32((6 << 28) |
- (attr->grh.traffic_class << 24) |
- attr->grh.flow_label);
- memcpy(&grh.dgid[0], attr->grh.dgid.raw,
- sizeof(attr->grh.dgid.raw));
+ (ib_grh->traffic_class << 24) |
+ ib_grh->flow_label);
+ memcpy(&grh.dgid[0], ib_grh->dgid.raw,
+ sizeof(ib_grh->dgid.raw));
grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
(nxthdr << 8) |
- attr->grh.hop_limit);
+ ib_grh->hop_limit);
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
}
if (*isvlan)
@@ -165,11 +167,13 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ib_gid_attr sgid_attr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ const struct ib_global_route *grh;
union ib_gid sgid;
- if (!(attr->ah_flags & IB_AH_GRH))
+ if (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
return ERR_PTR(-EINVAL);
+ grh = rdma_ah_read_grh(attr);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
@@ -181,7 +185,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
if (status)
goto av_err;
- status = ib_get_cached_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid,
+ status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index, &sgid,
&sgid_attr);
if (status) {
pr_err("%s(): Failed to query sgid, status = %d\n",
@@ -197,10 +201,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
if ((pd->uctx) &&
- (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
- (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
- status = rdma_addr_find_l2_eth_by_grh(&sgid, &attr->grh.dgid,
- attr->dmac, &vlan_tag,
+ (!rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw)) &&
+ (!rdma_link_local_addr((struct in6_addr *)grh->dgid.raw))) {
+ status = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ attr->dmac,
+ &vlan_tag,
&sgid_attr.ndev->ifindex,
NULL);
if (status) {
@@ -216,7 +221,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
/* if pd is for the user process, pass the ah_id to user space */
if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
- ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
+ ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
*ahid_addr = 0;
*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
if (ocrdma_is_udp_encap_supported(dev)) {
@@ -253,21 +258,22 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
struct ocrdma_av *av = ah->av;
struct ocrdma_grh *grh;
- attr->ah_flags |= IB_AH_GRH;
+
if (ah->av->valid & OCRDMA_AV_VALID) {
grh = (struct ocrdma_grh *)((u8 *)ah->av +
sizeof(struct ocrdma_eth_vlan));
- attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
+ rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >> 13);
} else {
grh = (struct ocrdma_grh *)((u8 *)ah->av +
sizeof(struct ocrdma_eth_basic));
- attr->sl = 0;
+ rdma_ah_set_sl(attr, 0);
}
- memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));
- attr->grh.sgid_index = ah->sgid_index;
- attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;
- attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;
- attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff;
+ rdma_ah_set_grh(attr, NULL,
+ be32_to_cpu(grh->tclass_flow) & 0xffffffff,
+ ah->sgid_index,
+ be32_to_cpu(grh->pdid_hoplimit) & 0xff,
+ be32_to_cpu(grh->tclass_flow) >> 24);
+ rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
return 0;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0ca52fa920a5..dcb5942f9fb5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2510,25 +2510,28 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+ const struct ib_global_route *grh;
- if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
+ if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) == 0)
return -EINVAL;
+ grh = rdma_ah_read_grh(ah_attr);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
cmd->params.tclass_sq_psn |=
- (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+ (grh->traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
- (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
- cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
+ (grh->flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+ cmd->params.rnt_rc_sl_fl |= (rdma_ah_get_sl(ah_attr) <<
+ OCRDMA_QP_PARAMS_SL_SHIFT);
cmd->params.hop_lmt_rq_psn |=
- (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
+ (grh->hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
/* GIDs */
- memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
+ memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
sizeof(cmd->params.dgid));
- status = ib_get_cached_gid(&dev->ibdev, 1, ah_attr->grh.sgid_index,
+ status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index,
&sgid, &sgid_attr);
if (!status && sgid_attr.ndev) {
vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
@@ -2540,7 +2543,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
if (!memcmp(&sgid, &zgid, sizeof(zgid)))
return -EINVAL;
- qp->sgid_idx = ah_attr->grh.sgid_index;
+ qp->sgid_idx = grh->sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
if (status)
@@ -2551,7 +2554,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
if (hdr_type == RDMA_NETWORK_IPV4) {
rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
+ rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
memcpy(&cmd->params.dgid[0],
&dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
memcpy(&cmd->params.sgid[0],
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c57e387b55a2..caec48c6e1ec 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1598,23 +1598,23 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
- memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
- sizeof(params.dgid));
- qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
- OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
- qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
- qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
- OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
- OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
- qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_TCLASS_MASK) >>
- OCRDMA_QP_PARAMS_TCLASS_SHIFT;
-
- qp_attr->ah_attr.ah_flags = IB_AH_GRH;
- qp_attr->ah_attr.port_num = 1;
- qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
- OCRDMA_QP_PARAMS_SL_MASK) >>
- OCRDMA_QP_PARAMS_SL_SHIFT;
+
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
+ params.rnt_rc_sl_fl &
+ OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
+ qp->sgid_idx,
+ (params.hop_lmt_rq_psn &
+ OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
+ OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
+ (params.tclass_sq_psn &
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
+
+ rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+ rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
+ OCRDMA_QP_PARAMS_SL_MASK) >>
+ OCRDMA_QP_PARAMS_SL_SHIFT);
qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
@@ -1627,8 +1627,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->min_rnr_timer = 0;
qp_attr->pkey_index = 0;
qp_attr->port_num = 1;
- qp_attr->ah_attr.src_path_bits = 0;
- qp_attr->ah_attr.static_rate = 0;
+ rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
+ rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
qp_attr->alt_pkey_index = 0;
qp_attr->alt_port_num = 0;
qp_attr->alt_timeout = 0;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 42df61512e6d..aa08c76a4245 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -450,15 +450,20 @@ static inline int qedr_get_dmac(struct qedr_dev *dev,
{
union ib_gid zero_sgid = { { 0 } };
struct in6_addr in6;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u8 *dmac;
- if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+ if (!memcmp(&grh->dgid, &zero_sgid, sizeof(union ib_gid))) {
DP_ERR(dev, "Local port GID not supported\n");
eth_zero_addr(mac_addr);
return -EINVAL;
}
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
- ether_addr_copy(mac_addr, ah_attr->dmac);
+ memcpy(&in6, grh->dgid.raw, sizeof(in6));
+ dmac = rdma_ah_retrieve_dmac(ah_attr);
+ if (!dmac)
+ return -EINVAL;
+ ether_addr_copy(mac_addr, dmac);
return 0;
}
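The qedr_get_dmac() hunk above is where the patch uses rdma_ah_retrieve_dmac(); the returned pointer is checked before ether_addr_copy(), as the new code does, rather than copying ah_attr->dmac unconditionally. A minimal sketch of that defensive pattern (example_copy_dmac is hypothetical):

/* Illustrative only -- not part of the patch. */
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>

static int example_copy_dmac(struct rdma_ah_attr *attr, u8 *mac_out)
{
	u8 *dmac = rdma_ah_retrieve_dmac(attr);

	if (!dmac)	/* bail out if no DMAC is available */
		return -EINVAL;
	ether_addr_copy(mac_out, dmac);
	return 0;
}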
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 118966e8bc3b..7b151d4ae9ac 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -244,7 +244,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
{
bool has_vlan = false, has_grh_ipv6 = true;
struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
- struct ib_global_route *grh = &ah_attr->grh;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
union ib_gid sgid;
int send_size = 0;
u16 vlan_id = 0;
@@ -260,12 +260,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
for (i = 0; i < swr->num_sge; ++i)
send_size += swr->sg_list[i].length;
- rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+ rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &sgid_attr);
if (rc) {
DP_ERR(dev,
"gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
- ah_attr->port_num, grh->sgid_index);
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index);
return rc;
}
@@ -277,7 +278,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
- ah_attr->grh.sgid_index);
+ grh->sgid_index);
return -ENOENT;
}
@@ -341,13 +342,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
u32 ipv4_addr;
udh->ip4.protocol = IPPROTO_UDP;
- udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+ udh->ip4.tos = htonl(grh->flow_label);
udh->ip4.frag_off = htons(IP_DF);
- udh->ip4.ttl = ah_attr->grh.hop_limit;
+ udh->ip4.ttl = grh->hop_limit;
ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
udh->ip4.saddr = ipv4_addr;
- ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+ ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
udh->ip4.daddr = ipv4_addr;
/* note: checksum is calculated by the device */
}
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
index 78efb1b056d1..a55916323ea9 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -39,7 +39,7 @@
#define QEDR_ROCE_V2_UDP_SPORT (0000)
-static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
{
return *(u32 *)(void *)&gid[12];
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 8ced8ec954c5..e9930d5b0e0b 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1084,13 +1084,15 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
{
enum rdma_network_type nw_type;
struct ib_gid_attr gid_attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
union ib_gid gid;
u32 ipv4_addr;
int rc = 0;
int i;
- rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
- attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+ rc = ib_get_cached_gid(ibqp->device,
+ rdma_ah_get_port_num(&attr->ah_attr),
+ grh->sgid_index, &gid, &gid_attr);
if (rc)
return rc;
@@ -1107,7 +1109,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
- &attr->ah_attr.grh.dgid,
+ &grh->dgid,
sizeof(qp_params->dgid));
qp_params->roce_mode = ROCE_V2_IPV6;
SET_FIELD(qp_params->modify_flags,
@@ -1117,7 +1119,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
- &attr->ah_attr.grh.dgid,
+ &grh->dgid,
sizeof(qp_params->dgid));
qp_params->roce_mode = ROCE_V1;
break;
@@ -1127,7 +1129,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
qp_params->sgid.ipv4_addr = ipv4_addr;
ipv4_addr =
- qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+ qedr_get_ipv4_from_gid(grh->dgid.raw);
qp_params->dgid.ipv4_addr = ipv4_addr;
SET_FIELD(qp_params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
@@ -1749,6 +1751,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qed_rdma_modify_qp_in_params qp_params = { 0 };
struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
enum ib_qp_state old_qp_state, new_qp_state;
int rc = 0;
@@ -1831,17 +1834,17 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
SET_FIELD(qp_params.modify_flags,
QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
- qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
- qp_params.flow_label = attr->ah_attr.grh.flow_label;
- qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+ qp_params.traffic_class_tos = grh->traffic_class;
+ qp_params.flow_label = grh->flow_label;
+ qp_params.hop_limit_ttl = grh->hop_limit;
- qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+ qp->sgid_idx = grh->sgid_index;
rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
if (rc) {
DP_ERR(dev,
"modify qp: problems with GID index %d (rc=%d)\n",
- attr->ah_attr.grh.sgid_index, rc);
+ grh->sgid_index, rc);
return rc;
}
@@ -2026,25 +2029,20 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
- memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
- sizeof(qp_attr->ah_attr.grh.dgid.raw));
-
- qp_attr->ah_attr.grh.flow_label = params.flow_label;
- qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
- qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
- qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
-
- qp_attr->ah_attr.ah_flags = IB_AH_GRH;
- qp_attr->ah_attr.port_num = 1;
- qp_attr->ah_attr.sl = 0;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
+ params.flow_label, qp->sgid_idx,
+ params.hop_limit_ttl, params.traffic_class_tos);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
+ rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+ rdma_ah_set_sl(&qp_attr->ah_attr, 0);
qp_attr->timeout = params.timeout;
qp_attr->rnr_retry = params.rnr_retry;
qp_attr->retry_cnt = params.retry_cnt;
qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
qp_attr->pkey_index = params.pkey_index;
qp_attr->port_num = 1;
- qp_attr->ah_attr.src_path_bits = 0;
- qp_attr->ah_attr.static_rate = 0;
+ rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
+ rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
qp_attr->alt_pkey_index = 0;
qp_attr->alt_port_num = 0;
qp_attr->alt_timeout = 0;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 859361994665..da295e0392ed 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -717,9 +717,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
spin_lock_irqsave(&ibp->rvp.lock, flags);
if (ibp->rvp.sm_ah) {
if (smlid != ibp->rvp.sm_lid)
- ibp->rvp.sm_ah->attr.dlid = smlid;
+ rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr,
+ smlid);
if (msl != ibp->rvp.sm_sl)
- ibp->rvp.sm_ah->attr.sl = msl;
+ rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
}
spin_unlock_irqrestore(&ibp->rvp.lock, flags);
if (smlid != ibp->rvp.sm_lid)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 2ac0c0f79e74..5984981e7dd4 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -489,7 +489,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
qp->s_last, qp->s_acked, qp->s_cur,
qp->s_tail, qp->s_head, qp->s_size,
qp->remote_qpn,
- qp->remote_ah_attr.dlid);
+ rdma_ah_get_dlid(&qp->remote_ah_attr));
}
#endif
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 023498745b8a..fc8b88514da5 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -234,7 +234,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
int delta;
ohdr = &priv->s_hdr->u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
ohdr = &priv->s_hdr->u.l.oth;
/* Sending responses has higher priority over sending requests. */
@@ -637,9 +637,11 @@ void qib_send_rc_ack(struct rvt_qp *qp)
lrh0 = QIB_LRH_BTH;
/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
hwords = 6;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH)) {
hwords += qib_make_grh(ibp, &hdr.u.l.grh,
- &qp->remote_ah_attr.grh, hwords, 0);
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ hwords, 0);
ohdr = &hdr.u.l.oth;
lrh0 = QIB_LRH_GRH;
}
@@ -653,12 +655,13 @@ void qib_send_rc_ack(struct rvt_qp *qp)
IB_AETH_CREDIT_SHIFT));
else
ohdr->u.aeth = rvt_compute_aeth(qp);
- lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
- qp->remote_ah_attr.sl << 4;
+ lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
+ rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid |
+ rdma_ah_get_path_bits(&qp->remote_ah_attr));
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
@@ -1904,8 +1907,8 @@ send_last:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
/* zero fields that are N/A */
wc.vendor_err = 0;
wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 6e1adf709483..bd09de7c6e56 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -195,7 +195,7 @@ void qib_migrate_qp(struct rvt_qp *qp)
qp->s_mig_state = IB_MIG_MIGRATED;
qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
+ qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
qp->s_pkey_index = qp->s_alt_pkey_index;
ev.device = qp->ibqp.device;
@@ -235,18 +235,23 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
if (!has_grh) {
- if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH)
goto err;
} else {
- if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+ const struct ib_global_route *grh;
+
+ if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH))
goto err;
- guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+ grh = rdma_ah_read_grh(&qp->alt_ah_attr);
+ guid = get_sguid(ibp, grh->sgid_index);
if (!gid_ok(&hdr->u.l.grh.dgid,
ibp->rvp.gid_prefix, guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
- qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
- qp->alt_ah_attr.grh.dgid.global.interface_id))
+ grh->dgid.global.subnet_prefix,
+ grh->dgid.global.interface_id))
goto err;
}
if (!qib_pkey_ok((u16)bth0,
@@ -259,27 +264,33 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
- ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+ if ((be16_to_cpu(hdr->lrh[3]) !=
+ rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
+ ppd_from_ibp(ibp)->port !=
+ rdma_ah_get_port_num(&qp->alt_ah_attr))
goto err;
spin_lock_irqsave(&qp->s_lock, flags);
qib_migrate_qp(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else {
if (!has_grh) {
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH)
goto err;
} else {
- if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+ const struct ib_global_route *grh;
+
+ if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH))
goto err;
- guid = get_sguid(ibp,
- qp->remote_ah_attr.grh.sgid_index);
+ grh = rdma_ah_read_grh(&qp->remote_ah_attr);
+ guid = get_sguid(ibp, grh->sgid_index);
if (!gid_ok(&hdr->u.l.grh.dgid,
ibp->rvp.gid_prefix, guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
- qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
- qp->remote_ah_attr.grh.dgid.global.interface_id))
+ grh->dgid.global.subnet_prefix,
+ grh->dgid.global.interface_id))
goto err;
}
if (!qib_pkey_ok((u16)bth0,
@@ -292,7 +303,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 */
- if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
+ if (be16_to_cpu(hdr->lrh[3]) !=
+ rdma_ah_get_dlid(&qp->remote_ah_attr) ||
ppd_from_ibp(ibp)->port != qp->port_num)
goto err;
if (qp->s_mig_state == IB_MIG_REARM &&
@@ -528,8 +540,8 @@ again:
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
@@ -619,7 +631,7 @@ done:
* Return the size of the header in 32 bit words.
*/
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords)
+ const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
hdr->version_tclass_flow =
cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
@@ -652,20 +664,23 @@ void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
extra_bytes = -qp->s_cur_size & 3;
nwords = (qp->s_cur_size + extra_bytes) >> 2;
lrh0 = QIB_LRH_BTH;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
- &qp->remote_ah_attr.grh,
- qp->s_hdrwords, nwords);
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
+ qp->s_hdrwords +=
+ qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
}
- lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
- qp->remote_ah_attr.sl << 4;
+ lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
+ rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ priv->s_hdr->lrh[1] =
+ cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
priv->s_hdr->lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
- qp->remote_ah_attr.src_path_bits);
+ priv->s_hdr->lrh[3] =
+ cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ rdma_ah_get_path_bits(&qp->remote_ah_attr));
bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
if (qp->s_mig_state == IB_MIG_MIGRATED)
@@ -703,7 +718,8 @@ void qib_do_send(struct rvt_qp *qp)
if ((qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC) &&
- (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+ (rdma_ah_get_dlid(&qp->remote_ah_attr) &
+ ~((1 << ppd->lmc) - 1)) == ppd->lid) {
qib_ruc_loopback(qp);
return;
}
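Constifying qib_make_grh() above is what lets callers hand the const pointer from rdma_ah_read_grh() straight through instead of taking a local copy of the GRH. A short self-contained sketch of that shape, under the assumption that the consumer only reads the GRH (both helpers here are illustrative, not driver code):

/* Illustrative only -- not part of the patch. */
#include <rdma/ib_verbs.h>

/* a consumer that only reads the GRH can take it as const ... */
static u32 example_grh_words(const struct ib_global_route *grh)
{
	return grh->hop_limit ? sizeof(struct ib_grh) / sizeof(u32) : 0;
}

/* ... so the const pointer from rdma_ah_read_grh() flows straight in */
static u32 example_hdr_words(const struct rdma_ah_attr *attr, u32 hwords)
{
	if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH)
		hwords += example_grh_words(rdma_ah_read_grh(attr));
	return hwords;
}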
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index b337b60fc40d..498e2202e72c 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -74,7 +74,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
}
ohdr = &priv->s_hdr->u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
ohdr = &priv->s_hdr->u.l.oth;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
@@ -394,8 +394,8 @@ last_imm:
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
/* zero fields that are N/A */
wc.vendor_err = 0;
wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index a99d7e9dba86..341a123ee95c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -92,13 +92,13 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- lid = ppd->lid | (ah_attr->src_path_bits &
+ lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
- ah_attr->sl,
+ rdma_ah_get_sl(ah_attr),
sqp->ibqp.qp_num, qp->ibqp.qp_num,
cpu_to_be16(lid),
- cpu_to_be16(ah_attr->dlid));
+ cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
goto drop;
}
}
@@ -116,13 +116,13 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (unlikely(qkey != qp->qkey)) {
u16 lid;
- lid = ppd->lid | (ah_attr->src_path_bits &
+ lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
- ah_attr->sl,
+ rdma_ah_get_sl(ah_attr),
sqp->ibqp.qp_num, qp->ibqp.qp_num,
cpu_to_be16(lid),
- cpu_to_be16(ah_attr->dlid));
+ cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
goto drop;
}
}
@@ -168,11 +168,11 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
goto bail_unlock;
}
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
struct ib_grh grh;
- struct ib_global_route grd = ah_attr->grh;
+ const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
- qib_make_grh(ibp, &grh, &grd, 0, 0);
+ qib_make_grh(ibp, &grh, grd, 0, 0);
qib_copy_sge(&qp->r_sge, &grh,
sizeof(grh), 1);
wc.wc_flags |= IB_WC_GRH;
@@ -220,9 +220,10 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
wc.src_qp = sqp->ibqp.qp_num;
wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
swqe->ud_wr.pkey_index : 0;
- wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
- wc.sl = ah_attr->sl;
- wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+ wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1));
+ wc.sl = rdma_ah_get_sl(ah_attr);
+ wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
@@ -289,14 +290,15 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE))
+ if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ if (rdma_ah_get_dlid(ah_attr) !=
+ be16_to_cpu(IB_LID_PERMISSIVE))
this_cpu_inc(ibp->pmastats->n_multicast_xmit);
else
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
} else {
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+ lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) {
unsigned long tflags = *flags;
/*
@@ -328,17 +330,17 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
qp->s_hdrwords = 7;
qp->s_cur_size = wqe->length;
qp->s_cur_sge = &qp->s_sge;
- qp->s_srate = ah_attr->static_rate;
+ qp->s_srate = rdma_ah_get_static_rate(ah_attr);
qp->s_wqe = wqe;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
qp->s_sge.total_len = wqe->length;
- if (ah_attr->ah_flags & IB_AH_GRH) {
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
/* Header size in 32-bit words. */
qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
- &ah_attr->grh,
+ rdma_ah_read_grh(ah_attr),
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
ohdr = &priv->s_hdr->u.l.oth;
@@ -357,18 +359,20 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
} else
bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- lrh0 |= ah_attr->sl << 4;
+ lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
if (qp->ibqp.qp_type == IB_QPT_SMI)
lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
else
- lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
+ lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ priv->s_hdr->lrh[1] =
+ cpu_to_be16(rdma_ah_get_dlid(ah_attr)); /* DEST LID */
priv->s_hdr->lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
lid = ppd->lid;
if (lid) {
- lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
priv->s_hdr->lrh[3] = cpu_to_be16(lid);
} else
priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
@@ -382,8 +386,9 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
/*
* Use the multicast QP if the destination LID is a multicast LID.
*/
- ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
- ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ?
+ ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE) &&
+ rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
cpu_to_be32(QIB_MULTICAST_QPN) :
cpu_to_be32(wqe->ud_wr.remote_qpn);
ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index de04acab0768..aa28dbd18e6d 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1338,7 +1338,7 @@ static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
{
- if (ah_attr->sl > 15)
+ if (rdma_ah_get_sl(ah_attr) > 15)
return -EINVAL;
return 0;
@@ -1356,9 +1356,9 @@ static void qib_notify_new_ah(struct ib_device *ibdev,
* done being setup. We can however modify things which we need to set.
*/
- ibp = to_iport(ibdev, ah_attr->port_num);
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
- ah->vl = ibp->sl_to_vl[ah->attr.sl];
+ ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)];
ah->log_pmtu = ilog2(ppd->ibmtu);
}
@@ -1367,10 +1367,12 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
struct rdma_ah_attr attr;
struct ib_ah *ah = ERR_PTR(-EINVAL);
struct rvt_qp *qp0;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u8 port_num = ppd->port;
memset(&attr, 0, sizeof(attr));
- attr.dlid = dlid;
- attr.port_num = ppd_from_ibp(ibp)->port;
+ rdma_ah_set_dlid(&attr, dlid);
+ rdma_ah_set_port_num(&attr, port_num);
rcu_read_lock();
qp0 = rcu_dereference(ibp->rvp.qp[0]);
if (qp0)
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index a72c3099861d..da0db5485ddc 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -335,7 +335,7 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, struct rvt_qp *qp, u32 bth0);
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords);
+ const struct ib_global_route *grh, u32 hwords, u32 nwords);
void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
u32 bth0, u32 bth2);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
index 8200b03efd8f..1c6e80a008c6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -280,25 +280,25 @@ void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
const struct pvrdma_ah_attr *src)
{
- pvrdma_global_route_to_ib(&dst->grh, &src->grh);
- dst->dlid = src->dlid;
- dst->sl = src->sl;
- dst->src_path_bits = src->src_path_bits;
- dst->static_rate = src->static_rate;
- dst->ah_flags = src->ah_flags;
- dst->port_num = src->port_num;
- memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+ pvrdma_global_route_to_ib(rdma_ah_retrieve_grh(dst), &src->grh);
+ rdma_ah_set_dlid(dst, src->dlid);
+ rdma_ah_set_sl(dst, src->sl);
+ rdma_ah_set_path_bits(dst, src->src_path_bits);
+ rdma_ah_set_static_rate(dst, src->static_rate);
+ rdma_ah_set_ah_flags(dst, src->ah_flags);
+ rdma_ah_set_port_num(dst, src->port_num);
+ memcpy(dst->dmac, &src->dmac, ETH_ALEN);
}
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
const struct rdma_ah_attr *src)
{
- ib_global_route_to_pvrdma(&dst->grh, &src->grh);
- dst->dlid = src->dlid;
- dst->sl = src->sl;
- dst->src_path_bits = src->src_path_bits;
- dst->static_rate = src->static_rate;
- dst->ah_flags = src->ah_flags;
- dst->port_num = src->port_num;
- memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+ ib_global_route_to_pvrdma(&dst->grh, rdma_ah_read_grh(src));
+ dst->dlid = rdma_ah_get_dlid(src);
+ dst->sl = rdma_ah_get_sl(src);
+ dst->src_path_bits = rdma_ah_get_path_bits(src);
+ dst->static_rate = rdma_ah_get_static_rate(src);
+ dst->ah_flags = rdma_ah_get_ah_flags(src);
+ dst->port_num = rdma_ah_get_port_num(src);
+ memcpy(&dst->dmac, src->dmac, sizeof(dst->dmac));
}
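The pvrdma conversion helpers above show the full accessor surface in one place: each former field assignment maps onto exactly one rdma_ah_get_*()/rdma_ah_set_*() call, with rdma_ah_retrieve_grh() used only where a writable GRH is needed. A compact sketch of such a field-for-field copy between two generic attributes (example_copy_ah_attr is hypothetical and assumes the flat dmac field present at this point in the series):

/* Illustrative only -- not part of the patch. */
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>

static void example_copy_ah_attr(struct rdma_ah_attr *dst,
				 const struct rdma_ah_attr *src)
{
	/* struct-assign the whole GRH through the writable accessor */
	*rdma_ah_retrieve_grh(dst) = *rdma_ah_read_grh(src);
	rdma_ah_set_dlid(dst, rdma_ah_get_dlid(src));
	rdma_ah_set_sl(dst, rdma_ah_get_sl(src));
	rdma_ah_set_path_bits(dst, rdma_ah_get_path_bits(src));
	rdma_ah_set_static_rate(dst, rdma_ah_get_static_rate(src));
	rdma_ah_set_ah_flags(dst, rdma_ah_get_ah_flags(src));
	rdma_ah_set_port_num(dst, rdma_ah_get_port_num(src));
	ether_addr_copy(dst->dmac, src->dmac);
}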
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index ae5a03bb39d1..6b11e574b21c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -526,14 +526,17 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct pvrdma_dev *dev = to_vdev(pd->device);
struct pvrdma_ah *ah;
enum rdma_link_layer ll;
+ const struct ib_global_route *grh;
+ u8 port_num = rdma_ah_get_port_num(ah_attr);
- if (!(ah_attr->ah_flags & IB_AH_GRH))
+ if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return ERR_PTR(-EINVAL);
-
- ll = rdma_port_get_link_layer(pd->device, ah_attr->port_num);
+ ll = rdma_port_get_link_layer(pd->device,
+ rdma_ah_get_port_num(ah_attr));
+ grh = rdma_ah_read_grh(ah_attr);
if (ll != IB_LINK_LAYER_ETHERNET ||
- rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw))
+ rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
return ERR_PTR(-EINVAL);
if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
@@ -545,15 +548,15 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
return ERR_PTR(-ENOMEM);
}
- ah->av.port_pd = to_vpd(pd)->pd_handle | (ah_attr->port_num << 24);
- ah->av.src_path_bits = ah_attr->src_path_bits;
+ ah->av.port_pd = to_vpd(pd)->pd_handle | (port_num << 24);
+ ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
ah->av.src_path_bits |= 0x80;
- ah->av.gid_index = ah_attr->grh.sgid_index;
- ah->av.hop_limit = ah_attr->grh.hop_limit;
- ah->av.sl_tclass_flowlabel = (ah_attr->grh.traffic_class << 20) |
- ah_attr->grh.flow_label;
- memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
- memcpy(ah->av.dmac, ah_attr->dmac, 6);
+ ah->av.gid_index = grh->sgid_index;
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.sl_tclass_flowlabel = (grh->traffic_class << 20) |
+ grh->flow_label;
+ memcpy(ah->av.dgid, grh->dgid.raw, 16);
+ memcpy(ah->av.dmac, ah_attr->dmac, ETH_ALEN);
ah->ibah.device = pd->device;
ah->ibah.pd = pd;