author      David S. Miller    2017-02-07 13:44:08 -0500
committer   David S. Miller    2017-02-07 13:44:08 -0500
commit      501ec18757abdaba95440dbbd1d5df0611f813c4 (patch)
tree        6dc6aae36d3880ce2b13fc3ccd60b092e90bfca5 /drivers/infiniband
parent      219189e764be35372c298bee6492fc4c870b6ffd (diff)
parent      8ca967ab67671f07ac7daef4f854559bc66799a3 (diff)
Merge tag 'mlx5-updates-2017-01-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
mlx5-updates-2017-01-31

This series includes some updates to the mlx5 core and ethernet driver.

We got one patch from Or to fix some static checker warnings.

The second patch, from Dan, adds support for a 128B cache line in the HCA: it configures the hardware to use 128B alignment only on systems with 128B cache lines, and otherwise keeps the current default of 64B.

From me, three patches to support no inline copy on TX on ConnectX-5 and later HCAs, starting with two small infrastructure and refactoring patches, followed by two patches that add the actual support for both the xmit ndo and the XDP xmit routines.

The last patch is a simple fix to return a mistakenly removed pointer from the SQ structure, which was removed in the previous mlx5 4K UAR submission.

Saeed.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/hw/mlx5/qp.c   6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6a83fb32599d..e31bf11ae64f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2984,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
 	if (wr->opcode == IB_WR_LSO) {
 		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
-		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
 		u64 left, leftlen, copysz;
 		void *pdata = ud_wr->header;
 		left = ud_wr->hlen;
 		eseg->mss = cpu_to_be16(ud_wr->mss);
-		eseg->inline_hdr_sz = cpu_to_be16(left);
+		eseg->inline_hdr.sz = cpu_to_be16(left);
 		/*
 		 * check if there is space till the end of queue, if yes,
 		 * copy all in one shot, otherwise copy till the end of queue,
 		 * rollback and than the copy the left
 		 */
-		leftlen = qend - (void *)eseg->inline_hdr_start;
+		leftlen = qend - (void *)eseg->inline_hdr.start;
 		copysz = min_t(u64, leftlen, left);
 		memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
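
The hunk above is purely mechanical: it follows a rename in the mlx5 core headers that groups the Ethernet segment's inline-header fields into a named sub-structure. As a rough sketch only (the authoritative layout lives in the mlx5 core headers; everything here other than mss and the inline_hdr member names shown in the diff is an assumption made for illustration):

	/*
	 * Illustrative sketch, not the verbatim kernel definition:
	 * fields before mss are omitted and the size of start[] is an
	 * assumption; only the names used in the hunk above are taken
	 * from the source.
	 */
	struct mlx5_wqe_eth_seg_sketch {
		/* ... leading fields of the real segment omitted ... */
		__be16	mss;			/* filled from ud_wr->mss above */
		struct {
			__be16	sz;		/* formerly inline_hdr_sz */
			u8	start[2];	/* formerly inline_hdr_start[] */
		} inline_hdr;			/* newly grouped sub-struct */
	};

With a layout along these lines, the accesses change mechanically from eseg->inline_hdr_sz and eseg->inline_hdr_start to eseg->inline_hdr.sz and eseg->inline_hdr.start, while sizeof(eseg->inline_hdr.start) still yields the number of inline header bytes held directly in the segment, so the copy/rollback logic in set_eth_seg() is otherwise unchanged.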