path: root/include/linux/mlx5/cq.h
author		Doug Ledford	2018-02-22 20:52:28 -0500
committer	Doug Ledford	2018-02-22 20:52:28 -0500
commit		f76a5c75d93b0e9c14aa4d4422e7e7f8382a7ecb (patch)
tree		5088d1f5df40eea66b8668e8c3ee61f660f6a188 /include/linux/mlx5/cq.h
parent		3a148896b24adf8688dc0c59af54531931677a40 (diff)
parent		388ca8be00370db132464e27f745b8a0add19fcb (diff)
Merge tag 'mlx5-updates-2018-02-21' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux into k.o/wip/dl-for-next
mlx5-updates-2018-02-21

This series includes shared code updates for the mlx5 core driver, used by both the netdev and rdma subsystems.

By Saeed: the first six patches of the series address a performance issue and should provide a performance boost for multi-core, IRQ-interrupt-hungry workloads. The issue is fixed in the first patch; the remaining patches refactor the code in light of this fix. The problem being fixed is a spinlock, shared across all HCA IRQs, that protects the CQ database. To solve this, the CQ database and its spinlock are simply moved to be per EQ (IRQ), and thus per core.

By Yonatan: fragmented completion queue (CQ) buffers for RDMA. The core driver now creates fragmented CQ buffers rather than one large contiguous memory buffer. This scheme already exists and is used by the netdev CQs; the patch shares that code with the rdma CQ creation flow and makes use of the new API in the mlx5_ib driver.

Thanks, Saeed.

Merged into the rdma-next tree as well as the net-next tree to prevent conflicts in future patches between the two trees.

Signed-off-by: Doug Ledford <dledford@redhat.com>
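To make the locking change concrete, here is a minimal sketch of the before/after data layout. This is simplified from the driver's structures, not the exact upstream definitions; the surrounding members are illustrative:

	/*
	 * Sketch only: simplified from the mlx5 core structures.
	 *
	 * Before this series, one mlx5_cq_table (and its spinlock) lived in
	 * the device-wide private data and was shared by every EQ/IRQ, so
	 * completion handling on all cores contended on the same lock.
	 */
	struct mlx5_cq_table {
		spinlock_t		lock;	/* protects the radix tree below */
		struct radix_tree_root	tree;	/* maps cqn -> struct mlx5_core_cq */
	};

	struct mlx5_eq {
		/* ... other EQ state (doorbell, EQE buffer, IRQ, ...) ... */
		struct mlx5_cq_table	cq_table;	/* after: one table per EQ/IRQ */
	};

Since each completion EQ is bound to one IRQ (and typically one core), moving the table into the EQ turns a device-wide lock into a per-core one, which is the fix described above.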
Diffstat (limited to 'include/linux/mlx5/cq.h')
-rw-r--r--	include/linux/mlx5/cq.h	|  14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 48c181a2acc9..445ad194e0fe 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,6 +60,7 @@ struct mlx5_core_cq {
 	} tasklet_ctx;
 	int			reset_notify_added;
 	struct list_head	reset_notify;
+	struct mlx5_eq		*eq;
 };
@@ -171,8 +172,17 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
 }
 
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
+{
+	refcount_inc(&cq->refcount);
+}
+
+static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
+{
+	if (refcount_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+}
+
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			u32 *in, int inlen);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
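The new mlx5_cq_hold()/mlx5_cq_put() helpers replace the old device-wide mlx5_init_cq_table()/mlx5_cleanup_cq_table() machinery. A hedged sketch of how an EQ completion path might use them, assuming the per-EQ table layout sketched earlier (sketch_eq_cq_get() and the cq_table field are illustrative; only the two helpers come from this diff):

	/* Sketch only: resolve a CQN to a referenced CQ in the per-EQ table. */
	static struct mlx5_core_cq *sketch_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
	{
		struct mlx5_cq_table *table = &eq->cq_table;
		struct mlx5_core_cq *cq;

		spin_lock(&table->lock);	/* per-EQ lock, not device-wide */
		cq = radix_tree_lookup(&table->tree, cqn);
		if (cq)
			mlx5_cq_hold(cq);	/* pin the CQ while the IRQ uses it */
		spin_unlock(&table->lock);

		return cq;			/* caller does mlx5_cq_put() when done */
	}

The final mlx5_cq_put() fires complete(&cq->free), so a destroyer that waits on cq->free cannot tear the CQ down while an interrupt handler still holds a reference.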