author		Leon Romanovsky		2020-09-07 15:09:17 +0300
committer	Jason Gunthorpe		2020-09-09 14:14:28 -0300
commit		7e3c66c9a989d5b53387ceebc88b9e4a9b1d6434 (patch)
tree		96656531f93acff8cecbff6c083c2e8c3d374f09 /drivers/infiniband
parent		119181d1d4327d3259ab25aa0ea3d3bc364afcdc (diff)
RDMA/core: Delete function indirection for alloc/free kernel CQ
The ib_alloc_cq*() and ib_free_cq*() functions are kernel-only verbs for
managing CQs and don't need an extra level of indirection just to call the
same functions with a constant NULL udata parameter.
Link: https://lore.kernel.org/r/20200907120921.476363-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
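For reference, a minimal sketch (not part of this patch) of how an in-kernel ULP allocates and frees a CQ through these verbs after the rename; the ULP function names below are hypothetical, and ib_alloc_cq() is the existing convenience wrapper that supplies the module name to __ib_alloc_cq():

	#include <rdma/ib_verbs.h>

	/* Hypothetical ULP setup/teardown using the kernel-only CQ verbs. */
	static struct ib_cq *my_ulp_create_cq(struct ib_device *dev, void *priv)
	{
		/* 128 CQEs on completion vector 0, polled from softirq context. */
		return ib_alloc_cq(dev, priv, 128, 0, IB_POLL_SOFTIRQ);
	}

	static void my_ulp_destroy_cq(struct ib_cq *cq)
	{
		/* Frees the kernel CQ directly; no udata argument is involved. */
		ib_free_cq(cq);
	}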
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/cq.c	27
1 file changed, 12 insertions, 15 deletions
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 513825e424bf..ab556407803c 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 }
 
 /**
- * __ib_alloc_cq_user - allocate a completion queue
+ * __ib_alloc_cq allocate a completion queue
  * @dev: device to allocate the CQ for
  * @private: driver private data, accessible from cq->cq_context
  * @nr_cqe: number of CQEs to allocate
  * @comp_vector: HCA completion vectors for this CQ
  * @poll_ctx: context to poll the CQ from.
  * @caller: module owner name.
- * @udata: Valid user data or NULL for kernel object
  *
  * This is the proper interface to allocate a CQ for in-kernel users. A
  * CQ allocated with this interface will automatically be polled from the
  * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
  * to use this CQ abstraction.
  */
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
-				 int nr_cqe, int comp_vector,
-				 enum ib_poll_context poll_ctx,
-				 const char *caller, struct ib_udata *udata)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+			    int comp_vector, enum ib_poll_context poll_ctx,
+			    const char *caller)
 {
 	struct ib_cq_init_attr cq_attr = {
 		.cqe		= nr_cqe,
@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 out_destroy_cq:
 	rdma_dim_destroy(cq);
 	rdma_restrack_del(&cq->res);
-	cq->device->ops.destroy_cq(cq, udata);
+	cq->device->ops.destroy_cq(cq, NULL);
 out_free_wc:
 	kfree(cq->wc);
 out_free_cq:
@@ -285,7 +283,7 @@ out_free_cq:
 	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(__ib_alloc_cq_user);
+EXPORT_SYMBOL(__ib_alloc_cq);
 
 /**
  * __ib_alloc_cq_any - allocate a completion queue
@@ -310,17 +308,16 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
 			atomic_inc_return(&counter) %
 			min_t(int, dev->num_comp_vectors, num_online_cpus());
 
-	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
-				  caller, NULL);
+	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+			     caller);
 }
 EXPORT_SYMBOL(__ib_alloc_cq_any);
 
 /**
- * ib_free_cq_user - free a completion queue
+ * ib_free_cq - free a completion queue
  * @cq: completion queue to free.
- * @udata: User data or NULL for kernel object
  */
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+void ib_free_cq(struct ib_cq *cq)
 {
 	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
 		return;
@@ -344,11 +341,11 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 	rdma_dim_destroy(cq);
 	trace_cq_free(cq);
 	rdma_restrack_del(&cq->res);
-	cq->device->ops.destroy_cq(cq, udata);
+	cq->device->ops.destroy_cq(cq, NULL);
 	kfree(cq->wc);
 	kfree(cq);
 }
-EXPORT_SYMBOL(ib_free_cq_user);
+EXPORT_SYMBOL(ib_free_cq);
 
 void ib_cq_pool_init(struct ib_device *dev)
 {
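As the kernel-doc in the hunk above notes, consumers of this CQ abstraction post work requests with wr->wr_cqe rather than wr->wr_id. A hedged sketch of that pattern (the structure and function names are illustrative and not part of this patch):

	#include <rdma/ib_verbs.h>

	struct my_ulp_rx {
		struct ib_cqe cqe;	/* embeds the completion callback */
		/* ... per-request state ... */
	};

	static void my_ulp_rx_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct my_ulp_rx *rx = container_of(wc->wr_cqe, struct my_ulp_rx, cqe);

		if (wc->status != IB_WC_SUCCESS)
			return;
		/* process the received data attached to 'rx' ... */
	}

	static int my_ulp_post_recv(struct ib_qp *qp, struct my_ulp_rx *rx,
				    struct ib_sge *sge)
	{
		struct ib_recv_wr wr = {
			.wr_cqe  = &rx->cqe,	/* instead of wr_id */
			.sg_list = sge,
			.num_sge = 1,
		};

		rx->cqe.done = my_ulp_rx_done;
		return ib_post_recv(qp, &wr, NULL);
	}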