author      Steve Wise      2011-06-01 17:49:14 +0000
committer   Roland Dreier   2011-06-17 11:52:45 -0700
commit      2ff7d09a1b0f20f2d9c1bde0e003d4e384de2313 (patch)
tree        9eb1621a6806950d89496c5dde9ff36b6ae90b71
parent      2c53b436a30867eb6b47dd7bab23ba638d1fb0d2 (diff)
RDMA/cxgb4: Don't exceed hw IQ depth limit for user CQs
Memory allocated for user CQs gets rounded up to the next page boundary. After rounding, we recalculate the resulting IQ depth, and we need to make sure we don't exceed the HW limits.

This bug can result in a much smaller CQ being allocated than was expected if the HW size field is exceeded, resulting in CQ overflow failures.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--   drivers/infiniband/hw/cxgb4/cq.c   |   4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fcd..1720dc790d13 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
 		hwentries = memsize / sizeof *chp->cq.queue;
+		while (hwentries > T4_MAX_IQ_SIZE) {
+			memsize -= PAGE_SIZE;
+			hwentries = memsize / sizeof *chp->cq.queue;
+		}
 	}
 	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
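
For illustration only, below is a minimal, self-contained userspace sketch of the sizing logic in the hunk above. It is not the driver code itself: the CQE size (64 bytes) and the T4_MAX_IQ_SIZE value (65520 entries) are assumed here purely so the example compiles and runs; the real constants live in the cxgb4 driver headers.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define CQE_SIZE        64UL     /* assumed size of one CQ entry (sizeof *chp->cq.queue) */
#define T4_MAX_IQ_SIZE  65520UL  /* assumed hardware ingress-queue depth limit */

/* Round x up to the next multiple of y, same effect as the kernel's roundup(). */
static unsigned long round_up_to(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	/* Example starting size that overshoots the HW limit once page-rounded. */
	unsigned long memsize = 5 * 1000 * 1000UL;
	unsigned long hwentries;

	memsize = round_up_to(memsize, PAGE_SIZE);
	hwentries = memsize / CQE_SIZE;

	/* The fix: shrink one page at a time until the depth fits the HW limit. */
	while (hwentries > T4_MAX_IQ_SIZE) {
		memsize -= PAGE_SIZE;
		hwentries = memsize / CQE_SIZE;
	}

	printf("memsize=%lu bytes, hwentries=%lu (limit %lu)\n",
	       memsize, hwentries, T4_MAX_IQ_SIZE);
	return 0;
}

Without the while loop, hwentries would stay above the limit after page rounding, the HW size field would be exceeded, and the queue would end up much smaller than requested, which is the CQ overflow failure mode the commit message describes.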