author     Tony Battersby        2023-01-26 13:51:17 -0800
committer  Andrew Morton         2023-05-06 10:33:36 -0700
commit     790233528d338f1467662761cf1e871086483ab8 (patch)
tree       fefd23193491ecd4e740210f1e8ae9d693406a02
parent     08cc96c894848bcd1d15a79b15c56a8bb4f07ff5 (diff)
dmapool: cleanup integer types
To represent the size of a single allocation, dmapool currently uses
'unsigned int' in some places and 'size_t' in other places.  Standardize
on 'unsigned int' to reduce overhead, but use 'size_t' when counting all
the blocks in the entire pool.

Link: https://lkml.kernel.org/r/20230126215125.4069751-5-kbusch@meta.com
Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup")
Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
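The pools_show() hunk below is where the mixed widths bite: the pool-wide
block total is a product of a page count and blocks-per-page, and that
product can exceed UINT_MAX even though each factor fits in 'unsigned int'.
A minimal userspace sketch of that arithmetic, with hypothetical pool
dimensions (illustrative only, not kernel code):

/*
 * Sketch of the overflow that the pools_show() cast avoids.
 * All values here are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int allocation = 4096;	/* bytes per dma_page */
	unsigned int size = 16;		/* bytes per block */
	unsigned int pages = 20000000;	/* hypothetically huge pool */

	/* 32-bit product wraps: 20000000 * 256 exceeds UINT_MAX */
	unsigned int wrapped = pages * (allocation / size);

	/* widening one operand first keeps the full count, as the patch
	 * does with "(size_t) pages * (pool->allocation / pool->size)" */
	size_t blocks = (size_t)pages * (allocation / size);

	printf("unsigned int total: %u\n", wrapped);
	printf("size_t total:       %zu\n", blocks);
	return 0;
}

On a 64-bit build the widened product holds the full count, which is why the
patch prints the total with %zu and casts 'pages' before multiplying.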
-rw-r--r--  mm/dmapool.c  19
1 file changed, 11 insertions, 8 deletions
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 20616b760bb9..ee993bb59fc2 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -43,10 +43,10 @@
struct dma_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
- size_t size;
struct device *dev;
- size_t allocation;
- size_t boundary;
+ unsigned int size;
+ unsigned int allocation;
+ unsigned int boundary;
char name[32];
struct list_head pools;
};
@@ -73,7 +73,7 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
mutex_lock(&pools_lock);
list_for_each_entry(pool, &dev->dma_pools, pools) {
unsigned pages = 0;
- unsigned blocks = 0;
+ size_t blocks = 0;

spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
@@ -83,9 +83,10 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
spin_unlock_irq(&pool->lock);

/* per-pool info, no real statistics yet */
- size += sysfs_emit_at(buf, size, "%-16s %4u %4zu %4zu %2u\n",
+ size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
pool->name, blocks,
- pages * (pool->allocation / pool->size),
+ (size_t) pages *
+ (pool->allocation / pool->size),
pool->size, pages);
}
mutex_unlock(&pools_lock);
@@ -133,7 +134,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
else if (align & (align - 1))
return NULL;

- if (size == 0)
+ if (size == 0 || size > INT_MAX)
return NULL;
else if (size < 4)
size = 4;
@@ -146,6 +147,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
else if ((boundary < size) || (boundary & (boundary - 1)))
return NULL;

+ boundary = min(boundary, allocation);
+
retval = kmalloc(sizeof(*retval), GFP_KERNEL);
if (!retval)
return retval;
@@ -306,7 +309,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
{
unsigned long flags;
struct dma_page *page;
- size_t offset;
+ unsigned int offset;
void *retval;

might_alloc(mem_flags);
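With the pool fields narrowed to 'unsigned int', dma_pool_create() now
refuses oversized requests up front instead of truncating them when they
are stored.  A standalone sketch of that new check; pool_size_ok() is a
hypothetical stand-in, not a kernel helper:

/*
 * Mirrors the patched validation:
 *     if (size == 0 || size > INT_MAX)
 *             return NULL;
 */
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool pool_size_ok(size_t size)
{
	return size != 0 && size <= INT_MAX;
}

int main(void)
{
	printf("size 64:          %s\n", pool_size_ok(64) ? "accepted" : "rejected");
	printf("size 0:           %s\n", pool_size_ok(0) ? "accepted" : "rejected");
	printf("size INT_MAX + 1: %s\n",
	       pool_size_ok((size_t)INT_MAX + 1) ? "accepted" : "rejected");
	return 0;
}

The dma_pool_create() parameter itself stays size_t, so callers compile
unchanged; only out-of-range sizes are now rejected at creation time.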