Diffstat (limited to 'mm/slab.c')
-rw-r--r--   mm/slab.c | 28
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2c655532f5ef..909975f6e090 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -109,6 +109,7 @@
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
+#include <linux/reciprocal_div.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -386,6 +387,7 @@ struct kmem_cache {
unsigned int shared;
unsigned int buffer_size;
+ u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];
@@ -627,10 +629,17 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
return slab->s_mem + cache->buffer_size * idx;
}
-static inline unsigned int obj_to_index(struct kmem_cache *cache,
- struct slab *slab, void *obj)
+/*
+ * We want to avoid an expensive divide : (offset / cache->buffer_size)
+ * Using the fact that buffer_size is a constant for a particular cache,
+ * we can replace (offset / cache->buffer_size) by
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct slab *slab, void *obj)
{
- return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
+ u32 offset = (obj - slab->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
/*
@@ -1427,6 +1436,8 @@ void __init kmem_cache_init(void)
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
cache_line_size());
+ cache_cache.reciprocal_buffer_size =
+ reciprocal_value(cache_cache.buffer_size);
for (order = 0; order < MAX_ORDER; order++) {
cache_estimate(order, cache_cache.buffer_size,
@@ -2313,6 +2324,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & SLAB_CACHE_DMA)
cachep->gfpflags |= GFP_DMA;
cachep->buffer_size = size;
+ cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -3252,6 +3264,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
struct zone **z;
void *obj = NULL;
int nid;
+ gfp_t local_flags = (flags & GFP_LEVEL_MASK);
retry:
/*
@@ -3261,7 +3274,7 @@ retry:
for (z = zonelist->zones; *z && !obj; z++) {
nid = zone_to_nid(*z);
- if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) &&
+ if (cpuset_zone_allowed_hardwall(*z, flags) &&
cache->nodelists[nid] &&
cache->nodelists[nid]->free_objects)
obj = ____cache_alloc_node(cache,
@@ -3275,7 +3288,12 @@ retry:
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
+ if (local_flags & __GFP_WAIT)
+ local_irq_enable();
+ kmem_flagcheck(cache, flags);
obj = kmem_getpages(cache, flags, -1);
+ if (local_flags & __GFP_WAIT)
+ local_irq_disable();
if (obj) {
/*
* Insert into the appropriate per node queues
@@ -3535,7 +3553,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
*
* Currently only used for dentry validation.
*/
-int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
{
unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
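
The obj_to_index() change above replaces a per-object divide with reciprocal_divide() on a value precomputed by reciprocal_value() and stored next to buffer_size in struct kmem_cache. The standalone userspace sketch below illustrates the idea behind the 2006-era <linux/reciprocal_div.h> helpers (precompute roughly ceil(2^32 / B) once, then turn each A / B into a multiply and a shift); it is an illustration under those assumptions, not a copy of the kernel header, and the buffer_size of 192 is an arbitrary example value.

/*
 * Userspace sketch of the reciprocal-divide trick used by obj_to_index().
 * Not the kernel implementation; it only demonstrates the multiply+shift
 * replacement for a divide by a per-cache constant.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_value(uint32_t b)
{
	/* roughly ceil(2^32 / b); b == 1 would truncate, slab sizes are larger */
	uint64_t val = (1ULL << 32) + (b - 1);
	return (uint32_t)(val / b);
}

static inline uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	/* high 32 bits of the 32x32->64 bit product approximate a / b */
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t buffer_size = 192;	/* stand-in for cache->buffer_size */
	uint32_t recip = reciprocal_value(buffer_size);
	uint32_t offset;

	/*
	 * Offsets within a slab are small (a few pages at most), so the
	 * approximation matches the exact divide over the whole range
	 * the slab code cares about.
	 */
	for (offset = 0; offset < 16 * 4096; offset++)
		assert(reciprocal_divide(offset, recip) == offset / buffer_size);

	printf("buffer_size=%u reciprocal=%u\n", buffer_size, recip);
	return 0;
}

Because reciprocal_buffer_size is computed once in kmem_cache_init() and kmem_cache_create() (as in the hunks above), the object-index calculation on the free paths no longer pays for an integer division.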