author		Linus Torvalds	2008-04-28 14:08:56 -0700
committer	Linus Torvalds	2008-04-28 14:08:56 -0700
commit		e97e386b126c2d60b8da61ce1e4964b41b3d1514 (patch)
tree		7e04b7f735004330777200c6742568fc130ff893 /include
parent		d9dedc13851f9cbd568fbc631a17b0be83404957 (diff)
parent		c124f5b54f879e5870befcc076addbd5d614663f (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: pack objects denser
  slub: Calculate min_objects based on number of processors.
  slub: Drop DEFAULT_MAX_ORDER / DEFAULT_MIN_OBJECTS
  slub: Simplify any_slab_object checks
  slub: Make the order configurable for each slab cache
  slub: Drop fallback to page allocator method
  slub: Fallback to minimal order during slab page allocation
  slub: Update statistics handling for variable order slabs
  slub: Add kmem_cache_order_objects struct
  slub: for_each_object must be passed the number of objects in a slab
  slub: Store max number of objects in the page struct.
  slub: Dump list of objects not freed on kmem_cache_close()
  slub: free_list() cleanup
  slub: improve kmem_cache_destroy() error message
  slob: fix bug - when slob allocates "struct kmem_cache", it does not force alignment.
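One of the merged patches derives the default min_objects from the number of processors when no value is set on the command line. Below is a rough, self-contained userspace C sketch of that kind of scaling heuristic; the helper names and the exact formula are assumptions for illustration, not code quoted from the patch (the real logic lives in calculate_order() in mm/slub.c).

/*
 * Userspace sketch only: scale the minimum number of objects per slab
 * with the logarithm of the CPU count, so larger machines get larger
 * slab pages and fewer partial-list operations.  The constants and the
 * helper names are assumptions, not kernel code.
 */
#include <stdio.h>

static int fls_local(unsigned int v)	/* highest set bit, 1-based */
{
	int bit = 0;

	while (v) {
		bit++;
		v >>= 1;
	}
	return bit;
}

static int default_min_objects(unsigned int nr_cpus)
{
	return 4 * (fls_local(nr_cpus) + 1);
}

int main(void)
{
	printf("1 cpu   -> min_objects %d\n", default_min_objects(1));
	printf("4 cpus  -> min_objects %d\n", default_min_objects(4));
	printf("64 cpus -> min_objects %d\n", default_min_objects(64));
	return 0;
}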
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm_types.h	5
-rw-r--r--	include/linux/slub_def.h	16
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 29adaa781cb6..e2bae8dde35a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -42,7 +42,10 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-		unsigned int inuse;	/* SLUB: Nr of objects */
+		struct {		/* SLUB */
+			u16 inuse;
+			u16 objects;
+		};
 	};
 	union {
 		struct {
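The hunk above replaces the single per-slab counter with two 16-bit fields, so SLUB can track both the number of objects in use and the total number of objects a slab holds without growing struct page. A minimal userspace sketch of that layout follows; uint16_t stands in for the kernel's u16 and the struct name is invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the anonymous struct added to struct page above. */
struct slub_page_counters {
	uint16_t inuse;		/* objects currently allocated in the slab */
	uint16_t objects;	/* total objects the slab can hold */
};

int main(void)
{
	/*
	 * Two u16 counters occupy the same 32 bits as the old
	 * "unsigned int inuse" field, so struct page stays the same size
	 * (assuming a 32-bit unsigned int, as on the usual kernel targets).
	 */
	assert(sizeof(struct slub_page_counters) == sizeof(unsigned int));

	struct slub_page_counters c = { .inuse = 3, .objects = 32 };
	printf("%u of %u objects in use\n", c.inuse, c.objects);
	return 0;
}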
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 79d59c937fac..71e43a12ebbb 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -29,6 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -48,11 +49,21 @@ struct kmem_cache_node {
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
 	struct list_head full;
 #endif
 };
 
 /*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+	unsigned long x;
+};
+
+/*
  * Slab cache management.
  */
 struct kmem_cache {
@@ -61,7 +72,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int order;		/* Current preferred allocation order */
+	struct kmem_cache_order_objects oo;
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -70,7 +81,8 @@ struct kmem_cache {
 	struct kmem_cache_node local_node;
 
 	/* Allocation and freeing of slabs */
-	int objects;		/* Number of objects in slab */
+	struct kmem_cache_order_objects max;
+	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
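The kmem_cache_order_objects comment in the slub_def.h hunk describes a single word that encodes both the page allocation order and how many objects a slab of that order holds, so the pair can be read or updated atomically. Below is a userspace sketch of one plausible encoding; the helper names and the 16-bit split are assumptions chosen to match the u16 objects field added to struct page, not code copied from mm/slub.c.

#include <stdio.h>

struct kmem_cache_order_objects {
	unsigned long x;
};

#define OO_SHIFT	16			/* assumed split point */
#define OO_MASK		((1UL << OO_SHIFT) - 1)

/* Pack an order and an object count into one word. */
static struct kmem_cache_order_objects oo_make(unsigned int order,
					       unsigned int objects)
{
	struct kmem_cache_order_objects oo = {
		((unsigned long)order << OO_SHIFT) | objects
	};
	return oo;
}

static unsigned int oo_order(struct kmem_cache_order_objects oo)
{
	return oo.x >> OO_SHIFT;
}

static unsigned int oo_objects(struct kmem_cache_order_objects oo)
{
	return oo.x & OO_MASK;
}

int main(void)
{
	/* e.g. a preferred order-1 slab (two pages) holding 64 objects,
	 * and an order-0 fallback slab holding 32 objects. */
	struct kmem_cache_order_objects oo  = oo_make(1, 64);
	struct kmem_cache_order_objects min = oo_make(0, 32);

	printf("preferred: order=%u objects=%u\n", oo_order(oo), oo_objects(oo));
	printf("fallback:  order=%u objects=%u\n", oo_order(min), oo_objects(min));
	return 0;
}

Keeping both values in one unsigned long is what lets the cache carry separate preferred (oo/max) and minimal (min) configurations cheaply, which in turn supports the per-cache configurable order and the ORDER_FALLBACK statistic introduced by this series.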