From 1f0ce8b3dd667dca720a47869f8110c298f0e5b8 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Wed, 19 May 2010 12:01:42 +0100
Subject: mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slab_def.h>

Acked-by: Herbert Xu
Signed-off-by: David Woodhouse
Signed-off-by: Pekka Enberg
---
 include/linux/slab_def.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
(limited to 'include/linux')

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index ca6b2b317991..1812dac8c496 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,30 @@
 #include
 #include
 
+#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
+ */
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
 /*
  * struct kmem_cache
  *
--
cgit v1.2.3

From bac49ce42a33f53beb7cf04e9a0600879d6265ca Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Wed, 19 May 2010 12:01:43 +0100
Subject: mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slob_def.h>

Acked-by: Herbert Xu
Signed-off-by: David Woodhouse
Signed-off-by: Pekka Enberg
---
 include/linux/slob_def.h | 8 ++++++++
 mm/slob.c                | 8 --------
 2 files changed, 8 insertions(+), 8 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d006..62667f72c2ef 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,6 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+#endif
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,

diff --git a/mm/slob.c b/mm/slob.c
index 837ebd64cc34..23631e2bb57a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -467,14 +467,6 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
-#endif
-
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
--
cgit v1.2.3

From 4581ced379736fd76432c754f999d26deb83fbb7 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Wed, 19 May 2010 12:02:14 +0100
Subject: mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>

Acked-by: Herbert Xu
Signed-off-by: David Woodhouse
Signed-off-by: Pekka Enberg
---
 include/linux/slub_def.h | 8 ++++++++
 mm/slub.c                | 8 --------
 2 files changed, 8 insertions(+), 8 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d4175bac..55695c8d2f8a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"

diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe71ea2..c874c3efac29 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,14 +157,6 @@
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT 16
 #define OO_MASK ((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
--
cgit v1.2.3

From ec49fdbd354cb133340e25ff84d88284bb17e99a Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Wed, 19 May 2010 12:02:17 +0100
Subject: crypto: Use ARCH_KMALLOC_MINALIGN for CRYPTO_MINALIGN now that it's exposed

Acked-by: Herbert Xu
Signed-off-by: David Woodhouse
Signed-off-by: Pekka Enberg
---
 include/linux/crypto.h | 6 ------
 1 file changed, 6 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 24d2e30f1b46..a6a7a1c83f54 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -99,13 +99,7 @@
  * as arm where pointers are 32-bit aligned but there are data types such as
  * u64 which require 64-bit alignment.
  */
-#if defined(ARCH_KMALLOC_MINALIGN)
 #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
-#elif defined(ARCH_SLAB_MINALIGN)
-#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
-#else
-#define CRYPTO_MINALIGN __alignof__(unsigned long long)
-#endif
 
 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
--
cgit v1.2.3
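
Illustration, not taken from the patches above: with the MINALIGN definitions now exposed through the slab headers, CRYPTO_MINALIGN expands directly to ARCH_KMALLOC_MINALIGN, so anything marked CRYPTO_MINALIGN_ATTR gets the same minimum alignment that kmalloc() itself guarantees. A minimal sketch of a typical consumer follows; the structure name and fields are hypothetical.

/*
 * Hypothetical context structure: CRYPTO_MINALIGN_ATTR aligns the type
 * to CRYPTO_MINALIGN (i.e. ARCH_KMALLOC_MINALIGN after the last patch),
 * so an embedded or kmalloc()ed instance starts on a boundary that is
 * safe for types such as u64 even on 32-bit-aligned architectures.
 */
#include <linux/crypto.h>
#include <linux/types.h>

struct example_cipher_ctx {
	u8  key[32];	/* hypothetical key material */
	u64 counter;	/* u64 requires 64-bit alignment on e.g. arm */
} CRYPTO_MINALIGN_ATTR;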