author	Christoph Lameter <cl@linux.com>	2012-11-28 16:23:16 +0000
committer	Pekka Enberg <penberg@kernel.org>	2012-12-11 12:14:28 +0200
commit	4590685546a374fb0f60682ce0e3a6fd48911d46 (patch)
tree	1287ce1e1633067f8bf2cf9f93f1d6fe8a1f8908 /mm/slub.c
parent	2f9baa9fcf8d0a204ca129a671d6086cc100faab (diff)
mm/sl[aou]b: Common alignment code
Extract the code to do object alignment from the allocators. Do the
alignment calculations in slab_common so that the __kmem_cache_create
functions of the allocators do not have to deal with alignment.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
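For context, the helper being centralized computes alignment as shown below. This is a minimal user-space sketch of the same logic, not the kernel code: cache_line_size() and ARCH_SLAB_MINALIGN are replaced by assumed stand-in constants (CACHE_LINE, MINALIGN), and the SLAB_HWCACHE_ALIGN bit value is illustrative, so the program compiles and runs standalone.

#include <stdio.h>

#define CACHE_LINE 64UL			/* assumed L1 cache line size */
#define MINALIGN   sizeof(void *)	/* assumed arch minimum alignment */
#define SLAB_HWCACHE_ALIGN (1UL << 13)	/* illustrative flag bit */

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static unsigned long calculate_alignment(unsigned long flags,
					 unsigned long align,
					 unsigned long size)
{
	/*
	 * For SLAB_HWCACHE_ALIGN, shrink the alignment while the object
	 * still fits in half of it, so small objects are not padded out
	 * to a full cache line.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = CACHE_LINE;

		while (size <= ralign / 2)
			ralign /= 2;
		align = MAX(align, ralign);
	}

	if (align < MINALIGN)
		align = MINALIGN;

	/* The result is always at least pointer-aligned. */
	return ALIGN_UP(align, sizeof(void *));
}

int main(void)
{
	/* A 24-byte object with HWCACHE_ALIGN ends up 32-byte aligned. */
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 8, 24));
	return 0;
}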
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	38
1 file changed, 1 insertion(+), 37 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c82453ac812..9640edd2cc7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved)
 	return -ENOSYS;
 }
 
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
-		unsigned long align, unsigned long size)
-{
-	/*
-	 * If the user wants hardware cache aligned objects then follow that
-	 * suggestion if the object is sufficiently large.
-	 *
-	 * The hardware cache alignment cannot override the specified
-	 * alignment though. If that is greater then use it.
-	 */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-		align = max(align, ralign);
-	}
-
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
-
-	return ALIGN(align, sizeof(void *));
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
@@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->object_size;
-	unsigned long align = s->align;
 	int order;
 
 	/*
@@ -2991,19 +2964,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 #endif
 
 	/*
-	 * Determine the alignment based on various parameters that the
-	 * user specified and the dynamic determination of cache line size
-	 * on bootup.
-	 */
-	align = calculate_alignment(flags, align, s->object_size);
-	s->align = align;
-
-	/*
 	 * SLUB stores one object immediately after another beginning from
 	 * offset 0. In order to align the objects we have to simply size
 	 * each object to conform to the alignment.
 	 */
-	size = ALIGN(size, align);
+	size = ALIGN(size, s->align);
 	s->size = size;
 	if (forced_order >= 0)
 		order = forced_order;
@@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	s->max = s->oo;
 
 	return !!oo_objects(s->oo);
-
 }
 
 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
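The third hunk relies on the invariant stated in the retained comment: because SLUB packs objects back to back starting at offset 0, rounding the object size up to the alignment is enough to keep every object aligned. A minimal sketch of that invariant follows; the ALIGN macro mirrors the kernel's rounding for a power-of-two alignment, and the concrete values are illustrative.

#include <assert.h>
#include <stddef.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t align = 32;		/* illustrative s->align */
	size_t size = ALIGN(27, align);	/* 27-byte object -> 32 bytes */
	size_t i;

	/* Objects placed at offsets 0, size, 2*size, ... stay aligned. */
	for (i = 0; i < 8; i++)
		assert((i * size) % align == 0);
	return 0;
}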