From 837d678dc264c797c16f81cf56f615f7544891c1 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 16 Aug 2012 00:02:40 +0900 Subject: slub: remove one code path and reduce lock contention in __slab_free() When we try to free object, there is some of case that we need to take a node lock. This is the necessary step for preventing a race. After taking a lock, then we try to cmpxchg_double_slab(). But, there is a possible scenario that cmpxchg_double_slab() is failed with taking a lock. Following example explains it. CPU A CPU B need lock ... need lock ... lock!! lock..but spin free success spin... unlock lock!! free fail In this case, retry with taking a lock is occured in CPU A. I think that in this case for CPU A, "release a lock first, and re-take a lock if necessary" is preferable way. There are two reasons for this. First, this makes __slab_free()'s logic somehow simple. With this patch, 'was_frozen = 1' is "always" handled without taking a lock. So we can remove one code path. Second, it may reduce lock contention. When we do retrying, status of slab is already changed, so we don't need a lock anymore in almost every case. "release a lock first, and re-take a lock if necessary" policy is helpful to this. Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index a0d698467f7..e7aec2001ae 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2459,7 +2459,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page, void *prior; void **object = (void *)x; int was_frozen; - int inuse; struct page new; unsigned long counters; struct kmem_cache_node *n = NULL; @@ -2472,13 +2471,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page, return; do { + if (unlikely(n)) { + spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } prior = page->freelist; counters = page->counters; set_freepointer(s, object, prior); new.counters = counters; was_frozen = new.frozen; new.inuse--; - if ((!new.inuse || !prior) && !was_frozen && !n) { + if ((!new.inuse || !prior) && !was_frozen) { if (!kmem_cache_debug(s) && !prior) @@ -2503,7 +2506,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page, } } - inuse = new.inuse; } while (!cmpxchg_double_slab(s, page, prior, counters, @@ -2529,25 +2531,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page, return; } + if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) + goto slab_empty; + /* - * was_frozen may have been set after we acquired the list_lock in - * an earlier loop. So we need to check it here again. + * Objects left in the slab. If it was not on the partial list before + * then add it. */ - if (was_frozen) - stat(s, FREE_FROZEN); - else { - if (unlikely(!inuse && n->nr_partial > s->min_partial)) - goto slab_empty; - - /* - * Objects left in the slab. If it was not on the partial list before - * then add it. 
- */ - if (unlikely(!prior)) { - remove_full(s, page); - add_partial(n, page, DEACTIVATE_TO_TAIL); - stat(s, FREE_ADD_PARTIAL); - } + if (kmem_cache_debug(s) && unlikely(!prior)) { + remove_full(s, page); + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); } spin_unlock_irqrestore(&n->list_lock, flags); return; -- cgit v1.2.3 From b7454ad3cfc3043c5264729a6204f049fe1f34b1 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 19 Oct 2012 18:20:25 +0400 Subject: mm/sl[au]b: Move slabinfo processing to slab_common.c This patch moves all the common machinery to slabinfo processing to slab_common.c. We can do better by noticing that the output is heavily common, and having the allocators to just provide finished information about this. But after this first step, this can be done easier. Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 51 +++++---------------------------------------------- 1 file changed, 5 insertions(+), 46 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index a0d698467f7..77a0c8a9fc7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5405,7 +5405,7 @@ __initcall(slab_sysfs_init); * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO -static void print_slabinfo_header(struct seq_file *m) +void print_slabinfo_header(struct seq_file *m) { seq_puts(m, "slabinfo - version: 2.1\n"); seq_puts(m, "# name " @@ -5415,28 +5415,7 @@ static void print_slabinfo_header(struct seq_file *m) seq_putc(m, '\n'); } -static void *s_start(struct seq_file *m, loff_t *pos) -{ - loff_t n = *pos; - - mutex_lock(&slab_mutex); - if (!n) - print_slabinfo_header(m); - - return seq_list_start(&slab_caches, *pos); -} - -static void *s_next(struct seq_file *m, void *p, loff_t *pos) -{ - return seq_list_next(p, &slab_caches, pos); -} - -static void s_stop(struct seq_file *m, void *p) -{ - mutex_unlock(&slab_mutex); -} - -static int s_show(struct seq_file *m, void *p) +int slabinfo_show(struct seq_file *m, void *p) { unsigned long nr_partials = 0; unsigned long nr_slabs = 0; @@ -5472,29 +5451,9 @@ static int s_show(struct seq_file *m, void *p) return 0; } -static const struct seq_operations slabinfo_op = { - .start = s_start, - .next = s_next, - .stop = s_stop, - .show = s_show, -}; - -static int slabinfo_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slabinfo_op); -} - -static const struct file_operations proc_slabinfo_operations = { - .open = slabinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static int __init slab_proc_init(void) +ssize_t slabinfo_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) { - proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations); - return 0; + return -EIO; } -module_init(slab_proc_init); #endif /* CONFIG_SLABINFO */ -- cgit v1.2.3 From bcee6e2a13d580f6c21d748fcd7239ccc66cb4b8 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 19 Oct 2012 18:20:26 +0400 Subject: mm/sl[au]b: Move print_slabinfo_header to slab_common.c The header format is highly similar between slab and slub. The main difference lays in the fact that slab may optionally have statistics added here in case of CONFIG_SLAB_DEBUG, while the slub will stick them somewhere else. By making sure that information conditionally lives inside a globally-visible CONFIG_DEBUG_SLAB switch, we can move the header printing to a common location. 
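For reference, a sketch of what the consolidated header printer in mm/slab_common.c might look like once both allocators share it; the extra statistics columns emitted under CONFIG_DEBUG_SLAB are an assumption here and are not part of this mm/slub.c-limited diff:

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}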
Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 77a0c8a9fc7..6b5ee3472e1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5405,16 +5405,6 @@ __initcall(slab_sysfs_init); * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO -void print_slabinfo_header(struct seq_file *m) -{ - seq_puts(m, "slabinfo - version: 2.1\n"); - seq_puts(m, "# name " - " "); - seq_puts(m, " : tunables "); - seq_puts(m, " : slabdata "); - seq_putc(m, '\n'); -} - int slabinfo_show(struct seq_file *m, void *p) { unsigned long nr_partials = 0; -- cgit v1.2.3 From 0d7561c61d76690ed84bd1016acc0fcbff063205 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 19 Oct 2012 18:20:27 +0400 Subject: sl[au]b: Process slabinfo_show in common code With all the infrastructure in place, we can now have slabinfo_show done from slab_common.c. A cache-specific function is called to grab information about the cache itself, since that is still heavily dependent on the implementation. But with the values produced by it, all the printing and handling is done from common code. Signed-off-by: Glauber Costa CC: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 6b5ee3472e1..472e739278b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5405,18 +5405,14 @@ __initcall(slab_sysfs_init); * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO -int slabinfo_show(struct seq_file *m, void *p) +void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) { unsigned long nr_partials = 0; unsigned long nr_slabs = 0; - unsigned long nr_inuse = 0; unsigned long nr_objs = 0; unsigned long nr_free = 0; - struct kmem_cache *s; int node; - s = list_entry(p, struct kmem_cache, list); - for_each_online_node(node) { struct kmem_cache_node *n = get_node(s, node); @@ -5429,16 +5425,16 @@ int slabinfo_show(struct seq_file *m, void *p) nr_free += count_partial(n, count_free); } - nr_inuse = nr_objs - nr_free; + sinfo->active_objs = nr_objs - nr_free; + sinfo->num_objs = nr_objs; + sinfo->active_slabs = nr_slabs; + sinfo->num_slabs = nr_slabs; + sinfo->objects_per_slab = oo_objects(s->oo); + sinfo->cache_order = oo_order(s->oo); +} - seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, - nr_objs, s->size, oo_objects(s->oo), - (1 << oo_order(s->oo))); - seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0); - seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs, - 0UL); - seq_putc(m, '\n'); - return 0; +void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) +{ } ssize_t slabinfo_write(struct file *file, const char __user *buffer, -- cgit v1.2.3 From 1b4f59e356cc94929305bd107b7f38eec62715ad Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 22 Oct 2012 18:05:36 +0400 Subject: slub: Commonize slab_cache field in struct page Right now, slab and slub have fields in struct page to derive which cache a page belongs to, but they do it slightly differently. slab uses a field called slab_cache, that lives in the third double word. slub, uses a field called "slab", living outside of the doublewords area. Ideally, we could use the same field for this. Since slub heavily makes use of the doubleword region, there isn't really much room to move slub's slab_cache field around. 
Since slab does not have such strict placement restrictions, we can move it outside the doubleword area. The naming used by slab, "slab_cache", is less confusing, and it is preferred over slub's generic "slab". Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slub.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 16274b273c6..35483e0ab6b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1092,11 +1092,11 @@ static noinline struct kmem_cache_node *free_debug_processing( if (!check_object(s, page, object, SLUB_RED_ACTIVE)) goto out; - if (unlikely(s != page->slab)) { + if (unlikely(s != page->slab_cache)) { if (!PageSlab(page)) { slab_err(s, page, "Attempt to free object(0x%p) " "outside of slab", object); - } else if (!page->slab) { + } else if (!page->slab_cache) { printk(KERN_ERR "SLUB : no slab for object 0x%p.\n", object); @@ -1357,7 +1357,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) goto out; inc_slabs_node(s, page_to_nid(page), page->objects); - page->slab = s; + page->slab_cache = s; __SetPageSlab(page); if (page->pfmemalloc) SetPageSlabPfmemalloc(page); @@ -1424,7 +1424,7 @@ static void rcu_free_slab(struct rcu_head *h) else page = container_of((struct list_head *)h, struct page, lru); - __free_slab(page->slab, page); + __free_slab(page->slab_cache, page); } static void free_slab(struct kmem_cache *s, struct page *page) @@ -2617,9 +2617,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x) page = virt_to_head_page(x); - if (kmem_cache_debug(s) && page->slab != s) { + if (kmem_cache_debug(s) && page->slab_cache != s) { pr_err("kmem_cache_free: Wrong slab cache. %s but object" - " is from %s\n", page->slab->name, s->name); + " is from %s\n", page->slab_cache->name, s->name); WARN_ON_ONCE(1); return; } @@ -3418,7 +3418,7 @@ size_t ksize(const void *object) return PAGE_SIZE << compound_order(page); } - return slab_ksize(page->slab); + return slab_ksize(page->slab_cache); } EXPORT_SYMBOL(ksize); @@ -3443,8 +3443,8 @@ bool verify_mem_not_deleted(const void *x) } slab_lock(page); - if (on_freelist(page->slab, page, object)) { - object_err(page->slab, page, object, "Object is on free-list"); + if (on_freelist(page->slab_cache, page, object)) { + object_err(page->slab_cache, page, object, "Object is on free-list"); rv = false; } else { rv = true; @@ -3475,7 +3475,7 @@ void kfree(const void *x) __free_pages(page, compound_order(page)); return; } - slab_free(page->slab, page, object, _RET_IP_); + slab_free(page->slab_cache, page, object, _RET_IP_); } EXPORT_SYMBOL(kfree); @@ -3686,11 +3686,11 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) if (n) { list_for_each_entry(p, &n->partial, lru) - p->slab = s; + p->slab_cache = s; #ifdef CONFIG_SLUB_DEBUG list_for_each_entry(p, &n->full, lru) - p->slab = s; + p->slab_cache = s; #endif } } -- cgit v1.2.3 From 242860a47a75b933a79a30f6a40bf4858f4a3ecc Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Fri, 19 Oct 2012 09:33:12 -0300 Subject: mm/sl[aou]b: Move common kmem_cache_size() to slab.h This function is identically defined in all three allocators and it's trivial to move it to slab.h Since now it's static, inline, header-defined function this patch also drops the EXPORT_SYMBOL tag. 
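The shared definition is trivial; a sketch of the header-defined version in slab.h, with the body taken from the code removed below (its exact placement in the header is an assumption):

/*
 * Determine the size of a slab object.  Identical in slab, slob and slub,
 * so it can live as a static inline in the common header and the
 * per-allocator EXPORT_SYMBOL goes away.
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}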
Cc: Pekka Enberg Cc: Matt Mackall Acked-by: Christoph Lameter Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- mm/slub.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 35483e0ab6b..deee7c754a7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3121,15 +3121,6 @@ error: return -EINVAL; } -/* - * Determine the size of a slab object - */ -unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { -- cgit v1.2.3 From d8843922fba49e887874aa1f9e748d620c5092af Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Wed, 17 Oct 2012 15:36:51 +0400 Subject: slab: Ignore internal flags in cache creation Some flags are used internally by the allocators for management purposes. One example of that is the CFLGS_OFF_SLAB flag that slab uses to mark that the metadata for that cache is stored outside of the slab. No cache should ever pass those as a creation flags. We can just ignore this bit if it happens to be passed (such as when duplicating a cache in the kmem memcg patches). Because such flags can vary from allocator to allocator, we allow them to make their own decisions on that, defining SLAB_AVAILABLE_FLAGS with all flags that are valid at creation time. Allocators that doesn't have any specific flag requirement should define that to mean all flags. Common code will mask out all flags not belonging to that set. Acked-by: Christoph Lameter Acked-by: David Rientjes Signed-off-by: Glauber Costa Signed-off-by: Pekka Enberg --- mm/slub.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index deee7c754a7..b2ada3db422 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -112,9 +112,6 @@ * the fast path and disables lockless freelists. */ -#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_DEBUG_FREE) - static inline int kmem_cache_debug(struct kmem_cache *s) { #ifdef CONFIG_SLUB_DEBUG -- cgit v1.2.3 From 59a09917c95e5209135b4f1a87f1263d6ef40fdb Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:00 +0000 Subject: slub: Use correct cpu_slab on dead cpu Pass a kmem_cache_cpu pointer into unfreeze partials so that a different kmem_cache_cpu structure than the local one can be specified. Acked-by: David Rientjes Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index b2ada3db422..33576b0cfc4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1869,12 +1869,14 @@ redo: /* * Unfreeze all the cpu partial slabs. * - * This function must be called with interrupt disabled. + * This function must be called with interrupts disabled + * for the cpu using c (or some other guarantee must be there + * to guarantee no concurrent accesses). */ -static void unfreeze_partials(struct kmem_cache *s) +static void unfreeze_partials(struct kmem_cache *s, + struct kmem_cache_cpu *c) { struct kmem_cache_node *n = NULL, *n2 = NULL; - struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); struct page *page, *discard_page = NULL; while ((page = c->partial)) { @@ -1960,7 +1962,7 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) * set to the per node partial list. 
*/ local_irq_save(flags); - unfreeze_partials(s); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); local_irq_restore(flags); oldpage = NULL; pobjects = 0; @@ -2003,7 +2005,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) if (c->page) flush_slab(s, c); - unfreeze_partials(s); + unfreeze_partials(s, c); } } -- cgit v1.2.3 From 45530c4474d258b822e2639c786606d8257aad8b Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:07 +0000 Subject: mm, sl[au]b: create common functions for boot slab creation Use a special function to create kmalloc caches and use that function in SLAB and SLUB. Acked-by: Joonsoo Kim Reviewed-by: Glauber Costa Acked-by: David Rientjes Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 37 +++++-------------------------------- 1 file changed, 5 insertions(+), 32 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 33576b0cfc4..1be172c157c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3245,32 +3245,6 @@ static int __init setup_slub_nomerge(char *str) __setup("slub_nomerge", setup_slub_nomerge); -static struct kmem_cache *__init create_kmalloc_cache(const char *name, - int size, unsigned int flags) -{ - struct kmem_cache *s; - - s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - - s->name = name; - s->size = s->object_size = size; - s->align = ARCH_KMALLOC_MINALIGN; - - /* - * This function is called with IRQs disabled during early-boot on - * single CPU so there's no need to take slab_mutex here. - */ - if (kmem_cache_open(s, flags)) - goto panic; - - list_add(&s->list, &slab_caches); - return s; - -panic: - panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); - return NULL; -} - /* * Conversion table for small slabs sizes / 8 to the index in the * kmalloc array. This is necessary for slabs < 192 since we have non power @@ -3948,6 +3922,10 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) if (err) return err; + /* Mutex is not taken during early boot */ + if (slab_state <= UP) + return 0; + mutex_unlock(&slab_mutex); err = sysfs_slab_add(s); mutex_lock(&slab_mutex); @@ -5249,13 +5227,8 @@ static int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; - int unmergeable; - - if (slab_state < FULL) - /* Defer until later */ - return 0; + int unmergeable = slab_unmergeable(s); - unmergeable = slab_unmergeable(s); if (unmergeable) { /* * Slabcache can never be merged so we can use the name proper. -- cgit v1.2.3 From dffb4d605c23110e3ad54b8c9f244a8235c013c2 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:07 +0000 Subject: slub: Use statically allocated kmem_cache boot structure for bootstrap Simplify bootstrap by statically allocated two kmem_cache structures. These are freed after bootup is complete. Allows us to no longer worry about calculations of sizes of kmem_cache structures during bootstrap. 
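The statically allocated boot caches are initialized through the common create_boot_cache() helper introduced by the previous patch in mm/slab_common.c; only the call sites are visible in the slub.c diff below, so the following is a sketch under assumed details, modeled on the create_kmalloc_cache() code removed above:

/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		size_t size, unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = ARCH_KMALLOC_MINALIGN;
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
					name, size, err);

	/* Exempt the boot caches from merging for now */
	s->refcount = -1;
}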
Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 67 +++++++++++++++++++-------------------------------------------- 1 file changed, 20 insertions(+), 47 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index 1be172c157c..c82453ac812 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s) #define __OBJECT_POISON 0x80000000UL /* Poison object */ #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ -static int kmem_size = sizeof(struct kmem_cache); - #ifdef CONFIG_SMP static struct notifier_block slab_notifier; #endif @@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self, /* * Used for early kmem_cache structures that were allocated using - * the page allocator + * the page allocator. Allocate them properly then fix up the pointers + * that may be pointing to the wrong kmem_cache structure. */ -static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) +static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) { int node; + struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - list_add(&s->list, &slab_caches); - s->refcount = -1; + memcpy(s, static_cache, kmem_cache->object_size); for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); @@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) #endif } } + list_add(&s->list, &slab_caches); + return s; } void __init kmem_cache_init(void) { + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; int i; - int caches = 0; - struct kmem_cache *temp_kmem_cache; - int order; - struct kmem_cache *temp_kmem_cache_node; - unsigned long kmalloc_size; + int caches = 2; if (debug_guardpage_minorder()) slub_max_order = 0; - kmem_size = offsetof(struct kmem_cache, node) + - nr_node_ids * sizeof(struct kmem_cache_node *); - - /* Allocate two kmem_caches from the page allocator */ - kmalloc_size = ALIGN(kmem_size, cache_line_size()); - order = get_order(2 * kmalloc_size); - kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order); + kmem_cache_node = &boot_kmem_cache_node; + kmem_cache = &boot_kmem_cache; - /* - * Must first have the slab cache available for the allocations of the - * struct kmem_cache_node's. There is special bootstrap code in - * kmem_cache_open for slab_state == DOWN. 
- */ - kmem_cache_node = (void *)kmem_cache + kmalloc_size; - - kmem_cache_node->name = "kmem_cache_node"; - kmem_cache_node->size = kmem_cache_node->object_size = - sizeof(struct kmem_cache_node); - kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + create_boot_cache(kmem_cache_node, "kmem_cache_node", + sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); /* Able to allocate the per node structures */ slab_state = PARTIAL; - temp_kmem_cache = kmem_cache; - kmem_cache->name = "kmem_cache"; - kmem_cache->size = kmem_cache->object_size = kmem_size; - kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + create_boot_cache(kmem_cache, "kmem_cache", + offsetof(struct kmem_cache, node) + + nr_node_ids * sizeof(struct kmem_cache_node *), + SLAB_HWCACHE_ALIGN); - kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); - memcpy(kmem_cache, temp_kmem_cache, kmem_size); + kmem_cache = bootstrap(&boot_kmem_cache); /* * Allocate kmem_cache_node properly from the kmem_cache slab. * kmem_cache_node is separately allocated so no need to * update any list pointers. */ - temp_kmem_cache_node = kmem_cache_node; - - kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); - memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); - - kmem_cache_bootstrap_fixup(kmem_cache_node); - - caches++; - kmem_cache_bootstrap_fixup(kmem_cache); - caches++; - /* Free temporary boot structure */ - free_pages((unsigned long)temp_kmem_cache, order); + kmem_cache_node = bootstrap(&boot_kmem_cache_node); /* Now we can use the kmem_cache to allocate kmalloc slabs */ -- cgit v1.2.3 From 4590685546a374fb0f60682ce0e3a6fd48911d46 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:16 +0000 Subject: mm/sl[aou]b: Common alignment code Extract the code to do object alignment from the allocators. Do the alignment calculations in slab_common so that the __kmem_cache_create functions of the allocators do not have to deal with alignment. Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 38 +------------------------------------- 1 file changed, 1 insertion(+), 37 deletions(-) (limited to 'mm/slub.c') diff --git a/mm/slub.c b/mm/slub.c index c82453ac812..9640edd2cc7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved) return -ENOSYS; } -/* - * Figure out what the alignment of the objects will be. - */ -static unsigned long calculate_alignment(unsigned long flags, - unsigned long align, unsigned long size) -{ - /* - * If the user wants hardware cache aligned objects then follow that - * suggestion if the object is sufficiently large. - * - * The hardware cache alignment cannot override the specified - * alignment though. If that is greater then use it. 
- */ - if (flags & SLAB_HWCACHE_ALIGN) { - unsigned long ralign = cache_line_size(); - while (size <= ralign / 2) - ralign /= 2; - align = max(align, ralign); - } - - if (align < ARCH_SLAB_MINALIGN) - align = ARCH_SLAB_MINALIGN; - - return ALIGN(align, sizeof(void *)); -} - static void init_kmem_cache_node(struct kmem_cache_node *n) { @@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) { unsigned long flags = s->flags; unsigned long size = s->object_size; - unsigned long align = s->align; int order; /* @@ -2990,20 +2963,12 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) size += sizeof(void *); #endif - /* - * Determine the alignment based on various parameters that the - * user specified and the dynamic determination of cache line size - * on bootup. - */ - align = calculate_alignment(flags, align, s->object_size); - s->align = align; - /* * SLUB stores one object immediately after another beginning from * offset 0. In order to align the objects we have to simply size * each object to conform to the alignment. */ - size = ALIGN(size, align); + size = ALIGN(size, s->align); s->size = size; if (forced_order >= 0) order = forced_order; @@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) s->max = s->oo; return !!oo_objects(s->oo); - } static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) -- cgit v1.2.3
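The helper removed above moves essentially verbatim into mm/slab_common.c, where the common creation path applies it before handing the cache to the allocator-specific __kmem_cache_create(). A sketch of the shared version, mirroring the deleted slub.c code (its linkage and exact location in slab_common.c are assumptions):

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}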