author     Thomas Gleixner <tglx@linutronix.de>      2012-10-25 10:32:35 +0100
committer  Steven Rostedt <rostedt@goodmis.org>      2013-08-13 16:09:00 -0400
commit     61c7ffc49e91958e09f49f2233eb4668cc2b7ba5 (patch)
tree       0fa2944a22b31b740f42c6c8d55483545ee47299
parent     4eb85b9b4f8a10a58e9ac25af0fb4c2ee4918f99 (diff)
mm: Enable SLUB for RT
Make SLUB RT aware and remove the restriction in Kconfig.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
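[Editor's note] The core idea of the patch: on PREEMPT_RT the page allocator cannot be called with interrupts disabled, so a slab page that becomes free in such a context is parked on a per-CPU slub_free_list under a raw spinlock and actually released later, via free_delayed(), once the caller is back in a context where freeing is allowed. The stand-alone user-space sketch below illustrates only that deferred-free pattern; the names (release_obj, flush_deferred, the in_atomic flag) and the pthread mutex standing in for the raw spinlock are illustrative assumptions, not kernel APIs.

/*
 * Minimal user-space sketch of the deferred-free pattern, assuming a single
 * global list instead of per-CPU data: objects "freed" in an atomic context
 * are queued and only really freed when the list is flushed later.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct obj {
	struct obj *next;
	int id;
};

struct deferred_free_list {
	pthread_mutex_t lock;	/* stands in for the raw spinlock */
	struct obj *head;
};

static struct deferred_free_list dfl = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.head = NULL,
};

/* Free immediately, or park the object if we are in an "atomic" context. */
static void release_obj(struct obj *o, int in_atomic)
{
	if (in_atomic) {
		pthread_mutex_lock(&dfl.lock);
		o->next = dfl.head;
		dfl.head = o;
		pthread_mutex_unlock(&dfl.lock);
	} else {
		free(o);
	}
}

/* Drain the deferred list; mirrors free_delayed() draining slub_free_list. */
static void flush_deferred(void)
{
	struct obj *list;

	pthread_mutex_lock(&dfl.lock);
	list = dfl.head;
	dfl.head = NULL;
	pthread_mutex_unlock(&dfl.lock);

	while (list) {
		struct obj *next = list->next;

		printf("deferred free of obj %d\n", list->id);
		free(list);
		list = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct obj *o = malloc(sizeof(*o));

		o->id = i;
		release_obj(o, /* in_atomic */ 1);	/* parked, not freed */
	}
	flush_deferred();	/* safe context: actually free everything */
	return 0;
}

In the actual patch, free_slab() takes the irqs_disabled() branch to queue the page, and __slab_alloc(), put_cpu_partial() and flush_all() drain the per-CPU list with free_delayed() at points where interrupts are enabled again.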
-rw-r--r--  include/linux/slub_def.h |   2
-rw-r--r--  init/Kconfig             |   1
-rw-r--r--  mm/slub.c                | 119
3 files changed, 92 insertions, 30 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9db4825cd393..a58ad34cc06b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -54,7 +54,7 @@ struct kmem_cache_cpu {
 };
 
 struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
+	raw_spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
diff --git a/init/Kconfig b/init/Kconfig
index f6a406dc03cf..54b18f0baf40 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1494,7 +1494,6 @@ config SLAB
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
-	depends on !PREEMPT_RT_FULL
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
 	   instead of managing queues of cached objects (SLAB approach).
diff --git a/mm/slub.c b/mm/slub.c
index 3d2308f14b19..f8cfc461eaf4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1070,7 +1070,7 @@ static noinline struct kmem_cache_node *free_debug_processing(
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	spin_lock_irqsave(&n->list_lock, *flags);
+	raw_spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1118,7 +1118,7 @@ out:
 
 fail:
 	slab_unlock(page);
-	spin_unlock_irqrestore(&n->list_lock, *flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
 	return NULL;
 }
@@ -1253,6 +1253,12 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Slab allocation and freeing
  */
@@ -1277,7 +1283,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	flags &= gfp_allowed_mask;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state == SYSTEM_RUNNING)
+#else
 	if (flags & __GFP_WAIT)
+#endif
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1317,7 +1327,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state == SYSTEM_RUNNING)
+#else
 	if (flags & __GFP_WAIT)
+#endif
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1414,6 +1428,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_memcg_kmem_pages(page, order);
 }
 
+static void free_delayed(struct kmem_cache *s, struct list_head *h)
+{
+	while(!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(s, page);
+	}
+}
+
 #define need_reserve_slab_rcu						\
 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
 
@@ -1448,6 +1472,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 		}
 
 		call_rcu(head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = &__get_cpu_var(slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -1549,7 +1579,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
 		int available;
@@ -1574,7 +1604,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			break;
 
 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }
 
@@ -1816,7 +1846,7 @@ redo:
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -1827,7 +1857,7 @@ redo:
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}
 
@@ -1862,7 +1892,7 @@ redo:
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
@@ -1893,10 +1923,10 @@ static void unfreeze_partials(struct kmem_cache *s,
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);
 
 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -1925,7 +1955,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 	}
 
 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -1961,14 +1991,21 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = &__get_cpu_var(slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(s, &tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2031,7 +2068,22 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		if (!has_cpu_slab(cpu, s))
+			continue;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(s, &tofree);
+	}
 }
 
 /*
@@ -2059,10 +2111,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	unsigned long x = 0;
 	struct page *page;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 
@@ -2205,9 +2257,11 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
+	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2270,7 +2324,13 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+out:
+	f = &__get_cpu_var(slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, &tofree);
+	raw_spin_unlock(&f->lock);
 	local_irq_restore(flags);
+	free_delayed(s, &tofree);
 	return freelist;
 
 new_slab:
@@ -2288,9 +2348,7 @@ new_slab:
 	if (unlikely(!freelist)) {
 		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 			slab_out_of_memory(s, gfpflags, node);
-
-		local_irq_restore(flags);
-		return NULL;
+		goto out;
 	}
 
 	page = c->page;
@@ -2304,8 +2362,7 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
-	return freelist;
+	goto out;
 }
 
 /*
@@ -2477,7 +2534,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	do {
 		if (unlikely(n)) {
-			spin_unlock_irqrestore(&n->list_lock, flags);
+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
@@ -2507,7 +2564,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				spin_lock_irqsave(&n->list_lock, flags);
+				raw_spin_lock_irqsave(&n->list_lock, flags);
 
 			}
 		}
@@ -2548,7 +2605,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2562,7 +2619,7 @@ slab_empty:
 		/* Slab must be on the full list */
 		remove_full(s, page);
 
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2764,7 +2821,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -3451,7 +3508,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		for (i = 0; i < objects; i++)
 			INIT_LIST_HEAD(slabs_by_inuse + i);
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists indexed by the items in use in each slab.
@@ -3472,7 +3529,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		for (i = objects - 1; i > 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
@@ -3642,6 +3699,12 @@ void __init kmem_cache_init(void)
 		boot_kmem_cache_node;
 	int i;
 	int caches = 2;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -4033,7 +4096,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	struct page *page;
 	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
@@ -4056,7 +4119,7 @@ static int validate_slab_node(struct kmem_cache *s,
 			atomic_long_read(&n->nr_slabs));
 
 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
@@ -4246,12 +4309,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	for (i = 0; i < t.count; i++) {