author     Kevin Hilman <khilman@linaro.org>   2015-05-13 15:07:50 -0700
committer  Kevin Hilman <khilman@linaro.org>   2015-05-13 15:07:50 -0700
commit     1eb6f7db6d9d4baef6976437e7b2bbd8809c0cf8 (patch)
tree       2527df8e064bb9f4c5ef7493b28ae58b166c630d /mm
parent     cbe151c53d5efa99085c537bc1d8a8c6a08933ad (diff)
parent     08ad4e77699a8e2374fcc19df0f1cfb5d0175222 (diff)
Merge tag 'v3.18.11-rt7-lno1' of git://git.linaro.org/people/anders.roxell/linux-rt into linux-linaro-lsk-v3.18-rt
Linux 3.18.11-rt7
* tag 'v3.18.11-rt7-lno1' of git://git.linaro.org/people/anders.roxell/linux-rt: (2276 commits)
localversion.patch
workqueue: Prevent deadlock/stall on RT
sched: Do not clear PF_NO_SETAFFINITY flag in select_fallback_rq()
md: disable bcache
rt,ntp: Move call to schedule_delayed_work() to helper thread
scheduling while atomic in cgroup code
cgroups: use simple wait in css_release()
a few open coded completions
completion: Use simple wait queues
rcu-more-swait-conversions.patch
kernel/treercu: use a simple waitqueue
work-simple: Simple work queue implemenation
simple-wait: rename and export the equivalent of waitqueue_active()
wait-simple: Rework for use with completions
wait-simple: Simple waitqueue implementation
wait.h: include atomic.h
drm/i915: drop trace_i915_gem_ring_dispatch on rt
gpu/i915: don't open code these things
mmc: sdhci: don't provide hard irq handler
mmci: Remove bogus local_irq_save()
...
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       |   2
-rw-r--r--  mm/filemap.c     |  11
-rw-r--r--  mm/highmem.c     |   6
-rw-r--r--  mm/memcontrol.c  |  26
-rw-r--r--  mm/memory.c      |  26
-rw-r--r--  mm/mmu_context.c |   2
-rw-r--r--  mm/page_alloc.c  | 142
-rw-r--r--  mm/slab.h        |   4
-rw-r--r--  mm/slub.c        | 125
-rw-r--r--  mm/swap.c        |  34
-rw-r--r--  mm/truncate.c    |   7
-rw-r--r--  mm/vmalloc.c     |  13
-rw-r--r--  mm/vmstat.c      |   6
-rw-r--r--  mm/workingset.c  |  23
14 files changed, 308 insertions, 119 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 1d1ae6b078fd..296e4e048db8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -408,7 +408,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
 
 config TRANSPARENT_HUGEPAGE
 	bool "Transparent Hugepage Support"
-	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
 	select COMPACTION
 	help
 	  Transparent Hugepages allows the kernel to use huge pages and
diff --git a/mm/filemap.c b/mm/filemap.c
index 37beab98b416..9159e0454fba 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -168,7 +168,9 @@ static void page_cache_tree_delete(struct address_space *mapping,
 	if (!workingset_node_pages(node) &&
 	    list_empty(&node->private_list)) {
 		node->private_data = mapping;
-		list_lru_add(&workingset_shadow_nodes, &node->private_list);
+		local_lock(workingset_shadow_lock);
+		list_lru_add(&__workingset_shadow_nodes, &node->private_list);
+		local_unlock(workingset_shadow_lock);
 	}
 }
 
@@ -535,9 +537,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		 * node->private_list is protected by
 		 * mapping->tree_lock.
 		 */
-		if (!list_empty(&node->private_list))
-			list_lru_del(&workingset_shadow_nodes,
+		if (!list_empty(&node->private_list)) {
+			local_lock(workingset_shadow_lock);
+			list_lru_del(&__workingset_shadow_nodes,
 				     &node->private_list);
+			local_unlock(workingset_shadow_lock);
+		}
 	}
 	return 0;
 }
diff --git a/mm/highmem.c b/mm/highmem.c
index 123bcd3ed4f2..16e8cf26d38a 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -29,10 +29,11 @@
 #include <linux/kgdb.h>
 #include <asm/tlbflush.h>
 
-
+#ifndef CONFIG_PREEMPT_RT_FULL
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
+#endif
 
 /*
  * Virtual_count is not a pure "count".
@@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
-
+#ifndef CONFIG_PREEMPT_RT_FULL
 EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+#endif
 
 unsigned int nr_free_highpages (void)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d72bdc3ca09b..0c4538811188 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -60,6 +60,8 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/tcp_memcontrol.h>
+#include <linux/locallock.h>
+
 #include "slab.h"
 
 #include <asm/uaccess.h>
@@ -87,6 +89,7 @@ static int really_do_swap_account __initdata;
 #define do_swap_account		0
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
 
 static const char * const mem_cgroup_stat_names[] = {
 	"cache",
@@ -2376,14 +2379,17 @@ static void __init memcg_stock_init(void)
  */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock;
+	int cpu = get_cpu_light();
+
+	stock = &per_cpu(memcg_stock, cpu);
 
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
-	put_cpu_var(memcg_stock);
+	put_cpu_light();
 }
 
 /*
@@ -2397,7 +2403,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
-	curcpu = get_cpu();
+	curcpu = get_cpu_light();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
@@ -2414,7 +2420,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 			schedule_work_on(cpu, &stock->work);
 		}
 	}
-	put_cpu();
+	put_cpu_light();
 
 	if (!sync)
 		goto out;
@@ -3419,12 +3425,12 @@ static int mem_cgroup_move_account(struct page *page,
 	move_unlock_mem_cgroup(from, &flags);
 	ret = 0;
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 out_unlock:
 	unlock_page(page);
 out:
@@ -6406,10 +6412,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 	}
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 
 	if (do_swap_account && PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
@@ -6468,14 +6474,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 		memcg_oom_recover(memcg);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
 	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
 	memcg_check_events(memcg, dummy_page);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 }
 
 static void uncharge_list(struct list_head *page_list)
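The memcontrol.c hunks above all apply the same RT conversion: a bare local_irq_disable()/local_irq_save() section becomes a named per-CPU local lock. A minimal sketch of that pattern, assuming the RT patch set's <linux/locallock.h> (on !PREEMPT_RT the lock compiles down to the same IRQ disabling; stats_update_example() itself is illustrative, not from the patch):

#include <linux/locallock.h>

/* one lock per class of critical section, as with event_lock above */
static DEFINE_LOCAL_IRQ_LOCK(event_lock);

static void stats_update_example(void)
{
	unsigned long flags;

	local_lock_irqsave(event_lock, flags);
	/* per-CPU counter updates that used to sit under local_irq_save();
	 * on RT this section is preemptible but still CPU-local and serialized */
	local_unlock_irqrestore(event_lock, flags);
}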
diff --git a/mm/memory.c b/mm/memory.c
index 90fb265b32b6..8b6028fc342e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3244,6 +3244,32 @@ unlock:
 	return 0;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+void pagefault_disable(void)
+{
+	migrate_disable();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	migrate_enable();
+}
+EXPORT_SYMBOL(pagefault_enable);
+#endif
+
 /*
  * By the time we get here, we already hold the mm semaphore
  *
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index f802c2d216a7..b1b6f238e42d 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
 	struct task_struct *tsk = current;
 
 	task_lock(tsk);
+	preempt_disable_rt();
 	active_mm = tsk->active_mm;
 	if (active_mm != mm) {
 		atomic_inc(&mm->mm_count);
@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
 	}
 	tsk->mm = mm;
 	switch_mm(active_mm, mm, tsk);
+	preempt_enable_rt();
 	task_unlock(tsk);
 #ifdef finish_arch_post_lock_switch
 	finish_arch_post_lock_switch();
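The RT pagefault_disable() above keeps the task preemptible: instead of preempt_disable(), it pins the task to its CPU with migrate_disable() and counts the nesting in the per-task pagefault_disabled field, which the fault handler checks. Callers are unchanged; a sketch of the usual pattern around an atomic user copy (read_user_word() is an illustrative helper, not from the patch):

/* a fault inside the section must fail fast instead of sleeping on
 * mmap_sem; the caller retries with faults allowed */
static int read_user_word(u32 *dst, const u32 __user *src)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, sizeof(*dst));
	pagefault_enable();

	return left ? -EFAULT : 0;
}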
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c32cb64a1277..6c18699a36e0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -59,6 +59,7 @@
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/locallock.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -230,6 +231,18 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags)		\
+	local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags)	\
+	local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -654,7 +667,7 @@ static inline int free_pages_check(struct page *page)
 }
 
 /*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -665,18 +678,51 @@ static inline int free_pages_check(struct page *page)
  * pinned" detection logic.
  */
 static void free_pcppages_bulk(struct zone *zone, int count,
-			       struct per_cpu_pages *pcp)
+			       struct list_head *list)
 {
-	int migratetype = 0;
-	int batch_free = 0;
 	int to_free = count;
 	unsigned long nr_scanned;
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
 
-	spin_lock(&zone->lock);
 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
+	while (!list_empty(list)) {
+		struct page *page = list_first_entry(list, struct page, lru);
+		int mt;	/* migratetype of the to-be-freed page */
+
+		/* must delete as __free_one_page list manipulates */
+		list_del(&page->lru);
+
+		mt = get_freepage_migratetype(page);
+		if (unlikely(has_isolate_pageblock(zone)))
+			mt = get_pageblock_migratetype(page);
+
+		/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
+		trace_mm_page_pcpu_drain(page, 0, mt);
+		to_free--;
+	}
+	WARN_ON(to_free != 0);
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Moves a number of pages from the PCP lists to free list which
+ * is freed outside of the locked region.
+ *
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ */
+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
+			      struct list_head *dst)
+{
+	int migratetype = 0;
+	int batch_free = 0;
+
 	while (to_free) {
 		struct page *page;
 		struct list_head *list;
@@ -692,7 +738,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			batch_free++;
 			if (++migratetype == MIGRATE_PCPTYPES)
 				migratetype = 0;
-			list = &pcp->lists[migratetype];
+			list = &src->lists[migratetype];
 		} while (list_empty(list));
 
 		/* This is the only non-empty list. Free them all. */
@@ -700,21 +746,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			batch_free = to_free;
 
 		do {
-			int mt;	/* migratetype of the to-be-freed page */
-
-			page = list_entry(list->prev, struct page, lru);
-			/* must delete as __free_one_page list manipulates */
+			page = list_last_entry(list, struct page, lru);
 			list_del(&page->lru);
-			mt = get_freepage_migratetype(page);
-			if (unlikely(has_isolate_pageblock(zone)))
-				mt = get_pageblock_migratetype(page);
-
-			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
-			trace_mm_page_pcpu_drain(page, 0, mt);
+			list_add(&page->lru, dst);
 		} while (--to_free && --batch_free && !list_empty(list));
 	}
-	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -723,7 +759,9 @@ static void free_one_page(struct zone *zone,
 				int migratetype)
 {
 	unsigned long nr_scanned;
-	spin_lock(&zone->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
 	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
@@ -733,7 +771,7 @@ static void free_one_page(struct zone *zone,
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock(&zone->lock);
+	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static bool free_pages_prepare(struct page *page, unsigned int order)
@@ -773,11 +811,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_events(PGFREE, 1 << order);
 	set_freepage_migratetype(page, migratetype);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
@@ -1251,16 +1289,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
 	unsigned long flags;
+	LIST_HEAD(dst);
 	int to_drain, batch;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	batch = ACCESS_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0) {
-		free_pcppages_bulk(zone, to_drain, pcp);
+		isolate_pcp_pages(to_drain, pcp, &dst);
 		pcp->count -= to_drain;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
+	free_pcppages_bulk(zone, to_drain, &dst);
 }
 #endif
 
@@ -1279,16 +1319,21 @@ static void drain_pages(unsigned int cpu)
 	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;
+		LIST_HEAD(dst);
+		int count;
 
-		local_irq_save(flags);
+		cpu_lock_irqsave(cpu, flags);
 		pset = per_cpu_ptr(zone->pageset, cpu);
 
 		pcp = &pset->pcp;
-		if (pcp->count) {
-			free_pcppages_bulk(zone, pcp->count, pcp);
+		count = pcp->count;
+		if (count) {
+			isolate_pcp_pages(count, pcp, &dst);
 			pcp->count = 0;
 		}
-		local_irq_restore(flags);
+		cpu_unlock_irqrestore(cpu, flags);
+		if (count)
+			free_pcppages_bulk(zone, count, &dst);
 	}
 }
 
@@ -1341,7 +1386,12 @@ void drain_all_pages(void)
 		else
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
+#ifndef CONFIG_PREEMPT_RT_BASE
 	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
+#else
+	for_each_cpu(cpu, &cpus_with_pcps)
+		drain_pages(cpu);
+#endif
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -1397,7 +1447,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_freepage_migratetype(page, migratetype);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_event(PGFREE);
 
 	/*
@@ -1423,12 +1473,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = ACCESS_ONCE(pcp->batch);
-		free_pcppages_bulk(zone, batch, pcp);
+		LIST_HEAD(dst);
+
+		isolate_pcp_pages(batch, pcp, &dst);
 		pcp->count -= batch;
+		local_unlock_irqrestore(pa_lock, flags);
+		free_pcppages_bulk(zone, batch, &dst);
+		return;
 	}
 
 out:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 /*
@@ -1558,7 +1613,7 @@ again:
 		struct per_cpu_pages *pcp;
 		struct list_head *list;
 
-		local_irq_save(flags);
+		local_lock_irqsave(pa_lock, flags);
 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
 		list = &pcp->lists[migratetype];
 		if (list_empty(list)) {
@@ -1590,13 +1645,15 @@ again:
 			 */
 			WARN_ON_ONCE(order > 1);
 		}
-		spin_lock_irqsave(&zone->lock, flags);
+		local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
-		spin_unlock(&zone->lock);
-		if (!page)
+		if (!page) {
+			spin_unlock(&zone->lock);
 			goto failed;
+		}
 		__mod_zone_freepage_state(zone, -(1 << order),
 					  get_freepage_migratetype(page));
+		spin_unlock(&zone->lock);
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
@@ -1606,7 +1663,7 @@ again:
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 	if (prep_new_page(page, order, gfp_flags))
@@ -1614,7 +1671,7 @@ again:
 	return page;
 
 failed:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 	return NULL;
 }
 
@@ -2325,8 +2382,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		count_vm_event(COMPACTSTALL);
 
 		/* Page migration frees to the PCP lists but we want merging */
-		drain_pages(get_cpu());
-		put_cpu();
+		drain_pages(get_cpu_light());
+		put_cpu_light();
 
 		page = get_page_from_freelist(gfp_mask, nodemask,
 				order, zonelist, high_zoneidx,
@@ -5565,6 +5622,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 void __init page_alloc_init(void)
 {
 	hotcpu_notifier(page_alloc_cpu_notify, 0);
+	local_irq_lock_init(pa_lock);
 }
 
 /*
@@ -6459,7 +6517,7 @@ void zone_pcp_reset(struct zone *zone)
 	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages()  */
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	if (zone->pageset != &boot_pageset) {
 		for_each_online_cpu(cpu) {
 			pset = per_cpu_ptr(zone->pageset, cpu);
@@ -6468,7 +6526,7 @@ void zone_pcp_reset(struct zone *zone)
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
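The page_alloc.c conversions above all share one idea: pages are detached from the per-CPU lists while pa_lock is held (isolate_pcp_pages()), and only handed to the buddy allocator afterwards, so free_pcppages_bulk() can take zone->lock with interrupts enabled. A condensed sketch of that two-phase shape, using the names introduced by the patch (drain_pcp_example() itself is illustrative):

static void drain_pcp_example(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	LIST_HEAD(dst);
	int count;

	local_lock_irqsave(pa_lock, flags);
	count = pcp->count;
	if (count) {
		isolate_pcp_pages(count, pcp, &dst);	/* phase 1: unlink only */
		pcp->count = 0;
	}
	local_unlock_irqrestore(pa_lock, flags);

	if (count)
		free_pcppages_bulk(zone, count, &dst);	/* phase 2: zone->lock */
}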
diff --git a/mm/slab.h b/mm/slab.h
index ab019e63e3c2..c5d1c194f2ca 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -315,7 +315,11 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  * The slab lists for all objects.
  */
 struct kmem_cache_node {
+#ifdef CONFIG_SLUB
+	raw_spinlock_t list_lock;
+#else
 	spinlock_t list_lock;
+#endif
 
 #ifdef CONFIG_SLAB
 	struct list_head slabs_partial;	/* partial list first, better asm code */
diff --git a/mm/slub.c b/mm/slub.c
index ae7b9f1ad394..72bb06beaabc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1044,7 +1044,7 @@ static noinline struct kmem_cache_node *free_debug_processing(
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	spin_lock_irqsave(&n->list_lock, *flags);
+	raw_spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1091,7 +1091,7 @@ out:
 
 fail:
 	slab_unlock(page);
-	spin_unlock_irqrestore(&n->list_lock, *flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
 	return NULL;
 }
@@ -1219,6 +1219,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 
 #endif /* CONFIG_SLUB_DEBUG */
 
+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
@@ -1303,10 +1309,15 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	bool enableirqs;
 
 	flags &= gfp_allowed_mask;
 
-	if (flags & __GFP_WAIT)
+	enableirqs = (flags & __GFP_WAIT) != 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	enableirqs |= system_state == SYSTEM_RUNNING;
+#endif
+	if (enableirqs)
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1347,7 +1358,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
+	if (enableirqs)
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1365,8 +1376,10 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
+#ifndef CONFIG_PREEMPT_RT_FULL
 	if (unlikely(s->ctor))
 		s->ctor(object);
+#endif
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1442,6 +1455,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	memcg_uncharge_slab(s, order);
 }
 
+static void free_delayed(struct list_head *h)
+{
+	while(!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(page->slab_cache, page);
+	}
+}
+
 #define need_reserve_slab_rcu						\
 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
 
@@ -1476,6 +1499,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 		}
 
 		call_rcu(head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = &__get_cpu_var(slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -1589,7 +1618,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
 
@@ -1614,7 +1643,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			break;
 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }
 
@@ -1860,7 +1889,7 @@ redo:
 			 * that acquire_slab() will see a slab page that
			 * is frozen
			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -1871,7 +1900,7 @@ redo:
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}
 
@@ -1906,7 +1935,7 @@ redo:
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
@@ -1938,10 +1967,10 @@ static void unfreeze_partials(struct kmem_cache *s,
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);
 
 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -1970,7 +1999,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 	}
 
 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -2008,14 +2037,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = &__get_cpu_var(slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(&tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2079,7 +2115,22 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		if (!has_cpu_slab(cpu, s))
+			continue;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(&tofree);
+	}
 }
 
 /*
@@ -2115,10 +2166,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	unsigned long x = 0;
 	struct page *page;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2255,9 +2306,11 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
+	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2325,7 +2378,13 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+out:
+	f = &__get_cpu_var(slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, &tofree);
+	raw_spin_unlock(&f->lock);
 	local_irq_restore(flags);
+	free_delayed(&tofree);
 	return freelist;
 
 new_slab:
@@ -2342,8 +2401,7 @@ new_slab:
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		local_irq_restore(flags);
-		return NULL;
+		goto out;
 	}
 
 	page = c->page;
@@ -2358,8 +2416,7 @@ new_slab:
 	deactivate_slab(s, page, get_freepointer(s, freelist));
 	c->page = NULL;
 	c->freelist = NULL;
-	local_irq_restore(flags);
-	return freelist;
+	goto out;
 }
 
 /*
@@ -2444,6 +2501,10 @@ redo:
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->object_size);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (unlikely(s->ctor) && object)
+		s->ctor(object);
+#endif
 
 	slab_post_alloc_hook(s, gfpflags, object);
 
@@ -2531,7 +2592,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	do {
 		if (unlikely(n)) {
-			spin_unlock_irqrestore(&n->list_lock, flags);
+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
@@ -2563,7 +2624,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				spin_lock_irqsave(&n->list_lock, flags);
+				raw_spin_lock_irqsave(&n->list_lock, flags);
 
 			}
 		}
@@ -2605,7 +2666,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2620,7 +2681,7 @@ slab_empty:
 		remove_full(s, n, page);
 	}
 
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2816,7 +2877,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -3373,7 +3434,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		for (i = 0; i < objects; i++)
 			INIT_LIST_HEAD(slabs_by_inuse + i);
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists indexed by the items in use in each slab.
@@ -3394,7 +3455,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		for (i = objects - 1; i > 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
@@ -3567,6 +3628,12 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -3815,7 +3882,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	struct page *page;
 	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
@@ -3837,7 +3904,7 @@ static int validate_slab_node(struct kmem_cache *s,
 		       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
@@ -4025,12 +4092,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	for (i = 0; i < t.count; i++) {
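The slub.c changes combine two moves: kmem_cache_node::list_lock becomes a raw_spinlock_t (it must remain a true spinning lock on RT), and slabs that become empty while interrupts are disabled are parked on the per-CPU slub_free_list instead of being freed immediately, because __free_slab() ends in the page allocator and may sleep on RT. A sketch of the drain side, using the patch's own helpers (drain_delayed_slabs_example() itself is illustrative):

static void drain_delayed_slabs_example(void)
{
	struct slub_free_list *f;
	LIST_HEAD(tofree);

	/* splice out this CPU's deferred slabs under the raw lock ... */
	f = &__get_cpu_var(slub_free_list);
	raw_spin_lock_irq(&f->lock);
	list_splice_init(&f->list, &tofree);
	raw_spin_unlock_irq(&f->lock);

	/* ... then free them in a context where sleeping is allowed */
	free_delayed(&tofree);
}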
diff --git a/mm/swap.c b/mm/swap.c
index 8a12b33936b4..acb833351464 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/locallock.h>
 
 #include "internal.h"
 
@@ -44,6 +45,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
@@ -473,11 +477,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;
 
 		page_cache_get(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -528,12 +532,13 @@ static bool need_activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       activate_page_pvecs);
 
 		page_cache_get(page);
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swapvec_lock, activate_page_pvecs);
 	}
 }
 
@@ -559,7 +564,7 @@ void activate_page(struct page *page)
 
 static void __lru_cache_activate_page(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 	int i;
 
 	/*
@@ -581,7 +586,7 @@ static void __lru_cache_activate_page(struct page *page)
 		}
 	}
 
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /*
@@ -620,13 +625,13 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 static void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
 
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec);
 	pagevec_add(pvec, page);
-	put_cpu_var(lru_add_pvec);
+	put_locked_var(swapvec_lock, lru_add_pvec);
 }
 
 /**
@@ -806,9 +811,9 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -836,18 +841,19 @@ void deactivate_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec = &get_locked_var(swapvec_lock,
+						       lru_deactivate_pvecs);
 
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+		put_locked_var(swapvec_lock, lru_deactivate_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
+	local_unlock_cpu(swapvec_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
diff --git a/mm/truncate.c b/mm/truncate.c
index f1e4d6052369..a06369d67fa4 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -56,8 +56,11 @@ static void clear_exceptional_entry(struct address_space *mapping,
 	 * protected by mapping->tree_lock.
 	 */
 	if (!workingset_node_shadows(node) &&
-	    !list_empty(&node->private_list))
-		list_lru_del(&workingset_shadow_nodes, &node->private_list);
+	    !list_empty(&node->private_list)) {
+		local_lock(workingset_shadow_lock);
+		list_lru_del(&__workingset_shadow_nodes, &node->private_list);
+		local_unlock(workingset_shadow_lock);
+	}
 	__radix_tree_delete_node(&mapping->page_tree, node);
 unlock:
 	spin_unlock_irq(&mapping->tree_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 90520af7f186..43122b6c46ed 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -798,7 +798,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	struct vmap_block *vb;
 	struct vmap_area *va;
 	unsigned long vb_idx;
-	int node, err;
+	int node, err, cpu;
 
 	node = numa_node_id();
 
@@ -836,11 +836,12 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	BUG_ON(err);
 	radix_tree_preload_end();
 
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = &__get_cpu_var(vmap_block_queue);
 	spin_lock(&vbq->lock);
 	list_add_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
 
 	return vb;
 }
@@ -908,6 +909,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	struct vmap_block *vb;
 	unsigned long addr = 0;
 	unsigned int order;
+	int cpu = 0;
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -923,7 +925,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 
 again:
 	rcu_read_lock();
-	vbq = &get_cpu_var(vmap_block_queue);
+	cpu = get_cpu_light();
+	vbq = &__get_cpu_var(vmap_block_queue);
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 		int i;
 
@@ -947,7 +950,7 @@ next:
 		spin_unlock(&vb->lock);
 	}
 
-	put_cpu_var(vmap_block_queue);
+	put_cpu_light();
 	rcu_read_unlock();
 
 	if (!addr) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4590aa42b6cd..d65d0c30a536 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -221,6 +221,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 	long x;
 	long t;
 
+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -230,6 +231,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -262,6 +264,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -270,6 +273,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -284,6 +288,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -292,6 +297,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
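Two more RT helpers recur in the swap.c and vmalloc.c hunks above: get_locked_var() takes a local lock and hands back this CPU's instance of a per-CPU variable (on !RT it degenerates to get_cpu_var()), and get_cpu_light() pins the task to its CPU without disabling preemption (plain get_cpu() on !RT). A sketch of the latter, modeled on the vb_alloc() hunk (touch_queue_example() is illustrative; the data must carry its own lock, here vbq->lock, since preemption stays enabled on RT):

static void touch_queue_example(void)
{
	struct vmap_block_queue *vbq;
	int cpu;

	cpu = get_cpu_light();	/* RT: migration off, preemption still on */
	vbq = &__get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	/* operate on this CPU's queue; vbq->lock, not disabled preemption,
	 * is what serializes against other tasks (cpu id available if needed) */
	spin_unlock(&vbq->lock);
	put_cpu_light();
}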
diff --git a/mm/workingset.c b/mm/workingset.c
index f7216fa7da27..0decb9ca8db8 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -264,7 +264,8 @@ void workingset_activation(struct page *page)
  * point where they would still be useful.
  */
 
-struct list_lru workingset_shadow_nodes;
+struct list_lru __workingset_shadow_nodes;
+DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
 
 static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 					struct shrink_control *sc)
@@ -274,9 +275,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 	unsigned long pages;
 
 	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
-	local_irq_disable();
-	shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
-	local_irq_enable();
+	local_lock_irq(workingset_shadow_lock);
+	shadow_nodes = list_lru_count_node(&__workingset_shadow_nodes, sc->nid);
+	local_unlock_irq(workingset_shadow_lock);
 
 	pages = node_present_pages(sc->nid);
 	/*
@@ -362,9 +363,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	spin_unlock(&mapping->tree_lock);
 	ret = LRU_REMOVED_RETRY;
 out:
-	local_irq_enable();
+	local_unlock_irq(workingset_shadow_lock);
 	cond_resched();
-	local_irq_disable();
+	local_lock_irq(workingset_shadow_lock);
 	spin_lock(lru_lock);
 	return ret;
 }
@@ -375,10 +376,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
 	unsigned long ret;
 
 	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
-	local_irq_disable();
-	ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
+	local_lock_irq(workingset_shadow_lock);
+	ret = list_lru_walk_node(&__workingset_shadow_nodes, sc->nid,
 				 shadow_lru_isolate, NULL, &sc->nr_to_scan);
-	local_irq_enable();
+	local_unlock_irq(workingset_shadow_lock);
 
 	return ret;
 }
@@ -399,7 +400,7 @@ static int __init workingset_init(void)
 {
 	int ret;
 
-	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
+	ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key);
 	if (ret)
 		goto err;
 	ret = register_shrinker(&workingset_shadow_shrinker);
@@ -407,7 +408,7 @@ static int __init workingset_init(void)
 		goto err_list_lru;
 	return 0;
 err_list_lru:
-	list_lru_destroy(&workingset_shadow_nodes);
+	list_lru_destroy(&__workingset_shadow_nodes);
 err:
 	return ret;
 }
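The workingset.c rename states the new rule that the filemap.c and truncate.c hunks also follow: __workingset_shadow_nodes may only be touched under workingset_shadow_lock, which replaces bare IRQ disabling around the shadow-node LRU. Sketch of a caller, assuming the declarations from the patch (forget_shadow_node() is an illustrative name):

static void forget_shadow_node(struct radix_tree_node *node)
{
	/* nests inside mapping->tree_lock, as in the filemap.c hunks above */
	local_lock(workingset_shadow_lock);
	list_lru_del(&__workingset_shadow_nodes, &node->private_list);
	local_unlock(workingset_shadow_lock);
}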