author | Kevin Hilman <khilman@linaro.org> | 2015-11-04 10:01:20 -0800
---|---|---
committer | Kevin Hilman <khilman@linaro.org> | 2015-11-04 10:01:20 -0800
commit | 49bd04cb11e57c4cd4d2c00feca898537dd6db1f (patch) |
tree | 846c594b2b1d2f203c24ff32569b6c615822a5dc /mm |
parent | 260409578ecbf7ce1725b85f9c082312bff808c0 (diff) |
parent | 12ba397b828b7e7f5d28a73e9a2ee14470b7edc7 (diff) |
Merge tag 'v4.1.12-rt12' of git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel into v4.1/topic/rt
v4.1.12-rt12
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb.c | 8
-rw-r--r-- | mm/memcontrol.c | 1
-rw-r--r-- | mm/memory-failure.c | 15
-rw-r--r-- | mm/migrate.c | 2
-rw-r--r-- | mm/page_alloc.c | 7
-rw-r--r-- | mm/slab.c | 17
-rw-r--r-- | mm/slub.c | 2
-rw-r--r-- | mm/vmscan.c | 16
8 files changed, 42 insertions, 26 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8c4c1f9f9a9a..a6ff935476e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2897,6 +2897,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 
 		/*
+		 * Shared VMAs have their own reserves and do not affect
+		 * MAP_PRIVATE accounting but it is possible that a shared
+		 * VMA is using the same page so check and skip such VMAs.
+		 */
+		if (iter_vma->vm_flags & VM_MAYSHARE)
+			continue;
+
+		/*
 		 * Unmap the page from other VMAs without their own reserves.
 		 * They get marked to be SIGKILLed if they fault in these
 		 * areas. This is because a future no-page fault on this VMA
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8bd68b5ec4b5..b4fb4f7692fc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3683,6 +3683,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
+	threshold <<= PAGE_SHIFT;
 
 	mutex_lock(&memcg->thresholds_lock);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 501820c815b3..9f48145c884f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1558,6 +1558,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
 		 */
 		ret = __get_any_page(page, pfn, 0);
 		if (!PageLRU(page)) {
+			/* Drop page reference which is from __get_any_page() */
+			put_page(page);
 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
 				pfn, page->flags);
 			return -EIO;
@@ -1587,13 +1589,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	unlock_page(hpage);
 
 	ret = isolate_huge_page(hpage, &pagelist);
-	if (ret) {
-		/*
-		 * get_any_page() and isolate_huge_page() takes a refcount each,
-		 * so need to drop one here.
-		 */
-		put_page(hpage);
-	} else {
+	/*
+	 * get_any_page() and isolate_huge_page() takes a refcount each,
+	 * so need to drop one here.
+	 */
+	put_page(hpage);
+	if (!ret) {
 		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
 		return -EBUSY;
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index f53838fe3dfe..2c37b1a44a8c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1062,7 +1062,7 @@ out:
 	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
 		put_new_page(new_hpage, private);
 	else
-		put_page(new_hpage);
+		putback_active_hugepage(new_hpage);
 
 	if (result) {
 		if (rc)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b70c9977dce..41bd90d60cb4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1021,12 +1021,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	set_page_owner(page, order, gfp_flags);
 
 	/*
-	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
 	 * steps that will free more memory. The caller should avoid the page
 	 * being used for !PFMEMALLOC purposes.
 	 */
-	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+	if (alloc_flags & ALLOC_NO_WATERMARKS)
+		set_page_pfmemalloc(page);
+	else
+		clear_page_pfmemalloc(page);
 
 	return 0;
 }
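The prep_new_page() hunk above calls three helpers that the backported change defines in include/linux/mm.h rather than in mm/page_alloc.c, so they do not appear in this diffstat. A minimal sketch of their shape, assuming (as the upstream version of this change does) that the flag is stashed in the otherwise-unused page->index field of a freshly allocated page:

```c
#include <linux/mm_types.h>

/*
 * Sketch of the helpers the hunk above calls; not part of this diff.
 * Encoding the flag in page->index (instead of a dedicated bool in
 * struct page) mirrors the upstream change, but is an assumption here.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

static inline bool page_is_pfmemalloc(struct page *page)
{
	/* No valid page index can be -1UL, so the value is unambiguous. */
	return page->index == -1UL;
}
```

The accessor also keeps callers honest: the slab.c and slub.c hunks below switch from reading page->pfmemalloc directly to calling page_is_pfmemalloc().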
diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd1cefa..330039fdcf18 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	}
 
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
-	if (unlikely(page->pfmemalloc))
+	if (page_is_pfmemalloc(page))
 		pfmemalloc_active = true;
 
 	nr_pages = (1 << cachep->gfporder);
@@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		add_zone_page_state(page_zone(page),
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	__SetPageSlab(page);
-	if (page->pfmemalloc)
+	if (page_is_pfmemalloc(page))
 		SetPageSlabPfmemalloc(page);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -2189,9 +2189,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= kmalloc_size(INDEX_NODE + 1)
-	    && cachep->object_size > cache_line_size()
-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+	/*
+	 * To activate debug pagealloc, off-slab management is necessary
+	 * requirement. In early phase of initialization, small sized slab
+	 * doesn't get initialized so it would not be possible. So, we need
+	 * to check size >= 256. It guarantees that all necessary small
+	 * sized slab is initialized in current slab initialization sequence.
+	 */
+	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+	    size >= 256 && cachep->object_size > cache_line_size() &&
+	    ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
diff --git a/mm/slub.c b/mm/slub.c
index f657453adee3..905e283d7829 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1409,7 +1409,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
-	if (page->pfmemalloc)
+	if (page_is_pfmemalloc(page))
 		SetPageSlabPfmemalloc(page);
 
 	start = page_address(page);
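The obj_offset arithmetic in the slab.c debug-pagealloc hunk is easier to follow with concrete numbers: the object size is rounded up to the cache's alignment, and the leftover slack in the page is added to obj_offset so the object sits flush against the end of its now page-sized slot, where an overrun runs into the following page. A standalone illustration with hypothetical values:

```c
#include <stdio.h>

/* Same rounding the kernel's ALIGN() performs for power-of-two alignments. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical cache: 700-byte objects, 64-byte alignment, 4K pages. */
	unsigned long size = 700, align = 64, page_size = 4096;
	unsigned long aligned = ALIGN(size, align);	/* 704 */

	/* The hunk adds this slack to obj_offset and sets size = PAGE_SIZE,
	 * pushing the object to the tail of the page. */
	printf("obj_offset += %lu, size becomes %lu\n",
	       page_size - aligned, page_size);	/* 3392, 4096 */
	return 0;
}
```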
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e8eadd71bac..1a17bd7c0ce5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -937,21 +937,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 *
 			 * 2) Global reclaim encounters a page, memcg encounters a
 			 *    page that is not marked for immediate reclaim or
-			 *    the caller does not have __GFP_IO. In this case mark
+			 *    the caller does not have __GFP_FS (or __GFP_IO if it's
+			 *    simply going to swap, not to fs). In this case mark
 			 *    the page for immediate reclaim and continue scanning.
 			 *
-			 *    __GFP_IO is checked because a loop driver thread might
+			 *    Require may_enter_fs because we would wait on fs, which
+			 *    may not have submitted IO yet. And the loop driver might
 			 *    enter reclaim, and deadlock if it waits on a page for
 			 *    which it is needed to do the write (loop masks off
 			 *    __GFP_IO|__GFP_FS for this reason); but more thought
 			 *    would probably show more reasons.
 			 *
-			 *    Don't require __GFP_FS, since we're not going into the
-			 *    FS, just waiting on its writeback completion. Worryingly,
-			 *    ext4 gfs2 and xfs allocate pages with
-			 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-			 *    may_enter_fs here is liable to OOM on them.
-			 *
 			 * 3) memcg encounters a page that is not already marked
 			 *    PageReclaim. memcg does not have any dirty pages
 			 *    throttling so we could easily OOM just because too many
@@ -968,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 			/* Case 2 above */
 			} else if (global_reclaim(sc) ||
-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+			    !PageReclaim(page) || !may_enter_fs) {
 				/*
 				 * This is slightly racy - end_page_writeback()
 				 * might have just cleared PageReclaim, then
@@ -1157,7 +1153,7 @@ cull_mlocked:
 		if (PageSwapCache(page))
 			try_to_free_swap(page);
 		unlock_page(page);
-		putback_lru_page(page);
+		list_add(&page->lru, &ret_pages);
 		continue;
 
 activate_locked:
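The rewritten case-2 test relies on may_enter_fs, which shrink_page_list() computes per page earlier in the loop and which is not visible in this diff. Reconstructed from the logic the new comment describes (__GFP_FS for filesystem writeback, __GFP_IO alone sufficing only for pages merely headed to swap), it is roughly:

```c
/*
 * Reconstruction of the predicate the new test uses; computed near the
 * top of shrink_page_list()'s per-page loop, outside this diff's hunks.
 */
may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
	(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
```

With that, a GFP_NOFS reclaimer no longer stalls on writeback it cannot help complete: it marks the page PageReclaim and continues scanning, exactly as the first hunk's updated comment describes.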