author    | Andrey Konovalov <andrey.konovalov@linaro.org> | 2014-04-15 01:23:46 +0400
committer | Andrey Konovalov <andrey.konovalov@linaro.org> | 2014-04-15 01:23:46 +0400
commit    | 2c41c8eb2b3014ddb4de53fec533edfa47a96db3 (patch)
tree      | 2a11d9b2fbc72598589523742eac98af3a09d3ff /mm
parent    | dbb07139ef78699b07137e3974aa9638cfa2d57a (diff)
parent    | 2609fbf2f5158f57b1c41a5fa2c18fffc6016342 (diff)
Automatically merging tracking-linaro-android-3.14 into merge-linux-linaro-core-tracking
Conflicting files:
Diffstat (limited to 'mm')
-rw-r--r-- | mm/madvise.c    |  3 |
-rw-r--r-- | mm/mempolicy.c  |  2 |
-rw-r--r-- | mm/mlock.c      |  3 |
-rw-r--r-- | mm/mmap.c       | 44 |
-rw-r--r-- | mm/mprotect.c   |  3 |
-rw-r--r-- | mm/page_alloc.c | 35 |
-rw-r--r-- | mm/shmem.c      | 13 |
-rw-r--r-- | mm/vmscan.c     | 43 |
8 files changed, 112 insertions(+), 34 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b32..2590a525066 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -102,7 +102,8 @@ static long madvise_behavior(struct vm_area_struct *vma,
 
         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
         *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
-                          vma->vm_file, pgoff, vma_policy(vma));
+                          vma->vm_file, pgoff, vma_policy(vma),
+                          vma_get_anon_name(vma));
         if (*prev) {
                 vma = *prev;
                 goto success;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ae3c8f3595d..a50d8a2dae7 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -766,7 +766,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                         ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                   vma->anon_vma, vma->vm_file, pgoff,
-                                  new_pol);
+                                  new_pol, vma_get_anon_name(vma));
                 if (prev) {
                         vma = prev;
                         next = vma->vm_next;
diff --git a/mm/mlock.c b/mm/mlock.c
index 4e1a6816228..7341db25dae 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -557,7 +557,8 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
         *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
-                          vma->vm_file, pgoff, vma_policy(vma));
+                          vma->vm_file, pgoff, vma_policy(vma),
+                          vma_get_anon_name(vma));
         if (*prev) {
                 vma = *prev;
                 goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index 20ff0c33274..5f8401460ee 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -892,7 +892,8 @@ again:                  remove_next = 1 + (end > next->vm_end);
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-                                struct file *file, unsigned long vm_flags)
+                                struct file *file, unsigned long vm_flags,
+                                const char __user *anon_name)
 {
         /*
          * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -908,6 +909,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
                 return 0;
         if (vma->vm_ops && vma->vm_ops->close)
                 return 0;
+        if (vma_get_anon_name(vma) != anon_name)
+                return 0;
         return 1;
 }
 
@@ -938,9 +941,10 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  */
 static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+        const char __user *anon_name)
 {
-        if (is_mergeable_vma(vma, file, vm_flags) &&
+        if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                 if (vma->vm_pgoff == vm_pgoff)
                         return 1;
@@ -957,9 +961,10 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
  */
 static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+        const char __user *anon_name)
 {
-        if (is_mergeable_vma(vma, file, vm_flags) &&
+        if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                 pgoff_t vm_pglen;
                 vm_pglen = vma_pages(vma);
@@ -970,9 +975,9 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor. Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -1002,7 +1007,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                         struct vm_area_struct *prev, unsigned long addr,
                         unsigned long end, unsigned long vm_flags,
                         struct anon_vma *anon_vma, struct file *file,
-                        pgoff_t pgoff, struct mempolicy *policy)
+                        pgoff_t pgoff, struct mempolicy *policy,
+                        const char __user *anon_name)
 {
         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
         struct vm_area_struct *area, *next;
@@ -1028,15 +1034,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
          */
         if (prev && prev->vm_end == addr &&
                         mpol_equal(vma_policy(prev), policy) &&
-                        can_vma_merge_after(prev, vm_flags,
-                                                anon_vma, file, pgoff)) {
+                        can_vma_merge_after(prev, vm_flags, anon_vma,
+                                                file, pgoff, anon_name)) {
                 /*
                  * OK, it can. Can we now merge in the successor as well?
                  */
                 if (next && end == next->vm_start &&
                                 mpol_equal(policy, vma_policy(next)) &&
-                                can_vma_merge_before(next, vm_flags,
-                                        anon_vma, file, pgoff+pglen) &&
+                                can_vma_merge_before(next, vm_flags, anon_vma,
+                                        file, pgoff+pglen, anon_name) &&
                                 is_mergeable_anon_vma(prev->anon_vma,
                                                       next->anon_vma, NULL)) {
                                                         /* cases 1, 6 */
@@ -1056,8 +1062,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
          */
         if (next && end == next->vm_start &&
                         mpol_equal(policy, vma_policy(next)) &&
-                        can_vma_merge_before(next, vm_flags,
-                                        anon_vma, file, pgoff+pglen)) {
+                        can_vma_merge_before(next, vm_flags, anon_vma,
+                                        file, pgoff+pglen, anon_name)) {
                 if (prev && addr < prev->vm_end)        /* case 4 */
                         err = vma_adjust(prev, prev->vm_start,
                                 addr, prev->vm_pgoff, NULL);
@@ -1539,7 +1545,8 @@ munmap_back:
         /*
          * Can we just expand an old mapping?
          */
-        vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+        vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+                        NULL, NULL);
         if (vma)
                 goto out;
 
@@ -2642,7 +2649,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 
         /* Can we just expand an old private anonymous mapping? */
         vma = vma_merge(mm, prev, addr, addr + len, flags,
-                                        NULL, NULL, pgoff, NULL);
+                                        NULL, NULL, pgoff, NULL, NULL);
         if (vma)
                 goto out;
 
@@ -2801,7 +2808,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
         if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
                 return NULL;    /* should never get here */
         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+                        vma_get_anon_name(vma));
         if (new_vma) {
                 /*
                  * Source vma may have been merged into new_vma
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 769a67a1580..80b9bca07ef 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -252,7 +252,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
          */
         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
         *pprev = vma_merge(mm, *pprev, start, end, newflags,
-                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+                           vma_get_anon_name(vma));
         if (*pprev) {
                 vma = *pprev;
                 goto success;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3bac76ae4b3..2be8811e57f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -204,8 +204,21 @@ static char * const zone_names[MAX_NR_ZONES] = {
          "Movable",
 };
 
+/*
+ * Try to keep at least this much lowmem free. Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+int min_free_order_shift = 1;
+
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes = 0;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1712,7 +1725,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                 free_pages -= z->free_area[o].nr_free << o;
 
                 /* Require fewer higher order pages to be free */
-                min >>= 1;
+                min >>= min_free_order_shift;
 
                 if (free_pages <= min)
                         return false;
@@ -5604,6 +5617,7 @@ static void setup_per_zone_lowmem_reserve(void)
 static void __setup_per_zone_wmarks(void)
 {
         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+        unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
         unsigned long lowmem_pages = 0;
         struct zone *zone;
         unsigned long flags;
@@ -5615,11 +5629,14 @@ static void __setup_per_zone_wmarks(void)
         }
 
         for_each_zone(zone) {
-                u64 tmp;
+                u64 min, low;
 
                 spin_lock_irqsave(&zone->lock, flags);
-                tmp = (u64)pages_min * zone->managed_pages;
-                do_div(tmp, lowmem_pages);
+                min = (u64)pages_min * zone->managed_pages;
+                do_div(min, lowmem_pages);
+                low = (u64)pages_low * zone->managed_pages;
+                do_div(low, vm_total_pages);
+
                 if (is_highmem(zone)) {
                         /*
                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -5640,11 +5657,13 @@ static void __setup_per_zone_wmarks(void)
                          * If it's a lowmem zone, reserve a number of pages
                          * proportionate to the zone's size.
                          */
-                        zone->watermark[WMARK_MIN] = tmp;
+                        zone->watermark[WMARK_MIN] = min;
                 }
 
-                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+                                        low + (min >> 2);
+                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+                                        low + (min >> 1);
 
                 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
                         high_wmark_pages(zone) -
@@ -5769,7 +5788,7 @@ module_init(init_per_zone_wmark_min)
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *      that we can call two helper functions whenever min_free_kbytes
- *      changes.
+ *      or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
         void __user *buffer, size_t *length, loff_t *ppos)
diff --git a/mm/shmem.c b/mm/shmem.c
index 1f18c9d0d93..9d0702c5011 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2990,6 +2990,14 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+        if (vma->vm_file)
+                fput(vma->vm_file);
+        vma->vm_file = file;
+        vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3003,10 +3011,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
         if (IS_ERR(file))
                 return PTR_ERR(file);
 
-        if (vma->vm_file)
-                fput(vma->vm_file);
-        vma->vm_file = file;
-        vma->vm_ops = &shmem_vm_ops;
+        shmem_set_file(vma, file);
         return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a9c74b40968..9f4cf9910ce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -43,6 +43,7 @@
 #include <linux/sysctl.h>
 #include <linux/oom.h>
 #include <linux/prefetch.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -174,6 +175,39 @@ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
         return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
+struct dentry *debug_file;
+
+static int debug_shrinker_show(struct seq_file *s, void *unused)
+{
+        struct shrinker *shrinker;
+        struct shrink_control sc;
+
+        sc.gfp_mask = -1;
+        sc.nr_to_scan = 0;
+
+        down_read(&shrinker_rwsem);
+        list_for_each_entry(shrinker, &shrinker_list, list) {
+                int num_objs;
+
+                num_objs = shrinker->count_objects(shrinker, &sc);
+                seq_printf(s, "%pf %d\n", shrinker->scan_objects, num_objs);
+        }
+        up_read(&shrinker_rwsem);
+        return 0;
+}
+
+static int debug_shrinker_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, debug_shrinker_show, inode->i_private);
+}
+
+static const struct file_operations debug_shrinker_fops = {
+        .open = debug_shrinker_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = single_release,
+};
+
 /*
  * Add a shrinker callback to be called from the vm.
  */
@@ -203,6 +237,15 @@ int register_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(register_shrinker);
 
+static int __init add_shrinker_debug(void)
+{
+        debugfs_create_file("shrinker", 0644, NULL, NULL,
+                            &debug_shrinker_fops);
+        return 0;
+}
+
+late_initcall(add_shrinker_debug);
+
 /*
  * Remove one
  */