author     Ingo Molnar <mingo@kernel.org>   2012-04-14 13:18:27 +0200
committer  Ingo Molnar <mingo@kernel.org>   2012-04-14 13:19:04 +0200
commit     6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree       021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /mm/mmap.c
parent     682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff)
parent     a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree), to prepare for tooling changes, and also to pick up v3.4 MM changes that the uprobes code needs to take care of.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  |  90
1 file changed, 58 insertions(+), 32 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 7c112fbca40..b17a39f31a5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -453,9 +453,8 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
+ * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
+ * mm's list and rbtree. It has already been inserted into the prio_tree.
*/
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
@@ -954,6 +953,19 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
#endif /* CONFIG_PROC_FS */
/*
+ * If a hint addr is less than mmap_min_addr, change the hint to be as
+ * low as possible but still greater than mmap_min_addr.
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+ hint &= PAGE_MASK;
+ if (((void *)hint != NULL) &&
+ (hint < mmap_min_addr))
+ return PAGE_ALIGN(mmap_min_addr);
+ return hint;
+}
+
+/*
* The caller must hold down_write(&current->mm->mmap_sem).
*/
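
(Editor's aside, not part of the patch: the helper added above rounds a non-NULL mapping hint up to the minimum mappable address while leaving a NULL hint alone so the kernel can pick the placement itself. Below is a minimal userspace sketch of that behaviour; the PAGE_SIZE of 4096 and the mmap_min_addr of 0x10000 are illustrative assumptions only, the real value comes from the vm.mmap_min_addr sysctl.)

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long mmap_min_addr = 0x10000;   /* illustrative value only */

static unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;                      /* drop the sub-page bits       */
        if (hint != 0 && hint < mmap_min_addr)  /* non-NULL hint below the min  */
                return PAGE_ALIGN(mmap_min_addr);
        return hint;                            /* NULL or already high enough  */
}

int main(void)
{
        printf("%#lx\n", round_hint_to_min(0x1234));    /* -> 0x10000 (bumped up)   */
        printf("%#lx\n", round_hint_to_min(0));         /* -> 0 (kernel picks addr) */
        printf("%#lx\n", round_hint_to_min(0x20123));   /* -> 0x20000 (page-masked) */
        return 0;
}
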
@@ -1117,9 +1129,9 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
* A dummy user value is used because we are not locking
* memory so no accounting is necessary
*/
- len = ALIGN(len, huge_page_size(&default_hstate));
- file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
- &user, HUGETLB_ANONHUGE_INODE);
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+ VM_NORESERVE, &user,
+ HUGETLB_ANONHUGE_INODE);
if (IS_ERR(file))
return PTR_ERR(file);
}
@@ -1253,7 +1265,7 @@ munmap_back:
*/
if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
- if (security_vm_enough_memory(charged))
+ if (security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;
vm_flags |= VM_ACCOUNT;
}
@@ -1284,8 +1296,9 @@ munmap_back:
vma->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
if (file) {
- error = -EINVAL;
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
goto free_vma;
if (vm_flags & VM_DENYWRITE) {
@@ -1311,6 +1324,8 @@ munmap_back:
pgoff = vma->vm_pgoff;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+ goto free_vma;
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
@@ -1446,10 +1461,8 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
/*
* Is this a new hole at the lowest possible address?
*/
- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+ if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
mm->free_area_cache = addr;
- mm->cached_hole_size = ~0UL;
- }
}
/*
@@ -1464,7 +1477,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
+ unsigned long addr = addr0, start_addr;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
@@ -1488,22 +1501,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
mm->free_area_cache = mm->mmap_base;
}
+try_again:
/* either no address requested or can't fit in requested address hole */
- addr = mm->free_area_cache;
-
- /* make sure it can fit in the remaining address space */
- if (addr > len) {
- vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start)
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr-len);
- }
+ start_addr = addr = mm->free_area_cache;
- if (mm->mmap_base < len)
- goto bottomup;
-
- addr = mm->mmap_base-len;
+ if (addr < len)
+ goto fail;
+ addr -= len;
do {
/*
* Lookup failure means no vma is above this address,
@@ -1523,7 +1528,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = vma->vm_start-len;
} while (len < vma->vm_start);
-bottomup:
+fail:
+ /*
+ * If the hint left us with no space for the requested
+ * mapping, then try again:
+ *
+ * Note: this differs from the bottom-up case, which does a
+ * full linear search; here we use find_vma(), which may
+ * skip over some holes.
+ */
+ if (start_addr != mm->mmap_base) {
+ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto try_again;
+ }
+
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
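
(Editor's aside, not part of the patch: a tiny userspace model of the retry-from-the-top logic added in this hunk. The address space is a small array of pages; the names mmap_base and free_area_cache are borrowed only for readability, and the model uses a plain linear scan rather than the find_vma()-based walk, which is the very difference the comment above calls out.)

#include <stdio.h>
#include <stdbool.h>

#define NPAGES 16

static bool used[NPAGES];                       /* true = page already mapped   */
static unsigned mmap_base = NPAGES;             /* top of the search window     */
static unsigned free_area_cache = NPAGES;       /* cached hint for next search  */

/* scan downward from 'limit' for 'len' consecutive free pages */
static int hole_below(unsigned limit, unsigned len)
{
        for (int s = (int)limit - (int)len; s >= 0; s--) {
                bool ok = true;

                for (unsigned i = 0; i < len; i++)
                        if (used[s + i])
                                ok = false;
                if (ok)
                        return s;
        }
        return -1;
}

static int get_area_topdown(unsigned len)
{
        unsigned start = free_area_cache;
        int addr = hole_below(start, len);

        /* the cached hint left no room: reset to the top and try once more */
        if (addr < 0 && start != mmap_base) {
                free_area_cache = mmap_base;    /* forget the stale hint  */
                addr = hole_below(mmap_base, len);
        }
        if (addr >= 0)
                free_area_cache = addr;         /* remember for next time */
        return addr;                            /* -1 means fall back     */
}

int main(void)
{
        used[6] = used[7] = true;               /* one mapping in the middle       */
        free_area_cache = 3;                    /* stale, too-low hint             */
        printf("%d\n", get_area_topdown(4));    /* no room below 3; retry finds 12 */
        return 0;
}
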
@@ -1628,7 +1647,6 @@ EXPORT_SYMBOL(find_vma);
/*
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1637,7 +1655,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct *vma;
vma = find_vma(mm, addr);
- *pprev = vma ? vma->vm_prev : NULL;
+ if (vma) {
+ *pprev = vma->vm_prev;
+ } else {
+ struct rb_node *rb_node = mm->mm_rb.rb_node;
+ *pprev = NULL;
+ while (rb_node) {
+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ rb_node = rb_node->rb_right;
+ }
+ }
return vma;
}
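
(Editor's aside, not part of the patch: when find_vma() returns NULL, the new else branch above walks to the rightmost node of the mm's VMA rbtree, so *pprev ends up at the last, highest-addressed VMA instead of NULL. A toy, self-contained illustration of that rightmost-node walk on an ordinary binary search tree:)

#include <stdio.h>

struct node {
        int key;
        struct node *left, *right;
};

/* descend right as far as possible, remembering the last node visited */
static struct node *rightmost(struct node *root)
{
        struct node *last = NULL;

        while (root) {
                last = root;            /* candidate for "previous" entry */
                root = root->right;     /* larger keys live to the right  */
        }
        return last;                    /* NULL only for an empty tree    */
}

int main(void)
{
        struct node c = { 30, NULL, NULL };
        struct node a = { 10, NULL, NULL };
        struct node b = { 20, &a, &c };         /* root of a 3-node tree */

        printf("%d\n", rightmost(&b)->key);     /* prints 30 */
        return 0;
}
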
@@ -2192,7 +2219,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
- if (security_vm_enough_memory(len >> PAGE_SHIFT))
+ if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
@@ -2236,7 +2263,6 @@ void exit_mmap(struct mm_struct *mm)
struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
- unsigned long end;
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
@@ -2261,11 +2287,11 @@ void exit_mmap(struct mm_struct *mm)
tlb_gather_mmu(&tlb, mm, 1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
- end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+ unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
- tlb_finish_mmu(&tlb, 0, end);
+ tlb_finish_mmu(&tlb, 0, -1);
/*
* Walk the list again, actually closing and freeing it,