author     Alex Shi <alex.shi@linaro.org>    2017-05-25 16:29:05 +0800
committer  Alex Shi <alex.shi@linaro.org>    2017-05-25 16:29:05 +0800
commit     d4a181a68ad99f0a819212f9fc09690707891325 (patch)
tree       46e3f5cb6d16675547ae9b74a097f1f3803af506 /mm
parent     a88ba91be90186debe98efcc84aa2a12b92d0fba (diff)
parent     7b9d239f0d9f9bbde2c473b642d484dab4bdca79 (diff)
Merge remote-tracking branch 'lts/linux-3.18.y' into linux-linaro-lsk-v3.18
Conflicts:
	fix feature number error in arch/arm64/include/asm/cpufeature.h
Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c      7
-rw-r--r--  mm/memory.c  13
2 files changed, 13 insertions, 7 deletions
diff --git a/mm/cma.c b/mm/cma.c
index 8e9ec13d31db..0f4549cf796f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -168,7 +168,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
return -EINVAL;
/* ensure minimal alignment requied by mm core */
- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+ alignment = PAGE_SIZE <<
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
/* alignment should be aligned with order_per_bit */
if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -250,8 +251,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
* migratetype page by page allocator's buddy algorithm. In the case,
* you couldn't get a contiguous memory, which is not what we want.
*/
- alignment = max(alignment,
- (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+ alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
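Both cma.c hunks replace max() with max_t(unsigned long, ...): the kernel's max() macro warns when its two operands have different types, and the MAX_ORDER - 1 expression (an int) does not necessarily match the type of pageblock_order on every configuration, so the patch forces both to unsigned long before the comparison and the shift. The following is a minimal stand-alone sketch of the same pattern; MAX_T is a toy stand-in for the kernel's max_t(), and the numeric values are invented for illustration, not taken from the patch.

/* sketch: force both operands to one explicit type before comparing;
 * unlike the kernel's max_t(), this toy macro evaluates its arguments twice */
#include <stdio.h>

#define MAX_T(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	int order = 10;              /* stands in for MAX_ORDER - 1 */
	unsigned long pb_order = 9;  /* stands in for pageblock_order */

	/* a mixed signed/unsigned max() is the questionable comparison;
	 * casting both sides to unsigned long makes the intent explicit */
	unsigned long alignment =
		4096UL << MAX_T(unsigned long, order, pb_order);

	printf("alignment = %lu bytes\n", alignment);
	return 0;
}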
diff --git a/mm/memory.c b/mm/memory.c
index e8e3cf7bd247..6ca26c332712 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2629,6 +2629,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap(page_table);
+ /* File mapping without ->vm_ops ? */
+ if (vma->vm_flags & VM_SHARED)
+ return VM_FAULT_SIGBUS;
+
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
return VM_FAULT_SIGSEGV;
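The new VM_SHARED check makes do_anonymous_page() refuse to service a fault on a shared mapping that has no vm_ops: before this change such a fault would silently install an anonymous page into what user space believes is a file or device mapping, so written data never reached the backing object. Below is a hedged user-space sketch of how the new behaviour would surface, assuming a hypothetical device /dev/nofault whose mmap() installs no fault handler (no such device exists on a normal system); on a kernel with this fix the first store raises SIGBUS instead of succeeding silently.

/* sketch only: /dev/nofault is a hypothetical, misbehaving driver */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_sigbus(int sig)
{
	(void)sig;
	/* with this patch, touching the mapping lands here instead of
	 * silently populating an anonymous page */
	(void)write(STDOUT_FILENO, "SIGBUS\n", 7);
	_exit(1);
}

int main(void)
{
	signal(SIGBUS, on_sigbus);

	int fd = open("/dev/nofault", O_RDWR);   /* hypothetical device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 1;   /* first touch: VM_FAULT_SIGBUS -> SIGBUS after the fix */
	puts("no fault handler, but the write went through");
	return 0;
}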
@@ -3033,6 +3037,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
+ /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+ if (!vma->vm_ops->fault)
+ return VM_FAULT_SIGBUS;
if (!(flags & FAULT_FLAG_WRITE))
return do_read_fault(mm, vma, address, pmd, pgoff, flags,
orig_pte);
@@ -3198,11 +3205,9 @@ static int handle_pte_fault(struct mm_struct *mm,
entry = ACCESS_ONCE(*pte);
if (!pte_present(entry)) {
if (pte_none(entry)) {
- if (vma->vm_ops) {
- if (likely(vma->vm_ops->fault))
- return do_linear_fault(mm, vma, address,
+ if (vma->vm_ops)
+ return do_linear_fault(mm, vma, address,
pte, pmd, flags, entry);
- }
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
}
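Taken together, the memory.c changes move the ->fault check out of handle_pte_fault() and into do_linear_fault(), while do_anonymous_page() rejects shared mappings, so the dispatch for a clean (pte_none) fault reduces to a single question: does the VMA have vm_ops? The toy model below mirrors that decision tree; the struct names and fields are invented stand-ins for illustration, not the kernel's mm structures.

/* toy model of the post-patch dispatch; not kernel code */
#include <stdbool.h>
#include <stdio.h>

struct toy_vm_ops { bool has_fault; };   /* stands in for ->fault */
struct toy_vma    { struct toy_vm_ops *vm_ops; bool vm_shared; };

enum toy_result { TOY_OK, TOY_SIGBUS };

static enum toy_result toy_linear_fault(struct toy_vma *vma)
{
	/* mirrors: if (!vma->vm_ops->fault) return VM_FAULT_SIGBUS; */
	return vma->vm_ops->has_fault ? TOY_OK : TOY_SIGBUS;
}

static enum toy_result toy_anonymous_page(struct toy_vma *vma)
{
	/* mirrors: if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; */
	return vma->vm_shared ? TOY_SIGBUS : TOY_OK;
}

static enum toy_result toy_handle_pte_fault(struct toy_vma *vma)
{
	/* after the patch there is only one vm_ops test here;
	 * the ->fault test lives in the linear-fault path */
	if (vma->vm_ops)
		return toy_linear_fault(vma);
	return toy_anonymous_page(vma);
}

int main(void)
{
	struct toy_vm_ops no_fault   = { .has_fault = false };
	struct toy_vma broken_dev    = { .vm_ops = &no_fault, .vm_shared = true };
	struct toy_vma shared_no_ops = { .vm_ops = NULL, .vm_shared = true };
	struct toy_vma plain_anon    = { .vm_ops = NULL, .vm_shared = false };

	printf("vm_ops without ->fault   -> %s\n",
	       toy_handle_pte_fault(&broken_dev) == TOY_SIGBUS ? "SIGBUS" : "ok");
	printf("VM_SHARED without vm_ops -> %s\n",
	       toy_handle_pte_fault(&shared_no_ops) == TOY_SIGBUS ? "SIGBUS" : "ok");
	printf("private anonymous        -> %s\n",
	       toy_handle_pte_fault(&plain_anon) == TOY_SIGBUS ? "SIGBUS" : "ok");
	return 0;
}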