author	Linus Torvalds <torvalds@linux-foundation.org>	2013-10-16 21:36:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-10-16 21:36:03 -0700
commit	056cdce0d3a214158f3a4ea40887b22639f855a8 (patch)
tree	8ced4ccf6c7bac7eef49710c3cdfb0745cc85102 /fs
parent	0056019da4b7ee5ab51fb174fe0655278578516f (diff)
parent	57a8f0cdb87da776bf0e4ce7554a9133854fa779 (diff)
Merge branch 'akpm' (fixes from Andrew Morton)
Merge misc fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm: revert mremap pud_free anti-fix
  mm: fix BUG in __split_huge_page_pmd
  swap: fix set_blocksize race during swapon/swapoff
  procfs: call default get_unmapped_area on MMU-present architectures
  procfs: fix unintended truncation of returned mapped address
  writeback: fix negative bdi max pause
  percpu_refcount: export symbols
  fs: buffer: move allocation failure loop into the allocator
  mm: memcg: handle non-error OOM situations more gracefully
  tools/testing/selftests: fix uninitialized variable
  block/partitions/efi.c: treat size mismatch as a warning, not an error
  mm: hugetlb: initialize PG_reserved for tail pages of gigantic compound pages
  mm/zswap: bugfix: memory leak when re-swapon
  mm: /proc/pid/pagemap: inspect _PAGE_SOFT_DIRTY only on present pages
  mm: migration: do not lose soft dirty bit if page is in migration state
  gcov: MAINTAINERS: Add an entry for gcov
  mm/hugetlb.c: correct missing private flag clearing
  mm/vmscan.c: don't forget to free shrinker->nr_deferred
  ipc/sem.c: synchronize semop and semctl with IPC_RMID
  ipc: update locking scheme comments
  ...
Diffstat (limited to 'fs')
-rw-r--r--	fs/buffer.c	14
-rw-r--r--	fs/proc/inode.c	10
-rw-r--r--	fs/proc/task_mmu.c	4
3 files changed, 22 insertions, 6 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 4d7433534f5..6024877335c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1005,9 +1005,19 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	struct buffer_head *bh;
 	sector_t end_block;
 	int ret = 0;		/* Will call free_more_memory() */
+	gfp_t gfp_mask;
 
-	page = find_or_create_page(inode->i_mapping, index,
-		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+	gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
+	gfp_mask |= __GFP_MOVABLE;
+	/*
+	 * XXX: __getblk_slow() can not really deal with failure and
+	 * will endlessly loop on improvised global reclaim. Prefer
+	 * looping in the allocator rather than here, at least that
+	 * code knows what it's doing.
+	 */
+	gfp_mask |= __GFP_NOFAIL;
+
+	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
 	if (!page)
 		return ret;
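
The buffer.c hunk moves the retry-on-allocation-failure policy out of __getblk_slow() and into the page allocator via __GFP_NOFAIL. A self-contained userspace analogue of that design choice, with a hypothetical alloc_nofail() helper standing in for the allocator (illustrative only, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Hypothetical helper (not a kernel API): the allocator owns the retry
 * policy, so callers never need an error path -- the same idea as
 * passing __GFP_NOFAIL to the page allocator above.
 */
static void *alloc_nofail(size_t size)
{
	struct timespec backoff = { 0, 1000000 };	/* 1 ms between attempts */
	void *p;

	while ((p = malloc(size)) == NULL)
		nanosleep(&backoff, NULL);		/* "reclaim" stand-in */
	return p;
}

int main(void)
{
	char *buf = alloc_nofail(4096);	/* no NULL check needed by design */

	snprintf(buf, 4096, "allocation retried inside the allocator\n");
	fputs(buf, stdout);
	free(buf);
	return 0;
}

The point of the pattern is that only one place (the allocator) implements the retry loop, instead of every caller improvising its own "free some memory and try again" logic.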
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 9f8ef9b7674..8eaa1ba793f 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -288,10 +288,14 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct proc_dir_entry *pde = PDE(file_inode(file));
-	int rv = -EIO;
-	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+	unsigned long rv = -EIO;
+	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL;
 	if (use_pde(pde)) {
-		get_unmapped_area = pde->proc_fops->get_unmapped_area;
+#ifdef CONFIG_MMU
+		get_unmapped_area = current->mm->get_unmapped_area;
+#endif
+		if (pde->proc_fops->get_unmapped_area)
+			get_unmapped_area = pde->proc_fops->get_unmapped_area;
 		if (get_unmapped_area)
 			rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
 		unuse_pde(pde);
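
Two of the merged fixes land in this hunk: proc_reg_get_unmapped_area() now falls back to the task's default current->mm->get_unmapped_area on MMU kernels when a /proc file supplies no handler of its own, and rv is widened from int to unsigned long so a 64-bit mapped address is returned intact. A small compilable illustration of the truncation the second fix avoids (the address value below is made up):

#include <stdio.h>

/*
 * Illustrates the bug fixed by 'int rv' -> 'unsigned long rv': on 64-bit,
 * a mapped address above 4GB does not survive a round trip through an int.
 */
int main(void)
{
	unsigned long addr = 0x7f1234567000UL;	/* typical 64-bit mmap address */
	int truncated = (int)addr;		/* what the old 'int rv' did */

	printf("full address:   %#lx\n", addr);
	printf("through an int: %#lx\n", (unsigned long)truncated);
	return 0;
}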
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7366e9d63ce..390bdab01c3 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -941,6 +941,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 		frame = pte_pfn(pte);
 		flags = PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
+		if (pte_soft_dirty(pte))
+			flags2 |= __PM_SOFT_DIRTY;
 	} else if (is_swap_pte(pte)) {
 		swp_entry_t entry;
 		if (pte_swp_soft_dirty(pte))
@@ -960,7 +962,7 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 
 	if (page && !PageAnon(page))
 		flags |= PM_FILE;
-	if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
+	if ((vma->vm_flags & VM_SOFTDIRTY))
 		flags2 |= __PM_SOFT_DIRTY;
 	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
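
With these task_mmu.c hunks, the per-pte soft-dirty bit is consulted only when the pte is actually present, while the VMA-wide VM_SOFTDIRTY flag still sets the bit unconditionally; userspace continues to see the result as bit 55 of each /proc/pid/pagemap entry. A minimal reader that consumes those bits the same way, checking soft-dirty only when the present bit (63) is set (error handling kept minimal):

#define _XOPEN_SOURCE 700
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_ENTRY_PRESENT	(1ULL << 63)	/* page present in RAM */
#define PM_ENTRY_SOFT_DIRTY	(1ULL << 55)	/* pte is soft-dirty */

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	unsigned long vaddr = (unsigned long)&pagesize;	/* probe our own stack page */
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Each virtual page has one 64-bit pagemap entry. */
	if (pread(fd, &entry, sizeof(entry),
		  (vaddr / pagesize) * sizeof(entry)) != sizeof(entry)) {
		close(fd);
		return 1;
	}
	/* Mirror the kernel fix: only trust soft-dirty on present entries. */
	printf("present: %d, soft-dirty: %d\n",
	       !!(entry & PM_ENTRY_PRESENT),
	       (entry & PM_ENTRY_PRESENT) ? !!(entry & PM_ENTRY_SOFT_DIRTY) : 0);
	close(fd);
	return 0;
}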