author		Mel Gorman <mgorman@suse.de>	2012-11-02 11:33:45 +0000
committer	Mel Gorman <mgorman@suse.de>	2012-12-11 14:42:42 +0000
commit		4daae3b4b9e49b7e0935499a352f1c59d90287d2 (patch)
tree		2ac600b955c89e3b1b2070110a9b7293a4511b19 /mm/huge_memory.c
parent		149c33e1c98f83050870514f380902dc6d617bd5 (diff)
mm: mempolicy: Use _PAGE_NUMA to migrate pages
Note: Based on "mm/mpol: Use special PROT_NONE to migrate pages" but
      sufficiently different that the signed-off-bys were dropped

Combine our previous _PAGE_NUMA, mpol_misplaced and
migrate_misplaced_page() pieces into an effective migrate on fault
scheme.

Note that (on x86) we rely on PROT_NONE pages being !present and avoid
the TLB flush from try_to_unmap(TTU_MIGRATION). This greatly improves
the page-migration performance.

Based-on-work-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mel Gorman <mgorman@suse.de>
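As a rough illustration of the scheme, the fault-side flow described
above amounts to something like the sketch below. mpol_misplaced() and
migrate_misplaced_page() are the helpers this series introduces; the
wrapper function and its exact signature here are hypothetical, shown
only to make the control flow concrete.

	/*
	 * Illustrative sketch only: numa_migrate_on_fault() is a
	 * hypothetical wrapper, not upstream code.
	 */
	#include <linux/mm.h>
	#include <linux/mempolicy.h>
	#include <linux/migrate.h>

	static int numa_migrate_on_fault(struct vm_area_struct *vma,
					 unsigned long addr, struct page *page)
	{
		int target_nid;

		/* Ask the memory policy whether the page is on the wrong node. */
		target_nid = mpol_misplaced(page, vma, addr);
		if (target_nid == -1)
			return 0;		/* already on the right node */

		/*
		 * Because _PAGE_NUMA entries are !present (PROT_NONE-like on
		 * x86), migration avoids the TLB flush that try_to_unmap
		 * (TTU_MIGRATION) would need for a mapped page.
		 */
		return migrate_misplaced_page(page, target_nid);
	}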
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c | 31
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f5f37630c54..5723b551c02 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -18,6 +18,7 @@
 #include <linux/freezer.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/migrate.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -1019,17 +1020,39 @@ out:
 }
 
 /* NUMA hinting page fault entry point for trans huge pmds */
-int do_huge_pmd_numa_page(struct mm_struct *mm, unsigned long addr,
-				pmd_t pmd, pmd_t *pmdp)
+int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
 {
-	struct page *page;
+	struct page *page = NULL;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
+	int target_nid;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
 	page = pmd_page(pmd);
+	get_page(page);
+	spin_unlock(&mm->page_table_lock);
+
+	target_nid = mpol_misplaced(page, vma, haddr);
+	if (target_nid == -1)
+		goto clear_pmdnuma;
+
+	/*
+	 * Due to lacking code to migrate thp pages, we'll split
+	 * (which preserves the special PROT_NONE) and re-take the
+	 * fault on the normal pages.
+	 */
+	split_huge_page(page);
+	put_page(page);
+	return 0;
+
+clear_pmdnuma:
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(pmd, *pmdp)))
+		goto out_unlock;
+
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
 	VM_BUG_ON(pmd_numa(*pmdp));
@@ -1037,6 +1060,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, unsigned long addr,
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
+	if (page)
+		put_page(page);
 	return 0;
 }
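Worth noting in the hunk above is the locking pattern: mpol_misplaced()
and split_huge_page() can block, so the fault handler pins the page
with get_page(), drops page_table_lock, does the work, then retakes the
lock and re-checks pmd_same() before touching the pmd, since a parallel
fault or unmap may have changed it in the meantime. Condensed, with
do_blocking_work() as a hypothetical stand-in for the real calls:

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(pmd, *pmdp)))
		goto out_unlock;	/* pmd changed before we pinned */
	page = pmd_page(pmd);
	get_page(page);			/* pin the page across the unlock */
	spin_unlock(&mm->page_table_lock);

	do_blocking_work(page);		/* may sleep */

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(pmd, *pmdp)))
		goto out_unlock;	/* raced with a parallel update */
	/* ...safe to modify *pmdp here... */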