 include/linux/memcontrol.h |  5 +++++
 mm/ksm.c                   | 11 +++++++++++
 mm/memcontrol.c            | 17 +++++++++++++++++
 mm/migrate.c               |  2 ++
 mm/swap_state.c            | 10 ++++++++++
 5 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b80de520670..4d34356fe64 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -129,6 +129,7 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                           struct page *newpage);
+extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -391,6 +392,10 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                                  struct page *newpage)
 {
 }
+
+static inline void mem_cgroup_reset_owner(struct page *page)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/mm/ksm.c b/mm/ksm.c
index 310544a379a..1925ffbfb27 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,6 +28,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1571,6 +1572,16 @@ struct page *ksm_does_need_to_copy(struct page *page,
         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
         if (new_page) {
+                /*
+                 * The memcg-specific accounting when moving
+                 * pages around the LRU lists relies on the
+                 * page's owner (memcg) to be valid. Usually,
+                 * pages are assigned to a new owner before
+                 * being put on the LRU list, but since this
+                 * is not the case here, the stale owner from
+                 * a previous allocation cycle must be reset.
+                 */
+                mem_cgroup_reset_owner(new_page);
                 copy_user_highpage(new_page, page, address, vma);
                 SetPageDirty(new_page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d58bb5fa440..c74102d6eb5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3050,6 +3050,23 @@ void mem_cgroup_uncharge_end(void)
         batch->memcg = NULL;
 }
 
+/*
+ * A function for resetting pc->mem_cgroup for newly allocated pages.
+ * This function should be called if the newpage will be added to the LRU
+ * before accounting starts.
+ */
+void mem_cgroup_reset_owner(struct page *newpage)
+{
+        struct page_cgroup *pc;
+
+        if (mem_cgroup_disabled())
+                return;
+
+        pc = lookup_page_cgroup(newpage);
+        VM_BUG_ON(PageCgroupUsed(pc));
+        pc->mem_cgroup = root_mem_cgroup;
+}
+
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
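
Not part of the patch: a minimal caller sketch of the new helper, for orientation
only. The function name example_alloc_lru_page() is hypothetical; it simply shows
the ordering every caller in this patch follows, namely resetting the stale memcg
owner right after allocation, before the page can reach an LRU list and be charged.
(Assumes <linux/gfp.h> and <linux/memcontrol.h> are included.)

/* Hypothetical caller, for illustration only; not part of this patch. */
static struct page *example_alloc_lru_page(gfp_t gfp_mask)
{
        struct page *page = alloc_page(gfp_mask);

        if (!page)
                return NULL;
        /*
         * pc->mem_cgroup may still point at the memcg from a previous
         * allocation cycle; reset it to root_mem_cgroup before the page
         * is added to an LRU list and properly charged.
         */
        mem_cgroup_reset_owner(page);
        return page;
}
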
diff --git a/mm/migrate.c b/mm/migrate.c
index 89ea0854332..fc391985899 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -777,6 +777,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         if (!newpage)
                 return -ENOMEM;
 
+        mem_cgroup_reset_owner(newpage);
+
         if (page_count(page) == 1) {
                 /* page was freed from under us. So we are done. */
                 goto out;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ea6b32d6187..470038a9187 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,6 +300,16 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                         new_page = alloc_page_vma(gfp_mask, vma, addr);
                         if (!new_page)
                                 break;          /* Out of memory */
+                        /*
+                         * The memcg-specific accounting when moving
+                         * pages around the LRU lists relies on the
+                         * page's owner (memcg) to be valid. Usually,
+                         * pages are assigned to a new owner before
+                         * being put on the LRU list, but since this
+                         * is not the case here, the stale owner from
+                         * a previous allocation cycle must be reset.
+                         */
+                        mem_cgroup_reset_owner(new_page);
                 }
 
                 /*