aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNaresh Kamboju <naresh.kamboju@linaro.org>2020-05-22 15:30:36 +0530
committerNaresh Kamboju <naresh.kamboju@linaro.org>2020-05-22 15:30:36 +0530
commitd127624a7afb7338f8130c6f8b994a8a7e62a45b (patch)
tree1937a14151a30948baa62bde3e1b0f951196d3aa
parente8f3274774b45b5f4e9e3d5cad7ff9f43ae3add5 (diff)
Revert "mm, memcg: decouple e{low,min} state mutations from protection checks"revert-1b40dd
This reverts commit 1b40dd68e341c56a85b09e4de40733da9154e3fe.
-rw-r--r--include/linux/memcontrol.h43
-rw-r--r--mm/memcontrol.c28
-rw-r--r--mm/vmscan.c17
3 files changed, 45 insertions, 43 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0bcef180672..47c9d4e0a402 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -47,6 +47,12 @@ enum memcg_memory_event {
MEMCG_NR_MEMORY_EVENTS,
};
+enum mem_cgroup_protection {
+ MEMCG_PROT_NONE,
+ MEMCG_PROT_LOW,
+ MEMCG_PROT_MIN,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
unsigned int generation;
@@ -385,26 +391,8 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
READ_ONCE(memcg->memory.elow));
}
-void mem_cgroup_calculate_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg);
-
-static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
-{
- if (mem_cgroup_disabled())
- return false;
-
- return READ_ONCE(memcg->memory.elow) >=
- page_counter_read(&memcg->memory);
-}
-
-static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
-{
- if (mem_cgroup_disabled())
- return false;
-
- return READ_ONCE(memcg->memory.emin) >=
- page_counter_read(&memcg->memory);
-}
+enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
@@ -883,19 +871,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
return 0;
}
-static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
-{
-}
-
-static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+static inline enum mem_cgroup_protection mem_cgroup_protected(
+ struct mem_cgroup *root, struct mem_cgroup *memcg)
{
- return false;
-}
-
-static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
-{
- return false;
+ return MEMCG_PROT_NONE;
}
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2df9510b7d64..b681feccce49 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6333,15 +6333,21 @@ static unsigned long effective_protection(unsigned long usage,
*
* WARNING: This function is not stateless! It can only be used as part
* of a top-down tree iteration, not for isolated queries.
+ *
+ * Returns one of the following:
+ * MEMCG_PROT_NONE: cgroup memory is not protected
+ * MEMCG_PROT_LOW: cgroup memory is protected as long as there is
+ * an unprotected supply of reclaimable memory from other cgroups.
+ * MEMCG_PROT_MIN: cgroup memory is protected
*/
-void mem_cgroup_calculate_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
{
unsigned long usage, parent_usage;
struct mem_cgroup *parent;
if (mem_cgroup_disabled())
- return;
+ return MEMCG_PROT_NONE;
if (!root)
root = root_mem_cgroup;
@@ -6354,21 +6360,21 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
* that special casing.
*/
if (memcg == root)
- return;
+ return MEMCG_PROT_NONE;
usage = page_counter_read(&memcg->memory);
if (!usage)
- return;
+ return MEMCG_PROT_NONE;
parent = parent_mem_cgroup(memcg);
/* No parent means a non-hierarchical mode on v1 memcg */
if (!parent)
- return;
+ return MEMCG_PROT_NONE;
if (parent == root) {
memcg->memory.emin = READ_ONCE(memcg->memory.min);
memcg->memory.elow = memcg->memory.low;
- return;
+ goto out;
}
parent_usage = page_counter_read(&parent->memory);
@@ -6381,6 +6387,14 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
memcg->memory.low, READ_ONCE(parent->memory.elow),
atomic_long_read(&parent->memory.children_low_usage)));
+
+out:
+ if (usage <= memcg->memory.emin)
+ return MEMCG_PROT_MIN;
+ else if (usage <= memcg->memory.elow)
+ return MEMCG_PROT_LOW;
+ else
+ return MEMCG_PROT_NONE;
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d96e18726751..4dbfa8dcb8dc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2645,15 +2645,14 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
- mem_cgroup_calculate_protection(target_memcg, memcg);
-
- if (mem_cgroup_below_min(memcg)) {
+ switch (mem_cgroup_protected(target_memcg, memcg)) {
+ case MEMCG_PROT_MIN:
/*
* Hard protection.
* If there is no reclaimable memory, OOM.
*/
continue;
- } else if (mem_cgroup_below_low(memcg)) {
+ case MEMCG_PROT_LOW:
/*
* Soft protection.
* Respect the protection only as long as
@@ -2665,6 +2664,16 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
continue;
}
memcg_memory_event(memcg, MEMCG_LOW);
+ break;
+ case MEMCG_PROT_NONE:
+ /*
+ * All protection thresholds breached. We may
+ * still choose to vary the scan pressure
+ * applied based on how much the cgroup in
+ * question has exceeded its protection
+ * thresholds (see get_scan_count).
+ */
+ break;
}
reclaimed = sc->nr_reclaimed;