author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-11-22 07:31:19 -0500
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-12-01 11:20:36 -0500
commit		c17e74e6b81b2f4a969a66efc7733e54d1fe8a14 (patch)
tree		239667bb4520d50804198692acd71f63bc953e9b /mm
parent		4e22accddef27103c4fba20b9613b7bf52129be6 (diff)
Revert "memcontrol: Prevent scheduling while atomic in cgroup code"
The commit "memcontrol: Prevent scheduling while atomic in cgroup code" fixed this issue: refill_stock() get_cpu_var() drain_stock() res_counter_uncharge() res_counter_uncharge_until() spin_lock() <== boom But commit 3e32cb2e0a12b ("mm: memcontrol: lockless page counters") replaced the calls to res_counter_uncharge() in drain_stock() to the lockless function page_counter_uncharge(). There is no more spin lock there and no more reason to have that local lock. Cc: stable-rt@vger.kernel.org Reported-by: Haiyang HY1 Tan <tanhy1@lenovo.com> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> [bigeasy: That upstream commit appeared in v3.19 and the patch in question in v3.18.7-rt2 and v3.18 seems still to be maintained. So I guess that v3.18 would need the locallocks that we are about to remove here. I am not sure if any earlier versions have the patch backported. The stable tag here is because Haiyang reported (and debugged) a crash in 4.4-RT with this patch applied (which has get_cpu_light() instead the locallocks it gained in v4.9-RT). https://lkml.kernel.org/r/05AA4EC5C6EC1D48BE2CDCFF3AE0B8A637F78A15@CNMAILEX04.lenovo.com ] Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 12b94909ba7b..c04403033aec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1698,7 +1698,6 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
 static DEFINE_MUTEX(percpu_charge_mutex);
 
 /**
@@ -1721,7 +1720,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (nr_pages > CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(memcg_stock_ll, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -1729,7 +1728,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		ret = true;
 	}
 
-	local_unlock_irqrestore(memcg_stock_ll, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -1756,13 +1755,13 @@ static void drain_local_stock(struct work_struct *dummy)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(memcg_stock_ll, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(memcg_stock_ll, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -1774,7 +1773,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(memcg_stock_ll, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -1783,7 +1782,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	}
 	stock->nr_pages += nr_pages;
 
-	local_unlock_irqrestore(memcg_stock_ll, flags);
+	local_irq_restore(flags);
 }
 
 /*
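For readers who have not met the out-of-tree locallock API this revert
drops, here is a hedged sketch of the two patterns, assuming the
primitives from include/linux/locallock.h in the PREEMPT_RT patch set;
the two functions are hypothetical examples, not kernel code:

	#include <linux/irqflags.h>
	#include <linux/locallock.h>	/* PREEMPT_RT patch set only */

	static DEFINE_LOCAL_IRQ_LOCK(example_ll);

	/* Hypothetical example: the pattern this revert removes. */
	static void with_locallock(void)
	{
		unsigned long flags;

		/*
		 * On PREEMPT_RT, local_lock_irqsave() takes a per-CPU
		 * sleeping lock and keeps the section preemptible; on
		 * !RT builds it falls back to plain local_irq_save().
		 */
		local_lock_irqsave(example_ll, flags);
		/* ... per-CPU critical section ... */
		local_unlock_irqrestore(example_ll, flags);
	}

	/* Hypothetical example: the pattern this revert restores. */
	static void with_irq_off(void)
	{
		unsigned long flags;

		/*
		 * Plain IRQ disabling is acceptable again because the
		 * section is now short and lock-free: this_cpu_ptr()
		 * plus the atomic page counters sketched above,
		 * nothing that can sleep.
		 */
		local_irq_save(flags);
		/* ... per-CPU critical section ... */
		local_irq_restore(flags);
	}

On a non-RT kernel the two patterns compile to the same thing; the
revert only changes behaviour on RT, where the lockless counters make
the sleeping local lock unnecessary overhead.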