author		Thomas Gleixner <tglx@linutronix.de>	2011-06-22 20:47:08 +0200
committer	Steven Rostedt <rostedt@rostedt.homelinux.com>	2012-08-24 22:39:13 -0400
commit		cf77b12027daab5ead43c8f049803b5615cfba11 (patch)
tree		67cf0e912114f4aa48593d94c0f4b7d463811e29 /mm
parent		ae261e02e4cfa59131bebbea1d34cd9d9bcdbed1 (diff)
mm-vmstat-fix-the-irq-lock-asymetry.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
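
The asymmetry the patch title refers to: before this change, shrink_inactive_list() switched interrupts off with local_irq_disable(), did the vmstat accounting, and then called putback_lru_pages(), which took zone->lru_lock with a plain spin_lock() and (judging by the lock conversion below) dropped it with spin_unlock_irq(), re-enabling interrupts in a different function than the one that disabled them. A condensed C sketch of the old flow follows; the *_old names, abridged signatures, and elision comments are mine, not the kernel's:

/* Old flow, sketched: irq-disable in the caller, irq-re-enable via the
 * unlock in the callee -- paired across a function boundary.
 */
static void shrink_inactive_list_old(struct zone *zone, struct scan_control *sc)
{
	/* ... page scanning and reclaim elided ... */
	local_irq_disable();			/* irqs go off here ...      */
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
}

static void putback_lru_pages_old(struct zone *zone, struct scan_control *sc,
				  unsigned long nr_anon, unsigned long nr_file,
				  struct list_head *page_list)
{
	spin_lock(&zone->lru_lock);		/* ... lock taken with irqs already off ... */
	/* ... LRU putback elided ... */
	spin_unlock_irq(&zone->lru_lock);	/* ... irqs come back on here */
}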
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 48febd7ce9be..2a29cf8b733b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1381,8 +1381,8 @@ static int too_many_isolated(struct zone *zone, int file,
  */
 static noinline_for_stack void
 putback_lru_pages(struct zone *zone, struct scan_control *sc,
-		  unsigned long nr_anon, unsigned long nr_file,
-		  struct list_head *page_list)
+		  unsigned long nr_anon, unsigned long nr_file,
+		  struct list_head *page_list, unsigned long nr_reclaimed)
 {
 	struct page *page;
 	struct pagevec pvec;
@@ -1393,7 +1393,12 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 	/*
 	 * Put back any unfreeable pages.
 	 */
-	spin_lock(&zone->lru_lock);
+	spin_lock_irq(&zone->lru_lock);
+
+	if (current_is_kswapd())
+		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
+	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+
 	while (!list_empty(page_list)) {
 		int lru;
 		page = lru_to_page(page_list);
@@ -1576,12 +1581,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 			priority, &nr_dirty, &nr_writeback);
 	}
 
-	local_irq_disable();
-	if (current_is_kswapd())
-		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
-	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
-
-	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
+	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list, nr_reclaimed);
 
 	/*
 	 * If reclaim is isolating dirty pages under writeback, it implies
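
After the patch, each half of the pairing lives in one function: putback_lru_pages() takes the lock with spin_lock_irq() and releases it with spin_unlock_irq(), and the vmstat accounting that needs interrupts off moves under that lock, which is why nr_reclaimed is now passed in. A matching sketch of the new flow, with the same caveats as above:

static void putback_lru_pages_new(struct zone *zone, struct scan_control *sc,
				  unsigned long nr_anon, unsigned long nr_file,
				  struct list_head *page_list,
				  unsigned long nr_reclaimed)
{
	spin_lock_irq(&zone->lru_lock);		/* irqs off and lock taken together */

	if (current_is_kswapd())
		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

	/* ... LRU putback elided ... */
	spin_unlock_irq(&zone->lru_lock);	/* irqs back on with the unlock */
}

This patch sits in the RT patch queue, where spinlocks become sleeping locks; taking one after a bare local_irq_disable() is then invalid, so the symmetric spin_lock_irq()/spin_unlock_irq() pairing is what keeps this path RT-safe, while the per-CPU __count_*_vm_events() updates still run with interrupts off because they now sit inside the locked section.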