path: root/mm
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c    4
-rw-r--r--  mm/page_alloc.c       19
-rw-r--r--  mm/page_owner.c        4
-rw-r--r--  mm/slub.c              4
-rw-r--r--  mm/vmscan.c           17
5 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 8dccc317aac2..d620d0427b6b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -689,6 +689,10 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
if (PageReserved(pfn_to_page(start_pfn)))
for (i = 0; i < nr_pages; i++) {
page = pfn_to_page(start_pfn + i);
+ if (PageHWPoison(page)) {
+ ClearPageReserved(page);
+ continue;
+ }
(*online_page_callback)(page);
onlined_pages++;
}
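
The hunk above skips hardware-poisoned pages while onlining a hot-plugged range: a PageHWPoison page gets its reserved bit cleared but is never handed to the online callback, so it stays off the free lists. Below is a minimal userspace sketch of that control flow; the flag bits and the toy struct page are illustrative stand-ins for the kernel's types, not kernel code.

/* Userspace model of the online loop above; names are stand-ins. */
#include <stdio.h>

#define PG_RESERVED	(1u << 0)
#define PG_HWPOISON	(1u << 1)

struct page { unsigned int flags; };

static unsigned long online_range(struct page *pages, unsigned long nr)
{
	unsigned long i, onlined = 0;

	for (i = 0; i < nr; i++) {
		struct page *page = &pages[i];

		if (page->flags & PG_HWPOISON) {
			/* Poisoned page: drop the reserved bit but never
			 * hand it to the allocator. */
			page->flags &= ~PG_RESERVED;
			continue;
		}
		page->flags &= ~PG_RESERVED;	/* "online" the page */
		onlined++;
	}
	return onlined;
}

int main(void)
{
	struct page pages[4] = {
		{ PG_RESERVED }, { PG_RESERVED | PG_HWPOISON },
		{ PG_RESERVED }, { PG_RESERVED },
	};

	printf("onlined %lu of 4 pages\n", online_range(pages, 4));
	return 0;
}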
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d30e914afb6..80e4adb4c360 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5213,14 +5213,18 @@ static int __build_all_zonelists(void *data)
memset(node_load, 0, sizeof(node_load));
#endif
+ /*
+ * This node is hotadded and no memory is yet present. So just
+ * building zonelists is fine - no need to touch other nodes.
+ */
if (self && !node_online(self->node_id)) {
build_zonelists(self);
- }
-
- for_each_online_node(nid) {
- pg_data_t *pgdat = NODE_DATA(nid);
+ } else {
+ for_each_online_node(nid) {
+ pg_data_t *pgdat = NODE_DATA(nid);
- build_zonelists(pgdat);
+ build_zonelists(pgdat);
+ }
}
/*
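
With this change the two cases become mutually exclusive: a node that is being hot-added and is not yet online only builds its own zonelists, and otherwise every online node is rebuilt. A sketch of the resulting control flow in plain C follows; the online[] array and the stub build_zonelists() stand in for node_online()/NODE_DATA() and are assumptions, not the kernel primitives.

/* Userspace model of the branch structure after the patch. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4

static bool online[MAX_NODES] = { true, true, false, false };

static void build_zonelists(int nid)
{
	printf("building zonelists for node %d\n", nid);
}

static void build_all_zonelists(int self)
{
	/*
	 * Hot-added node with no memory online yet: only its own
	 * zonelists need building - no need to touch other nodes.
	 */
	if (self >= 0 && !online[self]) {
		build_zonelists(self);
	} else {
		for (int nid = 0; nid < MAX_NODES; nid++)
			if (online[nid])
				build_zonelists(nid);
	}
}

int main(void)
{
	build_all_zonelists(2);		/* hot-added, still-offline node */
	build_all_zonelists(-1);	/* regular rebuild of all nodes  */
	return 0;
}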
@@ -5862,6 +5866,11 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
&zone_start_pfn, &zone_end_pfn);
+
+ /* If this node has no page within this zone, return 0. */
+ if (zone_start_pfn == zone_end_pfn)
+ return 0;
+
nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
/*
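
The added guard returns 0 as soon as the ZONE_MOVABLE adjustment leaves an empty intersection (zone_start_pfn == zone_end_pfn), instead of asking __absent_pages_in_range() about a range containing no pages of this node. A toy model of the guard, with a made-up stub standing in for __absent_pages_in_range():

#include <stdio.h>

/* Stub: pretend half of any non-empty range is a hole (illustrative). */
static unsigned long absent_in_range(unsigned long start, unsigned long end)
{
	return (end - start) / 2;
}

static unsigned long zone_absent_pages(unsigned long zstart, unsigned long zend)
{
	/* If this node has no page within this zone, return 0. */
	if (zstart == zend)
		return 0;
	return absent_in_range(zstart, zend);
}

int main(void)
{
	printf("%lu\n", zone_absent_pages(100, 100));	/* prints 0  */
	printf("%lu\n", zone_absent_pages(100, 200));	/* prints 50 */
	return 0;
}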
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0fd9dcf2c5dc..401feb070335 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -261,7 +261,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
*/
for (; pfn < end_pfn; ) {
if (!pfn_valid(pfn)) {
- pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+ pfn = ALIGN(pfn + 1, pageblock_nr_pages);
continue;
}
@@ -531,7 +531,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
*/
for (; pfn < end_pfn; ) {
if (!pfn_valid(pfn)) {
- pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+ pfn = ALIGN(pfn + 1, pageblock_nr_pages);
continue;
}
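
Both hunks shrink the stride used to skip past an invalid pfn from MAX_ORDER_NR_PAGES to pageblock_nr_pages; since a pageblock is at most a MAX_ORDER block, the old stride could jump over valid pageblocks. A small sketch of the arithmetic, assuming the common x86-64 values (MAX_ORDER_NR_PAGES = 1024 with MAX_ORDER = 11, pageblock_nr_pages = 512 with pageblock_order = 9); the ALIGN() macro matches the kernel's definition, the uppercase constants are stand-ins:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define MAX_ORDER_NR_PAGES	1024UL	/* assumed, arch-dependent */
#define PAGEBLOCK_NR_PAGES	512UL	/* assumed, config-dependent */

int main(void)
{
	unsigned long pfn = 1;	/* invalid pfn early in a MAX_ORDER block */

	/* old stride skips to 1024, hopping over the pageblock at 512 */
	printf("old stride -> pfn %lu\n", ALIGN(pfn + 1, MAX_ORDER_NR_PAGES));
	printf("new stride -> pfn %lu\n", ALIGN(pfn + 1, PAGEBLOCK_NR_PAGES));
	return 0;
}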
diff --git a/mm/slub.c b/mm/slub.c
index 1d3f9835f4ea..5ac0b5ad029b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3358,8 +3358,8 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
struct kmem_cache_node *n;
for_each_kmem_cache_node(s, node, n) {
- kmem_cache_free(kmem_cache_node, n);
s->node[node] = NULL;
+ kmem_cache_free(kmem_cache_node, n);
}
}
@@ -3389,8 +3389,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
return 0;
}
- s->node[node] = n;
init_kmem_cache_node(n);
+ s->node[node] = n;
}
return 1;
}
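
Both slub hunks are ordering fixes around the s->node[] slot: on teardown the slot is cleared before the kmem_cache_node is freed, and on init the node is fully initialized before its pointer is published, so a concurrent walker of s->node[] never sees a freed or half-initialized node. A userspace sketch of that publish/retire discipline follows (toy types, no real synchronization shown; only the store order matters here):

#include <stdlib.h>
#include <string.h>

struct node_info { int counters[4]; };

struct cache { struct node_info *node[2]; };

static void init_node(struct node_info *n)
{
	memset(n->counters, 0, sizeof(n->counters));
}

static int init_nodes(struct cache *s)
{
	for (int i = 0; i < 2; i++) {
		struct node_info *n = malloc(sizeof(*n));

		if (!n)
			return 0;
		init_node(n);		/* initialize first ...         */
		s->node[i] = n;		/* ... then publish the pointer */
	}
	return 1;
}

static void free_nodes(struct cache *s)
{
	for (int i = 0; i < 2; i++) {
		struct node_info *n = s->node[i];

		s->node[i] = NULL;	/* retire the pointer first ...  */
		free(n);		/* ... then release the object   */
	}
}

int main(void)
{
	struct cache s = { { NULL, NULL } };

	if (init_nodes(&s))
		free_nodes(&s);
	return 0;
}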
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1af041930a6..efc9da21c5e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2632,16 +2632,23 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
sc->nr_scanned - nr_scanned,
node_lru_pages);
+ /*
+ * Record the subtree's reclaim efficiency. Pages reclaimed
+ * from slab are excluded here because the corresponding
+ * scanned pages are not accounted. Moreover, freeing a page
+ * by slab shrinking depends on each slab's object population,
+ * making the cost model (i.e. scan:free) different from that
+ * of LRU.
+ */
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
+
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
}
- /* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
-
if (sc->nr_reclaimed - nr_reclaimed)
reclaimable = true;
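
Moving the vmpressure() call ahead of the slab accounting means the scanned:reclaimed ratio it reports covers LRU pages only; once reclaimed_slab is folded into sc->nr_reclaimed, slab pages would inflate the numerator while their scan cost never enters the denominator. A toy calculation with made-up numbers showing the distortion the reorder avoids:

#include <stdio.h>

int main(void)
{
	unsigned long scanned = 100, lru_reclaimed = 10, slab_reclaimed = 50;

	/* after the patch: LRU-only reclaim efficiency */
	printf("lru ratio: %lu%%\n", 100 * lru_reclaimed / scanned);

	/* before the patch: slab pages counted as reclaimed, but their
	 * scan cost is absent from 'scanned' */
	printf("old ratio: %lu%%\n",
	       100 * (lru_reclaimed + slab_reclaimed) / scanned);
	return 0;
}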