author    Thomas Gleixner <tglx@linutronix.de>  2010-02-21 20:17:22 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2010-02-21 20:17:22 +0100
commit    5f854cfc024622e4aae14d7cf422f6ff86278688 (patch)
tree      426e77c6f6e4939c80440bf1fabcb020e3ee145b /mm
parent    cc24da0742870f152ddf1002aa39dfcd83f7cf9c (diff)
parent    4ec62b2b2e6bd7ddef7b6cea6e5db7b5578a6532 (diff)
Forward to 2.6.33-rc8

Merge branch 'linus' into rt/head with a pile of conflicts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            57
-rw-r--r--  mm/Kconfig.debug      12
-rw-r--r--  mm/Makefile           16
-rw-r--r--  mm/allocpercpu.c      149
-rw-r--r--  mm/backing-dev.c      448
-rw-r--r--  mm/bootmem.c          38
-rw-r--r--  mm/filemap.c          327
-rw-r--r--  mm/filemap_xip.c      2
-rw-r--r--  mm/highmem.c          17
-rw-r--r--  mm/hugetlb.c          744
-rw-r--r--  mm/hwpoison-inject.c  138
-rw-r--r--  mm/internal.h         45
-rw-r--r--  mm/kmemleak-test.c    6
-rw-r--r--  mm/kmemleak.c         521
-rw-r--r--  mm/ksm.c              1979
-rw-r--r--  mm/maccess.c          11
-rw-r--r--  mm/madvise.c          90
-rw-r--r--  mm/memcontrol.c       1124
-rw-r--r--  mm/memory-failure.c   1294
-rw-r--r--  mm/memory.c           344
-rw-r--r--  mm/memory_hotplug.c   53
-rw-r--r--  mm/mempolicy.c        82
-rw-r--r--  mm/mempool.c          7
-rw-r--r--  mm/migrate.c          148
-rw-r--r--  mm/mincore.c          37
-rw-r--r--  mm/mlock.c            173
-rw-r--r--  mm/mmap.c             159
-rw-r--r--  mm/mmu_context.c      60
-rw-r--r--  mm/mmu_notifier.c     20
-rw-r--r--  mm/mprotect.c         4
-rw-r--r--  mm/mremap.c           259
-rw-r--r--  mm/nommu.c            288
-rw-r--r--  mm/oom_kill.c         183
-rw-r--r--  mm/page-writeback.c   257
-rw-r--r--  mm/page_alloc.c       508
-rw-r--r--  mm/page_cgroup.c      12
-rw-r--r--  mm/page_io.c          17
-rw-r--r--  mm/pagewalk.c         32
-rw-r--r--  mm/pdflush.c          269
-rw-r--r--  mm/percpu.c           1635
-rw-r--r--  mm/quicklist.c        5
-rw-r--r--  mm/readahead.c        12
-rw-r--r--  mm/rmap.c             478
-rw-r--r--  mm/shmem.c            122
-rw-r--r--  mm/shmem_acl.c        180
-rw-r--r--  mm/slab.c             179
-rw-r--r--  mm/slob.c             5
-rw-r--r--  mm/slub.c             115
-rw-r--r--  mm/sparse-vmemmap.c   8
-rw-r--r--  mm/sparse.c           9
-rw-r--r--  mm/swap.c             8
-rw-r--r--  mm/swap_state.c       144
-rw-r--r--  mm/swapfile.c         868
-rw-r--r--  mm/truncate.c         142
-rw-r--r--  mm/util.c             2
-rw-r--r--  mm/vmalloc.c          732
-rw-r--r--  mm/vmscan.c           576
-rw-r--r--  mm/vmstat.c           15
58 files changed, 11191 insertions, 3974 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index fe5f674d7a7d..17b8947aa7da 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -67,7 +67,7 @@ config DISCONTIGMEM
config SPARSEMEM
def_bool y
- depends on SPARSEMEM_MANUAL
+ depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
config FLATMEM
def_bool y
@@ -128,11 +128,8 @@ config SPARSEMEM_VMEMMAP
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
- depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG
- depends on (IA64 || X86 || PPC64 || SUPERH || S390)
-
-comment "Memory hotplug is currently incompatible with Software Suspend"
- depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
+ depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
config MEMORY_HOTPLUG_SPARSE
def_bool y
@@ -153,7 +150,7 @@ config MEMORY_HOTREMOVE
#
config PAGEFLAGS_EXTENDED
def_bool y
- depends on 64BIT || SPARSEMEM_VMEMMAP || !NUMA || !SPARSEMEM
+ depends on 64BIT || SPARSEMEM_VMEMMAP || !SPARSEMEM
# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
@@ -161,11 +158,13 @@ config PAGEFLAGS_EXTENDED
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
+# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
int
- default "4096" if ARM && !CPU_CACHE_VIPT
- default "4096" if PARISC && !PA20
+ default "999999" if ARM && !CPU_CACHE_VIPT
+ default "999999" if PARISC && !PA20
+ default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
default "4"
#
@@ -203,19 +202,26 @@ config VIRT_TO_BUS
def_bool y
depends on !ARCH_NO_VIRT_TO_BUS
-config HAVE_MLOCK
- bool
- default y if MMU=y
-
-config HAVE_MLOCKED_PAGE_BIT
- bool
- default y if HAVE_MLOCK=y
-
config MMU_NOTIFIER
bool
+config KSM
+ bool "Enable KSM for page merging"
+ depends on MMU
+ help
+ Enable Kernel Samepage Merging: KSM periodically scans those areas
+ of an application's address space that an app has advised may be
+ mergeable. When it finds pages of identical content, it replaces
+ the many instances by a single page with that content, so
+ saving memory until one or another app needs to modify the content.
+ Recommended for use with KVM, or with other duplicative applications.
+ See Documentation/vm/ksm.txt for more information: KSM is inactive
+ until a program has madvised that an area is MADV_MERGEABLE, and
+ root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
+ depends on MMU
default 4096
help
This is the portion of low virtual memory which should be protected
@@ -232,6 +238,23 @@ config DEFAULT_MMAP_MIN_ADDR
This value can be changed after boot using the
/proc/sys/vm/mmap_min_addr tunable.
+config ARCH_SUPPORTS_MEMORY_FAILURE
+ bool
+
+config MEMORY_FAILURE
+ depends on MMU
+ depends on ARCH_SUPPORTS_MEMORY_FAILURE
+ bool "Enable recovery from hardware memory errors"
+ help
+ Enables code to recover from some memory failures on systems
+ with MCA recovery. This allows a system to continue running
+ even when some of its memory has uncorrected errors. This requires
+ special hardware support and typically ECC memory.
+
+config HWPOISON_INJECT
+ tristate "HWPoison pages injector"
+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
+ select PROC_PAGE_MONITOR
config NOMMU_INITIAL_TRIM_EXCESS
int "Turn on mmap() excess space trimming before booting"
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index aa99fd1f7109..af7cfb43d2f0 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -6,7 +6,7 @@ config DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
- of memory corruptions.
+ of memory corruption.
config WANT_PAGE_DEBUG_FLAGS
bool
@@ -17,11 +17,11 @@ config PAGE_POISONING
depends on !HIBERNATION
select DEBUG_PAGEALLOC
select WANT_PAGE_DEBUG_FLAGS
- help
+ ---help---
Fill the pages with poison patterns after free_pages() and verify
the patterns before alloc_pages(). This results in a large slowdown,
- but helps to find certain types of memory corruptions.
+ but helps to find certain types of memory corruption.
- This option cannot enalbe with hibernation. Otherwise, it will get
- wrong messages for memory corruption because the free pages are not
- saved to the suspend image.
+ This option cannot be enabled in combination with hibernation as
+ that would result in incorrect warnings of memory corruption after
+ a resume because free pages are not saved to the suspend image.
diff --git a/mm/Makefile b/mm/Makefile
index 5e0bd6426693..7a68d2ab5560 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,16 +5,16 @@
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o
+ vmalloc.o pagewalk.o
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
- maccess.o page_alloc.o page-writeback.o pdflush.o \
+ maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
- page_isolation.o mm_init.o $(mmu-y)
+ page_isolation.o mm_init.o mmu_context.o \
+ $(mmu-y)
obj-y += init-mm.o
-obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
@@ -22,9 +22,9 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
-obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
@@ -33,12 +33,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
-ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
+obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644
index dfdee6a47359..000000000000
--- a/mm/allocpercpu.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
- struct percpu_data *pdata = __percpu_disguise(__pdata);
-
- kfree(pdata->ptrs[cpu]);
- pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
- int cpu;
- for_each_cpu_mask_nr(cpu, *mask)
- percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
- __percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
- struct percpu_data *pdata = __percpu_disguise(__pdata);
- int node = cpu_to_node(cpu);
-
- /*
- * We should make sure each CPU gets private memory.
- */
- size = roundup(size, cache_line_size());
-
- BUG_ON(pdata->ptrs[cpu]);
- if (node_online(node))
- pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
- else
- pdata->ptrs[cpu] = kzalloc(size, gfp);
- return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
- cpumask_t *mask)
-{
- cpumask_t populated;
- int cpu;
-
- cpus_clear(populated);
- for_each_cpu_mask_nr(cpu, *mask)
- if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
- __percpu_depopulate_mask(__pdata, &populated);
- return -ENOMEM;
- } else
- cpu_set(cpu, populated);
- return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
- __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area. Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
- /*
- * We allocate whole cache lines to avoid false sharing
- */
- size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
- void *pdata = kzalloc(sz, GFP_KERNEL);
- void *__pdata = __percpu_disguise(pdata);
-
- /*
- * Can't easily make larger alignment work with kmalloc. WARN
- * on it. Larger alignment should only be used for module
- * percpu sections on SMP for which this path isn't used.
- */
- WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
- if (unlikely(!pdata))
- return NULL;
- if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
- &cpu_possible_map)))
- return __pdata;
- kfree(pdata);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
- if (unlikely(!__pdata))
- return;
- __percpu_depopulate_mask(__pdata, cpu_possible_mask);
- kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
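
The deletion of mm/allocpercpu.c above goes together with the Makefile hunk that now builds mm/percpu.c unconditionally for SMP; the caller-visible alloc_percpu()/free_percpu() API stays the same. A short, hypothetical sketch of that API is below; the struct and helper names are illustrative only.

#include <linux/percpu.h>
#include <linux/smp.h>

/* hypothetical per-cpu statistics object, purely for illustration */
struct example_stats {
	unsigned long hits;
	unsigned long misses;
};

static struct example_stats *example_stats_alloc(void)
{
	/* one zeroed instance per possible CPU, whichever allocator backs it */
	return alloc_percpu(struct example_stats);
}

static void example_stats_count_hit(struct example_stats *stats)
{
	/* touch this CPU's instance; caller must keep the task on this CPU,
	 * e.g. by running with preemption disabled */
	per_cpu_ptr(stats, smp_processor_id())->hits++;
}

static void example_stats_free(struct example_stats *stats)
{
	free_percpu(stats);
}
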
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index c86edd244294..0e8ca0347707 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1,8 +1,11 @@
#include <linux/wait.h>
#include <linux/backing-dev.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
@@ -14,6 +17,7 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
+ .name = "default",
.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
.state = 0,
.capabilities = BDI_CAP_MAP_COPY,
@@ -23,6 +27,24 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
+DEFINE_SPINLOCK(bdi_lock);
+LIST_HEAD(bdi_list);
+LIST_HEAD(bdi_pending_list);
+
+static struct task_struct *sync_supers_tsk;
+static struct timer_list sync_supers_timer;
+
+static int bdi_sync_supers(void *);
+static void sync_supers_timer_fn(unsigned long);
+static void arm_supers_timer(void);
+
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
+
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -37,9 +59,29 @@ static void bdi_debug_init(void)
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
struct backing_dev_info *bdi = m->private;
+ struct bdi_writeback *wb;
unsigned long background_thresh;
unsigned long dirty_thresh;
unsigned long bdi_thresh;
+ unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
+ struct inode *inode;
+
+ /*
+ * inode lock is enough here, the bdi->wb_list is protected by
+ * RCU on the reader side
+ */
+ nr_wb = nr_dirty = nr_io = nr_more_io = 0;
+ spin_lock(&inode_lock);
+ list_for_each_entry(wb, &bdi->wb_list, list) {
+ nr_wb++;
+ list_for_each_entry(inode, &wb->b_dirty, i_list)
+ nr_dirty++;
+ list_for_each_entry(inode, &wb->b_io, i_list)
+ nr_io++;
+ list_for_each_entry(inode, &wb->b_more_io, i_list)
+ nr_more_io++;
+ }
+ spin_unlock(&inode_lock);
get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
@@ -49,12 +91,22 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"BdiReclaimable: %8lu kB\n"
"BdiDirtyThresh: %8lu kB\n"
"DirtyThresh: %8lu kB\n"
- "BackgroundThresh: %8lu kB\n",
+ "BackgroundThresh: %8lu kB\n"
+ "WritebackThreads: %8lu\n"
+ "b_dirty: %8lu\n"
+ "b_io: %8lu\n"
+ "b_more_io: %8lu\n"
+ "bdi_list: %8u\n"
+ "state: %8lx\n"
+ "wb_mask: %8lx\n"
+ "wb_list: %8u\n"
+ "wb_cnt: %8u\n",
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
- K(bdi_thresh),
- K(dirty_thresh),
- K(background_thresh));
+ K(bdi_thresh), K(dirty_thresh),
+ K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
+ !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
+ !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K
return 0;
@@ -185,6 +237,13 @@ static int __init default_bdi_init(void)
{
int err;
+ sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
+ BUG_ON(IS_ERR(sync_supers_tsk));
+
+ init_timer(&sync_supers_timer);
+ setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
+ arm_supers_timer();
+
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
@@ -193,6 +252,279 @@ static int __init default_bdi_init(void)
}
subsys_initcall(default_bdi_init);
+static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+{
+ memset(wb, 0, sizeof(*wb));
+
+ wb->bdi = bdi;
+ wb->last_old_flush = jiffies;
+ INIT_LIST_HEAD(&wb->b_dirty);
+ INIT_LIST_HEAD(&wb->b_io);
+ INIT_LIST_HEAD(&wb->b_more_io);
+}
+
+static void bdi_task_init(struct backing_dev_info *bdi,
+ struct bdi_writeback *wb)
+{
+ struct task_struct *tsk = current;
+
+ spin_lock(&bdi->wb_lock);
+ list_add_tail_rcu(&wb->list, &bdi->wb_list);
+ spin_unlock(&bdi->wb_lock);
+
+ tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ set_freezable();
+
+ /*
+ * Our parent may run at a different priority, just set us to normal
+ */
+ set_user_nice(tsk, 0);
+}
+
+static int bdi_start_fn(void *ptr)
+{
+ struct bdi_writeback *wb = ptr;
+ struct backing_dev_info *bdi = wb->bdi;
+ int ret;
+
+ /*
+ * Add us to the active bdi_list
+ */
+ spin_lock_bh(&bdi_lock);
+ list_add_rcu(&bdi->bdi_list, &bdi_list);
+ spin_unlock_bh(&bdi_lock);
+
+ bdi_task_init(bdi, wb);
+
+ /*
+ * Clear pending bit and wakeup anybody waiting to tear us down
+ */
+ clear_bit(BDI_pending, &bdi->state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&bdi->state, BDI_pending);
+
+ ret = bdi_writeback_task(wb);
+
+ /*
+ * Remove us from the list
+ */
+ spin_lock(&bdi->wb_lock);
+ list_del_rcu(&wb->list);
+ spin_unlock(&bdi->wb_lock);
+
+ /*
+ * Flush any work that raced with us exiting. No new work
+ * will be added, since this bdi isn't discoverable anymore.
+ */
+ if (!list_empty(&bdi->work_list))
+ wb_do_writeback(wb, 1);
+
+ wb->task = NULL;
+ return ret;
+}
+
+int bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+ return wb_has_dirty_io(&bdi->wb);
+}
+
+static void bdi_flush_io(struct backing_dev_info *bdi)
+{
+ struct writeback_control wbc = {
+ .bdi = bdi,
+ .sync_mode = WB_SYNC_NONE,
+ .older_than_this = NULL,
+ .range_cyclic = 1,
+ .nr_to_write = 1024,
+ };
+
+ writeback_inodes_wbc(&wbc);
+}
+
+/*
+ * kupdated() used to do this. We cannot do it from the bdi_forker_task()
+ * or we risk deadlocking on ->s_umount. The longer term solution would be
+ * to implement sync_supers_bdi() or similar and simply do it from the
+ * bdi writeback tasks individually.
+ */
+static int bdi_sync_supers(void *unused)
+{
+ set_user_nice(current, 0);
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ /*
+ * Do this periodically, like kupdated() did before.
+ */
+ sync_supers();
+ }
+
+ return 0;
+}
+
+static void arm_supers_timer(void)
+{
+ unsigned long next;
+
+ next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
+ mod_timer(&sync_supers_timer, round_jiffies_up(next));
+}
+
+static void sync_supers_timer_fn(unsigned long unused)
+{
+ wake_up_process(sync_supers_tsk);
+ arm_supers_timer();
+}
+
+static int bdi_forker_task(void *ptr)
+{
+ struct bdi_writeback *me = ptr;
+
+ bdi_task_init(me->bdi, me);
+
+ for (;;) {
+ struct backing_dev_info *bdi, *tmp;
+ struct bdi_writeback *wb;
+
+ /*
+ * Temporary measure, we want to make sure we don't see
+ * dirty data on the default backing_dev_info
+ */
+ if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
+ wb_do_writeback(me, 0);
+
+ spin_lock_bh(&bdi_lock);
+
+ /*
+ * Check if any existing bdi's have dirty data without
+ * a thread registered. If so, set that up.
+ */
+ list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
+ if (bdi->wb.task)
+ continue;
+ if (list_empty(&bdi->work_list) &&
+ !bdi_has_dirty_io(bdi))
+ continue;
+
+ bdi_add_default_flusher_task(bdi);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (list_empty(&bdi_pending_list)) {
+ unsigned long wait;
+
+ spin_unlock_bh(&bdi_lock);
+ wait = msecs_to_jiffies(dirty_writeback_interval * 10);
+ schedule_timeout(wait);
+ try_to_freeze();
+ continue;
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ /*
+ * This is our real job - check for pending entries in
+ * bdi_pending_list, and create the tasks that got added
+ */
+ bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
+ bdi_list);
+ list_del_init(&bdi->bdi_list);
+ spin_unlock_bh(&bdi_lock);
+
+ wb = &bdi->wb;
+ wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
+ dev_name(bdi->dev));
+ /*
+ * If task creation fails, then readd the bdi to
+ * the pending list and force writeout of the bdi
+ * from this forker thread. That will free some memory
+ * and we can try again.
+ */
+ if (IS_ERR(wb->task)) {
+ wb->task = NULL;
+
+ /*
+ * Add this 'bdi' to the back, so we get
+ * a chance to flush other bdi's to free
+ * memory.
+ */
+ spin_lock_bh(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+ spin_unlock_bh(&bdi_lock);
+
+ bdi_flush_io(bdi);
+ }
+ }
+
+ return 0;
+}
+
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+ struct backing_dev_info *bdi;
+
+ bdi = container_of(head, struct backing_dev_info, rcu_head);
+ INIT_LIST_HEAD(&bdi->bdi_list);
+
+ spin_lock(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+ spin_unlock(&bdi_lock);
+
+ /*
+ * We are now on the pending list, wake up bdi_forker_task()
+ * to finish the job and add us back to the active bdi_list
+ */
+ wake_up_process(default_backing_dev_info.wb.task);
+}
+
+/*
+ * Add the default flusher task that gets created for any bdi
+ * that has dirty data pending writeout
+ */
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
+{
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+ if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
+ printk(KERN_ERR "bdi %p/%s is not registered!\n",
+ bdi, bdi->name);
+ return;
+ }
+
+ /*
+ * Check with the helper whether to proceed adding a task. Will only
+ * abort if two or more simultaneous calls to
+ * bdi_add_default_flusher_task() occurred; further additions will block
+ * waiting for previous additions to finish.
+ */
+ if (!test_and_set_bit(BDI_pending, &bdi->state)) {
+ list_del_rcu(&bdi->bdi_list);
+
+ /*
+ * We must wait for the current RCU period to end before
+ * moving to the pending list. So schedule that operation
+ * from an RCU callback.
+ */
+ call_rcu(&bdi->rcu_head, bdi_add_to_pending);
+ }
+}
+
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+ spin_lock_bh(&bdi_lock);
+ list_del_rcu(&bdi->bdi_list);
+ spin_unlock_bh(&bdi_lock);
+
+ synchronize_rcu();
+}
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
@@ -211,9 +543,33 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
goto exit;
}
+ spin_lock_bh(&bdi_lock);
+ list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+ spin_unlock_bh(&bdi_lock);
+
bdi->dev = dev;
- bdi_debug_register(bdi, dev_name(dev));
+ /*
+ * Just start the forker thread for our default backing_dev_info,
+ * and add other bdi's to the list. They will get a thread created
+ * on-demand when they need it.
+ */
+ if (bdi_cap_flush_forker(bdi)) {
+ struct bdi_writeback *wb = &bdi->wb;
+
+ wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
+ dev_name(dev));
+ if (IS_ERR(wb->task)) {
+ wb->task = NULL;
+ ret = -ENOMEM;
+
+ bdi_remove_from_list(bdi);
+ goto exit;
+ }
+ }
+
+ bdi_debug_register(bdi, dev_name(dev));
+ set_bit(BDI_registered, &bdi->state);
exit:
return ret;
}
@@ -225,9 +581,61 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
}
EXPORT_SYMBOL(bdi_register_dev);
+/*
+ * Remove bdi from the global list and shutdown any threads we have running
+ */
+static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+{
+ struct bdi_writeback *wb;
+
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+ /*
+ * If setup is pending, wait for that to complete first
+ */
+ wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+ TASK_UNINTERRUPTIBLE);
+
+ /*
+ * Make sure nobody finds us on the bdi_list anymore
+ */
+ bdi_remove_from_list(bdi);
+
+ /*
+ * Finally, kill the kernel threads. We don't need to be RCU
+ * safe anymore, since the bdi is gone from visibility. Force
+ * unfreeze of the thread before calling kthread_stop(), otherwise
+ * it would never exit if it is currently stuck in the refrigerator.
+ */
+ list_for_each_entry(wb, &bdi->wb_list, list) {
+ thaw_process(wb->task);
+ kthread_stop(wb->task);
+ }
+}
+
+/*
+ * This bdi is going away now, make sure that no super_blocks point to it
+ */
+static void bdi_prune_sb(struct backing_dev_info *bdi)
+{
+ struct super_block *sb;
+
+ spin_lock(&sb_lock);
+ list_for_each_entry(sb, &super_blocks, s_list) {
+ if (sb->s_bdi == bdi)
+ sb->s_bdi = NULL;
+ }
+ spin_unlock(&sb_lock);
+}
+
void bdi_unregister(struct backing_dev_info *bdi)
{
if (bdi->dev) {
+ bdi_prune_sb(bdi);
+
+ if (!bdi_cap_flush_forker(bdi))
+ bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
device_unregister(bdi->dev);
bdi->dev = NULL;
@@ -237,14 +645,26 @@ EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
- int i;
- int err;
+ int i, err;
bdi->dev = NULL;
bdi->min_ratio = 0;
bdi->max_ratio = 100;
bdi->max_prop_frac = PROP_FRAC_BASE;
+ spin_lock_init(&bdi->wb_lock);
+ INIT_RCU_HEAD(&bdi->rcu_head);
+ INIT_LIST_HEAD(&bdi->bdi_list);
+ INIT_LIST_HEAD(&bdi->wb_list);
+ INIT_LIST_HEAD(&bdi->work_list);
+
+ bdi_wb_init(&bdi->wb, bdi);
+
+ /*
+ * Just one thread support for now, hard code mask and count
+ */
+ bdi->wb_mask = 1;
+ bdi->wb_cnt = 1;
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
err = percpu_counter_init(&bdi->bdi_stat[i], 0);
@@ -269,6 +689,20 @@ void bdi_destroy(struct backing_dev_info *bdi)
{
int i;
+ /*
+ * Splice our entries to the default_backing_dev_info, if this
+ * bdi disappears
+ */
+ if (bdi_has_dirty_io(bdi)) {
+ struct bdi_writeback *dst = &default_backing_dev_info.wb;
+
+ spin_lock(&inode_lock);
+ list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
+ list_splice(&bdi->wb.b_io, &dst->b_io);
+ list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
+ spin_unlock(&inode_lock);
+ }
+
bdi_unregister(bdi);
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
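
For context on how the per-bdi writeback machinery above gets used, here is a minimal, hypothetical driver-side sketch; the name and capability values are illustrative, and only bdi_init()/bdi_register()/bdi_destroy() come from this file. A bdi registered this way lands on bdi_list, and the forker thread spawns a "flush-example" task for it once it has dirty data.

#include <linux/backing-dev.h>
#include <linux/init.h>

static struct backing_dev_info example_bdi = {
	.name		= "example",
	.capabilities	= BDI_CAP_MAP_COPY,
};

static int __init example_bdi_setup(void)
{
	int err;

	err = bdi_init(&example_bdi);
	if (err)
		return err;

	err = bdi_register(&example_bdi, NULL, "example");
	if (err)
		bdi_destroy(&example_bdi);

	return err;
}
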
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 701740c9e81b..7d1486875e1c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -143,6 +143,30 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
+/*
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system. Pages are given directly
+ * to the page allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+ unsigned long cursor, end;
+
+ kmemleak_free_part(__va(addr), size);
+
+ cursor = PFN_UP(addr);
+ end = PFN_DOWN(addr + size);
+
+ for (; cursor < end; cursor++) {
+ __free_pages_bootmem(pfn_to_page(cursor), 0);
+ totalram_pages++;
+ }
+}
+
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
int aligned;
@@ -408,8 +432,8 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
- unsigned long step)
+static unsigned long __init align_idx(struct bootmem_data *bdata,
+ unsigned long idx, unsigned long step)
{
unsigned long base = bdata->node_min_pfn;
@@ -421,8 +445,8 @@ static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
return ALIGN(base + idx, step) - base;
}
-static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
- unsigned long align)
+static unsigned long __init align_off(struct bootmem_data *bdata,
+ unsigned long off, unsigned long align)
{
unsigned long base = PFN_PHYS(bdata->node_min_pfn);
@@ -521,7 +545,11 @@ find_block:
region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
start_off);
memset(region, 0, size);
- kmemleak_alloc(region, size, 1, 0);
+ /*
+ * The min_count is set to 0 so that bootmem allocated blocks
+ * are never reported as leaks.
+ */
+ kmemleak_alloc(region, size, 0, 0);
return region;
}
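
The new free_bootmem_late() above covers the window after the bootmem allocator has been torn down but before boot is finished. A hedged sketch of a caller follows; the function name and the "unneeded reserved range" scenario are hypothetical.

#include <linux/bootmem.h>
#include <linux/init.h>

/*
 * Hypothetical: an arch notices late in boot that a previously reserved
 * physical range is not needed after all.  Bootmem metadata is already
 * gone, so the pages go straight to the page allocator, as described above.
 */
static void __init example_release_reserved_range(unsigned long phys_addr,
						  unsigned long size)
{
	free_bootmem_late(phys_addr, size);
}
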
diff --git a/mm/filemap.c b/mm/filemap.c
index 769d389262d7..01c2711fd9d3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -39,11 +39,10 @@
/*
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
-#include <linux/buffer_head.h> /* for generic_osync_inode */
+#include <linux/buffer_head.h> /* for try_to_free_buffers */
#include <asm/mman.h>
-
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
@@ -59,7 +58,7 @@
/*
* Lock ordering:
*
- * ->i_mmap_lock (vmtruncate)
+ * ->i_mmap_lock (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
@@ -105,6 +104,10 @@
*
* ->task->proc_lock
* ->dcache_lock (proc_pid_lookup)
+ *
+ * (code doesn't rely on that order, so you could switch it around)
+ * ->tasklist_lock (memory_failure, collect_procs_ao)
+ * ->i_mmap_lock
*/
/*
@@ -120,6 +123,8 @@ void __remove_from_page_cache(struct page *page)
page->mapping = NULL;
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
+ if (PageSwapBacked(page))
+ __dec_zone_page_state(page, NR_SHMEM);
BUG_ON(page_mapped(page));
/*
@@ -255,27 +260,27 @@ int filemap_flush(struct address_space *mapping)
EXPORT_SYMBOL(filemap_flush);
/**
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping: target address_space
- * @start: beginning page index
- * @end: ending page index
+ * filemap_fdatawait_range - wait for writeback to complete
+ * @mapping: address space structure to wait for
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
*
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
+ * Walk the list of under-writeback pages of the given address space
+ * in the given range and wait for all of them.
*/
-int wait_on_page_writeback_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end)
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+ loff_t end_byte)
{
+ pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
+ pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
struct pagevec pvec;
int nr_pages;
int ret = 0;
- pgoff_t index;
- if (end < start)
+ if (end_byte < start_byte)
return 0;
pagevec_init(&pvec, 0);
- index = start;
while ((index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_WRITEBACK,
@@ -305,70 +310,7 @@ int wait_on_page_writeback_range(struct address_space *mapping,
return ret;
}
-
-/**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode: target inode
- * @mapping: target address_space
- * @pos: beginning offset in pages to write
- * @count: number of bytes to write
- *
- * Write and wait upon all the pages in the passed range. This is a "data
- * integrity" operation. It waits upon in-flight writeout before starting and
- * waiting upon new writeout. If there was an IO error, return it.
- *
- * We need to re-take i_mutex during the generic_osync_inode list walk because
- * it is otherwise livelockable.
- */
-int sync_page_range(struct inode *inode, struct address_space *mapping,
- loff_t pos, loff_t count)
-{
- pgoff_t start = pos >> PAGE_CACHE_SHIFT;
- pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
- int ret;
-
- if (!mapping_cap_writeback_dirty(mapping) || !count)
- return 0;
- ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
- if (ret == 0) {
- mutex_lock(&inode->i_mutex);
- ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- mutex_unlock(&inode->i_mutex);
- }
- if (ret == 0)
- ret = wait_on_page_writeback_range(mapping, start, end);
- return ret;
-}
-EXPORT_SYMBOL(sync_page_range);
-
-/**
- * sync_page_range_nolock - write & wait on all pages in the passed range without locking
- * @inode: target inode
- * @mapping: target address_space
- * @pos: beginning offset in pages to write
- * @count: number of bytes to write
- *
- * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
- * as it forces O_SYNC writers to different parts of the same file
- * to be serialised right until io completion.
- */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
- loff_t pos, loff_t count)
-{
- pgoff_t start = pos >> PAGE_CACHE_SHIFT;
- pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
- int ret;
-
- if (!mapping_cap_writeback_dirty(mapping) || !count)
- return 0;
- ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
- if (ret == 0)
- ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- if (ret == 0)
- ret = wait_on_page_writeback_range(mapping, start, end);
- return ret;
-}
-EXPORT_SYMBOL(sync_page_range_nolock);
+EXPORT_SYMBOL(filemap_fdatawait_range);
/**
* filemap_fdatawait - wait for all under-writeback pages to complete
@@ -384,8 +326,7 @@ int filemap_fdatawait(struct address_space *mapping)
if (i_size == 0)
return 0;
- return wait_on_page_writeback_range(mapping, 0,
- (i_size - 1) >> PAGE_CACHE_SHIFT);
+ return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);
@@ -432,9 +373,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
WB_SYNC_ALL);
/* See comment of filemap_write_and_wait() */
if (err != -EIO) {
- int err2 = wait_on_page_writeback_range(mapping,
- lstart >> PAGE_CACHE_SHIFT,
- lend >> PAGE_CACHE_SHIFT);
+ int err2 = filemap_fdatawait_range(mapping,
+ lstart, lend);
if (!err)
err = err2;
}
@@ -476,6 +416,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
if (likely(!error)) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
+ if (PageSwapBacked(page))
+ __inc_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
@@ -1648,7 +1590,7 @@ page_not_uptodate:
}
EXPORT_SYMBOL(filemap_fault);
-struct vm_operations_struct generic_file_vm_ops = {
+const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
};
@@ -1692,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
static struct page *__read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *,struct page*),
- void *data)
+ void *data,
+ gfp_t gfp)
{
struct page *page;
int err;
repeat:
page = find_get_page(mapping, index);
if (!page) {
- page = page_cache_alloc_cold(mapping);
+ page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
@@ -1719,31 +1662,18 @@ repeat:
return page;
}
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *,struct page*),
- void *data)
+ void *data,
+ gfp_t gfp)
+
{
struct page *page;
int err;
retry:
- page = __read_cache_page(mapping, index, filler, data);
+ page = __read_cache_page(mapping, index, filler, data, gfp);
if (IS_ERR(page))
return page;
if (PageUptodate(page))
@@ -1768,8 +1698,67 @@ out:
mark_page_accessed(page);
return page;
}
+
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_async(struct address_space *mapping,
+ pgoff_t index,
+ int (*filler)(void *,struct page*),
+ void *data)
+{
+ return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+}
EXPORT_SYMBOL(read_cache_page_async);
+static struct page *wait_on_page_read(struct page *page)
+{
+ if (!IS_ERR(page)) {
+ wait_on_page_locked(page);
+ if (!PageUptodate(page)) {
+ page_cache_release(page);
+ page = ERR_PTR(-EIO);
+ }
+ }
+ return page;
+}
+
+/**
+ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @gfp: the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+ * any new page allocations done using the specified allocation flags. Note
+ * that the Radix tree operations will still use GFP_KERNEL, so you can't
+ * expect to do this atomically or anything like that - but you can pass in
+ * other page requirements.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_gfp(struct address_space *mapping,
+ pgoff_t index,
+ gfp_t gfp)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+
+ return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+}
+EXPORT_SYMBOL(read_cache_page_gfp);
+
/**
* read_cache_page - read into page cache, fill it if needed
* @mapping: the page's address_space
@@ -1787,18 +1776,7 @@ struct page *read_cache_page(struct address_space *mapping,
int (*filler)(void *,struct page*),
void *data)
{
- struct page *page;
-
- page = read_cache_page_async(mapping, index, filler, data);
- if (IS_ERR(page))
- goto out;
- wait_on_page_locked(page);
- if (!PageUptodate(page)) {
- page_cache_release(page);
- page = ERR_PTR(-EIO);
- }
- out:
- return page;
+ return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);
@@ -1881,7 +1859,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
/*
* Copy as much as we can into the page and return the number of bytes which
- * were sucessfully copied. If a fault is encountered then return the number of
+ * were successfully copied. If a fault is encountered then return the number of
* bytes which were copied.
*/
size_t iov_iter_copy_from_user_atomic(struct page *page,
@@ -2167,20 +2145,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
}
*ppos = end;
}
-
- /*
- * Sync the fs metadata but not the minor inode changes and
- * of course not the data as we did direct DMA for the IO.
- * i_mutex is held, which protects generic_osync_inode() from
- * livelocking. AIO O_DIRECT ops attempt to sync metadata here.
- */
out:
- if ((written >= 0 || written == -EIOCBQUEUED) &&
- ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
- int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- if (err < 0)
- written = err;
- }
return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
@@ -2267,6 +2232,9 @@ again:
if (unlikely(status))
break;
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_page(page);
+
pagefault_disable();
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
pagefault_enable();
@@ -2311,9 +2279,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
size_t count, ssize_t written)
{
struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- const struct address_space_operations *a_ops = mapping->a_ops;
- struct inode *inode = mapping->host;
ssize_t status;
struct iov_iter i;
@@ -2323,34 +2288,33 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
if (likely(status >= 0)) {
written += status;
*ppos = pos + status;
-
- /*
- * For now, when the user asks for O_SYNC, we'll actually give
- * O_DSYNC
- */
- if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
- if (!a_ops->writepage || !is_sync_kiocb(iocb))
- status = generic_osync_inode(inode, mapping,
- OSYNC_METADATA|OSYNC_DATA);
- }
}
- /*
- * If we get here for O_DIRECT writes then we must have fallen through
- * to buffered writes (block instantiation inside i_size). So we sync
- * the file data here, to try to honour O_DIRECT expectations.
- */
- if (unlikely(file->f_flags & O_DIRECT) && written)
- status = filemap_write_and_wait_range(mapping,
- pos, pos + written - 1);
-
return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
-static ssize_t
-__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos)
+/**
+ * __generic_file_aio_write - write data to a file
+ * @iocb: IO state structure (file, offset, etc.)
+ * @iov: vector with data to write
+ * @nr_segs: number of segments in the vector
+ * @ppos: position where to write
+ *
+ * This function does all the work needed for actually writing data to a
+ * file. It does all basic checks, removes SUID from the file, updates
+ * modification times and calls proper subroutines depending on whether we
+ * do direct IO or a standard buffered write.
+ *
+ * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * object which does not need locking at all.
+ *
+ * This function does *not* take care of syncing data in case of O_SYNC write.
+ * A caller has to handle it. This is mainly due to the fact that we want to
+ * avoid syncing under i_mutex.
+ */
+ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
{
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
@@ -2424,10 +2388,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
* semantics.
*/
endbyte = pos + written_buffered - written - 1;
- err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
- SYNC_FILE_RANGE_WAIT_BEFORE|
- SYNC_FILE_RANGE_WRITE|
- SYNC_FILE_RANGE_WAIT_AFTER);
+ err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
if (err == 0) {
written = written_buffered;
invalidate_mapping_pages(mapping,
@@ -2447,51 +2408,37 @@ out:
current->backing_dev_info = NULL;
return written ? written : err;
}
+EXPORT_SYMBOL(__generic_file_aio_write);
-ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
- const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- ssize_t ret;
-
- BUG_ON(iocb->ki_pos != pos);
-
- ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
- &iocb->ki_pos);
-
- if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
- ssize_t err;
-
- err = sync_page_range_nolock(inode, mapping, pos, ret);
- if (err < 0)
- ret = err;
- }
- return ret;
-}
-EXPORT_SYMBOL(generic_file_aio_write_nolock);
-
+/**
+ * generic_file_aio_write - write data to a file
+ * @iocb: IO state structure
+ * @iov: vector with data to write
+ * @nr_segs: number of segments in the vector
+ * @pos: position in file where to write
+ *
+ * This is a wrapper around __generic_file_aio_write() to be used by most
+ * filesystems. It takes care of syncing the file in case of O_SYNC file
+ * and acquires i_mutex as needed.
+ */
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = file->f_mapping->host;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
mutex_lock(&inode->i_mutex);
- ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
- &iocb->ki_pos);
+ ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
- if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+ if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
- err = sync_page_range(inode, mapping, pos, ret);
- if (err < 0)
+ err = generic_write_sync(file, pos, ret);
+ if (err < 0 && ret > 0)
ret = err;
}
return ret;
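
The read_cache_page_gfp() helper added above lets a caller control the allocation flags used for any newly allocated page cache page. Below is a minimal, hypothetical in-kernel sketch of a filesystem-style caller; the helper name and GFP_NOFS choice are illustrative, not from the patch.

#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/err.h>

/*
 * Hypothetical helper: read one page of a metadata mapping without letting
 * the allocation recurse into the filesystem.  read_cache_page_gfp() uses
 * mapping->a_ops->readpage as its filler, as shown in the hunk above.
 */
static struct page *example_read_meta_page(struct address_space *mapping,
					   pgoff_t index)
{
	struct page *page;

	page = read_cache_page_gfp(mapping, index, GFP_NOFS);
	if (IS_ERR(page))
		return page;	/* e.g. ERR_PTR(-EIO) if the read failed */

	/* page is uptodate here; release it with page_cache_release() */
	return page;
}
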
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 427dfe3ce78c..1888b2d71bb8 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -296,7 +296,7 @@ out:
}
}
-static struct vm_operations_struct xip_file_vm_ops = {
+static const struct vm_operations_struct xip_file_vm_ops = {
.fault = xip_file_fault,
};
diff --git a/mm/highmem.c b/mm/highmem.c
index 66e915acd783..446b75caa521 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -557,16 +557,21 @@ void __init page_address_init(void)
void debug_kmap_atomic(enum km_type type)
{
- static unsigned warn_count = 10;
+ static int warn_count = 10;
- if (unlikely(warn_count == 0))
+ if (unlikely(warn_count < 0))
return;
if (unlikely(in_interrupt())) {
- if (in_irq()) {
+ if (in_nmi()) {
+ if (type != KM_NMI && type != KM_NMI_PTE) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ } else if (in_irq()) {
if (type != KM_IRQ0 && type != KM_IRQ1 &&
type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
- type != KM_BOUNCE_READ) {
+ type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
WARN_ON(1);
warn_count--;
}
@@ -583,7 +588,9 @@ void debug_kmap_atomic(enum km_type type)
}
if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
- type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+ type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
+ type == KM_IRQ_PTE || type == KM_NMI ||
+ type == KM_NMI_PTE ) {
if (!irqs_disabled()) {
WARN_ON(1);
warn_count--;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cafdcee154e8..2d16fa6b8c2d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <linux/hugetlb.h>
+#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
@@ -234,6 +235,7 @@ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
return 1UL << (hstate->order + PAGE_SHIFT);
}
+EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
* Return the page size being used by the MMU to back a VMA. In the majority
@@ -400,7 +402,7 @@ static void clear_huge_page(struct page *page,
{
int i;
- if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+ if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, sz);
return;
}
@@ -455,24 +457,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
h->free_huge_pages_node[nid]++;
}
-static struct page *dequeue_huge_page(struct hstate *h)
-{
- int nid;
- struct page *page = NULL;
-
- for (nid = 0; nid < MAX_NUMNODES; ++nid) {
- if (!list_empty(&h->hugepage_freelists[nid])) {
- page = list_entry(h->hugepage_freelists[nid].next,
- struct page, lru);
- list_del(&page->lru);
- h->free_huge_pages--;
- h->free_huge_pages_node[nid]--;
- break;
- }
- }
- return page;
-}
-
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve)
@@ -639,41 +623,66 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
}
/*
- * Use a helper variable to find the next node and then
- * copy it back to hugetlb_next_nid afterwards:
- * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
- * But we don't need to use a spin_lock here: it really
- * doesn't matter if occasionally a racer chooses the
- * same nid as we do. Move nid forward in the mask even
- * if we just successfully allocated a hugepage so that
- * the next caller gets hugepages on the next node.
+ * common helper functions for hstate_next_node_to_{alloc|free}.
+ * We may have allocated or freed a huge page based on a different
+ * nodes_allowed previously, so h->next_node_to_{alloc|free} might
+ * be outside of *nodes_allowed. Ensure that we use an allowed
+ * node for alloc or free.
*/
-static int hstate_next_node(struct hstate *h)
+static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
- int next_nid;
- next_nid = next_node(h->hugetlb_next_nid, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
- h->hugetlb_next_nid = next_nid;
- return next_nid;
+ nid = next_node(nid, *nodes_allowed);
+ if (nid == MAX_NUMNODES)
+ nid = first_node(*nodes_allowed);
+ VM_BUG_ON(nid >= MAX_NUMNODES);
+
+ return nid;
}
-static int alloc_fresh_huge_page(struct hstate *h)
+static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
+{
+ if (!node_isset(nid, *nodes_allowed))
+ nid = next_node_allowed(nid, nodes_allowed);
+ return nid;
+}
+
+/*
+ * returns the previously saved node ["this node"] from which to
+ * allocate a persistent huge page for the pool and advance the
+ * next node from which to allocate, handling wrap at end of node
+ * mask.
+ */
+static int hstate_next_node_to_alloc(struct hstate *h,
+ nodemask_t *nodes_allowed)
+{
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
+ h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
+
+ return nid;
+}
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int start_nid;
int next_nid;
int ret = 0;
- start_nid = h->hugetlb_next_nid;
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+ next_nid = start_nid;
do {
- page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
- if (page)
+ page = alloc_fresh_huge_page_node(h, next_nid);
+ if (page) {
ret = 1;
- next_nid = hstate_next_node(h);
- } while (!page && h->hugetlb_next_nid != start_nid);
+ break;
+ }
+ next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+ } while (next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -683,6 +692,67 @@ static int alloc_fresh_huge_page(struct hstate *h)
return ret;
}
+/*
+ * helper for free_pool_huge_page() - return the previously saved
+ * node ["this node"] from which to free a huge page. Advance the
+ * next node id whether or not we find a free huge page to free so
+ * that the next attempt to free addresses the next node.
+ */
+static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
+{
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
+ h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
+
+ return nid;
+}
+
+/*
+ * Free huge page from pool from next node to free.
+ * Attempt to keep persistent huge pages more or less
+ * balanced over allowed nodes.
+ * Called with hugetlb_lock locked.
+ */
+static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+ bool acct_surplus)
+{
+ int start_nid;
+ int next_nid;
+ int ret = 0;
+
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
+ next_nid = start_nid;
+
+ do {
+ /*
+ * If we're returning unused surplus pages, only examine
+ * nodes with surplus pages.
+ */
+ if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
+ !list_empty(&h->hugepage_freelists[next_nid])) {
+ struct page *page =
+ list_entry(h->hugepage_freelists[next_nid].next,
+ struct page, lru);
+ list_del(&page->lru);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[next_nid]--;
+ if (acct_surplus) {
+ h->surplus_huge_pages--;
+ h->surplus_huge_pages_node[next_nid]--;
+ }
+ update_and_free_page(h, page);
+ ret = 1;
+ break;
+ }
+ next_nid = hstate_next_node_to_free(h, nodes_allowed);
+ } while (next_nid != start_nid);
+
+ return ret;
+}
+
static struct page *alloc_buddy_huge_page(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
@@ -854,22 +924,13 @@ free:
* When releasing a hugetlb pool reservation, any surplus pages that were
* allocated to satisfy the reservation must be explicitly freed if they were
* never used.
+ * Called with hugetlb_lock held.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
- static int nid = -1;
- struct page *page;
unsigned long nr_pages;
- /*
- * We want to release as many surplus pages as possible, spread
- * evenly across all nodes. Iterate across all nodes until we
- * can no longer free unreserved surplus pages. This occurs when
- * the nodes with surplus pages have no free pages.
- */
- unsigned long remaining_iterations = nr_online_nodes;
-
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
@@ -879,26 +940,17 @@ static void return_unused_surplus_pages(struct hstate *h,
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
- while (remaining_iterations-- && nr_pages) {
- nid = next_node(nid, node_online_map);
- if (nid == MAX_NUMNODES)
- nid = first_node(node_online_map);
-
- if (!h->surplus_huge_pages_node[nid])
- continue;
-
- if (!list_empty(&h->hugepage_freelists[nid])) {
- page = list_entry(h->hugepage_freelists[nid].next,
- struct page, lru);
- list_del(&page->lru);
- update_and_free_page(h, page);
- h->free_huge_pages--;
- h->free_huge_pages_node[nid]--;
- h->surplus_huge_pages--;
- h->surplus_huge_pages_node[nid]--;
- nr_pages--;
- remaining_iterations = nr_online_nodes;
- }
+ /*
+ * We want to release as many surplus pages as possible, spread
+ * evenly across all nodes with memory. Iterate across these nodes
+ * until we can no longer free unreserved surplus pages. This occurs
+ * when the nodes with surplus pages have no free pages.
+ * free_pool_huge_page() will balance the freed pages across the
+ * on-line nodes with memory and will handle the hstate accounting.
+ */
+ while (nr_pages--) {
+ if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+ break;
}
}
@@ -1001,13 +1053,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_online_map);
+ int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
while (nr_nodes) {
void *addr;
addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(h->hugetlb_next_nid),
+ NODE_DATA(hstate_next_node_to_alloc(h,
+ &node_states[N_HIGH_MEMORY])),
huge_page_size(h), huge_page_size(h), 0);
if (addr) {
@@ -1019,7 +1072,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
m = addr;
goto found;
}
- hstate_next_node(h);
nr_nodes--;
}
return 0;
@@ -1063,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h))
+ } else if (!alloc_fresh_huge_page(h,
+ &node_states[N_HIGH_MEMORY]))
break;
}
h->max_huge_pages = i;
@@ -1105,14 +1158,15 @@ static void __init report_hugepages(void)
}
#ifdef CONFIG_HIGHMEM
-static void try_to_free_low(struct hstate *h, unsigned long count)
+static void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
int i;
if (h->order >= MAX_ORDER)
return;
- for (i = 0; i < MAX_NUMNODES; ++i) {
+ for_each_node_mask(i, *nodes_allowed) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(page, next, freel, lru) {
@@ -1128,7 +1182,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count)
}
}
#else
-static inline void try_to_free_low(struct hstate *h, unsigned long count)
+static inline void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
}
#endif
@@ -1138,38 +1193,56 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
-static int adjust_pool_surplus(struct hstate *h, int delta)
+static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
+ int delta)
{
- static int prev_nid;
- int nid = prev_nid;
+ int start_nid, next_nid;
int ret = 0;
VM_BUG_ON(delta != -1 && delta != 1);
- do {
- nid = next_node(nid, node_online_map);
- if (nid == MAX_NUMNODES)
- nid = first_node(node_online_map);
- /* To shrink on this node, there must be a surplus page */
- if (delta < 0 && !h->surplus_huge_pages_node[nid])
- continue;
- /* Surplus cannot exceed the total number of pages */
- if (delta > 0 && h->surplus_huge_pages_node[nid] >=
- h->nr_huge_pages_node[nid])
- continue;
+ if (delta < 0)
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+ else
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
+ next_nid = start_nid;
+
+ do {
+ int nid = next_nid;
+ if (delta < 0) {
+ /*
+ * To shrink on this node, there must be a surplus page
+ */
+ if (!h->surplus_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_alloc(h,
+ nodes_allowed);
+ continue;
+ }
+ }
+ if (delta > 0) {
+ /*
+ * Surplus cannot exceed the total number of pages
+ */
+ if (h->surplus_huge_pages_node[nid] >=
+ h->nr_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_free(h,
+ nodes_allowed);
+ continue;
+ }
+ }
h->surplus_huge_pages += delta;
h->surplus_huge_pages_node[nid] += delta;
ret = 1;
break;
- } while (nid != prev_nid);
+ } while (next_nid != start_nid);
- prev_nid = nid;
return ret;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
@@ -1189,7 +1262,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, -1))
+ if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
@@ -1200,11 +1273,14 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
- ret = alloc_fresh_huge_page(h);
+ ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
+ /* Bail for signals. Probably ctrl-c from user */
+ if (signal_pending(current))
+ goto out;
}
/*
@@ -1224,15 +1300,13 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
- try_to_free_low(h, min_count);
+ try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
- struct page *page = dequeue_huge_page(h);
- if (!page)
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
- update_and_free_page(h, page);
}
while (count < persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, 1))
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
@@ -1251,43 +1325,117 @@ out:
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
-static struct hstate *kobj_to_hstate(struct kobject *kobj)
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
+
for (i = 0; i < HUGE_MAX_HSTATE; i++)
- if (hstate_kobjs[i] == kobj)
+ if (hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = NUMA_NO_NODE;
return &hstates[i];
- BUG();
- return NULL;
+ }
+
+ return kobj_to_node_hstate(kobj, nidp);
}
-static ssize_t nr_hugepages_show(struct kobject *kobj,
+static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->nr_huge_pages);
+ struct hstate *h;
+ unsigned long nr_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ nr_huge_pages = h->nr_huge_pages;
+ else
+ nr_huge_pages = h->nr_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", nr_huge_pages);
}
-static ssize_t nr_hugepages_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t len)
{
int err;
- unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ int nid;
+ unsigned long count;
+ struct hstate *h;
+ NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
- err = strict_strtoul(buf, 10, &input);
+ err = strict_strtoul(buf, 10, &count);
if (err)
return 0;
- h->max_huge_pages = set_max_huge_pages(h, input);
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE) {
+ /*
+ * global hstate attribute
+ */
+ if (!(obey_mempolicy &&
+ init_nodemask_of_mempolicy(nodes_allowed))) {
+ NODEMASK_FREE(nodes_allowed);
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+ }
+ } else if (nodes_allowed) {
+ /*
+ * per node hstate attribute: adjust count to global,
+ * but restrict alloc/free to the specified node.
+ */
+ count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+ init_nodemask_of_node(nodes_allowed, nid);
+ } else
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
- return count;
+ h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
+
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ NODEMASK_FREE(nodes_allowed);
+
+ return len;
+}
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);
+#ifdef CONFIG_NUMA
+
+/*
+ * hstate attribute for optionally mempolicy-based constraint on persistent
+ * huge page alloc/free.
+ */
+static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ return nr_hugepages_store_common(true, kobj, attr, buf, len);
+}
+HSTATE_ATTR(nr_hugepages_mempolicy);
+#endif
+
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
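To keep set_max_huge_pages() unchanged, the per-node branch of nr_hugepages_store_common() above converts the node-local request into a global target and then restricts nodes_allowed to that single node. A worked example of that arithmetic with made-up numbers (standalone helper, not kernel code):

#include <stdio.h>

/* Global target = node-local request + pages currently on all other nodes. */
static unsigned long global_target(unsigned long requested_on_node,
				   unsigned long global_total,
				   unsigned long on_this_node)
{
	return requested_on_node + (global_total - on_this_node);
}

int main(void)
{
	/* 30 huge pages in total, 10 of them on node 1; ask node 1 for 15. */
	printf("%lu\n", global_target(15, 30, 10));	/* prints 35 */
	return 0;
}

With allocation and freeing restricted to node 1, the pool is then grown until the global count reaches 35, i.e. until node 1 itself holds 15 pages.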
@@ -1295,7 +1443,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
{
int err;
unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
err = strict_strtoul(buf, 10, &input);
if (err)
@@ -1312,15 +1460,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->free_huge_pages);
+ struct hstate *h;
+ unsigned long free_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ free_huge_pages = h->free_huge_pages;
+ else
+ free_huge_pages = h->free_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
@@ -1328,8 +1485,17 @@ HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+ struct hstate *h;
+ unsigned long surplus_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ surplus_huge_pages = h->surplus_huge_pages;
+ else
+ surplus_huge_pages = h->surplus_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
@@ -1339,6 +1505,9 @@ static struct attribute *hstate_attrs[] = {
&free_hugepages_attr.attr,
&resv_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
+#ifdef CONFIG_NUMA
+ &nr_hugepages_mempolicy_attr.attr,
+#endif
NULL,
};
@@ -1346,19 +1515,20 @@ static struct attribute_group hstate_attr_group = {
.attrs = hstate_attrs,
};
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
+ struct kobject **hstate_kobjs,
+ struct attribute_group *hstate_attr_group)
{
int retval;
+ int hi = h - hstates;
- hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
- hugepages_kobj);
- if (!hstate_kobjs[h - hstates])
+ hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
+ if (!hstate_kobjs[hi])
return -ENOMEM;
- retval = sysfs_create_group(hstate_kobjs[h - hstates],
- &hstate_attr_group);
+ retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
- kobject_put(hstate_kobjs[h - hstates]);
+ kobject_put(hstate_kobjs[hi]);
return retval;
}
@@ -1373,17 +1543,184 @@ static void __init hugetlb_sysfs_init(void)
return;
for_each_hstate(h) {
- err = hugetlb_sysfs_add_hstate(h);
+ err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
+ hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
+#ifdef CONFIG_NUMA
+
+/*
+ * node_hstate/s - associate per node hstate attributes, via their kobjects,
+ * with node sysdevs in node_devices[] using a parallel array. The array
+ * index of a node sysdev or _hstate == node id.
+ * This is here to avoid any static dependency of the node sysdev driver, in
+ * the base kernel, on the hugetlb module.
+ */
+struct node_hstate {
+ struct kobject *hugepages_kobj;
+ struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
+};
+struct node_hstate node_hstates[MAX_NUMNODES];
+
+/*
+ * A subset of global hstate attributes for node sysdevs
+ */
+static struct attribute *per_node_hstate_attrs[] = {
+ &nr_hugepages_attr.attr,
+ &free_hugepages_attr.attr,
+ &surplus_hugepages_attr.attr,
+ NULL,
+};
+
+static struct attribute_group per_node_hstate_attr_group = {
+ .attrs = per_node_hstate_attrs,
+};
+
+/*
+ * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * Returns node id via non-NULL nidp.
+ */
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ int nid;
+
+ for (nid = 0; nid < nr_node_ids; nid++) {
+ struct node_hstate *nhs = &node_hstates[nid];
+ int i;
+ for (i = 0; i < HUGE_MAX_HSTATE; i++)
+ if (nhs->hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = nid;
+ return &hstates[i];
+ }
+ }
+
+ BUG();
+ return NULL;
+}
+
+/*
+ * Unregister hstate attributes from a single node sysdev.
+ * No-op if no hstate attributes attached.
+ */
+void hugetlb_unregister_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+
+ if (!nhs->hugepages_kobj)
+ return; /* no hstate attributes */
+
+ for_each_hstate(h)
+ if (nhs->hstate_kobjs[h - hstates]) {
+ kobject_put(nhs->hstate_kobjs[h - hstates]);
+ nhs->hstate_kobjs[h - hstates] = NULL;
+ }
+
+ kobject_put(nhs->hugepages_kobj);
+ nhs->hugepages_kobj = NULL;
+}
+
+/*
+ * hugetlb module exit: unregister hstate attributes from node sysdevs
+ * that have them.
+ */
+static void hugetlb_unregister_all_nodes(void)
+{
+ int nid;
+
+ /*
+ * disable node sysdev registrations.
+ */
+ register_hugetlbfs_with_node(NULL, NULL);
+
+ /*
+ * remove hstate attributes from any nodes that have them.
+ */
+ for (nid = 0; nid < nr_node_ids; nid++)
+ hugetlb_unregister_node(&node_devices[nid]);
+}
+
+/*
+ * Register hstate attributes for a single node sysdev.
+ * No-op if attributes already registered.
+ */
+void hugetlb_register_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+ int err;
+
+ if (nhs->hugepages_kobj)
+ return; /* already allocated */
+
+ nhs->hugepages_kobj = kobject_create_and_add("hugepages",
+ &node->sysdev.kobj);
+ if (!nhs->hugepages_kobj)
+ return;
+
+ for_each_hstate(h) {
+ err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
+ nhs->hstate_kobjs,
+ &per_node_hstate_attr_group);
+ if (err) {
+ printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
+ " for node %d\n",
+ h->name, node->sysdev.id);
+ hugetlb_unregister_node(node);
+ break;
+ }
+ }
+}
+
+/*
+ * hugetlb init time: register hstate attributes for all registered node
+ * sysdevs of nodes that have memory. All on-line nodes should have
+ * registered their associated sysdev by this time.
+ */
+static void hugetlb_register_all_nodes(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_HIGH_MEMORY) {
+ struct node *node = &node_devices[nid];
+ if (node->sysdev.id == nid)
+ hugetlb_register_node(node);
+ }
+
+ /*
+ * Let the node sysdev driver know we're here so it can
+ * [un]register hstate attributes on node hotplug.
+ */
+ register_hugetlbfs_with_node(hugetlb_register_node,
+ hugetlb_unregister_node);
+}
+#else /* !CONFIG_NUMA */
+
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ BUG();
+ if (nidp)
+ *nidp = -1;
+ return NULL;
+}
+
+static void hugetlb_unregister_all_nodes(void) { }
+
+static void hugetlb_register_all_nodes(void) { }
+
+#endif
+
static void __exit hugetlb_exit(void)
{
struct hstate *h;
+ hugetlb_unregister_all_nodes();
+
for_each_hstate(h) {
kobject_put(hstate_kobjs[h - hstates]);
}
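After the registration above, each node with memory should carry a hugepages/ directory under its sysdev, with one subdirectory per hstate exposing the reduced attribute set (nr_hugepages, free_hugepages, surplus_hugepages). A userspace sketch reading one of them; the path assumes the usual node sysdev location and a 2MB hstate, so adjust it for other configurations:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/node/node0/hugepages/"
			   "hugepages-2048kB/nr_hugepages";
	unsigned long nr = 0;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &nr) == 1)
		printf("node0: %lu huge pages\n", nr);
	fclose(f);
	return 0;
}

Writing to the same file adjusts only that node's pool, which is the nid != NUMA_NO_NODE branch of nr_hugepages_store_common() earlier in this patch.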
@@ -1418,6 +1755,8 @@ static int __init hugetlb_init(void)
hugetlb_sysfs_init();
+ hugetlb_register_all_nodes();
+
return 0;
}
module_init(hugetlb_init);
@@ -1441,7 +1780,8 @@ void __init hugetlb_add_hstate(unsigned order)
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
- h->hugetlb_next_nid = first_node(node_online_map);
+ h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
+ h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
@@ -1504,9 +1844,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
}
#ifdef CONFIG_SYSCTL
-int hugetlb_sysctl_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
- size_t *length, loff_t *ppos)
+static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+ struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
@@ -1516,19 +1856,47 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, buffer, length, ppos);
+
+ if (write) {
+ NODEMASK_ALLOC(nodemask_t, nodes_allowed,
+ GFP_KERNEL | __GFP_NORETRY);
+ if (!(obey_mempolicy &&
+ init_nodemask_of_mempolicy(nodes_allowed))) {
+ NODEMASK_FREE(nodes_allowed);
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+ }
+ h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
- if (write)
- h->max_huge_pages = set_max_huge_pages(h, tmp);
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ NODEMASK_FREE(nodes_allowed);
+ }
return 0;
}
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ return hugetlb_sysctl_handler_common(false, table, write,
+ buffer, length, ppos);
+}
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ return hugetlb_sysctl_handler_common(true, table, write,
+ buffer, length, ppos);
+}
+#endif /* CONFIG_NUMA */
+
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, buffer, length, ppos);
if (hugepages_treat_as_movable)
htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
else
@@ -1537,7 +1905,7 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
}
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
@@ -1548,7 +1916,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (write) {
spin_lock(&hugetlb_lock);
@@ -1689,7 +2057,7 @@ static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return 0;
}
-struct vm_operations_struct hugetlb_vm_ops = {
+const struct vm_operations_struct hugetlb_vm_ops = {
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
@@ -1871,6 +2239,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ (vma->vm_pgoff >> PAGE_SHIFT);
mapping = (struct address_space *)page_private(page);
+ /*
+ * Take the mapping lock for the duration of the table walk. As
+ * this mapping should be shared between all the VMAs,
+ * __unmap_hugepage_range() is called as the lock is already held
+ */
+ spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
@@ -1884,10 +2258,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* from the time of fork. This would look like data corruption
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
- unmap_hugepage_range(iter_vma,
+ __unmap_hugepage_range(iter_vma,
address, address + huge_page_size(h),
page);
}
+ spin_unlock(&mapping->i_mmap_lock);
return 1;
}
@@ -1927,6 +2302,9 @@ retry_avoidcopy:
outside_reserve = 1;
page_cache_get(old_page);
+
+ /* Drop page_table_lock as buddy allocator may be called */
+ spin_unlock(&mm->page_table_lock);
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
@@ -1944,19 +2322,25 @@ retry_avoidcopy:
if (unmap_ref_private(mm, vma, old_page, address)) {
BUG_ON(page_count(old_page) != 1);
BUG_ON(huge_pte_none(pte));
+ spin_lock(&mm->page_table_lock);
goto retry_avoidcopy;
}
WARN_ON_ONCE(1);
}
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return -PTR_ERR(new_page);
}
- spin_unlock(&mm->page_table_lock);
copy_huge_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
- spin_lock(&mm->page_table_lock);
+ /*
+ * Retake the page_table_lock to check for racing updates
+ * before the page tables are altered
+ */
+ spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
@@ -1984,6 +2368,26 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
return find_lock_page(mapping, idx);
}
+/*
+ * Return whether there is a pagecache page to back given address within VMA.
+ * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
+ */
+static bool hugetlbfs_pagecache_present(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct address_space *mapping;
+ pgoff_t idx;
+ struct page *page;
+
+ mapping = vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, vma, address);
+
+ page = find_get_page(mapping, idx);
+ if (page)
+ put_page(page);
+ return page != NULL;
+}
+
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int flags)
{
@@ -2179,54 +2583,55 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
return NULL;
}
-static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
-{
- if (!ptep || write || shared)
- return 0;
- else
- return huge_pte_none(huge_ptep_get(ptep));
-}
-
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i,
- int write)
+ unsigned int flags)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
int remainder = *length;
struct hstate *h = hstate_vma(vma);
- int zeropage_ok = 0;
- int shared = vma->vm_flags & VM_SHARED;
spin_lock(&mm->page_table_lock);
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
+ int absent;
struct page *page;
/*
* Some archs (sparc64, sh*) have multiple pte_ts to
- * each hugepage. We have to make * sure we get the
+ * each hugepage. We have to make sure we get the
* first, for the page indexing below to work.
*/
pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
- if (huge_zeropage_ok(pte, write, shared))
- zeropage_ok = 1;
+ absent = !pte || huge_pte_none(huge_ptep_get(pte));
+
+ /*
+ * When coredumping, it suits get_dump_page if we just return
+ * an error where there's an empty slot with no huge pagecache
+ * to back it. This way, we avoid allocating a hugepage, and
+ * the sparse dumpfile avoids allocating disk blocks, but its
+ * huge holes still show up with zeroes where they need to be.
+ */
+ if (absent && (flags & FOLL_DUMP) &&
+ !hugetlbfs_pagecache_present(h, vma, vaddr)) {
+ remainder = 0;
+ break;
+ }
- if (!pte ||
- (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
- (write && !pte_write(huge_ptep_get(pte)))) {
+ if (absent ||
+ ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
int ret;
spin_unlock(&mm->page_table_lock);
- ret = hugetlb_fault(mm, vma, vaddr, write);
+ ret = hugetlb_fault(mm, vma, vaddr,
+ (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
spin_lock(&mm->page_table_lock);
if (!(ret & VM_FAULT_ERROR))
continue;
remainder = 0;
- if (!i)
- i = -EFAULT;
break;
}
@@ -2234,10 +2639,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = pte_page(huge_ptep_get(pte));
same_page:
if (pages) {
- if (zeropage_ok)
- pages[i] = ZERO_PAGE(0);
- else
- pages[i] = mem_map_offset(page, pfn_offset);
+ pages[i] = mem_map_offset(page, pfn_offset);
get_page(pages[i]);
}
@@ -2261,7 +2663,7 @@ same_page:
*length = remainder;
*position = vaddr;
- return i;
+ return i ? i : -EFAULT;
}
void hugetlb_change_protection(struct vm_area_struct *vma,
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
new file mode 100644
index 000000000000..10ea71905c1f
--- /dev/null
+++ b/mm/hwpoison-inject.c
@@ -0,0 +1,138 @@
+/* Inject a hwpoison memory failure on an arbitrary pfn */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include "internal.h"
+
+static struct dentry *hwpoison_dir;
+
+static int hwpoison_inject(void *data, u64 val)
+{
+ unsigned long pfn = val;
+ struct page *p;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!hwpoison_filter_enable)
+ goto inject;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+
+ p = pfn_to_page(pfn);
+ /*
+ * This implies unable to support free buddy pages.
+ */
+ if (!get_page_unless_zero(p))
+ return 0;
+
+ if (!PageLRU(p))
+ shake_page(p, 0);
+ /*
+ * This implies unable to support non-LRU pages.
+ */
+ if (!PageLRU(p))
+ return 0;
+
+ /*
+ * do a racy check with elevated page count, to make sure PG_hwpoison
+ * will only be set for the targeted owner (or on a free page).
+ * We temporarily take page lock for try_get_mem_cgroup_from_page().
+ * __memory_failure() will redo the check reliably inside page lock.
+ */
+ lock_page(p);
+ err = hwpoison_filter(p);
+ unlock_page(p);
+ if (err)
+ return 0;
+
+inject:
+ printk(KERN_INFO "Injecting memory failure at pfn %lx\n", pfn);
+ return __memory_failure(pfn, 18, MF_COUNT_INCREASED);
+}
+
+static int hwpoison_unpoison(void *data, u64 val)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return unpoison_memory(val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n");
+DEFINE_SIMPLE_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison, "%lli\n");
+
+static void pfn_inject_exit(void)
+{
+ if (hwpoison_dir)
+ debugfs_remove_recursive(hwpoison_dir);
+}
+
+static int pfn_inject_init(void)
+{
+ struct dentry *dentry;
+
+ hwpoison_dir = debugfs_create_dir("hwpoison", NULL);
+ if (hwpoison_dir == NULL)
+ return -ENOMEM;
+
+ /*
+ * Note that the below poison/unpoison interfaces do not involve
+ * hardware status change, hence do not require hardware support.
+ * They are mainly for testing hwpoison at the software level.
+ */
+ dentry = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
+ NULL, &hwpoison_fops);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_file("unpoison-pfn", 0600, hwpoison_dir,
+ NULL, &unpoison_fops);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u32("corrupt-filter-enable", 0600,
+ hwpoison_dir, &hwpoison_filter_enable);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u32("corrupt-filter-dev-major", 0600,
+ hwpoison_dir, &hwpoison_filter_dev_major);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u32("corrupt-filter-dev-minor", 0600,
+ hwpoison_dir, &hwpoison_filter_dev_minor);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u64("corrupt-filter-flags-mask", 0600,
+ hwpoison_dir, &hwpoison_filter_flags_mask);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u64("corrupt-filter-flags-value", 0600,
+ hwpoison_dir, &hwpoison_filter_flags_value);
+ if (!dentry)
+ goto fail;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+ dentry = debugfs_create_u64("corrupt-filter-memcg", 0600,
+ hwpoison_dir, &hwpoison_filter_memcg);
+ if (!dentry)
+ goto fail;
+#endif
+
+ return 0;
+fail:
+ pfn_inject_exit();
+ return -ENOMEM;
+}
+
+module_init(pfn_inject_init);
+module_exit(pfn_inject_exit);
+MODULE_LICENSE("GPL");
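The debugfs files created above are meant to be driven from userspace. A minimal sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug and the caller has CAP_SYS_ADMIN as hwpoison_inject() requires:

#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *f;

	if (argc < 2)
		return 1;
	f = fopen("/sys/kernel/debug/hwpoison/corrupt-pfn", "w");
	if (!f)
		return 1;
	fprintf(f, "%s\n", argv[1]);	/* pfn to poison, e.g. 0x19a00 */
	return fclose(f) ? 1 : 0;
}

The corrupt-filter-* files take plain u32/u64 values the same way and gate the injection through hwpoison_filter(), as the code above shows.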
diff --git a/mm/internal.h b/mm/internal.h
index f290c4db528b..6a697bb97fc5 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -37,6 +37,8 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}
+extern unsigned long highest_memmap_pfn;
+
/*
* in mm/vmscan.c:
*/
@@ -46,9 +48,11 @@ extern void putback_lru_page(struct page *page);
/*
* in mm/page_alloc.c
*/
-extern unsigned long highest_memmap_pfn;
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
+#ifdef CONFIG_MEMORY_FAILURE
+extern bool is_free_buddy_page(struct page *page);
+#endif
/*
@@ -62,7 +66,7 @@ static inline unsigned long page_order(struct page *page)
return page_private(page);
}
-#ifdef CONFIG_HAVE_MLOCK
+#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,22 +75,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
-#endif
/*
- * unevictable_migrate_page() called only from migrate_page_copy() to
- * migrate unevictable flag to new page.
- * Note that the old page has been isolated from the LRU lists at this
- * point so we don't need to worry about LRU statistics.
- */
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
- if (TestClearPageUnevictable(old))
- SetPageUnevictable(new);
-}
-
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-/*
* Called only in fault path via page_evictable() for a new page
* to determine if it's being mapped into a LOCKED vma.
* If so, mark page as mlocked.
@@ -106,9 +96,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
}
/*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
*/
extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
@@ -143,7 +134,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
}
}
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
return 0;
@@ -152,7 +143,7 @@ static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
/*
* Return the mem_map entry representing the 'offset' subpage within
@@ -250,13 +241,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
}
#endif /* CONFIG_SPARSEMEM */
-#define GUP_FLAGS_WRITE 0x1
-#define GUP_FLAGS_FORCE 0x2
-#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
-#define GUP_FLAGS_IGNORE_SIGKILL 0x8
-
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
+ unsigned long start, int len, unsigned int foll_flags,
struct page **pages, struct vm_area_struct **vmas);
#define ZONE_RECLAIM_NOSCAN -2
@@ -264,3 +250,12 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
#define ZONE_RECLAIM_SOME 0
#define ZONE_RECLAIM_SUCCESS 1
#endif
+
+extern int hwpoison_filter(struct page *p);
+
+extern u32 hwpoison_filter_dev_major;
+extern u32 hwpoison_filter_dev_minor;
+extern u64 hwpoison_filter_flags_mask;
+extern u64 hwpoison_filter_flags_value;
+extern u64 hwpoison_filter_memcg;
+extern u32 hwpoison_filter_enable;
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index d5292fc6f523..177a5169bbde 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
};
static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
/*
* Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
}
for_each_possible_cpu(i) {
- per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+ per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
pr_info("kmemleak: kmalloc(129) = %p\n",
- per_cpu(test_pointer, i));
+ per_cpu(kmemleak_test_pointer, i));
}
return 0;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 487267310a84..5b069e4f5e48 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -92,11 +92,14 @@
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>
+#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
/*
@@ -106,7 +109,7 @@
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
+#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
#define BYTES_PER_POINTER sizeof(void *)
@@ -116,10 +119,13 @@
/* scanning area inside a memory block */
struct kmemleak_scan_area {
struct hlist_node node;
- unsigned long offset;
- size_t length;
+ unsigned long start;
+ size_t size;
};
+#define KMEMLEAK_GREY 0
+#define KMEMLEAK_BLACK -1
+
/*
* Structure holding the metadata for each allocated memory block.
* Modifications to such objects should be made while holding the
@@ -143,6 +149,8 @@ struct kmemleak_object {
int min_count;
/* the total number of pointers found pointing to this object */
int count;
+ /* checksum for detecting modified objects */
+ u32 checksum;
/* memory ranges to be scanned inside an object (empty for all) */
struct hlist_head area_list;
unsigned long trace[MAX_TRACE];
@@ -158,8 +166,15 @@ struct kmemleak_object {
#define OBJECT_REPORTED (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW (1 << 3)
+
+/* number of bytes to print per line; must be 16 or 32 */
+#define HEX_ROW_SIZE 16
+/* number of bytes to print at a time (1, 2, 4, 8) */
+#define HEX_GROUP_SIZE 1
+/* include ASCII after the hex output */
+#define HEX_ASCII 1
+/* max number of lines to be printed */
+#define HEX_MAX_LINES 2
/* the list of all allocated objects */
static LIST_HEAD(object_list);
@@ -226,13 +241,14 @@ struct early_log {
const void *ptr; /* allocated/freed memory block */
size_t size; /* memory block size */
int min_count; /* minimum reference count */
- unsigned long offset; /* scan area offset */
- size_t length; /* scan area length */
+ unsigned long trace[MAX_TRACE]; /* stack trace */
+ unsigned int trace_len; /* stack trace length */
};
/* early logging buffer and current position */
-static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
-static int crt_early_log;
+static struct early_log
+ early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
+static int crt_early_log __initdata;
static void kmemleak_disable(void);
@@ -255,6 +271,35 @@ static void kmemleak_disable(void);
} while (0)
/*
+ * Printing of the objects hex dump to the seq file. The number of lines to be
+ * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
+ * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
+ * with the object->lock held.
+ */
+static void hex_dump_object(struct seq_file *seq,
+ struct kmemleak_object *object)
+{
+ const u8 *ptr = (const u8 *)object->pointer;
+ int i, len, remaining;
+ unsigned char linebuf[HEX_ROW_SIZE * 5];
+
+ /* limit the number of lines to HEX_MAX_LINES */
+ remaining = len =
+ min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
+
+ seq_printf(seq, " hex dump (first %d bytes):\n", len);
+ for (i = 0; i < len; i += HEX_ROW_SIZE) {
+ int linelen = min(remaining, HEX_ROW_SIZE);
+
+ remaining -= HEX_ROW_SIZE;
+ hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
+ HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
+ HEX_ASCII);
+ seq_printf(seq, " %s\n", linebuf);
+ }
+}
+
+/*
* Object colors, encoded with count and min_count:
* - white - orphan object, not enough references to it (count < min_count)
* - gray - not orphan, not marked as false positive (min_count == 0) or
@@ -264,19 +309,16 @@ static void kmemleak_disable(void);
* Newly created objects don't have any color assigned (object->count == -1)
* before the next memory scan when they become white.
*/
-static int color_white(const struct kmemleak_object *object)
-{
- return object->count != -1 && object->count < object->min_count;
-}
-
-static int color_gray(const struct kmemleak_object *object)
+static bool color_white(const struct kmemleak_object *object)
{
- return object->min_count != -1 && object->count >= object->min_count;
+ return object->count != KMEMLEAK_BLACK &&
+ object->count < object->min_count;
}
-static int color_black(const struct kmemleak_object *object)
+static bool color_gray(const struct kmemleak_object *object)
{
- return object->min_count == -1;
+ return object->min_count != KMEMLEAK_BLACK &&
+ object->count >= object->min_count;
}
/*
@@ -284,9 +326,9 @@ static int color_black(const struct kmemleak_object *object)
* not be deleted and have a minimum age to avoid false positives caused by
* pointers temporarily stored in CPU registers.
*/
-static int unreferenced_object(struct kmemleak_object *object)
+static bool unreferenced_object(struct kmemleak_object *object)
{
- return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+ return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
time_before_eq(object->jiffies + jiffies_min_age,
jiffies_last_scan);
}
@@ -299,11 +341,14 @@ static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
+ unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
- seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+ object->comm, object->pid, object->jiffies,
+ msecs_age / 1000, msecs_age % 1000);
+ hex_dump_object(seq, object);
seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
@@ -330,6 +375,8 @@ static void dump_object_info(struct kmemleak_object *object)
object->comm, object->pid, object->jiffies);
pr_notice(" min_count = %d\n", object->min_count);
pr_notice(" count = %d\n", object->count);
+ pr_notice(" flags = 0x%lx\n", object->flags);
+ pr_notice(" checksum = %d\n", object->checksum);
pr_notice(" backtrace:\n");
print_stack_trace(&trace, 4);
}
@@ -434,21 +481,36 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
}
/*
+ * Save stack trace to the given array of MAX_TRACE size.
+ */
+static int __save_stack_trace(unsigned long *trace)
+{
+ struct stack_trace stack_trace;
+
+ stack_trace.max_entries = MAX_TRACE;
+ stack_trace.nr_entries = 0;
+ stack_trace.entries = trace;
+ stack_trace.skip = 2;
+ save_stack_trace(&stack_trace);
+
+ return stack_trace.nr_entries;
+}
+
+/*
* Create the metadata (struct kmemleak_object) corresponding to an allocated
* memory block and add it to the object_list and object_tree_root.
*/
-static void create_object(unsigned long ptr, size_t size, int min_count,
- gfp_t gfp)
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
struct prio_tree_node *node;
- struct stack_trace trace;
object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
if (!object) {
kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
- return;
+ return NULL;
}
INIT_LIST_HEAD(&object->object_list);
@@ -456,12 +518,13 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
INIT_HLIST_HEAD(&object->area_list);
spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+ object->flags = OBJECT_ALLOCATED;
object->pointer = ptr;
object->size = size;
object->min_count = min_count;
- object->count = -1; /* no color initially */
+ object->count = 0; /* white color initially */
object->jiffies = jiffies;
+ object->checksum = 0;
/* task information */
if (in_irq()) {
@@ -482,18 +545,14 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
}
/* kernel backtrace */
- trace.max_entries = MAX_TRACE;
- trace.nr_entries = 0;
- trace.entries = object->trace;
- trace.skip = 1;
- save_stack_trace(&trace);
- object->trace_len = trace.nr_entries;
+ object->trace_len = __save_stack_trace(object->trace);
INIT_PRIO_TREE_NODE(&object->tree_node);
object->tree_node.start = ptr;
object->tree_node.last = ptr + size - 1;
write_lock_irqsave(&kmemleak_lock, flags);
+
min_addr = min(min_addr, ptr);
max_addr = max(max_addr, ptr + size);
node = prio_tree_insert(&object_tree_root, &object->tree_node);
@@ -504,20 +563,19 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
* random memory blocks.
*/
if (node != &object->tree_node) {
- unsigned long flags;
-
kmemleak_stop("Cannot insert 0x%lx into the object search tree "
"(already existing)\n", ptr);
object = lookup_object(ptr, 1);
- spin_lock_irqsave(&object->lock, flags);
+ spin_lock(&object->lock);
dump_object_info(object);
- spin_unlock_irqrestore(&object->lock, flags);
+ spin_unlock(&object->lock);
goto out;
}
list_add_tail_rcu(&object->object_list, &object_list);
out:
write_unlock_irqrestore(&kmemleak_lock, flags);
+ return object;
}
/*
@@ -604,60 +662,68 @@ static void delete_object_part(unsigned long ptr, size_t size)
put_object(object);
}
-/*
- * Make a object permanently as gray-colored so that it can no longer be
- * reported as a leak. This is used in general to mark a false positive.
- */
-static void make_gray_object(unsigned long ptr)
+
+static void __paint_it(struct kmemleak_object *object, int color)
+{
+ object->min_count = color;
+ if (color == KMEMLEAK_BLACK)
+ object->flags |= OBJECT_NO_SCAN;
+}
+
+static void paint_it(struct kmemleak_object *object, int color)
{
unsigned long flags;
+
+ spin_lock_irqsave(&object->lock, flags);
+ __paint_it(object, color);
+ spin_unlock_irqrestore(&object->lock, flags);
+}
+
+static void paint_ptr(unsigned long ptr, int color)
+{
struct kmemleak_object *object;
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
+ kmemleak_warn("Trying to color unknown object "
+ "at 0x%08lx as %s\n", ptr,
+ (color == KMEMLEAK_GREY) ? "Grey" :
+ (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
return;
}
-
- spin_lock_irqsave(&object->lock, flags);
- object->min_count = 0;
- spin_unlock_irqrestore(&object->lock, flags);
+ paint_it(object, color);
put_object(object);
}
/*
+ * Make an object permanently as gray-colored so that it can no longer be
+ * reported as a leak. This is used in general to mark a false positive.
+ */
+static void make_gray_object(unsigned long ptr)
+{
+ paint_ptr(ptr, KMEMLEAK_GREY);
+}
+
+/*
* Mark the object as black-colored so that it is ignored from scans and
* reporting.
*/
static void make_black_object(unsigned long ptr)
{
- unsigned long flags;
- struct kmemleak_object *object;
-
- object = find_and_get_object(ptr, 0);
- if (!object) {
- kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
- return;
- }
-
- spin_lock_irqsave(&object->lock, flags);
- object->min_count = -1;
- spin_unlock_irqrestore(&object->lock, flags);
- put_object(object);
+ paint_ptr(ptr, KMEMLEAK_BLACK);
}
/*
* Add a scanning area to the object. If at least one such area is added,
* kmemleak will only scan these ranges rather than the whole memory block.
*/
-static void add_scan_area(unsigned long ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
struct kmemleak_scan_area *area;
- object = find_and_get_object(ptr, 0);
+ object = find_and_get_object(ptr, 1);
if (!object) {
kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
ptr);
@@ -671,7 +737,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
}
spin_lock_irqsave(&object->lock, flags);
- if (offset + length > object->size) {
+ if (ptr + size > object->pointer + object->size) {
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
@@ -679,8 +745,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
}
INIT_HLIST_NODE(&area->node);
- area->offset = offset;
- area->length = length;
+ area->start = ptr;
+ area->size = size;
hlist_add_head(&area->node, &object->area_list);
out_unlock:
@@ -715,14 +781,15 @@ static void object_no_scan(unsigned long ptr)
* Log an early kmemleak_* call to the early_log buffer. These calls will be
* processed later once kmemleak is fully initialized.
*/
-static void log_early(int op_type, const void *ptr, size_t size,
- int min_count, unsigned long offset, size_t length)
+static void __init log_early(int op_type, const void *ptr, size_t size,
+ int min_count)
{
unsigned long flags;
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- pr_warning("Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded, "
+ "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
kmemleak_disable();
return;
}
@@ -737,25 +804,55 @@ static void log_early(int op_type, const void *ptr, size_t size,
log->ptr = ptr;
log->size = size;
log->min_count = min_count;
- log->offset = offset;
- log->length = length;
+ if (op_type == KMEMLEAK_ALLOC)
+ log->trace_len = __save_stack_trace(log->trace);
crt_early_log++;
local_irq_restore(flags);
}
/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc(struct early_log *log)
+{
+ struct kmemleak_object *object;
+ unsigned long flags;
+ int i;
+
+ if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+ return;
+
+ /*
+ * RCU locking needed to ensure object is not freed via put_object().
+ */
+ rcu_read_lock();
+ object = create_object((unsigned long)log->ptr, log->size,
+ log->min_count, GFP_ATOMIC);
+ if (!object)
+ goto out;
+ spin_lock_irqsave(&object->lock, flags);
+ for (i = 0; i < log->trace_len; i++)
+ object->trace[i] = log->trace[i];
+ object->trace_len = log->trace_len;
+ spin_unlock_irqrestore(&object->lock, flags);
+out:
+ rcu_read_unlock();
+}
+
+/*
* Memory allocation function callback. This function is called from the
* kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
* vmalloc etc.).
*/
-void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
+void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp)
{
pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
create_object((unsigned long)ptr, size, min_count, gfp);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+ log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -763,14 +860,14 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
* Memory freeing function callback. This function is called from the kernel
* allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
*/
-void kmemleak_free(const void *ptr)
+void __ref kmemleak_free(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -778,14 +875,14 @@ EXPORT_SYMBOL_GPL(kmemleak_free);
* Partial memory freeing function callback. This function is usually called
* from bootmem allocator when (part of) a memory block is freed.
*/
-void kmemleak_free_part(const void *ptr, size_t size)
+void __ref kmemleak_free_part(const void *ptr, size_t size)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
delete_object_part((unsigned long)ptr, size);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+ log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -793,14 +890,14 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
* Mark an already allocated memory block as a false positive. This will cause
* the block to no longer be reported as leak and always be scanned.
*/
-void kmemleak_not_leak(const void *ptr)
+void __ref kmemleak_not_leak(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
make_gray_object((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
@@ -809,47 +906,60 @@ EXPORT_SYMBOL(kmemleak_not_leak);
* corresponding block is not a leak and does not contain any references to
* other allocated memory blocks.
*/
-void kmemleak_ignore(const void *ptr)
+void __ref kmemleak_ignore(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
make_black_object((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
/*
* Limit the range to be scanned in an allocated memory block.
*/
-void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
- gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- add_scan_area((unsigned long)ptr, offset, length, gfp);
+ add_scan_area((unsigned long)ptr, size, gfp);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+ log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
/*
* Inform kmemleak not to scan the given memory block.
*/
-void kmemleak_no_scan(const void *ptr)
+void __ref kmemleak_no_scan(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
object_no_scan((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
/*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+ u32 old_csum = object->checksum;
+
+ if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+ return false;
+
+ object->checksum = crc32(0, (void *)object->pointer, object->size);
+ return object->checksum != old_csum;
+}
+
+/*
* Memory scanning is a long process and it needs to be interruptable. This
* function checks whether such interrupt condition occured.
*/
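update_checksum() above is what later lets the scanner notice objects whose contents changed between passes. A standalone illustration of the same idea outside the kernel, with zlib's crc32() standing in for the kernel's and a plain struct standing in for kmemleak_object (build with -lz):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct tracked {
	const unsigned char *ptr;
	size_t size;
	uLong checksum;		/* value recorded at the previous scan */
};

/* Recompute the CRC and report whether the block changed since last time. */
static bool block_modified(struct tracked *obj)
{
	uLong old = obj->checksum;

	obj->checksum = crc32(0L, obj->ptr, (uInt)obj->size);
	return obj->checksum != old;
}

int main(void)
{
	unsigned char buf[32] = "initial contents";
	struct tracked obj = { buf, sizeof(buf), 0 };

	block_modified(&obj);			/* baseline pass */
	memcpy(buf, "modified contents", 18);
	printf("modified: %d\n", block_modified(&obj));	/* prints 1 */
	return 0;
}

In kmemleak_scan() further down, a changed checksum on a still-white allocated object is what re-queues it onto the gray list for the second scanning pass.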
@@ -882,15 +992,22 @@ static void scan_block(void *_start, void *_end,
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
for (ptr = start; ptr < end; ptr++) {
- unsigned long flags;
- unsigned long pointer = *ptr;
struct kmemleak_object *object;
+ unsigned long flags;
+ unsigned long pointer;
if (allow_resched)
cond_resched();
if (scan_should_stop())
break;
+ /* don't scan uninitialized memory */
+ if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
+ BYTES_PER_POINTER))
+ continue;
+
+ pointer = *ptr;
+
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -921,11 +1038,14 @@ static void scan_block(void *_start, void *_end,
* added to the gray_list.
*/
object->count++;
- if (color_gray(object))
+ if (color_gray(object)) {
list_add_tail(&object->gray_list, &gray_list);
- else
- put_object(object);
+ spin_unlock_irqrestore(&object->lock, flags);
+ continue;
+ }
+
spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
}
}
@@ -940,8 +1060,8 @@ static void scan_object(struct kmemleak_object *object)
unsigned long flags;
/*
- * Once the object->lock is aquired, the corresponding memory block
- * cannot be freed (the same lock is aquired in delete_object).
+ * Once the object->lock is acquired, the corresponding memory block
+ * cannot be freed (the same lock is acquired in delete_object).
*/
spin_lock_irqsave(&object->lock, flags);
if (object->flags & OBJECT_NO_SCAN)
@@ -949,19 +1069,63 @@ static void scan_object(struct kmemleak_object *object)
if (!(object->flags & OBJECT_ALLOCATED))
/* already freed object */
goto out;
- if (hlist_empty(&object->area_list))
- scan_block((void *)object->pointer,
- (void *)(object->pointer + object->size), object, 0);
- else
+ if (hlist_empty(&object->area_list)) {
+ void *start = (void *)object->pointer;
+ void *end = (void *)(object->pointer + object->size);
+
+ while (start < end && (object->flags & OBJECT_ALLOCATED) &&
+ !(object->flags & OBJECT_NO_SCAN)) {
+ scan_block(start, min(start + MAX_SCAN_SIZE, end),
+ object, 0);
+ start += MAX_SCAN_SIZE;
+
+ spin_unlock_irqrestore(&object->lock, flags);
+ cond_resched();
+ spin_lock_irqsave(&object->lock, flags);
+ }
+ } else
hlist_for_each_entry(area, elem, &object->area_list, node)
- scan_block((void *)(object->pointer + area->offset),
- (void *)(object->pointer + area->offset
- + area->length), object, 0);
+ scan_block((void *)area->start,
+ (void *)(area->start + area->size),
+ object, 0);
out:
spin_unlock_irqrestore(&object->lock, flags);
}
/*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+ struct kmemleak_object *object, *tmp;
+
+ /*
+ * The list traversal is safe for both tail additions and removals
+ * from inside the loop. The kmemleak objects cannot be freed from
+ * outside the loop because their use_count was incremented.
+ */
+ object = list_entry(gray_list.next, typeof(*object), gray_list);
+ while (&object->gray_list != &gray_list) {
+ cond_resched();
+
+ /* may add new objects to the list */
+ if (!scan_should_stop())
+ scan_object(object);
+
+ tmp = list_entry(object->gray_list.next, typeof(*object),
+ gray_list);
+
+ /* remove the object from the list and release it */
+ list_del(&object->gray_list);
+ put_object(object);
+
+ object = tmp;
+ }
+ WARN_ON(!list_empty(&gray_list));
+}
+
+/*
* Scan data sections and all the referenced memory blocks allocated via the
* kernel's standard allocators. This function must be called with the
* scan_mutex held.
@@ -969,11 +1133,9 @@ out:
static void kmemleak_scan(void)
{
unsigned long flags;
- struct kmemleak_object *object, *tmp;
- struct task_struct *task;
+ struct kmemleak_object *object;
int i;
int new_leaks = 0;
- int gray_list_pass = 0;
jiffies_last_scan = jiffies;
@@ -994,7 +1156,6 @@ static void kmemleak_scan(void)
#endif
/* reset the reference count (whiten the object) */
object->count = 0;
- object->flags &= ~OBJECT_NEW;
if (color_gray(object) && get_object(object))
list_add_tail(&object->gray_list, &gray_list);
@@ -1037,76 +1198,51 @@ static void kmemleak_scan(void)
}
/*
- * Scanning the task stacks may introduce false negatives and it is
- * not enabled by default.
+ * Scanning the task stacks (may introduce false negatives).
*/
if (kmemleak_stack_scan) {
+ struct task_struct *p, *g;
+
read_lock(&tasklist_lock);
- for_each_process(task)
- scan_block(task_stack_page(task),
- task_stack_page(task) + THREAD_SIZE,
- NULL, 0);
+ do_each_thread(g, p) {
+ scan_block(task_stack_page(p), task_stack_page(p) +
+ THREAD_SIZE, NULL, 0);
+ } while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
/*
* Scan the objects already referenced from the sections scanned
- * above. More objects will be referenced and, if there are no memory
- * leaks, all the objects will be scanned. The list traversal is safe
- * for both tail additions and removals from inside the loop. The
- * kmemleak objects cannot be freed from outside the loop because their
- * use_count was increased.
+ * above.
*/
-repeat:
- object = list_entry(gray_list.next, typeof(*object), gray_list);
- while (&object->gray_list != &gray_list) {
- cond_resched();
-
- /* may add new objects to the list */
- if (!scan_should_stop())
- scan_object(object);
-
- tmp = list_entry(object->gray_list.next, typeof(*object),
- gray_list);
-
- /* remove the object from the list and release it */
- list_del(&object->gray_list);
- put_object(object);
-
- object = tmp;
- }
-
- if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
- goto scan_end;
+ scan_gray_list();
/*
- * Check for new objects allocated during this scanning and add them
- * to the gray list.
+ * Check for new or unreferenced objects modified since the previous
+ * scan and color them gray until the next scan.
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
spin_lock_irqsave(&object->lock, flags);
- if ((object->flags & OBJECT_NEW) && !color_black(object) &&
- get_object(object)) {
- object->flags &= ~OBJECT_NEW;
+ if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+ && update_checksum(object) && get_object(object)) {
+ /* color it gray temporarily */
+ object->count = object->min_count;
list_add_tail(&object->gray_list, &gray_list);
}
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
- if (!list_empty(&gray_list))
- goto repeat;
-
-scan_end:
- WARN_ON(!list_empty(&gray_list));
+ /*
+ * Re-scan the gray list for modified unreferenced objects.
+ */
+ scan_gray_list();
/*
- * If scanning was stopped or new objects were being allocated at a
- * higher rate than gray list scanning, do not report any new
- * unreferenced objects.
+ * If scanning was stopped do not report any new unreferenced objects.
*/
- if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+ if (scan_should_stop())
return;
/*
@@ -1170,7 +1306,7 @@ static int kmemleak_scan_thread(void *arg)
* Start the automatic memory scanning thread. This function must be called
* with the scan_mutex held.
*/
-void start_scan_thread(void)
+static void start_scan_thread(void)
{
if (scan_thread)
return;
@@ -1185,7 +1321,7 @@ void start_scan_thread(void)
* Stop the automatic memory scanning thread. This function must be called
* with the scan_mutex held.
*/
-void stop_scan_thread(void)
+static void stop_scan_thread(void)
{
if (scan_thread) {
kthread_stop(scan_thread);
@@ -1294,6 +1430,49 @@ static int kmemleak_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
+static int dump_str_object_info(const char *str)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+ unsigned long addr;
+
+	addr = simple_strtoul(str, NULL, 0);
+ object = find_and_get_object(addr, 0);
+ if (!object) {
+ pr_info("Unknown object at 0x%08lx\n", addr);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&object->lock, flags);
+ dump_object_info(object);
+ spin_unlock_irqrestore(&object->lock, flags);
+
+ put_object(object);
+ return 0;
+}
+
+/*
+ * We use grey instead of black to ensure we can do future scans on the same
+ * objects. If we did not do future scans these black objects could
+ * potentially contain references to newly allocated objects in the future and
+ * we'd end up with false positives.
+ */
+static void kmemleak_clear(void)
+{
+ struct kmemleak_object *object;
+ unsigned long flags;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if ((object->flags & OBJECT_REPORTED) &&
+ unreferenced_object(object))
+ __paint_it(object, KMEMLEAK_GREY);
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+}
+
/*
* File write operation to configure kmemleak at run-time. The following
* commands can be written to the /sys/kernel/debug/kmemleak file:
@@ -1305,6 +1484,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
* scan - trigger a memory scan
+ * clear - mark all current reported unreferenced kmemleak objects as
+ * grey to ignore printing them
+ * dump=... - dump information about the object found at the given address
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1345,6 +1527,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
}
} else if (strncmp(buf, "scan", 4) == 0)
kmemleak_scan();
+ else if (strncmp(buf, "clear", 5) == 0)
+ kmemleak_clear();
+ else if (strncmp(buf, "dump=", 5) == 0)
+ ret = dump_str_object_info(buf + 5);
else
ret = -EINVAL;
@@ -1371,7 +1557,7 @@ static const struct file_operations kmemleak_fops = {
* Perform the freeing of the kmemleak internal objects after waiting for any
* current memory scan to complete.
*/
-static int kmemleak_cleanup_thread(void *arg)
+static void kmemleak_do_cleanup(struct work_struct *work)
{
struct kmemleak_object *object;
@@ -1383,22 +1569,9 @@ static int kmemleak_cleanup_thread(void *arg)
delete_object_full(object->pointer);
rcu_read_unlock();
mutex_unlock(&scan_mutex);
-
- return 0;
}
-/*
- * Start the clean-up thread.
- */
-static void kmemleak_cleanup(void)
-{
- struct task_struct *cleanup_thread;
-
- cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
- "kmemleak-clean");
- if (IS_ERR(cleanup_thread))
- pr_warning("Failed to create the clean-up thread\n");
-}
+static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
/*
* Disable kmemleak. No memory allocation/freeing will be traced once this
@@ -1416,7 +1589,7 @@ static void kmemleak_disable(void)
/* check whether it is too early for a kernel thread */
if (atomic_read(&kmemleak_initialized))
- kmemleak_cleanup();
+ schedule_work(&cleanup_work);
pr_info("Kernel memory leak detector disabled\n");
}
@@ -1469,8 +1642,7 @@ void __init kmemleak_init(void)
switch (log->op_type) {
case KMEMLEAK_ALLOC:
- kmemleak_alloc(log->ptr, log->size, log->min_count,
- GFP_KERNEL);
+ early_alloc(log);
break;
case KMEMLEAK_FREE:
kmemleak_free(log->ptr);
@@ -1485,8 +1657,7 @@ void __init kmemleak_init(void)
kmemleak_ignore(log->ptr);
break;
case KMEMLEAK_SCAN_AREA:
- kmemleak_scan_area(log->ptr, log->offset, log->length,
- GFP_KERNEL);
+ kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
break;
case KMEMLEAK_NO_SCAN:
kmemleak_no_scan(log->ptr);
@@ -1513,7 +1684,7 @@ static int __init kmemleak_late_init(void)
* after setting kmemleak_initialized and we may end up with
* two clean-up threads but serialized by scan_mutex.
*/
- kmemleak_cleanup();
+ schedule_work(&cleanup_work);
return -ENOMEM;
}
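The hunk above extends the /sys/kernel/debug/kmemleak interface with the "clear" and "dump=" commands. A minimal user-space sketch of driving that interface follows; it is an illustration rather than code from this patch, it assumes debugfs is mounted at /sys/kernel/debug, and the dumped address is purely hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a single command string to the kmemleak debugfs file. */
static int kmemleak_cmd(const char *cmd)
{
	int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
	ssize_t ret;

	if (fd < 0) {
		perror("open /sys/kernel/debug/kmemleak");
		return -1;
	}
	ret = write(fd, cmd, strlen(cmd));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	kmemleak_cmd("clear");                   /* grey out currently reported leaks */
	kmemleak_cmd("scan");                    /* trigger a fresh memory scan */
	kmemleak_cmd("dump=0xffff880012345678"); /* illustrative address only */
	return 0;
}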
diff --git a/mm/ksm.c b/mm/ksm.c
new file mode 100644
index 000000000000..56a0da1f9979
--- /dev/null
+++ b/mm/ksm.c
@@ -0,0 +1,1979 @@
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork()
+ *
+ * Copyright (C) 2008-2009 Red Hat, Inc.
+ * Authors:
+ * Izik Eidus
+ * Andrea Arcangeli
+ * Chris Wright
+ * Hugh Dickins
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/spinlock.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/memory.h>
+#include <linux/mmu_notifier.h>
+#include <linux/swap.h>
+#include <linux/ksm.h>
+
+#include <asm/tlbflush.h>
+#include "internal.h"
+
+/*
+ * A few notes about the KSM scanning process,
+ * to make it easier to understand the data structures below:
+ *
+ * In order to reduce excessive scanning, KSM sorts the memory pages by their
+ * contents into a data structure that holds pointers to the pages' locations.
+ *
+ * Since the contents of the pages may change at any moment, KSM cannot just
+ * insert the pages into a normal sorted tree and expect it to find anything.
+ * Therefore KSM uses two data structures - the stable and the unstable tree.
+ *
+ * The stable tree holds pointers to all the merged pages (ksm pages), sorted
+ * by their contents. Because each such page is write-protected, searching on
+ * this tree is fully assured to be working (except when pages are unmapped),
+ * and therefore this tree is called the stable tree.
+ *
+ * In addition to the stable tree, KSM uses a second data structure called the
+ * unstable tree: this tree holds pointers to pages which have been found to
+ * be "unchanged for a period of time". The unstable tree sorts these pages
+ * by their contents, but since they are not write-protected, KSM cannot rely
+ * upon the unstable tree to work correctly - the unstable tree is liable to
+ * be corrupted as its contents are modified, and so it is called unstable.
+ *
+ * KSM solves this problem by several techniques:
+ *
+ * 1) The unstable tree is flushed every time KSM completes scanning all
+ * memory areas, and then the tree is rebuilt again from the beginning.
+ * 2) KSM will only insert into the unstable tree, pages whose hash value
+ * has not changed since the previous scan of all memory areas.
+ * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
+ * colors of the nodes and not on their contents, assuring that even when
+ * the tree gets "corrupted" it won't get out of balance, so scanning time
+ * remains the same (also, searching and inserting nodes in an rbtree uses
+ * the same algorithm, so we have no overhead when we flush and rebuild).
+ * 4) KSM never flushes the stable tree, which means that even if it were to
+ * take 10 attempts to find a page in the unstable tree, once it is found,
+ * it is secured in the stable tree. (When we scan a new page, we first
+ * compare it against the stable tree, and then against the unstable tree.)
+ */
+
+/**
+ * struct mm_slot - ksm information per mm that is being scanned
+ * @link: link to the mm_slots hash list
+ * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
+ * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
+ * @mm: the mm that this information is valid for
+ */
+struct mm_slot {
+ struct hlist_node link;
+ struct list_head mm_list;
+ struct rmap_item *rmap_list;
+ struct mm_struct *mm;
+};
+
+/**
+ * struct ksm_scan - cursor for scanning
+ * @mm_slot: the current mm_slot we are scanning
+ * @address: the next address inside that to be scanned
+ * @rmap_list: link to the next rmap to be scanned in the rmap_list
+ * @seqnr: count of completed full scans (needed when removing unstable node)
+ *
+ * There is only the one ksm_scan instance of this cursor structure.
+ */
+struct ksm_scan {
+ struct mm_slot *mm_slot;
+ unsigned long address;
+ struct rmap_item **rmap_list;
+ unsigned long seqnr;
+};
+
+/**
+ * struct stable_node - node of the stable rbtree
+ * @node: rb node of this ksm page in the stable tree
+ * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
+ */
+struct stable_node {
+ struct rb_node node;
+ struct hlist_head hlist;
+ unsigned long kpfn;
+};
+
+/**
+ * struct rmap_item - reverse mapping item for virtual addresses
+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
+ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
+ * @mm: the memory structure this rmap_item is pointing into
+ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
+ * @oldchecksum: previous checksum of the page at that virtual address
+ * @node: rb node of this rmap_item in the unstable tree
+ * @head: pointer to stable_node heading this list in the stable tree
+ * @hlist: link into hlist of rmap_items hanging off that stable_node
+ */
+struct rmap_item {
+ struct rmap_item *rmap_list;
+ struct anon_vma *anon_vma; /* when stable */
+ struct mm_struct *mm;
+ unsigned long address; /* + low bits used for flags below */
+ unsigned int oldchecksum; /* when unstable */
+ union {
+ struct rb_node node; /* when node of unstable tree */
+ struct { /* when listed from stable tree */
+ struct stable_node *head;
+ struct hlist_node hlist;
+ };
+ };
+};
+
+#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
+#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
+#define STABLE_FLAG 0x200 /* is listed from the stable tree */
+
+/* The stable and unstable tree heads */
+static struct rb_root root_stable_tree = RB_ROOT;
+static struct rb_root root_unstable_tree = RB_ROOT;
+
+#define MM_SLOTS_HASH_HEADS 1024
+static struct hlist_head *mm_slots_hash;
+
+static struct mm_slot ksm_mm_head = {
+ .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
+};
+static struct ksm_scan ksm_scan = {
+ .mm_slot = &ksm_mm_head,
+};
+
+static struct kmem_cache *rmap_item_cache;
+static struct kmem_cache *stable_node_cache;
+static struct kmem_cache *mm_slot_cache;
+
+/* The number of nodes in the stable tree */
+static unsigned long ksm_pages_shared;
+
+/* The number of page slots additionally sharing those nodes */
+static unsigned long ksm_pages_sharing;
+
+/* The number of nodes in the unstable tree */
+static unsigned long ksm_pages_unshared;
+
+/* The number of rmap_items in use: to calculate pages_volatile */
+static unsigned long ksm_rmap_items;
+
+/* Number of pages ksmd should scan in one batch */
+static unsigned int ksm_thread_pages_to_scan = 100;
+
+/* Milliseconds ksmd should sleep between batches */
+static unsigned int ksm_thread_sleep_millisecs = 20;
+
+#define KSM_RUN_STOP 0
+#define KSM_RUN_MERGE 1
+#define KSM_RUN_UNMERGE 2
+static unsigned int ksm_run = KSM_RUN_STOP;
+
+static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
+static DEFINE_MUTEX(ksm_thread_mutex);
+static DEFINE_SPINLOCK(ksm_mmlist_lock);
+
+#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
+ sizeof(struct __struct), __alignof__(struct __struct),\
+ (__flags), NULL)
+
+static int __init ksm_slab_init(void)
+{
+ rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
+ if (!rmap_item_cache)
+ goto out;
+
+ stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
+ if (!stable_node_cache)
+ goto out_free1;
+
+ mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
+ if (!mm_slot_cache)
+ goto out_free2;
+
+ return 0;
+
+out_free2:
+ kmem_cache_destroy(stable_node_cache);
+out_free1:
+ kmem_cache_destroy(rmap_item_cache);
+out:
+ return -ENOMEM;
+}
+
+static void __init ksm_slab_free(void)
+{
+ kmem_cache_destroy(mm_slot_cache);
+ kmem_cache_destroy(stable_node_cache);
+ kmem_cache_destroy(rmap_item_cache);
+ mm_slot_cache = NULL;
+}
+
+static inline struct rmap_item *alloc_rmap_item(void)
+{
+ struct rmap_item *rmap_item;
+
+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+ if (rmap_item)
+ ksm_rmap_items++;
+ return rmap_item;
+}
+
+static inline void free_rmap_item(struct rmap_item *rmap_item)
+{
+ ksm_rmap_items--;
+ rmap_item->mm = NULL; /* debug safety */
+ kmem_cache_free(rmap_item_cache, rmap_item);
+}
+
+static inline struct stable_node *alloc_stable_node(void)
+{
+ return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
+}
+
+static inline void free_stable_node(struct stable_node *stable_node)
+{
+ kmem_cache_free(stable_node_cache, stable_node);
+}
+
+static inline struct mm_slot *alloc_mm_slot(void)
+{
+ if (!mm_slot_cache) /* initialization failed */
+ return NULL;
+ return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
+}
+
+static inline void free_mm_slot(struct mm_slot *mm_slot)
+{
+ kmem_cache_free(mm_slot_cache, mm_slot);
+}
+
+static int __init mm_slots_hash_init(void)
+{
+ mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!mm_slots_hash)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __init mm_slots_hash_free(void)
+{
+ kfree(mm_slots_hash);
+}
+
+static struct mm_slot *get_mm_slot(struct mm_struct *mm)
+{
+ struct mm_slot *mm_slot;
+ struct hlist_head *bucket;
+ struct hlist_node *node;
+
+ bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+ % MM_SLOTS_HASH_HEADS];
+ hlist_for_each_entry(mm_slot, node, bucket, link) {
+ if (mm == mm_slot->mm)
+ return mm_slot;
+ }
+ return NULL;
+}
+
+static void insert_to_mm_slots_hash(struct mm_struct *mm,
+ struct mm_slot *mm_slot)
+{
+ struct hlist_head *bucket;
+
+ bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+ % MM_SLOTS_HASH_HEADS];
+ mm_slot->mm = mm;
+ hlist_add_head(&mm_slot->link, bucket);
+}
+
+static inline int in_stable_tree(struct rmap_item *rmap_item)
+{
+ return rmap_item->address & STABLE_FLAG;
+}
+
+static void hold_anon_vma(struct rmap_item *rmap_item,
+ struct anon_vma *anon_vma)
+{
+ rmap_item->anon_vma = anon_vma;
+ atomic_inc(&anon_vma->ksm_refcount);
+}
+
+static void drop_anon_vma(struct rmap_item *rmap_item)
+{
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+
+ if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+ int empty = list_empty(&anon_vma->head);
+ spin_unlock(&anon_vma->lock);
+ if (empty)
+ anon_vma_free(anon_vma);
+ }
+}
+
+/*
+ * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
+ * page tables after it has passed through ksm_exit() - which, if necessary,
+ * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
+ * a special flag: they can just back out as soon as mm_users goes to zero.
+ * ksm_test_exit() is used throughout to make this test for exit: in some
+ * places for correctness, in some places just to avoid unnecessary work.
+ */
+static inline bool ksm_test_exit(struct mm_struct *mm)
+{
+ return atomic_read(&mm->mm_users) == 0;
+}
+
+/*
+ * We use break_ksm to break COW on a ksm page: it's a stripped down
+ *
+ * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
+ * put_page(page);
+ *
+ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
+ * in case the application has unmapped and remapped mm,addr meanwhile.
+ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
+ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
+ */
+static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct page *page;
+ int ret = 0;
+
+ do {
+ cond_resched();
+ page = follow_page(vma, addr, FOLL_GET);
+ if (!page)
+ break;
+ if (PageKsm(page))
+ ret = handle_mm_fault(vma->vm_mm, vma, addr,
+ FAULT_FLAG_WRITE);
+ else
+ ret = VM_FAULT_WRITE;
+ put_page(page);
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+ /*
+ * We must loop because handle_mm_fault() may back out if there's
+ * any difficulty e.g. if pte accessed bit gets updated concurrently.
+ *
+ * VM_FAULT_WRITE is what we have been hoping for: it indicates that
+ * COW has been broken, even if the vma does not permit VM_WRITE;
+ * but note that a concurrent fault might break PageKsm for us.
+ *
+ * VM_FAULT_SIGBUS could occur if we race with truncation of the
+ * backing file, which also invalidates anonymous pages: that's
+ * okay, that truncation will have unmapped the PageKsm for us.
+ *
+ * VM_FAULT_OOM: at the time of writing (late July 2009), setting
+ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
+ * current task has TIF_MEMDIE set, and will be OOM killed on return
+ * to user; and ksmd, having no mm, would never be chosen for that.
+ *
+ * But if the mm is in a limited mem_cgroup, then the fault may fail
+ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
+ * even ksmd can fail in this way - though it's usually breaking ksm
+ * just to undo a merge it made a moment before, so unlikely to oom.
+ *
+ * That's a pity: we might therefore have more kernel pages allocated
+ * than we're counting as nodes in the stable tree; but ksm_do_scan
+ * will retry to break_cow on each pass, so should recover the page
+ * in due course. The important thing is to not let VM_MERGEABLE
+ * be cleared while any such pages might remain in the area.
+ */
+ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
+}
+
+static void break_cow(struct rmap_item *rmap_item)
+{
+ struct mm_struct *mm = rmap_item->mm;
+ unsigned long addr = rmap_item->address;
+ struct vm_area_struct *vma;
+
+ /*
+ * It is not an accident that whenever we want to break COW
+ * to undo, we also need to drop a reference to the anon_vma.
+ */
+ drop_anon_vma(rmap_item);
+
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
+ if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ goto out;
+ break_ksm(vma, addr);
+out:
+ up_read(&mm->mmap_sem);
+}
+
+static struct page *get_mergeable_page(struct rmap_item *rmap_item)
+{
+ struct mm_struct *mm = rmap_item->mm;
+ unsigned long addr = rmap_item->address;
+ struct vm_area_struct *vma;
+ struct page *page;
+
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
+ if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ goto out;
+
+ page = follow_page(vma, addr, FOLL_GET);
+ if (!page)
+ goto out;
+ if (PageAnon(page)) {
+ flush_anon_page(vma, page, addr);
+ flush_dcache_page(page);
+ } else {
+ put_page(page);
+out: page = NULL;
+ }
+ up_read(&mm->mmap_sem);
+ return page;
+}
+
+static void remove_node_from_stable_tree(struct stable_node *stable_node)
+{
+ struct rmap_item *rmap_item;
+ struct hlist_node *hlist;
+
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ if (rmap_item->hlist.next)
+ ksm_pages_sharing--;
+ else
+ ksm_pages_shared--;
+ drop_anon_vma(rmap_item);
+ rmap_item->address &= PAGE_MASK;
+ cond_resched();
+ }
+
+ rb_erase(&stable_node->node, &root_stable_tree);
+ free_stable_node(stable_node);
+}
+
+/*
+ * get_ksm_page: checks if the page indicated by the stable node
+ * is still its ksm page, despite having held no reference to it.
+ * In which case we can trust the content of the page, and it
+ * returns the gotten page; but if the page has now been zapped,
+ * remove the stale node from the stable tree and return NULL.
+ *
+ * You would expect the stable_node to hold a reference to the ksm page.
+ * But if it increments the page's count, swapping out has to wait for
+ * ksmd to come around again before it can free the page, which may take
+ * seconds or even minutes: much too unresponsive. So instead we use a
+ * "keyhole reference": access to the ksm page from the stable node peeps
+ * out through its keyhole to see if that page still holds the right key,
+ * pointing back to this stable node. This relies on freeing a PageAnon
+ * page to reset its page->mapping to NULL, and relies on no other use of
+ * a page to put something that might look like our key in page->mapping.
+ *
+ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
+ * but this is different - made simpler by ksm_thread_mutex being held, but
+ * interesting for assuming that no other use of the struct page could ever
+ * put our expected_mapping into page->mapping (or a field of the union which
+ * coincides with page->mapping). The RCU calls are not for KSM at all, but
+ * to keep the page_count protocol described with page_cache_get_speculative.
+ *
+ * Note: it is possible that get_ksm_page() will return NULL one moment,
+ * then page the next, if the page is in between page_freeze_refs() and
+ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
+ * is on its way to being freed; but it is an anomaly to bear in mind.
+ */
+static struct page *get_ksm_page(struct stable_node *stable_node)
+{
+ struct page *page;
+ void *expected_mapping;
+
+ page = pfn_to_page(stable_node->kpfn);
+ expected_mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ rcu_read_lock();
+ if (page->mapping != expected_mapping)
+ goto stale;
+ if (!get_page_unless_zero(page))
+ goto stale;
+ if (page->mapping != expected_mapping) {
+ put_page(page);
+ goto stale;
+ }
+ rcu_read_unlock();
+ return page;
+stale:
+ rcu_read_unlock();
+ remove_node_from_stable_tree(stable_node);
+ return NULL;
+}
+
+/*
+ * Removing rmap_item from stable or unstable tree.
+ * This function will clean the information from the stable/unstable tree.
+ */
+static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
+{
+ if (rmap_item->address & STABLE_FLAG) {
+ struct stable_node *stable_node;
+ struct page *page;
+
+ stable_node = rmap_item->head;
+ page = get_ksm_page(stable_node);
+ if (!page)
+ goto out;
+
+ lock_page(page);
+ hlist_del(&rmap_item->hlist);
+ unlock_page(page);
+ put_page(page);
+
+ if (stable_node->hlist.first)
+ ksm_pages_sharing--;
+ else
+ ksm_pages_shared--;
+
+ drop_anon_vma(rmap_item);
+ rmap_item->address &= PAGE_MASK;
+
+ } else if (rmap_item->address & UNSTABLE_FLAG) {
+ unsigned char age;
+ /*
+ * Usually ksmd can and must skip the rb_erase, because
+ * root_unstable_tree was already reset to RB_ROOT.
+ * But be careful when an mm is exiting: do the rb_erase
+ * if this rmap_item was inserted by this scan, rather
+ * than left over from before.
+ */
+ age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
+ BUG_ON(age > 1);
+ if (!age)
+ rb_erase(&rmap_item->node, &root_unstable_tree);
+
+ ksm_pages_unshared--;
+ rmap_item->address &= PAGE_MASK;
+ }
+out:
+ cond_resched(); /* we're called from many long loops */
+}
+
+static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
+ struct rmap_item **rmap_list)
+{
+ while (*rmap_list) {
+ struct rmap_item *rmap_item = *rmap_list;
+ *rmap_list = rmap_item->rmap_list;
+ remove_rmap_item_from_tree(rmap_item);
+ free_rmap_item(rmap_item);
+ }
+}
+
+/*
+ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
+ * than check every pte of a given vma, the locking doesn't quite work for
+ * that - an rmap_item is assigned to the stable tree after inserting ksm
+ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
+ * rmap_items from parent to child at fork time (so as not to waste time
+ * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
+ */
+static int unmerge_ksm_pages(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+ int err = 0;
+
+ for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
+ if (ksm_test_exit(vma->vm_mm))
+ break;
+ if (signal_pending(current))
+ err = -ERESTARTSYS;
+ else
+ err = break_ksm(vma, addr);
+ }
+ return err;
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+static int unmerge_and_remove_all_rmap_items(void)
+{
+ struct mm_slot *mm_slot;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ int err = 0;
+
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
+ struct mm_slot, mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ for (mm_slot = ksm_scan.mm_slot;
+ mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
+ mm = mm_slot->mm;
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (ksm_test_exit(mm))
+ break;
+ if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ continue;
+ err = unmerge_ksm_pages(vma,
+ vma->vm_start, vma->vm_end);
+ if (err)
+ goto error;
+ }
+
+ remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
+
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
+ struct mm_slot, mm_list);
+ if (ksm_test_exit(mm)) {
+ hlist_del(&mm_slot->link);
+ list_del(&mm_slot->mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ free_mm_slot(mm_slot);
+ clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ up_read(&mm->mmap_sem);
+ mmdrop(mm);
+ } else {
+ spin_unlock(&ksm_mmlist_lock);
+ up_read(&mm->mmap_sem);
+ }
+ }
+
+ ksm_scan.seqnr = 0;
+ return 0;
+
+error:
+ up_read(&mm->mmap_sem);
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = &ksm_mm_head;
+ spin_unlock(&ksm_mmlist_lock);
+ return err;
+}
+#endif /* CONFIG_SYSFS */
+
+static u32 calc_checksum(struct page *page)
+{
+ u32 checksum;
+ void *addr = kmap_atomic(page, KM_USER0);
+ checksum = jhash2(addr, PAGE_SIZE / 4, 17);
+ kunmap_atomic(addr, KM_USER0);
+ return checksum;
+}
+
+static int memcmp_pages(struct page *page1, struct page *page2)
+{
+ char *addr1, *addr2;
+ int ret;
+
+ addr1 = kmap_atomic(page1, KM_USER0);
+ addr2 = kmap_atomic(page2, KM_USER1);
+ ret = memcmp(addr1, addr2, PAGE_SIZE);
+ kunmap_atomic(addr2, KM_USER1);
+ kunmap_atomic(addr1, KM_USER0);
+ return ret;
+}
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+ return !memcmp_pages(page1, page2);
+}
+
+static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+ pte_t *orig_pte)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ int swapped;
+ int err = -EFAULT;
+
+ addr = page_address_in_vma(page, vma);
+ if (addr == -EFAULT)
+ goto out;
+
+ ptep = page_check_address(page, mm, addr, &ptl, 0);
+ if (!ptep)
+ goto out;
+
+ if (pte_write(*ptep)) {
+ pte_t entry;
+
+ swapped = PageSwapCache(page);
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ /*
+		 * Ok, this is tricky: when get_user_pages_fast() runs it doesn't
+		 * take any lock, therefore the check that we are going to make
+		 * with the pagecount against the mapcount is racy and
+		 * O_DIRECT can happen right after the check.
+		 * So we clear the pte and flush the tlb before the check;
+		 * this assures us that no O_DIRECT can happen after the check
+		 * or in the middle of the check.
+ */
+ entry = ptep_clear_flush(vma, addr, ptep);
+ /*
+ * Check that no O_DIRECT or similar I/O is in progress on the
+ * page
+ */
+ if (page_mapcount(page) + 1 + swapped != page_count(page)) {
+ set_pte_at_notify(mm, addr, ptep, entry);
+ goto out_unlock;
+ }
+ entry = pte_wrprotect(entry);
+ set_pte_at_notify(mm, addr, ptep, entry);
+ }
+ *orig_pte = *ptep;
+ err = 0;
+
+out_unlock:
+ pte_unmap_unlock(ptep, ptl);
+out:
+ return err;
+}
+
+/**
+ * replace_page - replace page in vma by new ksm page
+ * @vma: vma that holds the pte pointing to page
+ * @page: the page we are replacing by kpage
+ * @kpage: the ksm page we replace page by
+ * @orig_pte: the original value of the pte
+ *
+ * Returns 0 on success, -EFAULT on failure.
+ */
+static int replace_page(struct vm_area_struct *vma, struct page *page,
+ struct page *kpage, pte_t orig_pte)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ unsigned long addr;
+ int err = -EFAULT;
+
+ addr = page_address_in_vma(page, vma);
+ if (addr == -EFAULT)
+ goto out;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ goto out;
+
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ goto out;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ goto out;
+
+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (!pte_same(*ptep, orig_pte)) {
+ pte_unmap_unlock(ptep, ptl);
+ goto out;
+ }
+
+ get_page(kpage);
+ page_add_anon_rmap(kpage, vma, addr);
+
+ flush_cache_page(vma, addr, pte_pfn(*ptep));
+ ptep_clear_flush(vma, addr, ptep);
+ set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
+
+ page_remove_rmap(page);
+ put_page(page);
+
+ pte_unmap_unlock(ptep, ptl);
+ err = 0;
+out:
+ return err;
+}
+
+/*
+ * try_to_merge_one_page - take two pages and merge them into one
+ * @vma: the vma that holds the pte pointing to page
+ * @page: the PageAnon page that we want to replace with kpage
+ * @kpage: the PageKsm page that we want to map instead of page,
+ * or NULL the first time when we want to use page as kpage.
+ *
+ * This function returns 0 if the pages were merged, -EFAULT otherwise.
+ */
+static int try_to_merge_one_page(struct vm_area_struct *vma,
+ struct page *page, struct page *kpage)
+{
+ pte_t orig_pte = __pte(0);
+ int err = -EFAULT;
+
+ if (page == kpage) /* ksm page forked */
+ return 0;
+
+ if (!(vma->vm_flags & VM_MERGEABLE))
+ goto out;
+ if (!PageAnon(page))
+ goto out;
+
+ /*
+ * We need the page lock to read a stable PageSwapCache in
+ * write_protect_page(). We use trylock_page() instead of
+ * lock_page() because we don't want to wait here - we
+ * prefer to continue scanning and merging different pages,
+ * then come back to this page when it is unlocked.
+ */
+ if (!trylock_page(page))
+ goto out;
+ /*
+ * If this anonymous page is mapped only here, its pte may need
+ * to be write-protected. If it's mapped elsewhere, all of its
+ * ptes are necessarily already write-protected. But in either
+ * case, we need to lock and check page_count is not raised.
+ */
+ if (write_protect_page(vma, page, &orig_pte) == 0) {
+ if (!kpage) {
+ /*
+ * While we hold page lock, upgrade page from
+ * PageAnon+anon_vma to PageKsm+NULL stable_node:
+ * stable_tree_insert() will update stable_node.
+ */
+ set_page_stable_node(page, NULL);
+ mark_page_accessed(page);
+ err = 0;
+ } else if (pages_identical(page, kpage))
+ err = replace_page(vma, page, kpage, orig_pte);
+ }
+
+ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
+ munlock_vma_page(page);
+ if (!PageMlocked(kpage)) {
+ unlock_page(page);
+ lock_page(kpage);
+ mlock_vma_page(kpage);
+ page = kpage; /* for final unlock */
+ }
+ }
+
+ unlock_page(page);
+out:
+ return err;
+}
+
+/*
+ * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
+ * but no new kernel page is allocated: kpage must already be a ksm page.
+ *
+ * This function returns 0 if the pages were merged, -EFAULT otherwise.
+ */
+static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
+ struct page *page, struct page *kpage)
+{
+ struct mm_struct *mm = rmap_item->mm;
+ struct vm_area_struct *vma;
+ int err = -EFAULT;
+
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, rmap_item->address);
+ if (!vma || vma->vm_start > rmap_item->address)
+ goto out;
+
+ err = try_to_merge_one_page(vma, page, kpage);
+ if (err)
+ goto out;
+
+ /* Must get reference to anon_vma while still holding mmap_sem */
+ hold_anon_vma(rmap_item, vma->anon_vma);
+out:
+ up_read(&mm->mmap_sem);
+ return err;
+}
+
+/*
+ * try_to_merge_two_pages - take two identical pages and prepare them
+ * to be merged into one page.
+ *
+ * This function returns the kpage if we successfully merged two identical
+ * pages into one ksm page, NULL otherwise.
+ *
+ * Note that this function upgrades page to ksm page: if one of the pages
+ * is already a ksm page, try_to_merge_with_ksm_page should be used.
+ */
+static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
+ struct page *page,
+ struct rmap_item *tree_rmap_item,
+ struct page *tree_page)
+{
+ int err;
+
+ err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
+ if (!err) {
+ err = try_to_merge_with_ksm_page(tree_rmap_item,
+ tree_page, page);
+ /*
+ * If that fails, we have a ksm page with only one pte
+ * pointing to it: so break it.
+ */
+ if (err)
+ break_cow(rmap_item);
+ }
+ return err ? NULL : page;
+}
+
+/*
+ * stable_tree_search - search for page inside the stable tree
+ *
+ * This function checks if there is a page inside the stable tree
+ * with identical content to the page that we are scanning right now.
+ *
+ * This function returns the stable tree node of identical content if found,
+ * NULL otherwise.
+ */
+static struct page *stable_tree_search(struct page *page)
+{
+ struct rb_node *node = root_stable_tree.rb_node;
+ struct stable_node *stable_node;
+
+ stable_node = page_stable_node(page);
+ if (stable_node) { /* ksm page forked */
+ get_page(page);
+ return page;
+ }
+
+ while (node) {
+ struct page *tree_page;
+ int ret;
+
+ cond_resched();
+ stable_node = rb_entry(node, struct stable_node, node);
+ tree_page = get_ksm_page(stable_node);
+ if (!tree_page)
+ return NULL;
+
+ ret = memcmp_pages(page, tree_page);
+
+ if (ret < 0) {
+ put_page(tree_page);
+ node = node->rb_left;
+ } else if (ret > 0) {
+ put_page(tree_page);
+ node = node->rb_right;
+ } else
+ return tree_page;
+ }
+
+ return NULL;
+}
+
+/*
+ * stable_tree_insert - insert rmap_item pointing to new ksm page
+ * into the stable tree.
+ *
+ * This function returns the stable tree node just allocated on success,
+ * NULL otherwise.
+ */
+static struct stable_node *stable_tree_insert(struct page *kpage)
+{
+ struct rb_node **new = &root_stable_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct stable_node *stable_node;
+
+ while (*new) {
+ struct page *tree_page;
+ int ret;
+
+ cond_resched();
+ stable_node = rb_entry(*new, struct stable_node, node);
+ tree_page = get_ksm_page(stable_node);
+ if (!tree_page)
+ return NULL;
+
+ ret = memcmp_pages(kpage, tree_page);
+ put_page(tree_page);
+
+ parent = *new;
+ if (ret < 0)
+ new = &parent->rb_left;
+ else if (ret > 0)
+ new = &parent->rb_right;
+ else {
+ /*
+ * It is not a bug that stable_tree_search() didn't
+ * find this node: because at that time our page was
+ * not yet write-protected, so may have changed since.
+ */
+ return NULL;
+ }
+ }
+
+ stable_node = alloc_stable_node();
+ if (!stable_node)
+ return NULL;
+
+ rb_link_node(&stable_node->node, parent, new);
+ rb_insert_color(&stable_node->node, &root_stable_tree);
+
+ INIT_HLIST_HEAD(&stable_node->hlist);
+
+ stable_node->kpfn = page_to_pfn(kpage);
+ set_page_stable_node(kpage, stable_node);
+
+ return stable_node;
+}
+
+/*
+ * unstable_tree_search_insert - search for identical page,
+ * else insert rmap_item into the unstable tree.
+ *
+ * This function searches for a page in the unstable tree identical to the
+ * page currently being scanned; and if no identical page is found in the
+ * tree, we insert rmap_item as a new object into the unstable tree.
+ *
+ * This function returns pointer to rmap_item found to be identical
+ * to the currently scanned page, NULL otherwise.
+ *
+ * This function does both searching and inserting, because they share
+ * the same walking algorithm in an rbtree.
+ */
+static
+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
+ struct page *page,
+ struct page **tree_pagep)
+
+{
+ struct rb_node **new = &root_unstable_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+ struct rmap_item *tree_rmap_item;
+ struct page *tree_page;
+ int ret;
+
+ cond_resched();
+ tree_rmap_item = rb_entry(*new, struct rmap_item, node);
+ tree_page = get_mergeable_page(tree_rmap_item);
+ if (!tree_page)
+ return NULL;
+
+ /*
+ * Don't substitute a ksm page for a forked page.
+ */
+ if (page == tree_page) {
+ put_page(tree_page);
+ return NULL;
+ }
+
+ ret = memcmp_pages(page, tree_page);
+
+ parent = *new;
+ if (ret < 0) {
+ put_page(tree_page);
+ new = &parent->rb_left;
+ } else if (ret > 0) {
+ put_page(tree_page);
+ new = &parent->rb_right;
+ } else {
+ *tree_pagep = tree_page;
+ return tree_rmap_item;
+ }
+ }
+
+ rmap_item->address |= UNSTABLE_FLAG;
+ rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
+ rb_link_node(&rmap_item->node, parent, new);
+ rb_insert_color(&rmap_item->node, &root_unstable_tree);
+
+ ksm_pages_unshared++;
+ return NULL;
+}
+
+/*
+ * stable_tree_append - add another rmap_item to the linked list of
+ * rmap_items hanging off a given node of the stable tree, all sharing
+ * the same ksm page.
+ */
+static void stable_tree_append(struct rmap_item *rmap_item,
+ struct stable_node *stable_node)
+{
+ rmap_item->head = stable_node;
+ rmap_item->address |= STABLE_FLAG;
+ hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
+
+ if (rmap_item->hlist.next)
+ ksm_pages_sharing++;
+ else
+ ksm_pages_shared++;
+}
+
+/*
+ * cmp_and_merge_page - first see if page can be merged into the stable tree;
+ * if not, compare checksum to previous and if it's the same, see if page can
+ * be inserted into the unstable tree, or merged with a page already there and
+ * both transferred to the stable tree.
+ *
+ * @page: the page that we are searching identical page to.
+ * @rmap_item: the reverse mapping into the virtual address of this page
+ */
+static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+{
+ struct rmap_item *tree_rmap_item;
+ struct page *tree_page = NULL;
+ struct stable_node *stable_node;
+ struct page *kpage;
+ unsigned int checksum;
+ int err;
+
+ remove_rmap_item_from_tree(rmap_item);
+
+ /* We first start with searching the page inside the stable tree */
+ kpage = stable_tree_search(page);
+ if (kpage) {
+ err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
+ if (!err) {
+ /*
+ * The page was successfully merged:
+ * add its rmap_item to the stable tree.
+ */
+ lock_page(kpage);
+ stable_tree_append(rmap_item, page_stable_node(kpage));
+ unlock_page(kpage);
+ }
+ put_page(kpage);
+ return;
+ }
+
+ /*
+ * If the hash value of the page has changed from the last time
+ * we calculated it, this page is changing frequently: therefore we
+ * don't want to insert it in the unstable tree, and we don't want
+ * to waste our time searching for something identical to it there.
+ */
+ checksum = calc_checksum(page);
+ if (rmap_item->oldchecksum != checksum) {
+ rmap_item->oldchecksum = checksum;
+ return;
+ }
+
+ tree_rmap_item =
+ unstable_tree_search_insert(rmap_item, page, &tree_page);
+ if (tree_rmap_item) {
+ kpage = try_to_merge_two_pages(rmap_item, page,
+ tree_rmap_item, tree_page);
+ put_page(tree_page);
+ /*
+ * As soon as we merge this page, we want to remove the
+ * rmap_item of the page we have merged with from the unstable
+ * tree, and insert it instead as new node in the stable tree.
+ */
+ if (kpage) {
+ remove_rmap_item_from_tree(tree_rmap_item);
+
+ lock_page(kpage);
+ stable_node = stable_tree_insert(kpage);
+ if (stable_node) {
+ stable_tree_append(tree_rmap_item, stable_node);
+ stable_tree_append(rmap_item, stable_node);
+ }
+ unlock_page(kpage);
+
+ /*
+ * If we fail to insert the page into the stable tree,
+ * we will have 2 virtual addresses that are pointing
+ * to a ksm page left outside the stable tree,
+ * in which case we need to break_cow on both.
+ */
+ if (!stable_node) {
+ break_cow(tree_rmap_item);
+ break_cow(rmap_item);
+ }
+ }
+ }
+}
+
+static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
+ struct rmap_item **rmap_list,
+ unsigned long addr)
+{
+ struct rmap_item *rmap_item;
+
+ while (*rmap_list) {
+ rmap_item = *rmap_list;
+ if ((rmap_item->address & PAGE_MASK) == addr)
+ return rmap_item;
+ if (rmap_item->address > addr)
+ break;
+ *rmap_list = rmap_item->rmap_list;
+ remove_rmap_item_from_tree(rmap_item);
+ free_rmap_item(rmap_item);
+ }
+
+ rmap_item = alloc_rmap_item();
+ if (rmap_item) {
+ /* It has already been zeroed */
+ rmap_item->mm = mm_slot->mm;
+ rmap_item->address = addr;
+ rmap_item->rmap_list = *rmap_list;
+ *rmap_list = rmap_item;
+ }
+ return rmap_item;
+}
+
+static struct rmap_item *scan_get_next_rmap_item(struct page **page)
+{
+ struct mm_struct *mm;
+ struct mm_slot *slot;
+ struct vm_area_struct *vma;
+ struct rmap_item *rmap_item;
+
+ if (list_empty(&ksm_mm_head.mm_list))
+ return NULL;
+
+ slot = ksm_scan.mm_slot;
+ if (slot == &ksm_mm_head) {
+ root_unstable_tree = RB_ROOT;
+
+ spin_lock(&ksm_mmlist_lock);
+ slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
+ ksm_scan.mm_slot = slot;
+ spin_unlock(&ksm_mmlist_lock);
+next_mm:
+ ksm_scan.address = 0;
+ ksm_scan.rmap_list = &slot->rmap_list;
+ }
+
+ mm = slot->mm;
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ vma = NULL;
+ else
+ vma = find_vma(mm, ksm_scan.address);
+
+ for (; vma; vma = vma->vm_next) {
+ if (!(vma->vm_flags & VM_MERGEABLE))
+ continue;
+ if (ksm_scan.address < vma->vm_start)
+ ksm_scan.address = vma->vm_start;
+ if (!vma->anon_vma)
+ ksm_scan.address = vma->vm_end;
+
+ while (ksm_scan.address < vma->vm_end) {
+ if (ksm_test_exit(mm))
+ break;
+ *page = follow_page(vma, ksm_scan.address, FOLL_GET);
+ if (*page && PageAnon(*page)) {
+ flush_anon_page(vma, *page, ksm_scan.address);
+ flush_dcache_page(*page);
+ rmap_item = get_next_rmap_item(slot,
+ ksm_scan.rmap_list, ksm_scan.address);
+ if (rmap_item) {
+ ksm_scan.rmap_list =
+ &rmap_item->rmap_list;
+ ksm_scan.address += PAGE_SIZE;
+ } else
+ put_page(*page);
+ up_read(&mm->mmap_sem);
+ return rmap_item;
+ }
+ if (*page)
+ put_page(*page);
+ ksm_scan.address += PAGE_SIZE;
+ cond_resched();
+ }
+ }
+
+ if (ksm_test_exit(mm)) {
+ ksm_scan.address = 0;
+ ksm_scan.rmap_list = &slot->rmap_list;
+ }
+ /*
+ * Nuke all the rmap_items that are above this current rmap:
+ * because there were no VM_MERGEABLE vmas with such addresses.
+ */
+ remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
+
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = list_entry(slot->mm_list.next,
+ struct mm_slot, mm_list);
+ if (ksm_scan.address == 0) {
+ /*
+ * We've completed a full scan of all vmas, holding mmap_sem
+ * throughout, and found no VM_MERGEABLE: so do the same as
+ * __ksm_exit does to remove this mm from all our lists now.
+ * This applies either when cleaning up after __ksm_exit
+ * (but beware: we can reach here even before __ksm_exit),
+ * or when all VM_MERGEABLE areas have been unmapped (and
+ * mmap_sem then protects against race with MADV_MERGEABLE).
+ */
+ hlist_del(&slot->link);
+ list_del(&slot->mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ free_mm_slot(slot);
+ clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ up_read(&mm->mmap_sem);
+ mmdrop(mm);
+ } else {
+ spin_unlock(&ksm_mmlist_lock);
+ up_read(&mm->mmap_sem);
+ }
+
+ /* Repeat until we've completed scanning the whole list */
+ slot = ksm_scan.mm_slot;
+ if (slot != &ksm_mm_head)
+ goto next_mm;
+
+ ksm_scan.seqnr++;
+ return NULL;
+}
+
+/**
+ * ksm_do_scan - the ksm scanner main worker function.
+ * @scan_npages - number of pages we want to scan before we return.
+ */
+static void ksm_do_scan(unsigned int scan_npages)
+{
+ struct rmap_item *rmap_item;
+ struct page *page;
+
+ while (scan_npages--) {
+ cond_resched();
+ rmap_item = scan_get_next_rmap_item(&page);
+ if (!rmap_item)
+ return;
+ if (!PageKsm(page) || !in_stable_tree(rmap_item))
+ cmp_and_merge_page(page, rmap_item);
+ put_page(page);
+ }
+}
+
+static int ksmd_should_run(void)
+{
+ return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
+}
+
+static int ksm_scan_thread(void *nothing)
+{
+ set_user_nice(current, 5);
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&ksm_thread_mutex);
+ if (ksmd_should_run())
+ ksm_do_scan(ksm_thread_pages_to_scan);
+ mutex_unlock(&ksm_thread_mutex);
+
+ if (ksmd_should_run()) {
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(ksm_thread_sleep_millisecs));
+ } else {
+ wait_event_interruptible(ksm_thread_wait,
+ ksmd_should_run() || kthread_should_stop());
+ }
+ }
+ return 0;
+}
+
+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int err;
+
+ switch (advice) {
+ case MADV_MERGEABLE:
+ /*
+ * Be somewhat over-protective for now!
+ */
+ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
+ VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+ VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+ VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
+ return 0; /* just ignore the advice */
+
+ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+ err = __ksm_enter(mm);
+ if (err)
+ return err;
+ }
+
+ *vm_flags |= VM_MERGEABLE;
+ break;
+
+ case MADV_UNMERGEABLE:
+ if (!(*vm_flags & VM_MERGEABLE))
+ return 0; /* just ignore the advice */
+
+ if (vma->anon_vma) {
+ err = unmerge_ksm_pages(vma, start, end);
+ if (err)
+ return err;
+ }
+
+ *vm_flags &= ~VM_MERGEABLE;
+ break;
+ }
+
+ return 0;
+}
+
+int __ksm_enter(struct mm_struct *mm)
+{
+ struct mm_slot *mm_slot;
+ int needs_wakeup;
+
+ mm_slot = alloc_mm_slot();
+ if (!mm_slot)
+ return -ENOMEM;
+
+ /* Check ksm_run too? Would need tighter locking */
+ needs_wakeup = list_empty(&ksm_mm_head.mm_list);
+
+ spin_lock(&ksm_mmlist_lock);
+ insert_to_mm_slots_hash(mm, mm_slot);
+ /*
+ * Insert just behind the scanning cursor, to let the area settle
+ * down a little; when fork is followed by immediate exec, we don't
+ * want ksmd to waste time setting up and tearing down an rmap_list.
+ */
+ list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ set_bit(MMF_VM_MERGEABLE, &mm->flags);
+ atomic_inc(&mm->mm_count);
+
+ if (needs_wakeup)
+ wake_up_interruptible(&ksm_thread_wait);
+
+ return 0;
+}
+
+void __ksm_exit(struct mm_struct *mm)
+{
+ struct mm_slot *mm_slot;
+ int easy_to_free = 0;
+
+ /*
+ * This process is exiting: if it's straightforward (as is the
+ * case when ksmd was never running), free mm_slot immediately.
+ * But if it's at the cursor or has rmap_items linked to it, use
+ * mmap_sem to synchronize with any break_cows before pagetables
+ * are freed, and leave the mm_slot on the list for ksmd to free.
+ * Beware: ksm may already have noticed it exiting and freed the slot.
+ */
+
+ spin_lock(&ksm_mmlist_lock);
+ mm_slot = get_mm_slot(mm);
+ if (mm_slot && ksm_scan.mm_slot != mm_slot) {
+ if (!mm_slot->rmap_list) {
+ hlist_del(&mm_slot->link);
+ list_del(&mm_slot->mm_list);
+ easy_to_free = 1;
+ } else {
+ list_move(&mm_slot->mm_list,
+ &ksm_scan.mm_slot->mm_list);
+ }
+ }
+ spin_unlock(&ksm_mmlist_lock);
+
+ if (easy_to_free) {
+ free_mm_slot(mm_slot);
+ clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ mmdrop(mm);
+ } else if (mm_slot) {
+ down_write(&mm->mmap_sem);
+ up_write(&mm->mmap_sem);
+ }
+}
+
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct page *new_page;
+
+ unlock_page(page); /* any racers will COW it, not modify it */
+
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (new_page) {
+ copy_user_highpage(new_page, page, address, vma);
+
+ SetPageDirty(new_page);
+ __SetPageUptodate(new_page);
+ SetPageSwapBacked(new_page);
+ __set_page_locked(new_page);
+
+ if (page_evictable(new_page, vma))
+ lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
+ else
+ add_page_to_unevictable_list(new_page);
+ }
+
+ page_cache_release(page);
+ return new_page;
+}
+
+int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
+ unsigned long *vm_flags)
+{
+ struct stable_node *stable_node;
+ struct rmap_item *rmap_item;
+ struct hlist_node *hlist;
+ unsigned int mapcount = page_mapcount(page);
+ int referenced = 0;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return 0;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
+ continue;
+
+ referenced += page_referenced_one(page, vma,
+ rmap_item->address, &mapcount, vm_flags);
+ if (!search_new_forks || !mapcount)
+ break;
+ }
+ spin_unlock(&anon_vma->lock);
+ if (!mapcount)
+ goto out;
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return referenced;
+}
+
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+{
+ struct stable_node *stable_node;
+ struct hlist_node *hlist;
+ struct rmap_item *rmap_item;
+ int ret = SWAP_AGAIN;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return SWAP_FAIL;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ ret = try_to_unmap_one(page, vma,
+ rmap_item->address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page)) {
+ spin_unlock(&anon_vma->lock);
+ goto out;
+ }
+ }
+ spin_unlock(&anon_vma->lock);
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return ret;
+}
+
+#ifdef CONFIG_MIGRATION
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct stable_node *stable_node;
+ struct hlist_node *hlist;
+ struct rmap_item *rmap_item;
+ int ret = SWAP_AGAIN;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return ret;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ ret = rmap_one(page, vma, rmap_item->address, arg);
+ if (ret != SWAP_AGAIN) {
+ spin_unlock(&anon_vma->lock);
+ goto out;
+ }
+ }
+ spin_unlock(&anon_vma->lock);
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return ret;
+}
+
+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+ struct stable_node *stable_node;
+
+ VM_BUG_ON(!PageLocked(oldpage));
+ VM_BUG_ON(!PageLocked(newpage));
+ VM_BUG_ON(newpage->mapping != oldpage->mapping);
+
+ stable_node = page_stable_node(newpage);
+ if (stable_node) {
+ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+ stable_node->kpfn = page_to_pfn(newpage);
+ }
+}
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct rb_node *node;
+
+ for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
+ struct stable_node *stable_node;
+
+ stable_node = rb_entry(node, struct stable_node, node);
+ if (stable_node->kpfn >= start_pfn &&
+ stable_node->kpfn < end_pfn)
+ return stable_node;
+ }
+ return NULL;
+}
+
+static int ksm_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mn = arg;
+ struct stable_node *stable_node;
+
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ /*
+ * Keep it very simple for now: just lock out ksmd and
+ * MADV_UNMERGEABLE while any memory is going offline.
+ */
+ mutex_lock(&ksm_thread_mutex);
+ break;
+
+ case MEM_OFFLINE:
+ /*
+ * Most of the work is done by page migration; but there might
+ * be a few stable_nodes left over, still pointing to struct
+ * pages which have been offlined: prune those from the tree.
+ */
+ while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
+ mn->start_pfn + mn->nr_pages)) != NULL)
+ remove_node_from_stable_tree(stable_node);
+ /* fallthrough */
+
+ case MEM_CANCEL_OFFLINE:
+ mutex_unlock(&ksm_thread_mutex);
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+#ifdef CONFIG_SYSFS
+/*
+ * This all compiles without CONFIG_SYSFS, but is a waste of space.
+ */
+
+#define KSM_ATTR_RO(_name) \
+ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+#define KSM_ATTR(_name) \
+ static struct kobj_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+static ssize_t sleep_millisecs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
+}
+
+static ssize_t sleep_millisecs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long msecs;
+ int err;
+
+ err = strict_strtoul(buf, 10, &msecs);
+ if (err || msecs > UINT_MAX)
+ return -EINVAL;
+
+ ksm_thread_sleep_millisecs = msecs;
+
+ return count;
+}
+KSM_ATTR(sleep_millisecs);
+
+static ssize_t pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
+}
+
+static ssize_t pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long nr_pages;
+
+ err = strict_strtoul(buf, 10, &nr_pages);
+ if (err || nr_pages > UINT_MAX)
+ return -EINVAL;
+
+ ksm_thread_pages_to_scan = nr_pages;
+
+ return count;
+}
+KSM_ATTR(pages_to_scan);
+
+static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_run);
+}
+
+static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long flags;
+
+ err = strict_strtoul(buf, 10, &flags);
+ if (err || flags > UINT_MAX)
+ return -EINVAL;
+ if (flags > KSM_RUN_UNMERGE)
+ return -EINVAL;
+
+ /*
+ * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
+ * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
+ * breaking COW to free the pages_shared (but leaves mm_slots
+ * on the list for when ksmd may be set running again).
+ */
+
+ mutex_lock(&ksm_thread_mutex);
+ if (ksm_run != flags) {
+ ksm_run = flags;
+ if (flags & KSM_RUN_UNMERGE) {
+ current->flags |= PF_OOM_ORIGIN;
+ err = unmerge_and_remove_all_rmap_items();
+ current->flags &= ~PF_OOM_ORIGIN;
+ if (err) {
+ ksm_run = KSM_RUN_STOP;
+ count = err;
+ }
+ }
+ }
+ mutex_unlock(&ksm_thread_mutex);
+
+ if (flags & KSM_RUN_MERGE)
+ wake_up_interruptible(&ksm_thread_wait);
+
+ return count;
+}
+KSM_ATTR(run);
+
+static ssize_t pages_shared_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_pages_shared);
+}
+KSM_ATTR_RO(pages_shared);
+
+static ssize_t pages_sharing_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_pages_sharing);
+}
+KSM_ATTR_RO(pages_sharing);
+
+static ssize_t pages_unshared_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_pages_unshared);
+}
+KSM_ATTR_RO(pages_unshared);
+
+static ssize_t pages_volatile_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ long ksm_pages_volatile;
+
+ ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
+ - ksm_pages_sharing - ksm_pages_unshared;
+ /*
+ * It was not worth any locking to calculate that statistic,
+ * but it might therefore sometimes be negative: conceal that.
+ */
+ if (ksm_pages_volatile < 0)
+ ksm_pages_volatile = 0;
+ return sprintf(buf, "%ld\n", ksm_pages_volatile);
+}
+KSM_ATTR_RO(pages_volatile);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_scan.seqnr);
+}
+KSM_ATTR_RO(full_scans);
+
+static struct attribute *ksm_attrs[] = {
+ &sleep_millisecs_attr.attr,
+ &pages_to_scan_attr.attr,
+ &run_attr.attr,
+ &pages_shared_attr.attr,
+ &pages_sharing_attr.attr,
+ &pages_unshared_attr.attr,
+ &pages_volatile_attr.attr,
+ &full_scans_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ksm_attr_group = {
+ .attrs = ksm_attrs,
+ .name = "ksm",
+};
+#endif /* CONFIG_SYSFS */
+
+static int __init ksm_init(void)
+{
+ struct task_struct *ksm_thread;
+ int err;
+
+ err = ksm_slab_init();
+ if (err)
+ goto out;
+
+ err = mm_slots_hash_init();
+ if (err)
+ goto out_free1;
+
+ ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
+ if (IS_ERR(ksm_thread)) {
+ printk(KERN_ERR "ksm: creating kthread failed\n");
+ err = PTR_ERR(ksm_thread);
+ goto out_free2;
+ }
+
+#ifdef CONFIG_SYSFS
+ err = sysfs_create_group(mm_kobj, &ksm_attr_group);
+ if (err) {
+ printk(KERN_ERR "ksm: register sysfs failed\n");
+ kthread_stop(ksm_thread);
+ goto out_free2;
+ }
+#else
+ ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
+
+#endif /* CONFIG_SYSFS */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ /*
+ * Choose a high priority since the callback takes ksm_thread_mutex:
+ * later callbacks could only be taking locks which nest within that.
+ */
+ hotplug_memory_notifier(ksm_memory_callback, 100);
+#endif
+ return 0;
+
+out_free2:
+ mm_slots_hash_free();
+out_free1:
+ ksm_slab_free();
+out:
+ return err;
+}
+module_init(ksm_init)
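For reference, a minimal user-space sketch of how the sysfs knobs registered by ksm_attr_group above are intended to be driven (not part of this patch; the paths assume the attribute group lands under mm_kobj as /sys/kernel/mm/ksm/, and the tuning values are arbitrary examples):

/*
 * Illustrative user-space sketch (not part of the patch): tune the ksm
 * sysfs knobs created by ksm_attr_group above. Paths assume the group is
 * registered under mm_kobj, i.e. /sys/kernel/mm/ksm/. Run as root.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_knob(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	write_knob("pages_to_scan", "100");	/* pages scanned per wake-up */
	write_knob("sleep_millisecs", "20");	/* delay between scan batches */
	write_knob("run", "1");			/* KSM_RUN_MERGE: start ksmd */
	return 0;
}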
diff --git a/mm/maccess.c b/mm/maccess.c
index 9073695ff25f..4e348dbaecd7 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -14,7 +14,11 @@
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-long probe_kernel_read(void *dst, void *src, size_t size)
+
+long __weak probe_kernel_read(void *dst, void *src, size_t size)
+ __attribute__((alias("__probe_kernel_read")));
+
+long __probe_kernel_read(void *dst, void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
@@ -39,7 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, void *src, size_t size)
+ __attribute__((alias("__probe_kernel_write")));
+
+long __probe_kernel_write(void *dst, void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
diff --git a/mm/madvise.c b/mm/madvise.c
index 76eb4193acdd..319528b8db74 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -9,8 +9,10 @@
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
+#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
+#include <linux/ksm.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
@@ -41,7 +43,7 @@ static long madvise_behavior(struct vm_area_struct * vma,
struct mm_struct * mm = vma->vm_mm;
int error = 0;
pgoff_t pgoff;
- int new_flags = vma->vm_flags;
+ unsigned long new_flags = vma->vm_flags;
switch (behavior) {
case MADV_NORMAL:
@@ -57,8 +59,18 @@ static long madvise_behavior(struct vm_area_struct * vma,
new_flags |= VM_DONTCOPY;
break;
case MADV_DOFORK:
+ if (vma->vm_flags & VM_IO) {
+ error = -EINVAL;
+ goto out;
+ }
new_flags &= ~VM_DONTCOPY;
break;
+ case MADV_MERGEABLE:
+ case MADV_UNMERGEABLE:
+ error = ksm_madvise(vma, start, end, behavior, &new_flags);
+ if (error)
+ goto out;
+ break;
}
if (new_flags == vma->vm_flags) {
@@ -207,41 +219,52 @@ static long madvise_remove(struct vm_area_struct *vma,
return error;
}
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Error injection support for memory error handling.
+ */
+static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
+{
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ for (; start < end; start += PAGE_SIZE) {
+ struct page *p;
+ int ret = get_user_pages_fast(start, 1, 0, &p);
+ if (ret != 1)
+ return ret;
+ if (bhv == MADV_SOFT_OFFLINE) {
+ printk(KERN_INFO "Soft offlining page %lx at %lx\n",
+ page_to_pfn(p), start);
+ ret = soft_offline_page(p, MF_COUNT_INCREASED);
+ if (ret)
+ break;
+ continue;
+ }
+ printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
+ page_to_pfn(p), start);
+ /* Ignore return value for now */
+ __memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
+ }
+ return ret;
+}
+#endif
+
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
unsigned long start, unsigned long end, int behavior)
{
- long error;
-
switch (behavior) {
- case MADV_DOFORK:
- if (vma->vm_flags & VM_IO) {
- error = -EINVAL;
- break;
- }
- case MADV_DONTFORK:
- case MADV_NORMAL:
- case MADV_SEQUENTIAL:
- case MADV_RANDOM:
- error = madvise_behavior(vma, prev, start, end, behavior);
- break;
case MADV_REMOVE:
- error = madvise_remove(vma, prev, start, end);
- break;
-
+ return madvise_remove(vma, prev, start, end);
case MADV_WILLNEED:
- error = madvise_willneed(vma, prev, start, end);
- break;
-
+ return madvise_willneed(vma, prev, start, end);
case MADV_DONTNEED:
- error = madvise_dontneed(vma, prev, start, end);
- break;
-
+ return madvise_dontneed(vma, prev, start, end);
default:
- BUG();
- break;
+ return madvise_behavior(vma, prev, start, end, behavior);
}
- return error;
}
static int
@@ -256,12 +279,17 @@ madvise_behavior_valid(int behavior)
case MADV_REMOVE:
case MADV_WILLNEED:
case MADV_DONTNEED:
+#ifdef CONFIG_KSM
+ case MADV_MERGEABLE:
+ case MADV_UNMERGEABLE:
+#endif
return 1;
default:
return 0;
}
}
+
/*
* The madvise(2) system call.
*
@@ -286,6 +314,12 @@ madvise_behavior_valid(int behavior)
* so the kernel can free resources associated with it.
* MADV_REMOVE - the application wants to free up the given range of
* pages and associated backing store.
+ * MADV_DONTFORK - omit this area from child's address space when forking:
+ * typically, to avoid COWing pages pinned by get_user_pages().
+ * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
+ * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
+ * this area with pages of identical content from other such areas.
+ * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
*
* return values:
* zero - success
@@ -307,6 +341,10 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
int write;
size_t len;
+#ifdef CONFIG_MEMORY_FAILURE
+ if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
+ return madvise_hwpoison(behavior, start, start+len_in);
+#endif
if (!madvise_behavior_valid(behavior))
return error;
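For reference, a minimal user-space sketch of the new advice values wired up above (not part of this patch): an anonymous mapping is registered with ksm_madvise() via MADV_MERGEABLE and later opted back out. The fallback constants 12/13 match asm-generic/mman-common.h in this series; a libc that already defines them is preferred.

/*
 * Illustrative user-space sketch (not part of the patch): mark an anonymous
 * region mergeable so ksmd may deduplicate identical pages in it. Requires
 * CONFIG_KSM in the running kernel.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE   12	/* values used by this kernel series */
#define MADV_UNMERGEABLE 13
#endif

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0xaa, len);			/* identical content: good KSM candidate */

	if (madvise(buf, len, MADV_MERGEABLE))	/* handled via ksm_madvise() above */
		perror("madvise(MADV_MERGEABLE)");

	/* ... later, opt the range back out: */
	if (madvise(buf, len, MADV_UNMERGEABLE))
		perror("madvise(MADV_UNMERGEABLE)");

	munmap(buf, len);
	return 0;
}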
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 34643f496c5d..d3e716fbab13 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -29,6 +29,7 @@
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
+#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
@@ -37,12 +38,14 @@
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
+#include <linux/cpu.h>
#include "internal.h"
#include <asm/uaccess.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
+struct mem_cgroup *root_mem_cgroup __read_mostly;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -52,7 +55,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#define do_swap_account (0)
#endif
-static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */
+#define SOFTLIMIT_EVENTS_THRESH (1000)
/*
* Statistics for memory cgroup.
@@ -63,9 +66,11 @@ enum mem_cgroup_stat_index {
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
+ MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
+ MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
};
@@ -78,6 +83,20 @@ struct mem_cgroup_stat {
struct mem_cgroup_stat_cpu cpustat[0];
};
+static inline void
+__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
+ enum mem_cgroup_stat_index idx)
+{
+ stat->count[idx] = 0;
+}
+
+static inline s64
+__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
+ enum mem_cgroup_stat_index idx)
+{
+ return stat->count[idx];
+}
+
/*
* For accounting under irq disable, no need for increment preempt count.
*/
@@ -117,6 +136,12 @@ struct mem_cgroup_per_zone {
unsigned long count[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
+ struct rb_node tree_node; /* RB tree node */
+ unsigned long long usage_in_excess; /* Set to the value by which */
+ /* the soft limit is exceeded */
+ bool on_tree;
+ struct mem_cgroup *mem; /* Back pointer, we cannot */
+ /* use container_of */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -130,6 +155,26 @@ struct mem_cgroup_lru_info {
};
/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+ struct rb_root rb_root;
+ spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+ struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+ struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
+/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
* statistics based on the statistics developed by Rik Van Riel for clock-pro,
@@ -164,7 +209,7 @@ struct mem_cgroup {
int prev_priority; /* for recording reclaim priority */
/*
- * While reclaiming in a hiearchy, we cache the last child we
+ * While reclaiming in a hierarchy, we cache the last child we
* reclaimed from.
*/
int last_scanned_child;
@@ -186,6 +231,13 @@ struct mem_cgroup {
struct mem_cgroup_stat stat;
};
+/*
+ * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
+ * limit reclaim to prevent infinite loops, if they ever occur.
+ */
+#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
+#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
MEM_CGROUP_CHARGE_TYPE_MAPPED,
@@ -200,13 +252,8 @@ enum charge_type {
#define PCGF_CACHE (1UL << PCG_CACHE)
#define PCGF_USED (1UL << PCG_USED)
#define PCGF_LOCK (1UL << PCG_LOCK)
-static const unsigned long
-pcg_default_flags[NR_CHARGE_TYPE] = {
- PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
- PCGF_USED | PCGF_LOCK, /* Anon */
- PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
- 0, /* FORCE */
-};
+/* Not used, but added here for completeness */
+#define PCGF_ACCT (1UL << PCG_ACCT)
/* for encoding cft->private value on file */
#define _MEM (0)
@@ -215,15 +262,243 @@ pcg_default_flags[NR_CHARGE_TYPE] = {
#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
+/*
+ * Reclaim flags for mem_cgroup_hierarchical_reclaim
+ */
+#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
+#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
+#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
+#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
+#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
+#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
+
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
+static void drain_all_stock_async(void);
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+{
+ return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+}
+
+struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+{
+ return &mem->css;
+}
+
+static struct mem_cgroup_per_zone *
+page_cgroup_zoneinfo(struct page_cgroup *pc)
+{
+ struct mem_cgroup *mem = pc->mem_cgroup;
+ int nid = page_cgroup_nid(pc);
+ int zid = page_cgroup_zid(pc);
+
+ if (!mem)
+ return NULL;
+
+ return mem_cgroup_zoneinfo(mem, nid, zid);
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+ return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
+
+ return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz,
+ unsigned long long new_usage_in_excess)
+{
+ struct rb_node **p = &mctz->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct mem_cgroup_per_zone *mz_node;
+
+ if (mz->on_tree)
+ return;
+
+ mz->usage_in_excess = new_usage_in_excess;
+ if (!mz->usage_in_excess)
+ return;
+ while (*p) {
+ parent = *p;
+ mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+ tree_node);
+ if (mz->usage_in_excess < mz_node->usage_in_excess)
+ p = &(*p)->rb_left;
+ /*
+ * We can't avoid mem cgroups that are over their soft
+ * limit by the same amount
+ */
+ else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mz->tree_node, parent, p);
+ rb_insert_color(&mz->tree_node, &mctz->rb_root);
+ mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ if (!mz->on_tree)
+ return;
+ rb_erase(&mz->tree_node, &mctz->rb_root);
+ mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ spin_lock(&mctz->lock);
+ __mem_cgroup_remove_exceeded(mem, mz, mctz);
+ spin_unlock(&mctz->lock);
+}
+
+static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
+{
+ bool ret = false;
+ int cpu;
+ s64 val;
+ struct mem_cgroup_stat_cpu *cpustat;
+
+ cpu = get_cpu();
+ cpustat = &mem->stat.cpustat[cpu];
+ val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
+ if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
+ __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
+ ret = true;
+ }
+ put_cpu();
+ return ret;
+}
+
+static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
+{
+ unsigned long long excess;
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_tree_per_zone *mctz;
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
+ mctz = soft_limit_tree_from_page(page);
+
+ /*
+ * Necessary to update all ancestors when hierarchy is used,
+ * because their event counter is not touched.
+ */
+ for (; mem; mem = parent_mem_cgroup(mem)) {
+ mz = mem_cgroup_zoneinfo(mem, nid, zid);
+ excess = res_counter_soft_limit_excess(&mem->res);
+ /*
+ * We have to update the tree if mz is on RB-tree or
+ * mem is over its softlimit.
+ */
+ if (excess || mz->on_tree) {
+ spin_lock(&mctz->lock);
+ /* if on-tree, remove it */
+ if (mz->on_tree)
+ __mem_cgroup_remove_exceeded(mem, mz, mctz);
+ /*
+ * Insert again. mz->usage_in_excess will be updated.
+ * If excess is 0, no tree ops.
+ */
+ __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+ spin_unlock(&mctz->lock);
+ }
+ }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
+{
+ int node, zone;
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_tree_per_zone *mctz;
+
+ for_each_node_state(node, N_POSSIBLE) {
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ mz = mem_cgroup_zoneinfo(mem, node, zone);
+ mctz = soft_limit_tree_node_zone(node, zone);
+ mem_cgroup_remove_exceeded(mem, mz, mctz);
+ }
+ }
+}
+
+static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
+{
+ return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct rb_node *rightmost = NULL;
+ struct mem_cgroup_per_zone *mz;
+
+retry:
+ mz = NULL;
+ rightmost = rb_last(&mctz->rb_root);
+ if (!rightmost)
+ goto done; /* Nothing to reclaim from */
+
+ mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+ /*
+ * Remove the node now but someone else can add it back;
+ * we will add it back at the end of reclaim to its correct
+ * position in the tree.
+ */
+ __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+ if (!res_counter_soft_limit_excess(&mz->mem->res) ||
+ !css_tryget(&mz->mem->css))
+ goto retry;
+done:
+ return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ spin_lock(&mctz->lock);
+ mz = __mem_cgroup_largest_soft_limit_node(mctz);
+ spin_unlock(&mctz->lock);
+ return mz;
+}
+
+static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
+ bool charge)
+{
+ int val = (charge) ? 1 : -1;
+ struct mem_cgroup_stat *stat = &mem->stat;
+ struct mem_cgroup_stat_cpu *cpustat;
+ int cpu = get_cpu();
+
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
+ put_cpu();
+}
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
struct page_cgroup *pc,
bool charge)
{
- int val = (charge)? 1 : -1;
+ int val = (charge) ? 1 : -1;
struct mem_cgroup_stat *stat = &mem->stat;
struct mem_cgroup_stat_cpu *cpustat;
int cpu = get_cpu();
@@ -240,28 +515,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
else
__mem_cgroup_stat_add_safe(cpustat,
MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
put_cpu();
}
-static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
-{
- return &mem->info.nodeinfo[nid]->zoneinfo[zid];
-}
-
-static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
-{
- struct mem_cgroup *mem = pc->mem_cgroup;
- int nid = page_cgroup_nid(pc);
- int zid = page_cgroup_zid(pc);
-
- if (!mem)
- return NULL;
-
- return mem_cgroup_zoneinfo(mem, nid, zid);
-}
-
static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
enum lru_list idx)
{
@@ -354,6 +611,11 @@ static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
return ret;
}
+static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
+{
+ return (mem == root_mem_cgroup);
+}
+
/*
* Following LRU functions are allowed to be used without PCG_LOCK.
* Operations are called by routine of global LRU independently from memcg.
@@ -371,22 +633,24 @@ static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
struct page_cgroup *pc;
- struct mem_cgroup *mem;
struct mem_cgroup_per_zone *mz;
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
/* can happen while we handle swapcache. */
- if (list_empty(&pc->lru) || !pc->mem_cgroup)
+ if (!TestClearPageCgroupAcctLRU(pc))
return;
+ VM_BUG_ON(!pc->mem_cgroup);
/*
* We don't check PCG_USED bit. It's cleared when the "page" is finally
* removed from global LRU.
*/
mz = page_cgroup_zoneinfo(pc);
- mem = pc->mem_cgroup;
MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+ if (mem_cgroup_is_root(pc->mem_cgroup))
+ return;
+ VM_BUG_ON(list_empty(&pc->lru));
list_del_init(&pc->lru);
return;
}
@@ -410,8 +674,8 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
* For making pc->mem_cgroup visible, insert smp_rmb() here.
*/
smp_rmb();
- /* unused page is not rotated. */
- if (!PageCgroupUsed(pc))
+ /* unused or root page is not rotated. */
+ if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
return;
mz = page_cgroup_zoneinfo(pc);
list_move(&pc->lru, &mz->lists[lru]);
@@ -425,6 +689,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
+ VM_BUG_ON(PageCgroupAcctLRU(pc));
/*
* Used bit is set without atomic ops but after smp_wmb().
* For making pc->mem_cgroup visible, insert smp_rmb() here.
@@ -435,6 +700,9 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
mz = page_cgroup_zoneinfo(pc);
MEM_CGROUP_ZSTAT(mz, lru) += 1;
+ SetPageCgroupAcctLRU(pc);
+ if (mem_cgroup_is_root(pc->mem_cgroup))
+ return;
list_add(&pc->lru, &mz->lists[lru]);
}
@@ -469,7 +737,7 @@ static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
spin_lock_irqsave(&zone->lru_lock, flags);
/* link when the page is linked to LRU but page_cgroup isn't */
- if (PageLRU(page) && list_empty(&pc->lru))
+ if (PageLRU(page) && !PageCgroupAcctLRU(pc))
mem_cgroup_add_lru_list(page, page_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
@@ -496,7 +764,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
task_unlock(task);
if (!curr)
return 0;
- if (curr->use_hierarchy)
+ /*
+ * We should check use_hierarchy of "mem", not "curr": checking
+ * use_hierarchy of "curr" here would make this function return true
+ * whenever hierarchy is enabled in "curr" and "curr" is a child of
+ * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
+ */
+ if (mem->use_hierarchy)
ret = css_is_ancestor(&curr->css, &mem->css);
else
ret = (curr == mem);
@@ -648,7 +922,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
int nid = z->zone_pgdat->node_id;
int zid = zone_idx(z);
struct mem_cgroup_per_zone *mz;
- int lru = LRU_FILE * !!file + !!active;
+ int lru = LRU_FILE * file + active;
int ret;
BUG_ON(!mem_cont);
@@ -745,7 +1019,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
static char memcg_name[PATH_MAX];
int ret;
- if (!memcg)
+ if (!memcg || !p)
return;
@@ -855,28 +1129,64 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 * If shrink==true, to avoid freeing too much, this returns immediately.
*/
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
- gfp_t gfp_mask, bool noswap, bool shrink)
+ struct zone *zone,
+ gfp_t gfp_mask,
+ unsigned long reclaim_options)
{
struct mem_cgroup *victim;
int ret, total = 0;
int loop = 0;
+ bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
+ bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
+ bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
+ unsigned long excess = mem_cgroup_get_excess(root_mem);
/* If memsw_is_minimum==1, swap-out is of-no-use. */
if (root_mem->memsw_is_minimum)
noswap = true;
- while (loop < 2) {
+ while (1) {
victim = mem_cgroup_select_victim(root_mem);
- if (victim == root_mem)
+ if (victim == root_mem) {
loop++;
+ if (loop >= 1)
+ drain_all_stock_async();
+ if (loop >= 2) {
+ /*
+ * If we have not been able to reclaim
+ * anything, it might be because there are
+ * no reclaimable pages under this hierarchy.
+ */
+ if (!check_soft || !total) {
+ css_put(&victim->css);
+ break;
+ }
+ /*
+ * We want to do more targeted reclaim.
+ * excess >> 2 is not too excessive, so we don't
+ * reclaim too much, nor too little, which would keep
+ * us coming back to reclaim from this cgroup.
+ */
+ if (total >= (excess >> 2) ||
+ (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
+ css_put(&victim->css);
+ break;
+ }
+ }
+ }
if (!mem_cgroup_local_usage(&victim->stat)) {
/* this cgroup's local usage == 0 */
css_put(&victim->css);
continue;
}
/* we use swappiness of local cgroup */
- ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
- get_swappiness(victim));
+ if (check_soft)
+ ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
+ noswap, get_swappiness(victim), zone,
+ zone->zone_pgdat->node_id);
+ else
+ ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
+ noswap, get_swappiness(victim));
css_put(&victim->css);
/*
* At shrinking usage, we can't check we should stop here or
@@ -886,7 +1196,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
if (shrink)
return ret;
total += ret;
- if (mem_cgroup_check_under_limit(root_mem))
+ if (check_soft) {
+ if (res_counter_check_under_soft_limit(&root_mem->res))
+ return total;
+ } else if (mem_cgroup_check_under_limit(root_mem))
return 1 + total;
}
return total;
@@ -924,7 +1237,7 @@ static void record_last_oom(struct mem_cgroup *mem)
* Currently used to update mapped file statistics, but the routine can be
* generalized to update other statistics as well.
*/
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+void mem_cgroup_update_file_mapped(struct page *page, int val)
{
struct mem_cgroup *mem;
struct mem_cgroup_stat *stat;
@@ -932,9 +1245,6 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
int cpu;
struct page_cgroup *pc;
- if (!page_is_file_cache(page))
- return;
-
pc = lookup_page_cgroup(page);
if (unlikely(!pc))
return;
@@ -948,29 +1258,158 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
goto done;
/*
- * Preemption is already disabled, we don't need get_cpu(), but
- * that's not true for RT :)
+ * Preemption is already disabled, we don't need get_cpu(),
+ * but that's not true for RT !
*/
cpu = get_cpu();
stat = &mem->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
put_cpu();
done:
unlock_page_cgroup(pc);
}
/*
+ * Size of the first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: bigger numbers may be necessary on big iron.
+ */
+#define CHARGE_SIZE (32 * PAGE_SIZE)
+struct memcg_stock_pcp {
+ struct mem_cgroup *cached; /* this is never the root cgroup */
+ int charge;
+ struct work_struct work;
+};
+static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static atomic_t memcg_drain_count;
+
+/*
+ * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed
+ * from the local stock and true is returned. If the stock is 0 or its charges
+ * belong to a cgroup other than the current target, false is returned and the
+ * stock will be refilled.
+ */
+static bool consume_stock(struct mem_cgroup *mem)
+{
+ struct memcg_stock_pcp *stock;
+ bool ret = true;
+
+ stock = &get_cpu_var(memcg_stock);
+ if (mem == stock->cached && stock->charge)
+ stock->charge -= PAGE_SIZE;
+ else /* need to call res_counter_charge */
+ ret = false;
+ put_cpu_var(memcg_stock);
+ return ret;
+}
+
+/*
+ * Return charges cached in the percpu stock to the res_counter and reset the cached information.
+ */
+static void drain_stock(struct memcg_stock_pcp *stock)
+{
+ struct mem_cgroup *old = stock->cached;
+
+ if (stock->charge) {
+ res_counter_uncharge(&old->res, stock->charge);
+ if (do_swap_account)
+ res_counter_uncharge(&old->memsw, stock->charge);
+ }
+ stock->cached = NULL;
+ stock->charge = 0;
+}
+
+/*
+ * This must be called with preemption disabled, or by a thread
+ * which is pinned to the local cpu.
+ */
+static void drain_local_stock(struct work_struct *dummy)
+{
+ struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+ drain_stock(stock);
+}
+
+/*
+ * Cache charges (val) taken from the res_counter in the local per-cpu area.
+ * They will be consumed by consume_stock() later.
+ */
+static void refill_stock(struct mem_cgroup *mem, int val)
+{
+ struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+
+ if (stock->cached != mem) { /* reset if necessary */
+ drain_stock(stock);
+ stock->cached = mem;
+ }
+ stock->charge += val;
+ put_cpu_var(memcg_stock);
+}
+
+/*
+ * Tries to drain stocked charges on other cpus. This function is asynchronous
+ * and just schedules a work item per cpu to drain locally on each cpu. The
+ * caller can expect some charges to be returned to the res_counter later, but
+ * cannot wait for that.
+ */
+static void drain_all_stock_async(void)
+{
+ int cpu;
+ /* This function schedules "drain" in an asynchronous way.
+ * The result of "drain" is not directly handled by callers, so
+ * if someone is already draining, we don't have to start another drain.
+ * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch
+ * any race; we just do a loose check here.
+ */
+ if (atomic_read(&memcg_drain_count))
+ return;
+ /* Notify other cpus that system-wide "drain" is running */
+ atomic_inc(&memcg_drain_count);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+ schedule_work_on(cpu, &stock->work);
+ }
+ put_online_cpus();
+ atomic_dec(&memcg_drain_count);
+ /* We don't wait for flush_work */
+}
+
+/* This is a synchronous drain interface. */
+static void drain_all_stock_sync(void)
+{
+ /* called when force_empty is called */
+ atomic_inc(&memcg_drain_count);
+ schedule_on_each_cpu(drain_local_stock);
+ atomic_dec(&memcg_drain_count);
+}
+
+static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct memcg_stock_pcp *stock;
+
+ if (action != CPU_DEAD)
+ return NOTIFY_OK;
+ stock = &per_cpu(memcg_stock, cpu);
+ drain_stock(stock);
+ return NOTIFY_OK;
+}
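The consume/refill/drain helpers above put a small per-cpu cache of pre-charged bytes in front of the res_counter so that most single-page charges avoid touching the shared counter. A hypothetical stand-alone model of that flow (illustration only; names and sizes are borrowed loosely from the patch, and it is single-threaded for brevity):

/*
 * Hypothetical model of the memcg_stock_pcp scheme: charge a large chunk
 * from the shared counter once, then hand out PAGE_SIZE pieces locally
 * until the stock runs dry.
 */
#include <stdio.h>

#define PAGE_SIZE	4096
#define CHARGE_SIZE	(32 * PAGE_SIZE)

static long shared_usage;		/* stands in for res_counter usage */
static long stock_charge;		/* stands in for memcg_stock_pcp.charge */

static int consume_stock(void)		/* cf. consume_stock() in the patch */
{
	if (stock_charge >= PAGE_SIZE) {
		stock_charge -= PAGE_SIZE;
		return 1;
	}
	return 0;
}

static void charge_one_page(void)
{
	if (consume_stock())
		return;			/* fast path: no shared-counter update */
	shared_usage += CHARGE_SIZE;	/* slow path: charge a whole chunk */
	stock_charge += CHARGE_SIZE - PAGE_SIZE;
}

static void drain_stock(void)		/* cf. drain_stock() in the patch */
{
	shared_usage -= stock_charge;
	stock_charge = 0;
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		charge_one_page();
	drain_stock();
	printf("usage after drain: %ld pages\n", shared_usage / PAGE_SIZE);
	return 0;
}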
+
+/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
* oom-killer can be invoked.
*/
static int __mem_cgroup_try_charge(struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcg,
- bool oom)
+ bool oom, struct page *page)
{
struct mem_cgroup *mem, *mem_over_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct res_counter *fail_res;
+ int csize = CHARGE_SIZE;
if (unlikely(test_thread_flag(TIF_MEMDIE))) {
/* Don't account this! */
@@ -995,22 +1434,26 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
return 0;
VM_BUG_ON(css_is_removed(&mem->css));
+ if (mem_cgroup_is_root(mem))
+ goto done;
while (1) {
- int ret;
- bool noswap = false;
+ int ret = 0;
+ unsigned long flags = 0;
- ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+ if (consume_stock(mem))
+ goto charged;
+
+ ret = res_counter_charge(&mem->res, csize, &fail_res);
if (likely(!ret)) {
if (!do_swap_account)
break;
- ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
- &fail_res);
+ ret = res_counter_charge(&mem->memsw, csize, &fail_res);
if (likely(!ret))
break;
/* mem+swap counter fails */
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- noswap = true;
+ res_counter_uncharge(&mem->res, csize);
+ flags |= MEM_CGROUP_RECLAIM_NOSWAP;
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
memsw);
} else
@@ -1018,11 +1461,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
res);
+ /* reduce request size and retry */
+ if (csize > PAGE_SIZE) {
+ csize = PAGE_SIZE;
+ continue;
+ }
if (!(gfp_mask & __GFP_WAIT))
goto nomem;
- ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
- noswap, false);
+ ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
+ gfp_mask, flags);
if (ret)
continue;
@@ -1039,20 +1487,42 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
if (!nr_retries--) {
if (oom) {
- mutex_lock(&memcg_tasklist);
mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
- mutex_unlock(&memcg_tasklist);
record_last_oom(mem_over_limit);
}
goto nomem;
}
}
+ if (csize > PAGE_SIZE)
+ refill_stock(mem, csize - PAGE_SIZE);
+charged:
+ /*
+ * Insert the ancestor (and the ancestor's ancestors) into the softlimit
+ * RB-tree if they exceed their soft limit.
+ */
+ if (mem_cgroup_soft_limit_check(mem))
+ mem_cgroup_update_tree(mem, page);
+done:
return 0;
nomem:
css_put(&mem->css);
return -ENOMEM;
}
+/*
+ * Sometimes we have to undo a charge we got by try_charge().
+ * This function is for that: it uncharges and puts the css refcount
+ * taken by try_charge().
+ */
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+{
+ if (!mem_cgroup_is_root(mem)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ }
+ css_put(&mem->css);
+}
/*
* A helper function to get mem_cgroup from ID. must be called under
@@ -1073,25 +1543,22 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
return container_of(css, struct mem_cgroup, css);
}
-static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem = NULL;
struct page_cgroup *pc;
unsigned short id;
swp_entry_t ent;
VM_BUG_ON(!PageLocked(page));
- if (!PageSwapCache(page))
- return NULL;
-
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
if (mem && !css_tryget(&mem->css))
mem = NULL;
- } else {
+ } else if (PageSwapCache(page)) {
ent.val = page_private(page);
id = lookup_swap_cgroup(ent);
rcu_read_lock();
@@ -1120,15 +1587,32 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- css_put(&mem->css);
+ mem_cgroup_cancel_charge(mem);
return;
}
+
pc->mem_cgroup = mem;
+ /*
+ * We access a page_cgroup asynchronously without lock_page_cgroup().
+ * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
+ * is accessed after testing the USED bit. To make pc->mem_cgroup visible
+ * before the USED bit, we need a memory barrier here.
+ * See mem_cgroup_add_lru_list(), etc.
+ */
smp_wmb();
- pc->flags = pcg_default_flags[ctype];
+ switch (ctype) {
+ case MEM_CGROUP_CHARGE_TYPE_CACHE:
+ case MEM_CGROUP_CHARGE_TYPE_SHMEM:
+ SetPageCgroupCache(pc);
+ SetPageCgroupUsed(pc);
+ break;
+ case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ ClearPageCgroupCache(pc);
+ SetPageCgroupUsed(pc);
+ break;
+ default:
+ break;
+ }
mem_cgroup_charge_statistics(mem, pc, true);
@@ -1136,27 +1620,22 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
}
/**
- * mem_cgroup_move_account - move account of the page
+ * __mem_cgroup_move_account - move account of the page
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
- *
- * returns 0 at success,
- * returns -EBUSY when lock is busy or "pc" is unstable.
+ * - the pc is locked, used, and ->mem_cgroup points to @from.
*
* This function does "uncharge" from old cgroup but doesn't do "charge" to
* new cgroup. It should be done by a caller.
*/
-static int mem_cgroup_move_account(struct page_cgroup *pc,
+static void __mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to)
{
- struct mem_cgroup_per_zone *from_mz, *to_mz;
- int nid, zid;
- int ret = -EBUSY;
struct page *page;
int cpu;
struct mem_cgroup_stat *stat;
@@ -1164,56 +1643,59 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
+ VM_BUG_ON(!PageCgroupLocked(pc));
+ VM_BUG_ON(!PageCgroupUsed(pc));
+ VM_BUG_ON(pc->mem_cgroup != from);
- nid = page_cgroup_nid(pc);
- zid = page_cgroup_zid(pc);
- from_mz = mem_cgroup_zoneinfo(from, nid, zid);
- to_mz = mem_cgroup_zoneinfo(to, nid, zid);
-
- if (!trylock_page_cgroup(pc))
- return ret;
-
- if (!PageCgroupUsed(pc))
- goto out;
-
- if (pc->mem_cgroup != from)
- goto out;
-
- res_counter_uncharge(&from->res, PAGE_SIZE);
+ if (!mem_cgroup_is_root(from))
+ res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
page = pc->page;
- if (page_is_file_cache(page) && page_mapped(page)) {
+ if (page_mapped(page) && !PageAnon(page)) {
cpu = smp_processor_id();
/* Update mapped_file data for mem_cgroup "from" */
stat = &from->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
-1);
/* Update mapped_file data for mem_cgroup "to" */
stat = &to->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
1);
}
- if (do_swap_account)
+ if (do_swap_account && !mem_cgroup_is_root(from))
res_counter_uncharge(&from->memsw, PAGE_SIZE);
css_put(&from->css);
css_get(&to->css);
pc->mem_cgroup = to;
mem_cgroup_charge_statistics(to, pc, true);
- ret = 0;
-out:
- unlock_page_cgroup(pc);
/*
 * We charge against "to", which may not have any tasks. Then, "to"
 * can be under rmdir(). But in the current implementation, the caller of
 * this function is just force_empty() and it's guaranteed that
 * "to" is never removed. So, we don't check rmdir status here.
*/
+}
+
+/*
+ * check whether the @pc is valid for moving account and call
+ * __mem_cgroup_move_account()
+ */
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+ struct mem_cgroup *from, struct mem_cgroup *to)
+{
+ int ret = -EINVAL;
+ lock_page_cgroup(pc);
+ if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
+ __mem_cgroup_move_account(pc, from, to);
+ ret = 0;
+ }
+ unlock_page_cgroup(pc);
return ret;
}
@@ -1235,43 +1717,27 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
if (!pcg)
return -EINVAL;
+ ret = -EBUSY;
+ if (!get_page_unless_zero(page))
+ goto out;
+ if (isolate_lru_page(page))
+ goto put;
parent = mem_cgroup_from_cont(pcg);
-
-
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
if (ret || !parent)
- return ret;
-
- if (!get_page_unless_zero(page)) {
- ret = -EBUSY;
- goto uncharge;
- }
-
- ret = isolate_lru_page(page);
-
- if (ret)
- goto cancel;
+ goto put_back;
ret = mem_cgroup_move_account(pc, child, parent);
-
+ if (!ret)
+ css_put(&parent->css); /* drop extra refcnt by try_charge() */
+ else
+ mem_cgroup_cancel_charge(parent); /* does css_put */
+put_back:
putback_lru_page(page);
- if (!ret) {
- put_page(page);
- /* drop extra refcnt by try_charge() */
- css_put(&parent->css);
- return 0;
- }
-
-cancel:
+put:
put_page(page);
-uncharge:
- /* drop extra refcnt by try_charge() */
- css_put(&parent->css);
- /* uncharge if move fails */
- res_counter_uncharge(&parent->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+out:
return ret;
}
@@ -1296,7 +1762,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
prefetchw(pc);
mem = memcg;
- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
if (ret || !mem)
return ret;
@@ -1389,7 +1855,7 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
/*
* While swap-in, try_charge -> commit or cancel, the page is locked.
* And when try_charge() successfully returns, one refcnt to memcg without
- * struct page_cgroup is aquired. This refcnt will be cumsumed by
+ * struct page_cgroup is acquired. This refcnt will be consumed by
* "commit()" or removed by "cancel()"
*/
int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
@@ -1406,23 +1872,24 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
goto charge_cur_mm;
/*
* A racing thread's fault, or swapoff, may have already updated
- * the pte, and even removed page from swap cache: return success
- * to go on to do_swap_page()'s pte_same() test, which should fail.
+ * the pte, and even removed page from swap cache: in those cases
+ * do_swap_page()'s pte_same() test will fail; but there's also a
+ * KSM case which does need to charge the page.
*/
if (!PageSwapCache(page))
- return 0;
- mem = try_get_mem_cgroup_from_swapcache(page);
+ goto charge_cur_mm;
+ mem = try_get_mem_cgroup_from_page(page);
if (!mem)
goto charge_cur_mm;
*ptr = mem;
- ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+ ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
/* drop extra refcnt from tryget */
css_put(&mem->css);
return ret;
charge_cur_mm:
if (unlikely(!mm))
mm = &init_mm;
- return __mem_cgroup_try_charge(mm, mask, ptr, true);
+ return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
}
static void
@@ -1460,7 +1927,9 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
* This recorded memcg can be obsolete one. So, avoid
* calling css_tryget
*/
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(memcg))
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ mem_cgroup_swap_statistics(memcg, false);
mem_cgroup_put(memcg);
}
rcu_read_unlock();
@@ -1485,13 +1954,54 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
return;
if (!mem)
return;
+ mem_cgroup_cancel_charge(mem);
+}
+
+static void
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+{
+ struct memcg_batch_info *batch = NULL;
+ bool uncharge_memsw = true;
+ /* If swapout, usage of swap doesn't decrease */
+ if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+ uncharge_memsw = false;
+ /*
+ * do_batch > 0 when unmapping pages or inode invalidate/truncate.
+ * In those cases, all pages freed continuously can be expected to be in
+ * the same cgroup, and we have a chance to coalesce uncharges.
+ * But we uncharge one by one if this task is being killed by OOM (TIF_MEMDIE),
+ * because we want to uncharge as soon as possible.
+ */
+ if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
+ goto direct_uncharge;
+
+ batch = &current->memcg_batch;
+ /*
+ * Usually, we do css_get() when we remember a memcg pointer.
+ * But in this case, we keep res->usage until the end of a series of
+ * uncharges. Then, it's ok to ignore the memcg's refcnt.
+ */
+ if (!batch->memcg)
+ batch->memcg = mem;
+ /*
+ * In the typical case, batch->memcg == mem. This means we can
+ * merge a series of uncharges into a single res_counter uncharge.
+ * If not, we uncharge the res_counter one by one.
+ */
+ if (batch->memcg != mem)
+ goto direct_uncharge;
+ /* remember freed charge and uncharge it later */
+ batch->bytes += PAGE_SIZE;
+ if (uncharge_memsw)
+ batch->memsw_bytes += PAGE_SIZE;
+ return;
+direct_uncharge:
res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
+ if (uncharge_memsw)
res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- css_put(&mem->css);
+ return;
}
-
/*
* uncharge if !page_mapped(page)
*/
@@ -1539,9 +2049,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
break;
}
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(mem))
+ __do_uncharge(mem, ctype);
+ if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+ mem_cgroup_swap_statistics(mem, true);
mem_cgroup_charge_statistics(mem, pc, false);
ClearPageCgroupUsed(pc);
@@ -1555,6 +2066,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mz = page_cgroup_zoneinfo(pc);
unlock_page_cgroup(pc);
+ if (mem_cgroup_soft_limit_check(mem))
+ mem_cgroup_update_tree(mem, page);
/* at swapout, this memcg will be accessed to record to swap */
if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
css_put(&mem->css);
@@ -1583,6 +2096,50 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
+/*
+ * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
+ * In those cases, pages are freed continuously and we can expect them to
+ * be in the same memcg. Each of these callers itself limits the number of
+ * pages freed at once, so uncharge_start/end() is called properly.
+ * This may be called multiple (e.g. 2) times in a context.
+ */
+
+void mem_cgroup_uncharge_start(void)
+{
+ current->memcg_batch.do_batch++;
+ /* Calls can nest. */
+ if (current->memcg_batch.do_batch == 1) {
+ current->memcg_batch.memcg = NULL;
+ current->memcg_batch.bytes = 0;
+ current->memcg_batch.memsw_bytes = 0;
+ }
+}
+
+void mem_cgroup_uncharge_end(void)
+{
+ struct memcg_batch_info *batch = &current->memcg_batch;
+
+ if (!batch->do_batch)
+ return;
+
+ batch->do_batch--;
+ if (batch->do_batch) /* If stacked, do nothing. */
+ return;
+
+ if (!batch->memcg)
+ return;
+ /*
+ * This "batch->memcg" is valid without any css_get/put etc.,
+ * because we hide charges behind us.
+ */
+ if (batch->bytes)
+ res_counter_uncharge(&batch->memcg->res, batch->bytes);
+ if (batch->memsw_bytes)
+ res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+ /* forget this pointer (for sanity check) */
+ batch->memcg = NULL;
+}
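A hypothetical caller pattern for the pair above (illustration only, not taken from this patch; it assumes the matching declarations in include/linux/memcontrol.h from this series): bracket a run of page frees so that their memcg uncharges are coalesced into one res_counter update where possible.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Hypothetical example: drop a run of page-cache pages with batched uncharge. */
static void example_drop_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();		/* open a batch context (may nest) */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();		/* coalesced res_counter uncharge */
}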
+
#ifdef CONFIG_SWAP
/*
* called after __delete_from_swap_cache() and drop "page" account.
@@ -1630,7 +2187,9 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* We uncharge this because swap is freed.
* This memcg can be obsolete one. We avoid calling css_tryget
*/
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(memcg))
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ mem_cgroup_swap_statistics(memcg, false);
mem_cgroup_put(memcg);
}
rcu_read_unlock();
@@ -1659,7 +2218,8 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
unlock_page_cgroup(pc);
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+ page);
css_put(&mem->css);
}
*ptr = mem;
@@ -1755,7 +2315,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
- int progress;
u64 memswlimit;
int ret = 0;
int children = mem_cgroup_count_children(memcg);
@@ -1799,8 +2358,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
- false, true);
+ mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
+ MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
@@ -1852,7 +2411,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
+ mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
+ MEM_CGROUP_RECLAIM_NOSWAP |
+ MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
@@ -1863,6 +2424,97 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
return ret;
}
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask, int nid,
+ int zid)
+{
+ unsigned long nr_reclaimed = 0;
+ struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+ unsigned long reclaimed;
+ int loop = 0;
+ struct mem_cgroup_tree_per_zone *mctz;
+ unsigned long long excess;
+
+ if (order > 0)
+ return 0;
+
+ mctz = soft_limit_tree_node_zone(nid, zid);
+ /*
+ * This loop can run for a while, especially if mem_cgroups continuously
+ * keep exceeding their soft limit and putting the system under
+ * pressure.
+ */
+ do {
+ if (next_mz)
+ mz = next_mz;
+ else
+ mz = mem_cgroup_largest_soft_limit_node(mctz);
+ if (!mz)
+ break;
+
+ reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
+ gfp_mask,
+ MEM_CGROUP_RECLAIM_SOFT);
+ nr_reclaimed += reclaimed;
+ spin_lock(&mctz->lock);
+
+ /*
+ * If we failed to reclaim anything from this memory cgroup
+ * it is time to move on to the next cgroup
+ */
+ next_mz = NULL;
+ if (!reclaimed) {
+ do {
+ /*
+ * Loop until we find yet another one.
+ *
+ * By the time we get the soft_limit lock
+ * again, someone might have added the
+ * group back on the RB tree. Iterate to
+ * make sure we get a different mem.
+ * mem_cgroup_largest_soft_limit_node returns
+ * NULL if no other cgroup is present on
+ * the tree
+ */
+ next_mz =
+ __mem_cgroup_largest_soft_limit_node(mctz);
+ if (next_mz == mz) {
+ css_put(&next_mz->mem->css);
+ next_mz = NULL;
+ } else /* next_mz == NULL or other memcg */
+ break;
+ } while (1);
+ }
+ __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+ excess = res_counter_soft_limit_excess(&mz->mem->res);
+ /*
+ * One school of thought says that we should not add
+ * back the node to the tree if reclaim returns 0.
+ * But our reclaim could return 0 simply because, due
+ * to priority, we are exposing a smaller subset of
+ * memory to reclaim from. Consider this as a longer
+ * term TODO.
+ */
+ /* If excess == 0, no tree ops */
+ __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
+ spin_unlock(&mctz->lock);
+ css_put(&mz->mem->css);
+ loop++;
+ /*
+ * Could not reclaim anything and there are no more
+ * mem cgroups to try or we seem to be looping without
+ * reclaiming anything.
+ */
+ if (!nr_reclaimed &&
+ (next_mz == NULL ||
+ loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+ break;
+ } while (!nr_reclaimed);
+ if (next_mz)
+ css_put(&next_mz->mem->css);
+ return nr_reclaimed;
+}
+
/*
* This routine traverse page_cgroup in given list and drop them all.
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
@@ -1936,7 +2588,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
if (free_all)
goto try_to_free;
move_account:
- while (mem->res.usage > 0) {
+ do {
ret = -EBUSY;
if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
goto out;
@@ -1945,6 +2597,7 @@ move_account:
goto out;
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
+ drain_all_stock_sync();
ret = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
@@ -1963,8 +2616,8 @@ move_account:
if (ret == -ENOMEM)
goto try_to_free;
cond_resched();
- }
- ret = 0;
+ /* "ret" should also be checked to ensure all lists are empty. */
+ } while (mem->res.usage > 0 || ret);
out:
css_put(&mem->css);
return ret;
@@ -1997,10 +2650,7 @@ try_to_free:
}
lru_add_drain();
/* try move_account...there may be some *locked* pages. */
- if (mem->res.usage)
- goto move_account;
- ret = 0;
- goto out;
+ goto move_account;
}
int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
@@ -2027,7 +2677,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
cgroup_lock();
/*
- * If parent's use_hiearchy is set, we can't make any modifications
+ * If parent's use_hierarchy is set, we can't make any modifications
* in the child subtrees. If it is unset, then the change can
* occur, provided the current cgroup has no children.
*
@@ -2047,20 +2697,65 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
return retval;
}
+struct mem_cgroup_idx_data {
+ s64 val;
+ enum mem_cgroup_stat_index idx;
+};
+
+static int
+mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+{
+ struct mem_cgroup_idx_data *d = data;
+ d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
+ return 0;
+}
+
+static void
+mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+ enum mem_cgroup_stat_index idx, s64 *val)
+{
+ struct mem_cgroup_idx_data d;
+ d.idx = idx;
+ d.val = 0;
+ mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
+ *val = d.val;
+}
+
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- u64 val = 0;
+ u64 idx_val, val;
int type, name;
type = MEMFILE_TYPE(cft->private);
name = MEMFILE_ATTR(cft->private);
switch (type) {
case _MEM:
- val = res_counter_read_u64(&mem->res, name);
+ if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_CACHE, &idx_val);
+ val = idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_RSS, &idx_val);
+ val += idx_val;
+ val <<= PAGE_SHIFT;
+ } else
+ val = res_counter_read_u64(&mem->res, name);
break;
case _MEMSWAP:
- val = res_counter_read_u64(&mem->memsw, name);
+ if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_CACHE, &idx_val);
+ val = idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_RSS, &idx_val);
+ val += idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+ val += idx_val;
+ val <<= PAGE_SHIFT;
+ } else
+ val = res_counter_read_u64(&mem->memsw, name);
break;
default:
BUG();
@@ -2084,6 +2779,10 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
name = MEMFILE_ATTR(cft->private);
switch (name) {
case RES_LIMIT:
+ if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
+ ret = -EINVAL;
+ break;
+ }
/* This function does all necessary parse...reuse it */
ret = res_counter_memparse_write_strategy(buffer, &val);
if (ret)
@@ -2093,6 +2792,20 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
else
ret = mem_cgroup_resize_memsw_limit(memcg, val);
break;
+ case RES_SOFT_LIMIT:
+ ret = res_counter_memparse_write_strategy(buffer, &val);
+ if (ret)
+ break;
+ /*
+ * For memsw, soft limits are hard to implement in terms
+ * of semantics; for now, we support soft limits only for
+ * memory control without swap.
+ */
+ if (type == _MEM)
+ ret = res_counter_set_soft_limit(&memcg->res, val);
+ else
+ ret = -EINVAL;
+ break;
default:
ret = -EINVAL; /* should be BUG() ? */
break;
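For reference, a minimal user-space sketch of driving the new RES_SOFT_LIMIT write path above (not part of this patch): it writes to the memory.soft_limit_in_bytes control file added further down in mem_cgroup_files[]. The mount point /cgroup/memory and the group name "demo" are assumptions for the example; any memparse-style value such as "256M" is accepted by res_counter_memparse_write_strategy().

/*
 * Illustrative user-space sketch (not part of the patch): set the new
 * soft limit for a memory cgroup. Mount point and group name are assumed.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/cgroup/memory/demo/memory.soft_limit_in_bytes";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("256M\n", f);	/* parsed by res_counter_memparse_write_strategy() */
	fclose(f);
	return 0;
}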
@@ -2150,6 +2863,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
res_counter_reset_failcnt(&mem->memsw);
break;
}
+
return 0;
}
@@ -2158,9 +2872,10 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
enum {
MCS_CACHE,
MCS_RSS,
- MCS_MAPPED_FILE,
+ MCS_FILE_MAPPED,
MCS_PGPGIN,
MCS_PGPGOUT,
+ MCS_SWAP,
MCS_INACTIVE_ANON,
MCS_ACTIVE_ANON,
MCS_INACTIVE_FILE,
@@ -2182,6 +2897,7 @@ struct {
{"mapped_file", "total_mapped_file"},
{"pgpgin", "total_pgpgin"},
{"pgpgout", "total_pgpgout"},
+ {"swap", "total_swap"},
{"inactive_anon", "total_inactive_anon"},
{"active_anon", "total_active_anon"},
{"inactive_file", "total_inactive_file"},
@@ -2200,12 +2916,16 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s->stat[MCS_CACHE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
s->stat[MCS_RSS] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
- s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+ s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
s->stat[MCS_PGPGIN] += val;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
s->stat[MCS_PGPGOUT] += val;
+ if (do_swap_account) {
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
+ s->stat[MCS_SWAP] += val * PAGE_SIZE;
+ }
/* per zone stat */
val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -2237,8 +2957,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
memset(&mystat, 0, sizeof(mystat));
mem_cgroup_get_local_stat(mem_cont, &mystat);
- for (i = 0; i < NR_MCS_STAT; i++)
+ for (i = 0; i < NR_MCS_STAT; i++) {
+ if (i == MCS_SWAP && !do_swap_account)
+ continue;
cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+ }
/* Hierarchical information */
{
@@ -2251,9 +2974,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
memset(&mystat, 0, sizeof(mystat));
mem_cgroup_get_total_stat(mem_cont, &mystat);
- for (i = 0; i < NR_MCS_STAT; i++)
+ for (i = 0; i < NR_MCS_STAT; i++) {
+ if (i == MCS_SWAP && !do_swap_account)
+ continue;
cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
-
+ }
#ifdef CONFIG_DEBUG_VM
cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
@@ -2346,6 +3071,12 @@ static struct cftype mem_cgroup_files[] = {
.read_u64 = mem_cgroup_read,
},
{
+ .name = "soft_limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
+ .write_string = mem_cgroup_write,
+ .read_u64 = mem_cgroup_read,
+ },
+ {
.name = "failcnt",
.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
.trigger = mem_cgroup_reset,
@@ -2439,6 +3170,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
mz = &pn->zoneinfo[zone];
for_each_lru(l)
INIT_LIST_HEAD(&mz->lists[l]);
+ mz->usage_in_excess = 0;
+ mz->on_tree = false;
+ mz->mem = mem;
}
return 0;
}
@@ -2484,6 +3218,7 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
{
int node;
+ mem_cgroup_remove_from_trees(mem);
free_css_id(&mem_cgroup_subsys, &mem->css);
for_each_node_state(node, N_POSSIBLE)
@@ -2532,6 +3267,31 @@ static void __init enable_swap_cgroup(void)
}
#endif
+static int mem_cgroup_soft_limit_tree_init(void)
+{
+ struct mem_cgroup_tree_per_node *rtpn;
+ struct mem_cgroup_tree_per_zone *rtpz;
+ int tmp, node, zone;
+
+ for_each_node_state(node, N_POSSIBLE) {
+ tmp = node;
+ if (!node_state(node, N_NORMAL_MEMORY))
+ tmp = -1;
+ rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+ if (!rtpn)
+ return 1;
+
+ soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ rtpz = &rtpn->rb_tree_per_zone[zone];
+ rtpz->rb_root = RB_ROOT;
+ spin_lock_init(&rtpz->lock);
+ }
+ }
+ return 0;
+}
+
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
@@ -2546,10 +3306,22 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
goto free_out;
+
/* root ? */
if (cont->parent == NULL) {
+ int cpu;
enable_swap_cgroup();
parent = NULL;
+ root_mem_cgroup = mem;
+ if (mem_cgroup_soft_limit_tree_init())
+ goto free_out;
+ for_each_possible_cpu(cpu) {
+ struct memcg_stock_pcp *stock =
+ &per_cpu(memcg_stock, cpu);
+ INIT_WORK(&stock->work, drain_local_stock);
+ }
+ hotcpu_notifier(memcg_stock_cpu_callback, 0);
+
} else {
parent = mem_cgroup_from_cont(cont->parent);
mem->use_hierarchy = parent->use_hierarchy;
@@ -2578,6 +3350,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &mem->css;
free_out:
__mem_cgroup_free(mem);
+ root_mem_cgroup = NULL;
return ERR_PTR(error);
}
@@ -2613,14 +3386,13 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
struct cgroup *old_cont,
- struct task_struct *p)
+ struct task_struct *p,
+ bool threadgroup)
{
- mutex_lock(&memcg_tasklist);
/*
* FIXME: It's better to move charges of this process from old
* memcg to new memcg. But it's just on TODO-List now.
*/
- mutex_unlock(&memcg_tasklist);
}
struct cgroup_subsys mem_cgroup_subsys = {
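The memory.stat changes above depend on the MCS_* enum and the memcg_stat_strings[] table staying in lockstep: MCS_SWAP is appended to both, and the show path skips that row when do_swap_account is off. The following is a trimmed-down, user-space sketch of that parallel-table pattern, not the kernel code itself; it keeps only a few rows and uses made-up values.

#include <stdio.h>
#include <stdbool.h>

/* A few of the statistic indices, mirroring the enum in the hunk above. */
enum {
	MCS_CACHE,
	MCS_RSS,
	MCS_FILE_MAPPED,
	MCS_SWAP,
	NR_MCS_STAT,
};

/* Parallel table of names; it must stay in the same order as the enum. */
static const struct {
	const char *local_name;
	const char *total_name;
} stat_strings[NR_MCS_STAT] = {
	{ "cache",       "total_cache" },
	{ "rss",         "total_rss" },
	{ "mapped_file", "total_mapped_file" },
	{ "swap",        "total_swap" },
};

static bool do_swap_account = false;	/* pretend swap accounting is disabled */

int main(void)
{
	unsigned long stat[NR_MCS_STAT] = { 4096, 8192, 1024, 0 };	/* dummy values */
	int i;

	for (i = 0; i < NR_MCS_STAT; i++) {
		/* Skip the swap row entirely when accounting is disabled,
		 * just like the MCS_SWAP check in mem_control_stat_show(). */
		if (i == MCS_SWAP && !do_swap_account)
			continue;
		printf("%s %lu\n", stat_strings[i].local_name, stat[i]);
	}
	return 0;
}

With do_swap_account false this prints only the cache, rss and mapped_file rows, which is why the diff adds the skip in both the local and the hierarchical loop.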
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
new file mode 100644
index 000000000000..17299fd4577c
--- /dev/null
+++ b/mm/memory-failure.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Authors: Andi Kleen, Fengguang Wu
+ *
+ * This software may be redistributed and/or modified under the terms of
+ * the GNU General Public License ("GPL") version 2 only as published by the
+ * Free Software Foundation.
+ *
+ * High level machine check handler. Handles pages reported by the
+ * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
+ * failure.
+ *
+ * Handles page cache pages in various states. The tricky part
+ * here is that we can access any page asynchronously with respect to other VM
+ * users, because memory failures could happen anytime and anywhere,
+ * possibly violating some of their assumptions. This is why this code
+ * has to be extremely careful. Generally it tries to use normal locking
+ * rules, as in taking the standard locks, even if that means the
+ * error handling potentially takes a long time.
+ *
+ * The operation to map back from RMAP chains to processes has to walk
+ * the complete process list and has non-linear complexity in the number of
+ * mappings. In short it can be quite slow. But since memory corruptions
+ * are rare, we hope to get away with this.
+ */
+
+/*
+ * Notebook:
+ * - hugetlb needs more code
+ * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
+ * - pass bad pages to kdump next kernel
+ */
+#define DEBUG 1 /* remove me in 2.6.34 */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/kernel-page-flags.h>
+#include <linux/sched.h>
+#include <linux/ksm.h>
+#include <linux/rmap.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/backing-dev.h>
+#include <linux/migrate.h>
+#include <linux/page-isolation.h>
+#include <linux/suspend.h>
+#include "internal.h"
+
+int sysctl_memory_failure_early_kill __read_mostly = 0;
+
+int sysctl_memory_failure_recovery __read_mostly = 1;
+
+atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
+
+#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+
+u32 hwpoison_filter_enable = 0;
+u32 hwpoison_filter_dev_major = ~0U;
+u32 hwpoison_filter_dev_minor = ~0U;
+u64 hwpoison_filter_flags_mask;
+u64 hwpoison_filter_flags_value;
+EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
+EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
+EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
+EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
+EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
+
+static int hwpoison_filter_dev(struct page *p)
+{
+ struct address_space *mapping;
+ dev_t dev;
+
+ if (hwpoison_filter_dev_major == ~0U &&
+ hwpoison_filter_dev_minor == ~0U)
+ return 0;
+
+ /*
+ * page_mapping() does not accept slab pages
+ */
+ if (PageSlab(p))
+ return -EINVAL;
+
+ mapping = page_mapping(p);
+ if (mapping == NULL || mapping->host == NULL)
+ return -EINVAL;
+
+ dev = mapping->host->i_sb->s_dev;
+ if (hwpoison_filter_dev_major != ~0U &&
+ hwpoison_filter_dev_major != MAJOR(dev))
+ return -EINVAL;
+ if (hwpoison_filter_dev_minor != ~0U &&
+ hwpoison_filter_dev_minor != MINOR(dev))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hwpoison_filter_flags(struct page *p)
+{
+ if (!hwpoison_filter_flags_mask)
+ return 0;
+
+ if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
+ hwpoison_filter_flags_value)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/*
+ * This allows stress tests to limit test scope to a collection of tasks
+ * by putting them under some memcg. This prevents killing unrelated/important
+ * processes such as /sbin/init. Note that the target task may share clean
+ * pages with init (e.g. libc text), which is harmless. If the target task
+ * shares _dirty_ pages with another task B, the test scheme must make sure B
+ * is also included in the memcg. Finally, due to race conditions this filter
+ * can only guarantee that the page either belongs to the memcg tasks, or is
+ * a freed page.
+ */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+u64 hwpoison_filter_memcg;
+EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
+static int hwpoison_filter_task(struct page *p)
+{
+ struct mem_cgroup *mem;
+ struct cgroup_subsys_state *css;
+ unsigned long ino;
+
+ if (!hwpoison_filter_memcg)
+ return 0;
+
+ mem = try_get_mem_cgroup_from_page(p);
+ if (!mem)
+ return -EINVAL;
+
+ css = mem_cgroup_css(mem);
+ /* root_mem_cgroup has NULL dentries */
+ if (!css->cgroup->dentry)
+ return -EINVAL;
+
+ ino = css->cgroup->dentry->d_inode->i_ino;
+ css_put(css);
+
+ if (ino != hwpoison_filter_memcg)
+ return -EINVAL;
+
+ return 0;
+}
+#else
+static int hwpoison_filter_task(struct page *p) { return 0; }
+#endif
+
+int hwpoison_filter(struct page *p)
+{
+ if (!hwpoison_filter_enable)
+ return 0;
+
+ if (hwpoison_filter_dev(p))
+ return -EINVAL;
+
+ if (hwpoison_filter_flags(p))
+ return -EINVAL;
+
+ if (hwpoison_filter_task(p))
+ return -EINVAL;
+
+ return 0;
+}
+#else
+int hwpoison_filter(struct page *p)
+{
+ return 0;
+}
+#endif
+
+EXPORT_SYMBOL_GPL(hwpoison_filter);
+
+/*
+ * Send all the processes who have the page mapped an ``action optional''
+ * signal.
+ */
+static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
+ unsigned long pfn)
+{
+ struct siginfo si;
+ int ret;
+
+ printk(KERN_ERR
+ "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
+ pfn, t->comm, t->pid);
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_MCEERR_AO;
+ si.si_addr = (void *)addr;
+#ifdef __ARCH_SI_TRAPNO
+ si.si_trapno = trapno;
+#endif
+ si.si_addr_lsb = PAGE_SHIFT;
+ /*
+ * Don't use force here; it's convenient if the signal
+ * can be temporarily blocked.
+ * This could cause a loop when the user sets SIGBUS
+ * to SIG_IGN, but hopefully no one will do that?
+ */
+ ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
+ if (ret < 0)
+ printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
+ t->comm, t->pid, ret);
+ return ret;
+}
+
+/*
+ * When an unknown page type is encountered, drain as many buffers as possible
+ * in the hope of turning the page into an LRU or free page, which we can handle.
+ */
+void shake_page(struct page *p, int access)
+{
+ if (!PageSlab(p)) {
+ lru_add_drain_all();
+ if (PageLRU(p))
+ return;
+ drain_all_pages();
+ if (PageLRU(p) || is_free_buddy_page(p))
+ return;
+ }
+
+ /*
+ * Only call shrink_slab here (which would also
+ * shrink other caches) if the access is not potentially fatal.
+ */
+ if (access) {
+ int nr;
+ do {
+ nr = shrink_slab(1000, GFP_KERNEL, 1000);
+ if (page_count(p) == 0)
+ break;
+ } while (nr > 10);
+ }
+}
+EXPORT_SYMBOL_GPL(shake_page);
+
+/*
+ * Kill all processes that have a poisoned page mapped and then isolate
+ * the page.
+ *
+ * General strategy:
+ * Find all processes having the page mapped and kill them.
+ * But we keep a page reference around so that the page is not
+ * actually freed yet.
+ * Then stash the page away
+ *
+ * There's no convenient way to get back to mapped processes
+ * from the VMAs. So do a brute-force search over all
+ * running processes.
+ *
+ * Remember that machine checks are not common (or rather
+ * if they are common you have other problems), so this shouldn't
+ * be a performance issue.
+ *
+ * Also, some races are possible between detecting the
+ * error and actually handling it.
+ */
+
+struct to_kill {
+ struct list_head nd;
+ struct task_struct *tsk;
+ unsigned long addr;
+ unsigned addr_valid:1;
+};
+
+/*
+ * Failure handling: if we can't find or can't kill a process, there's
+ * not much we can do. We just print a message and otherwise ignore it.
+ */
+
+/*
+ * Schedule a process for later kill.
+ * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
+ * TBD would GFP_NOIO be enough?
+ */
+static void add_to_kill(struct task_struct *tsk, struct page *p,
+ struct vm_area_struct *vma,
+ struct list_head *to_kill,
+ struct to_kill **tkc)
+{
+ struct to_kill *tk;
+
+ if (*tkc) {
+ tk = *tkc;
+ *tkc = NULL;
+ } else {
+ tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
+ if (!tk) {
+ printk(KERN_ERR
+ "MCE: Out of memory while machine check handling\n");
+ return;
+ }
+ }
+ tk->addr = page_address_in_vma(p, vma);
+ tk->addr_valid = 1;
+
+ /*
+ * In theory we don't have to kill when the page was
+ * munmapped. But it could also be an mremap. Since that's
+ * likely very rare, kill anyway just out of paranoia, but use
+ * SIGKILL because the error is no longer contained.
+ */
+ if (tk->addr == -EFAULT) {
+ pr_debug("MCE: Unable to find user space address %lx in %s\n",
+ page_to_pfn(p), tsk->comm);
+ tk->addr_valid = 0;
+ }
+ get_task_struct(tsk);
+ tk->tsk = tsk;
+ list_add_tail(&tk->nd, to_kill);
+}
+
+/*
+ * Kill the processes that have been collected earlier.
+ *
+ * Only do anything when DOIT is set; otherwise just free the list
+ * (this is used for clean pages, which do not need killing).
+ * Also, when FAIL is set, do a forced kill because something went
+ * wrong earlier.
+ */
+static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
+ int fail, unsigned long pfn)
+{
+ struct to_kill *tk, *next;
+
+ list_for_each_entry_safe (tk, next, to_kill, nd) {
+ if (doit) {
+ /*
+ * In case something went wrong with munmapping
+ * make sure the process doesn't catch the
+ * signal and then access the memory. Just kill it.
+ */
+ if (fail || tk->addr_valid == 0) {
+ printk(KERN_ERR
+ "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
+ pfn, tk->tsk->comm, tk->tsk->pid);
+ force_sig(SIGKILL, tk->tsk);
+ }
+
+ /*
+ * In theory the process could have mapped
+ * something else at the address in between. We could
+ * check for that, but we need to tell the
+ * process anyway.
+ */
+ else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
+ pfn) < 0)
+ printk(KERN_ERR
+ "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
+ pfn, tk->tsk->comm, tk->tsk->pid);
+ }
+ put_task_struct(tk->tsk);
+ kfree(tk);
+ }
+}
+
+static int task_early_kill(struct task_struct *tsk)
+{
+ if (!tsk->mm)
+ return 0;
+ if (tsk->flags & PF_MCE_PROCESS)
+ return !!(tsk->flags & PF_MCE_EARLY);
+ return sysctl_memory_failure_early_kill;
+}
+
+/*
+ * Collect processes when the error hit an anonymous page.
+ */
+static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+ struct to_kill **tkc)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct anon_vma *av;
+
+ read_lock(&tasklist_lock);
+ av = page_lock_anon_vma(page);
+ if (av == NULL) /* Not actually mapped anymore */
+ goto out;
+ for_each_process (tsk) {
+ if (!task_early_kill(tsk))
+ continue;
+ list_for_each_entry (vma, &av->head, anon_vma_node) {
+ if (!page_mapped_in_vma(page, vma))
+ continue;
+ if (vma->vm_mm == tsk->mm)
+ add_to_kill(tsk, page, vma, to_kill, tkc);
+ }
+ }
+ page_unlock_anon_vma(av);
+out:
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Collect processes when the error hit a file mapped page.
+ */
+static void collect_procs_file(struct page *page, struct list_head *to_kill,
+ struct to_kill **tkc)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct prio_tree_iter iter;
+ struct address_space *mapping = page->mapping;
+
+ /*
+ * A note on the locking order between the two locks.
+ * We don't rely on this particular order.
+ * If you have some other code that needs a different order
+ * feel free to switch them around. Or add a reverse link
+ * from mm_struct to task_struct, then this could be all
+ * done without taking tasklist_lock and looping over all tasks.
+ */
+
+ read_lock(&tasklist_lock);
+ spin_lock(&mapping->i_mmap_lock);
+ for_each_process(tsk) {
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ if (!task_early_kill(tsk))
+ continue;
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
+ pgoff) {
+ /*
+ * Send early kill signal to tasks where a vma covers
+ * the page but the corrupted page is not necessarily
+ * mapped in its pte.
+ * Assume applications that requested early kill want
+ * to be informed of all such data corruptions.
+ */
+ if (vma->vm_mm == tsk->mm)
+ add_to_kill(tsk, page, vma, to_kill, tkc);
+ }
+ }
+ spin_unlock(&mapping->i_mmap_lock);
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Collect the processes that have the corrupted page mapped, to kill them.
+ * This is done in two steps for locking reasons.
+ * First preallocate one tokill structure outside the spin locks,
+ * so that we can kill at least one process reasonably reliably.
+ */
+static void collect_procs(struct page *page, struct list_head *tokill)
+{
+ struct to_kill *tk;
+
+ if (!page->mapping)
+ return;
+
+ tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
+ if (!tk)
+ return;
+ if (PageAnon(page))
+ collect_procs_anon(page, tokill, &tk);
+ else
+ collect_procs_file(page, tokill, &tk);
+ kfree(tk);
+}
+
+/*
+ * Error handlers for various types of pages.
+ */
+
+enum outcome {
+ IGNORED, /* Error: cannot be handled */
+ FAILED, /* Error: handling failed */
+ DELAYED, /* Will be handled later */
+ RECOVERED, /* Successfully recovered */
+};
+
+static const char *action_name[] = {
+ [IGNORED] = "Ignored",
+ [FAILED] = "Failed",
+ [DELAYED] = "Delayed",
+ [RECOVERED] = "Recovered",
+};
+
+/*
+ * XXX: It is possible that a page is isolated from the LRU cache,
+ * and then kept in the swap cache or fails to be removed from the page cache.
+ * The page count will stop it from being freed by unpoison.
+ * Stress tests should be aware of this memory leak problem.
+ */
+static int delete_from_lru_cache(struct page *p)
+{
+ if (!isolate_lru_page(p)) {
+ /*
+ * Clear sensible page flags, so that the buddy system won't
+ * complain when the page is unpoison-and-freed.
+ */
+ ClearPageActive(p);
+ ClearPageUnevictable(p);
+ /*
+ * drop the page count elevated by isolate_lru_page()
+ */
+ page_cache_release(p);
+ return 0;
+ }
+ return -EIO;
+}
+
+/*
+ * Error hit kernel page.
+ * Do nothing; try to be lucky and not touch the page. For a few cases we
+ * could be more sophisticated.
+ */
+static int me_kernel(struct page *p, unsigned long pfn)
+{
+ return IGNORED;
+}
+
+/*
+ * Page in unknown state. Do nothing.
+ */
+static int me_unknown(struct page *p, unsigned long pfn)
+{
+ printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
+ return FAILED;
+}
+
+/*
+ * Clean (or cleaned) page cache page.
+ */
+static int me_pagecache_clean(struct page *p, unsigned long pfn)
+{
+ int err;
+ int ret = FAILED;
+ struct address_space *mapping;
+
+ delete_from_lru_cache(p);
+
+ /*
+ * For anonymous pages we're done; the only reference left
+ * should be the one m_f() holds.
+ */
+ if (PageAnon(p))
+ return RECOVERED;
+
+ /*
+ * Now truncate the page in the page cache. This is really
+ * more like a "temporary hole punch".
+ * Don't do this for block devices when someone else
+ * has a reference, because it could be file system metadata
+ * and that's not safe to truncate.
+ */
+ mapping = page_mapping(p);
+ if (!mapping) {
+ /*
+ * Page has been torn down in the meantime.
+ */
+ return FAILED;
+ }
+
+ /*
+ * Truncation is a bit tricky. Enable it per file system for now.
+ *
+ * Open question: take i_mutex or not for this? Right now we don't.
+ */
+ if (mapping->a_ops->error_remove_page) {
+ err = mapping->a_ops->error_remove_page(mapping, p);
+ if (err != 0) {
+ printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
+ pfn, err);
+ } else if (page_has_private(p) &&
+ !try_to_release_page(p, GFP_NOIO)) {
+ pr_debug("MCE %#lx: failed to release buffers\n", pfn);
+ } else {
+ ret = RECOVERED;
+ }
+ } else {
+ /*
+ * If the file system doesn't support it, just invalidate.
+ * This fails on dirty pages or anything with private pages.
+ */
+ if (invalidate_inode_page(p))
+ ret = RECOVERED;
+ else
+ printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
+ pfn);
+ }
+ return ret;
+}
+
+/*
+ * Dirty page cache page.
+ * Issues: when the error hits a hole page the error is not properly
+ * propagated.
+ */
+static int me_pagecache_dirty(struct page *p, unsigned long pfn)
+{
+ struct address_space *mapping = page_mapping(p);
+
+ SetPageError(p);
+ /* TBD: print more information about the file. */
+ if (mapping) {
+ /*
+ * IO error will be reported by write(), fsync(), etc.
+ * which check the mapping.
+ * This way the application knows that something went
+ * wrong with its dirty file data.
+ *
+ * There's one open issue:
+ *
+ * The EIO will be only reported on the next IO
+ * operation and then cleared through the IO map.
+ * Normally Linux has two mechanisms to pass IO error
+ * first through the AS_EIO flag in the address space
+ * and then through the PageError flag in the page.
+ * Since we drop pages on memory failure handling, the
+ * only mechanism open to use is through AS_EIO.
+ *
+ * This has the disadvantage that it gets cleared on
+ * the first operation that returns an error, while
+ * the PageError bit is more sticky and only cleared
+ * when the page is reread or dropped. If an
+ * application assumes it will always get an error on
+ * fsync, but does other operations on the fd before
+ * and the page is dropped in between, then the error
+ * will not be properly reported.
+ *
+ * This can already happen even without hwpoisoned
+ * pages: first on metadata IO errors (which only
+ * report through AS_EIO) or when the page is dropped
+ * at the wrong time.
+ *
+ * So right now we assume that the application DTRT on
+ * the first EIO, but we're not worse than other parts
+ * of the kernel.
+ */
+ mapping_set_error(mapping, EIO);
+ }
+
+ return me_pagecache_clean(p, pfn);
+}
+
+/*
+ * Clean and dirty swap cache.
+ *
+ * Dirty swap cache page is tricky to handle. The page could live both in page
+ * cache and swap cache (i.e. the page is freshly swapped in). So it could be
+ * referenced concurrently by 2 types of PTEs:
+ * normal PTEs and swap PTEs. We try to handle them consistently by calling
+ * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
+ * and then
+ * - clear dirty bit to prevent IO
+ * - remove from LRU
+ * - but keep in the swap cache, so that when we return to it on
+ * a later page fault, we know the application is accessing
+ * corrupted data and shall be killed (we installed simple
+ * interception code in do_swap_page to catch it).
+ *
+ * Clean swap cache pages can be directly isolated. A later page fault will
+ * bring in the known good data from disk.
+ */
+static int me_swapcache_dirty(struct page *p, unsigned long pfn)
+{
+ ClearPageDirty(p);
+ /* Trigger EIO in shmem: */
+ ClearPageUptodate(p);
+
+ if (!delete_from_lru_cache(p))
+ return DELAYED;
+ else
+ return FAILED;
+}
+
+static int me_swapcache_clean(struct page *p, unsigned long pfn)
+{
+ delete_from_swap_cache(p);
+
+ if (!delete_from_lru_cache(p))
+ return RECOVERED;
+ else
+ return FAILED;
+}
+
+/*
+ * Huge pages. Needs work.
+ * Issues:
+ * No rmap support, so we cannot find the original mapper. In theory we could
+ * walk all MMs and look for the mappings, but that would be non-atomic and racy.
+ * We need rmap for hugepages for this. Alternatively we could employ a heuristic,
+ * like just walking the current process and hoping it has it mapped (that
+ * should usually be true for the common "shared database cache" case).
+ * We should also handle free huge pages and dequeue them, but this needs to
+ * handle huge page accounting correctly.
+ */
+static int me_huge_page(struct page *p, unsigned long pfn)
+{
+ return FAILED;
+}
+
+/*
+ * Various page states we can handle.
+ *
+ * A page state is defined by its current page->flags bits.
+ * The table matches them in order and calls the right handler.
+ *
+ * This is quite tricky because we can access a page at any time
+ * in its life cycle, so all accesses have to be extremely careful.
+ *
+ * This is not complete. More states could be added.
+ * For any missing state don't attempt recovery.
+ */
+
+#define dirty (1UL << PG_dirty)
+#define sc (1UL << PG_swapcache)
+#define unevict (1UL << PG_unevictable)
+#define mlock (1UL << PG_mlocked)
+#define writeback (1UL << PG_writeback)
+#define lru (1UL << PG_lru)
+#define swapbacked (1UL << PG_swapbacked)
+#define head (1UL << PG_head)
+#define tail (1UL << PG_tail)
+#define compound (1UL << PG_compound)
+#define slab (1UL << PG_slab)
+#define reserved (1UL << PG_reserved)
+
+static struct page_state {
+ unsigned long mask;
+ unsigned long res;
+ char *msg;
+ int (*action)(struct page *p, unsigned long pfn);
+} error_states[] = {
+ { reserved, reserved, "reserved kernel", me_kernel },
+ /*
+ * free pages are specially detected outside this table:
+ * PG_buddy pages make up only a small fraction of all free pages.
+ */
+
+ /*
+ * We could in theory check if the slab page is free or if we can drop
+ * currently unused objects without touching them. But just
+ * treat it as a standard kernel page for now.
+ */
+ { slab, slab, "kernel slab", me_kernel },
+
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+ { head, head, "huge", me_huge_page },
+ { tail, tail, "huge", me_huge_page },
+#else
+ { compound, compound, "huge", me_huge_page },
+#endif
+
+ { sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty },
+ { sc|dirty, sc, "swapcache", me_swapcache_clean },
+
+ { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
+ { unevict, unevict, "unevictable LRU", me_pagecache_clean},
+
+ { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
+ { mlock, mlock, "mlocked LRU", me_pagecache_clean },
+
+ { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
+ { lru|dirty, lru, "clean LRU", me_pagecache_clean },
+
+ /*
+ * Catchall entry: must be at end.
+ */
+ { 0, 0, "unknown page state", me_unknown },
+};
+
+#undef dirty
+#undef sc
+#undef unevict
+#undef mlock
+#undef writeback
+#undef lru
+#undef swapbacked
+#undef head
+#undef tail
+#undef compound
+#undef slab
+#undef reserved
+
+static void action_result(unsigned long pfn, char *msg, int result)
+{
+ struct page *page = pfn_to_page(pfn);
+
+ printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
+ pfn,
+ PageDirty(page) ? "dirty " : "",
+ msg, action_name[result]);
+}
+
+static int page_action(struct page_state *ps, struct page *p,
+ unsigned long pfn)
+{
+ int result;
+ int count;
+
+ result = ps->action(p, pfn);
+ action_result(pfn, ps->msg, result);
+
+ count = page_count(p) - 1;
+ if (ps->action == me_swapcache_dirty && result == DELAYED)
+ count--;
+ if (count != 0) {
+ printk(KERN_ERR
+ "MCE %#lx: %s page still referenced by %d users\n",
+ pfn, ps->msg, count);
+ result = FAILED;
+ }
+
+ /* Could do more checks here if page looks ok */
+ /*
+ * Could adjust zone counters here to correct for the missing page.
+ */
+
+ return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
+}
+
+#define N_UNMAP_TRIES 5
+
+/*
+ * Do all that is necessary to remove user space mappings. Unmap
+ * the pages and send SIGBUS to the processes if the data was dirty.
+ */
+static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ int trapno)
+{
+ enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
+ struct address_space *mapping;
+ LIST_HEAD(tokill);
+ int ret;
+ int i;
+ int kill = 1;
+
+ if (PageReserved(p) || PageSlab(p))
+ return SWAP_SUCCESS;
+
+ /*
+ * This check implies we don't do early kills for processes whose
+ * pages are already in the swap cache; those are always late kills.
+ */
+ if (!page_mapped(p))
+ return SWAP_SUCCESS;
+
+ if (PageCompound(p) || PageKsm(p))
+ return SWAP_FAIL;
+
+ if (PageSwapCache(p)) {
+ printk(KERN_ERR
+ "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
+ ttu |= TTU_IGNORE_HWPOISON;
+ }
+
+ /*
+ * Propagate the dirty bit from PTEs to struct page first, because we
+ * need this to decide if we should kill or just drop the page.
+ * XXX: the dirty test could be racy: set_page_dirty() may not always
+ * be called inside page lock (it's recommended but not enforced).
+ */
+ mapping = page_mapping(p);
+ if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
+ if (page_mkclean(p)) {
+ SetPageDirty(p);
+ } else {
+ kill = 0;
+ ttu |= TTU_IGNORE_HWPOISON;
+ printk(KERN_INFO
+ "MCE %#lx: corrupted page was clean: dropped without side effects\n",
+ pfn);
+ }
+ }
+
+ /*
+ * First collect all the processes that have the page
+ * mapped in dirty form. This has to be done before try_to_unmap,
+ * because ttu takes the rmap data structures down.
+ *
+ * Error handling: We ignore errors here because
+ * there's nothing that can be done.
+ */
+ if (kill)
+ collect_procs(p, &tokill);
+
+ /*
+ * try_to_unmap can fail temporarily due to races.
+ * Try a few times (RED-PEN better strategy?)
+ */
+ for (i = 0; i < N_UNMAP_TRIES; i++) {
+ ret = try_to_unmap(p, ttu);
+ if (ret == SWAP_SUCCESS)
+ break;
+ pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
+ }
+
+ if (ret != SWAP_SUCCESS)
+ printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
+ pfn, page_mapcount(p));
+
+ /*
+ * Now that the dirty bit has been propagated to the
+ * struct page and all unmaps done we can decide if
+ * killing is needed or not. Only kill when the page
+ * was dirty, otherwise the tokill list is merely
+ * freed. When there was a problem unmapping earlier
+ * use a more forceful, uncatchable kill to prevent
+ * any accesses to the poisoned memory.
+ */
+ kill_procs_ao(&tokill, !!PageDirty(p), trapno,
+ ret != SWAP_SUCCESS, pfn);
+
+ return ret;
+}
+
+int __memory_failure(unsigned long pfn, int trapno, int flags)
+{
+ struct page_state *ps;
+ struct page *p;
+ int res;
+
+ if (!sysctl_memory_failure_recovery)
+ panic("Memory failure from trap %d on page %lx", trapno, pfn);
+
+ if (!pfn_valid(pfn)) {
+ printk(KERN_ERR
+ "MCE %#lx: memory outside kernel control\n",
+ pfn);
+ return -ENXIO;
+ }
+
+ p = pfn_to_page(pfn);
+ if (TestSetPageHWPoison(p)) {
+ printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
+ return 0;
+ }
+
+ atomic_long_add(1, &mce_bad_pages);
+
+ /*
+ * We need/can do nothing about count=0 pages.
+ * 1) it's a free page, and therefore in safe hands:
+ * prep_new_page() will be the gate keeper.
+ * 2) it's part of a non-compound high order page.
+ * This implies some kernel user: we cannot stop them from
+ * reading/writing the page; let's pray that the page has been
+ * used and will be freed some time later.
+ * In fact it's dangerous to directly bump up page count from 0,
+ * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
+ */
+ if (!(flags & MF_COUNT_INCREASED) &&
+ !get_page_unless_zero(compound_head(p))) {
+ if (is_free_buddy_page(p)) {
+ action_result(pfn, "free buddy", DELAYED);
+ return 0;
+ } else {
+ action_result(pfn, "high order kernel", IGNORED);
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * We ignore non-LRU pages for good reasons.
+ * - PG_locked is only well defined for LRU pages and a few others
+ * - to avoid races with __set_page_locked()
+ * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
+ * The check (unnecessarily) ignores LRU pages being isolated and
+ * walked by the page reclaim code, however that's not a big loss.
+ */
+ if (!PageLRU(p))
+ shake_page(p, 0);
+ if (!PageLRU(p)) {
+ /*
+ * shake_page could have turned it free.
+ */
+ if (is_free_buddy_page(p)) {
+ action_result(pfn, "free buddy, 2nd try", DELAYED);
+ return 0;
+ }
+ action_result(pfn, "non LRU", IGNORED);
+ put_page(p);
+ return -EBUSY;
+ }
+
+ /*
+ * Lock the page and wait for writeback to finish.
+ * It's very difficult to mess with pages currently under IO
+ * and in many cases impossible, so we just avoid it here.
+ */
+ lock_page_nosync(p);
+
+ /*
+ * unpoison always clears PG_hwpoison inside the page lock
+ */
+ if (!PageHWPoison(p)) {
+ printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+ res = 0;
+ goto out;
+ }
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(p))
+ atomic_long_dec(&mce_bad_pages);
+ unlock_page(p);
+ put_page(p);
+ return 0;
+ }
+
+ wait_on_page_writeback(p);
+
+ /*
+ * Now take care of user space mappings.
+ * Abort on fail: __remove_from_page_cache() assumes unmapped page.
+ */
+ if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
+ printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+ res = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Torn down by someone else?
+ */
+ if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+ action_result(pfn, "already truncated LRU", IGNORED);
+ res = -EBUSY;
+ goto out;
+ }
+
+ res = -EBUSY;
+ for (ps = error_states;; ps++) {
+ if ((p->flags & ps->mask) == ps->res) {
+ res = page_action(ps, p, pfn);
+ break;
+ }
+ }
+out:
+ unlock_page(p);
+ return res;
+}
+EXPORT_SYMBOL_GPL(__memory_failure);
+
+/**
+ * memory_failure - Handle memory failure of a page.
+ * @pfn: Page Number of the corrupted page
+ * @trapno: Trap number reported in the signal to user space.
+ *
+ * This function is called by the low level machine check code
+ * of an architecture when it detects hardware memory corruption
+ * of a page. It tries its best to recover, which includes
+ * dropping pages, killing processes etc.
+ *
+ * The function is primarily of use for corruptions that
+ * happen outside the current execution context (e.g. when
+ * detected by a background scrubber).
+ *
+ * Must run in process context (e.g. a work queue) with interrupts
+ * enabled and no spinlocks held.
+ */
+void memory_failure(unsigned long pfn, int trapno)
+{
+ __memory_failure(pfn, trapno, 0);
+}
+
+/**
+ * unpoison_memory - Unpoison a previously poisoned page
+ * @pfn: Page number of the to be unpoisoned page
+ *
+ * Software-unpoison a page that has been poisoned by
+ * memory_failure() earlier.
+ *
+ * This is only done on the software level, so it only works
+ * for Linux-injected failures, not real hardware failures.
+ *
+ * Returns 0 for success, otherwise -errno.
+ */
+int unpoison_memory(unsigned long pfn)
+{
+ struct page *page;
+ struct page *p;
+ int freeit = 0;
+
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+
+ p = pfn_to_page(pfn);
+ page = compound_head(p);
+
+ if (!PageHWPoison(p)) {
+ pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
+ return 0;
+ }
+
+ if (!get_page_unless_zero(page)) {
+ if (TestClearPageHWPoison(p))
+ atomic_long_dec(&mce_bad_pages);
+ pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ return 0;
+ }
+
+ lock_page_nosync(page);
+ /*
+ * This test is racy because PG_hwpoison is set outside of the page lock.
+ * That's acceptable because it won't trigger a kernel panic. Instead,
+ * the PG_hwpoison page will be caught and isolated on entry to
+ * the free buddy page pool.
+ */
+ if (TestClearPageHWPoison(p)) {
+ pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
+ atomic_long_dec(&mce_bad_pages);
+ freeit = 1;
+ }
+ unlock_page(page);
+
+ put_page(page);
+ if (freeit)
+ put_page(page);
+
+ return 0;
+}
+EXPORT_SYMBOL(unpoison_memory);
+
+static struct page *new_page(struct page *p, unsigned long private, int **x)
+{
+ int nid = page_to_nid(p);
+ return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+}
+
+/*
+ * Safely get reference count of an arbitrary page.
+ * Returns 0 for a free page, -EIO for a zero refcount page
+ * that is not free, and 1 for any other page type.
+ * For 1, the page is returned with an increased page count; otherwise it is not.
+ */
+static int get_any_page(struct page *p, unsigned long pfn, int flags)
+{
+ int ret;
+
+ if (flags & MF_COUNT_INCREASED)
+ return 1;
+
+ /*
+ * The lock_system_sleep prevents a race with memory hotplug,
+ * because the isolation assumes there's only a single user.
+ * This is a big hammer; a better fix would be nicer.
+ */
+ lock_system_sleep();
+
+ /*
+ * Isolate the page, so that it doesn't get reallocated if it
+ * was free.
+ */
+ set_migratetype_isolate(p);
+ if (!get_page_unless_zero(compound_head(p))) {
+ if (is_free_buddy_page(p)) {
+ pr_debug("get_any_page: %#lx free buddy page\n", pfn);
+ /* Set hwpoison bit while page is still isolated */
+ SetPageHWPoison(p);
+ ret = 0;
+ } else {
+ pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
+ pfn, p->flags);
+ ret = -EIO;
+ }
+ } else {
+ /* Not a free page */
+ ret = 1;
+ }
+ unset_migratetype_isolate(p);
+ unlock_system_sleep();
+ return ret;
+}
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+
+ /*
+ * Page cache page we can handle?
+ */
+ if (!PageLRU(page)) {
+ /*
+ * Try to free it.
+ */
+ put_page(page);
+ shake_page(page, 1);
+
+ /*
+ * Did it turn free?
+ */
+ ret = get_any_page(page, pfn, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+ }
+ if (!PageLRU(page)) {
+ pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
+ pfn, page->flags);
+ return -EIO;
+ }
+
+ lock_page(page);
+ wait_on_page_writeback(page);
+
+ /*
+ * Synchronized using the page lock with memory_failure()
+ */
+ if (PageHWPoison(page)) {
+ unlock_page(page);
+ put_page(page);
+ pr_debug("soft offline: %#lx page already poisoned\n", pfn);
+ return -EBUSY;
+ }
+
+ /*
+ * Try to invalidate first. This should work for
+ * non dirty unmapped page cache pages.
+ */
+ ret = invalidate_inode_page(page);
+ unlock_page(page);
+
+ /*
+ * Drop count because page migration doesn't like raised
+ * counts. The page could get re-allocated, but if it becomes
+ * LRU the isolation will just fail.
+ * RED-PEN: it would be better to keep it isolated here, but we
+ * would need to fix isolation locking first.
+ */
+ put_page(page);
+ if (ret == 1) {
+ ret = 0;
+ pr_debug("soft_offline: %#lx: invalidated\n", pfn);
+ goto done;
+ }
+
+ /*
+ * Simple invalidation didn't work.
+ * Try to migrate to a new page instead. migrate.c
+ * handles a large number of cases for us.
+ */
+ ret = isolate_lru_page(page);
+ if (!ret) {
+ LIST_HEAD(pagelist);
+
+ list_add(&page->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+ if (ret) {
+ pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+ pfn, ret, page->flags);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ } else {
+ pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+ pfn, ret, page_count(page), page->flags);
+ }
+ if (ret)
+ return ret;
+
+done:
+ atomic_long_add(1, &mce_bad_pages);
+ SetPageHWPoison(page);
+ /* keep elevated page count for bad page */
+ return ret;
+}
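The core dispatch in the new memory-failure.c is the error_states[] walk near the end of __memory_failure(): each row pairs a flag mask with the value those bits must have, rows are ordered from most to least specific, and the {0, 0} catch-all guarantees the loop terminates. Below is a small stand-alone sketch of just that table-matching idiom; the flag bit positions are made up for illustration and only a handful of states are shown.

#include <stdio.h>

/* Illustrative page flag bits (assumed positions, not the kernel's). */
#define PG_dirty	(1UL << 0)
#define PG_lru		(1UL << 1)
#define PG_swapcache	(1UL << 2)
#define PG_slab		(1UL << 3)

struct page_state {
	unsigned long mask;	/* which bits to look at */
	unsigned long res;	/* required value of those bits */
	const char *msg;
};

/* Ordered table: the first matching row wins, the catch-all row is last. */
static const struct page_state error_states[] = {
	{ PG_slab,			PG_slab,		 "kernel slab" },
	{ PG_swapcache | PG_dirty,	PG_swapcache | PG_dirty, "dirty swapcache" },
	{ PG_swapcache | PG_dirty,	PG_swapcache,		 "clean swapcache" },
	{ PG_lru | PG_dirty,		PG_lru | PG_dirty,	 "dirty LRU" },
	{ PG_lru | PG_dirty,		PG_lru,			 "clean LRU" },
	{ 0, 0, "unknown page state" },	/* must be last: always matches */
};

static const char *classify(unsigned long flags)
{
	const struct page_state *ps;

	/* Same shape as the for (ps = error_states;; ps++) loop in the patch. */
	for (ps = error_states; ; ps++)
		if ((flags & ps->mask) == ps->res)
			return ps->msg;
}

int main(void)
{
	printf("%s\n", classify(PG_lru));			/* clean LRU */
	printf("%s\n", classify(PG_swapcache | PG_dirty));	/* dirty swapcache */
	printf("%s\n", classify(0));				/* unknown page state */
	return 0;
}

The ordering matters: the dirty swapcache row must come before the clean one because both share the same mask, which is also why the dirty/clean LRU pairs in the real table are listed dirty-first.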
diff --git a/mm/memory.c b/mm/memory.c
index c39396955651..aecb7451965c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -45,6 +45,7 @@
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
@@ -56,6 +57,7 @@
#include <linux/swapops.h>
#include <linux/elf.h>
+#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
@@ -106,6 +108,18 @@ static int __init disable_randmaps(char *s)
}
__setup("norandmaps", disable_randmaps);
+unsigned long zero_pfn __read_mostly;
+unsigned long highest_memmap_pfn __read_mostly;
+
+/*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+static int __init init_zero_pfn(void)
+{
+ zero_pfn = page_to_pfn(ZERO_PAGE(0));
+ return 0;
+}
+core_initcall(init_zero_pfn);
/*
* If a p?d_bad entry is found while walking page tables, report
@@ -283,7 +297,8 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long addr = vma->vm_start;
/*
- * Hide vma from rmap and vmtruncate before freeing pgtables
+ * Hide vma from rmap and truncate_pagecache before freeing
+ * pgtables
*/
anon_vma_unlink(vma);
unlink_file_vma(vma);
@@ -442,6 +457,20 @@ static inline int is_cow_mapping(unsigned int flags)
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
+#ifndef is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ return pfn == zero_pfn;
+}
+#endif
+
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ return zero_pfn;
+}
+#endif
+
/*
* vm_normal_page -- This function gets the "struct page" associated with a pte.
*
@@ -497,7 +526,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
if (HAVE_PTE_SPECIAL) {
if (likely(!pte_special(pte)))
goto check_pfn;
- if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+ if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return NULL;
+ if (!is_zero_pfn(pfn))
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
@@ -519,6 +550,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
}
}
+ if (is_zero_pfn(pfn))
+ return NULL;
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
@@ -539,7 +572,7 @@ out:
* covered by this vma.
*/
-static inline void
+static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
@@ -553,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- swap_duplicate(entry);
+ if (swap_duplicate(entry) < 0)
+ return entry.val;
+
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
@@ -596,22 +631,25 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
- page_dup_rmap(page, vma, addr);
- rss[!!PageAnon(page)]++;
+ page_dup_rmap(page);
+ rss[PageAnon(page)]++;
}
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
+ return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
+ pte_t *orig_src_pte, *orig_dst_pte;
pte_t *src_pte, *dst_pte;
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[2];
+ swp_entry_t entry = (swp_entry_t){0};
again:
rss[1] = rss[0] = 0;
@@ -621,6 +659,8 @@ again:
src_pte = pte_offset_map_nested(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ orig_src_pte = src_pte;
+ orig_dst_pte = dst_pte;
arch_enter_lazy_mmu_mode();
do {
@@ -638,16 +678,25 @@ again:
progress++;
continue;
}
- copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+ entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+ vma, addr, rss);
+ if (entry.val)
+ break;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
- pte_unmap_nested(src_pte - 1);
+ pte_unmap_nested(orig_src_pte);
add_mm_rss(dst_mm, rss[0], rss[1]);
- pte_unmap_unlock(dst_pte - 1, dst_ptl);
+ pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
+
+ if (entry.val) {
+ if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+ return -ENOMEM;
+ progress = 0;
+ }
if (addr != end)
goto again;
return 0;
@@ -907,6 +956,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
details = NULL;
BUG_ON(addr >= end);
+ mem_cgroup_uncharge_start();
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
@@ -919,6 +969,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
zap_work, details);
} while (pgd++, addr = next, (addr != end && *zap_work > 0));
tlb_end_vma(tlb, vma);
+ mem_cgroup_uncharge_end();
return addr;
}
@@ -1130,9 +1181,14 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
+
page = vm_normal_page(vma, address, pte);
- if (unlikely(!page))
- goto bad_page;
+ if (unlikely(!page)) {
+ if ((flags & FOLL_DUMP) ||
+ !is_zero_pfn(pte_pfn(pte)))
+ goto bad_page;
+ page = pte_page(pte);
+ }
if (flags & FOLL_GET)
get_page(page);
@@ -1160,65 +1216,46 @@ no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return page;
- /* Fall through to ZERO_PAGE handling */
+
no_page_table:
/*
* When core dumping an enormous anonymous area that nobody
- * has touched so far, we don't want to allocate page tables.
+ * has touched so far, we don't want to allocate unnecessary pages or
+ * page tables. Return error instead of NULL to skip handle_mm_fault,
+ * then get_dump_page() will return NULL to leave a hole in the dump.
+ * But we can only make this optimization where a hole would surely
+ * be zero-filled if handle_mm_fault() actually did handle it.
*/
- if (flags & FOLL_ANON) {
- page = ZERO_PAGE(0);
- if (flags & FOLL_GET)
- get_page(page);
- BUG_ON(flags & FOLL_WRITE);
- }
+ if ((flags & FOLL_DUMP) &&
+ (!vma->vm_ops || !vma->vm_ops->fault))
+ return ERR_PTR(-EFAULT);
return page;
}
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
-{
- /*
- * We don't want to optimize FOLL_ANON for make_pages_present()
- * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
- * we want to get the page from the page tables to make sure
- * that we serialize and update with any other user of that
- * mapping.
- */
- if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
- return 0;
- /*
- * And if we have a fault routine, it's not an anonymous region.
- */
- return !vma->vm_ops || !vma->vm_ops->fault;
-}
-
-
-
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int nr_pages, int flags,
+ unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
- unsigned int vm_flags = 0;
- int write = !!(flags & GUP_FLAGS_WRITE);
- int force = !!(flags & GUP_FLAGS_FORCE);
- int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
- int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
+ unsigned long vm_flags;
if (nr_pages <= 0)
return 0;
+
+ VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
/*
* Require read or write permissions.
- * If 'force' is set, we only require the "MAY" flags.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
*/
- vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ vm_flags = (gup_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (gup_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
do {
struct vm_area_struct *vma;
- unsigned int foll_flags;
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(tsk, start)) {
@@ -1230,7 +1267,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
pte_t *pte;
/* user gate pages are read-only */
- if (!ignore && write)
+ if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
@@ -1264,38 +1301,26 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- (!ignore && !(vm_flags & vma->vm_flags)))
+ !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &nr_pages, i, write);
+ &start, &nr_pages, i, gup_flags);
continue;
}
- foll_flags = FOLL_TOUCH;
- if (pages)
- foll_flags |= FOLL_GET;
- if (!write && use_zero_page(vma))
- foll_flags |= FOLL_ANON;
-
do {
struct page *page;
+ unsigned int foll_flags = gup_flags;
/*
* If we have a pending SIGKILL, don't keep faulting
- * pages and potentially allocating memory, unless
- * current is handling munlock--e.g., on exit. In
- * that case, we are not allocating memory. Rather,
- * we're only unlocking already resident/mapped pages.
+ * pages and potentially allocating memory.
*/
- if (unlikely(!ignore_sigkill &&
- fatal_signal_pending(current)))
+ if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
- if (write)
- foll_flags |= FOLL_WRITE;
-
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
@@ -1307,7 +1332,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
- else if (ret & VM_FAULT_SIGBUS)
+ if (ret &
+ (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
return i ? i : -EFAULT;
BUG();
}
@@ -1406,18 +1432,47 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
- int flags = 0;
+ int flags = FOLL_TOUCH;
+ if (pages)
+ flags |= FOLL_GET;
if (write)
- flags |= GUP_FLAGS_WRITE;
+ flags |= FOLL_WRITE;
if (force)
- flags |= GUP_FLAGS_FORCE;
+ flags |= FOLL_FORCE;
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
-
EXPORT_SYMBOL(get_user_pages);
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by page_cache_release() or put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save disk space.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+ struct vm_area_struct *vma;
+ struct page *page;
+
+ if (__get_user_pages(current, current->mm, addr, 1,
+ FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
+ return NULL;
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ return page;
+}
+#endif /* CONFIG_ELF_CORE */
+
pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
@@ -1595,7 +1650,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
* refcount the page if pfn_valid is true (hence insert_page rather
- * than insert_pfn).
+ * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
+ * without pte special, it would then be refcounted as a normal page.
*/
if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
struct page *page;
@@ -1770,10 +1826,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
token = pmd_pgtable(*pmd);
do {
- err = fn(pte, token, addr, data);
+ err = fn(pte++, token, addr, data);
if (err)
break;
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ } while (addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
@@ -1961,7 +2017,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
- if (PageAnon(old_page)) {
+ if (PageAnon(old_page) && !PageKsm(old_page)) {
if (!trylock_page(old_page)) {
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
@@ -2062,10 +2118,19 @@ gotten:
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- VM_BUG_ON(old_page == ZERO_PAGE(0));
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (!new_page)
- goto oom;
+
+ if (is_zero_pfn(pte_pfn(orig_pte))) {
+ new_page = alloc_zeroed_user_highpage_movable(vma, address);
+ if (!new_page)
+ goto oom;
+ } else {
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (!new_page)
+ goto oom;
+ cow_user_page(new_page, old_page, address, vma);
+ }
+ __SetPageUptodate(new_page);
+
/*
* Don't let another task, with possibly unlocked vma,
* keep the mlocked page.
@@ -2075,8 +2140,6 @@ gotten:
clear_page_mlock(old_page);
unlock_page(old_page);
}
- cow_user_page(new_page, old_page, address, vma);
- __SetPageUptodate(new_page);
if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;
@@ -2102,9 +2165,14 @@ gotten:
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
- ptep_clear_flush_notify(vma, address, page_table);
+ ptep_clear_flush(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
- set_pte_at(mm, address, page_table, entry);
+ /*
+ * We call the notify macro here because, when using secondary
+ * mmu page tables (such as kvm shadow page tables), we want the
+ * new page to be mapped directly into the secondary page table.
+ */
+ set_pte_at_notify(mm, address, page_table, entry);
update_mmu_cache(vma, address, entry);
if (old_page) {
/*
@@ -2347,7 +2415,7 @@ restart:
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
- * boundary. Note that this is different from vmtruncate(), which
+ * boundary. Note that this is different from truncate_pagecache(), which
* must keep the partial page. In contrast, we must get rid of
* partial pages.
* @holelen: size of prospective hole in bytes. This will be rounded
@@ -2398,63 +2466,6 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);
-/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @offset: file offset to start truncating
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
- if (inode->i_size < offset) {
- unsigned long limit;
-
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out_big;
- i_size_write(inode, offset);
- } else {
- struct address_space *mapping = inode->i_mapping;
-
- /*
- * truncation of in-use swapfiles is disallowed - it would
- * cause subsequent swapout to scribble on the now-freed
- * blocks.
- */
- if (IS_SWAPFILE(inode))
- return -ETXTBSY;
- i_size_write(inode, offset);
-
- /*
- * unmap_mapping_range is called twice, first simply for
- * efficiency so that truncate_inode_pages does fewer
- * single-page unmaps. However after this first call, and
- * before truncate_inode_pages finishes, it is possible for
- * private pages to be COWed, which remain after
- * truncate_inode_pages finishes, hence the second
- * unmap_mapping_range call must be made for correctness.
- */
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- }
-
- if (inode->i_op->truncate)
- inode->i_op->truncate(inode);
- return 0;
-
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out_big:
- return -EFBIG;
-}
-EXPORT_SYMBOL(vmtruncate);
-
int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
{
struct address_space *mapping = inode->i_mapping;
@@ -2499,8 +2510,15 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
entry = pte_to_swp_entry(orig_pte);
- if (is_migration_entry(entry)) {
- migration_entry_wait(mm, pmd, address);
+ if (unlikely(non_swap_entry(entry))) {
+ if (is_migration_entry(entry)) {
+ migration_entry_wait(mm, pmd, address);
+ } else if (is_hwpoison_entry(entry)) {
+ ret = VM_FAULT_HWPOISON;
+ } else {
+ print_bad_pte(vma, address, orig_pte, NULL);
+ ret = VM_FAULT_SIGBUS;
+ }
goto out;
}
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
@@ -2524,11 +2542,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
+ } else if (PageHWPoison(page)) {
+ /*
+ * hwpoisoned dirty swapcache pages are kept for killing
+ * owner processes (which may be unknown at hwpoison time)
+ */
+ ret = VM_FAULT_HWPOISON;
+ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ goto out_release;
}
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ page = ksm_might_need_to_copy(page, vma, address);
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
goto out_page;
@@ -2595,6 +2627,7 @@ out_nomap:
pte_unmap_unlock(page_table, ptl);
out_page:
unlock_page(page);
+out_release:
page_cache_release(page);
return ret;
}
@@ -2612,6 +2645,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl;
pte_t entry;
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+ ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
/* Allocate our own private page. */
pte_unmap(page_table);
@@ -2626,13 +2669,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (vma->vm_flags & VM_WRITE)
+ entry = pte_mkwrite(pte_mkdirty(entry));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto release;
+
inc_mm_counter(mm, anon_rss);
page_add_new_anon_rmap(page, vma, address);
+setpte:
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
@@ -2687,6 +2733,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
+ if (unlikely(PageHWPoison(vmf.page))) {
+ if (ret & VM_FAULT_LOCKED)
+ unlock_page(vmf.page);
+ return VM_FAULT_HWPOISON;
+ }
+
/*
* For consistency in subsequent calls, make the faulted page always
* locked.
@@ -2871,7 +2923,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* Page table corrupted: show pte and kill process.
*/
print_bad_pte(vma, address, orig_pte, NULL);
- return VM_FAULT_OOM;
+ return VM_FAULT_SIGBUS;
}
pgoff = pte_to_pgoff(orig_pte);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e4412a676c88..030ce8a5bb0e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,6 +26,8 @@
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
+#include <linux/suspend.h>
+#include <linux/mm_inline.h>
#include <asm/tlbflush.h>
@@ -70,7 +72,9 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type)
atomic_inc(&page->_count);
}
-void put_page_bootmem(struct page *page)
+/* reference to __meminit __free_pages_bootmem is valid
+ * so use __ref to tell modpost not to generate a warning */
+void __ref put_page_bootmem(struct page *page)
{
int type;
@@ -339,8 +343,11 @@ EXPORT_SYMBOL_GPL(__remove_pages);
void online_page(struct page *page)
{
+ unsigned long pfn = page_to_pfn(page);
+
totalram_pages++;
- num_physpages++;
+ if (pfn >= num_physpages)
+ num_physpages = pfn + 1;
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page))
@@ -410,7 +417,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
if (!populated_zone(zone))
need_zonelists_rebuild = 1;
- ret = walk_memory_resource(pfn, nr_pages, &onlined_pages,
+ ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
online_pages_range);
if (ret) {
printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
@@ -422,6 +429,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
+ zone_pcp_update(zone);
setup_per_zone_wmarks();
calculate_zone_inactive_ratio(zone);
if (onlined_pages) {
@@ -443,7 +451,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
struct pglist_data *pgdat;
unsigned long zones_size[MAX_NR_ZONES] = {0};
@@ -480,14 +489,18 @@ int __ref add_memory(int nid, u64 start, u64 size)
struct resource *res;
int ret;
+ lock_system_sleep();
+
res = register_memory_resource(start, size);
+ ret = -EEXIST;
if (!res)
- return -EEXIST;
+ goto out;
if (!node_online(nid)) {
pgdat = hotadd_new_pgdat(nid, start);
+ ret = -ENOMEM;
if (!pgdat)
- return -ENOMEM;
+ goto out;
new_pgdat = 1;
}
@@ -510,7 +523,8 @@ int __ref add_memory(int nid, u64 start, u64 size)
BUG_ON(ret);
}
- return ret;
+ goto out;
+
error:
/* rollback pgdat allocation and others */
if (new_pgdat)
@@ -518,6 +532,8 @@ error:
if (res)
release_memory_resource(res);
+out:
+ unlock_system_sleep();
return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
@@ -659,6 +675,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (!ret) { /* Success */
list_add_tail(&page->lru, &source);
move_pages--;
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+
} else {
			/* Because we don't have big zone->lock, we should
			check this again here. */
@@ -681,7 +700,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (list_empty(&source))
goto out;
/* this function returns # of failed pages */
- ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
out:
return ret;
@@ -701,7 +720,7 @@ offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
- walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
+ walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
offline_isolated_pages_cb);
}
@@ -727,14 +746,14 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
long offlined = 0;
int ret;
- ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
+ ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
check_pages_isolated_cb);
if (ret < 0)
offlined = (long)ret;
return offlined;
}
-int offline_pages(unsigned long start_pfn,
+static int offline_pages(unsigned long start_pfn,
unsigned long end_pfn, unsigned long timeout)
{
unsigned long pfn, nr_pages, expire;
@@ -754,6 +773,8 @@ int offline_pages(unsigned long start_pfn,
if (!test_pages_in_a_zone(start_pfn, end_pfn))
return -EINVAL;
+ lock_system_sleep();
+
zone = page_zone(pfn_to_page(start_pfn));
node = zone_to_nid(zone);
nr_pages = end_pfn - start_pfn;
@@ -761,7 +782,7 @@ int offline_pages(unsigned long start_pfn,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn);
if (ret)
- return ret;
+ goto out;
arg.start_pfn = start_pfn;
arg.nr_pages = nr_pages;
@@ -831,15 +852,19 @@ repeat:
zone->present_pages -= offlined_pages;
zone->zone_pgdat->node_present_pages -= offlined_pages;
totalram_pages -= offlined_pages;
- num_physpages -= offlined_pages;
setup_per_zone_wmarks();
calculate_zone_inactive_ratio(zone);
+ if (!node_present_pages(node)) {
+ node_clear_state(node, N_HIGH_MEMORY);
+ kswapd_stop(node);
+ }
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
memory_notify(MEM_OFFLINE, &arg);
+ unlock_system_sleep();
return 0;
failed_removal:
@@ -849,6 +874,8 @@ failed_removal:
/* pushback to free area */
undo_isolate_page_range(start_pfn, end_pfn);
+out:
+ unlock_system_sleep();
return ret;
}
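
The add_memory() and offline_pages() hunks above convert early returns into goto-based exits so that lock_system_sleep(), taken at the top, is always paired with unlock_system_sleep() on every path. A minimal user-space sketch of that single-exit pattern; the names (guarded_add, op_lock, have_resource) are illustrative stand-ins, not kernel code:

        #include <pthread.h>
        #include <errno.h>

        static pthread_mutex_t op_lock = PTHREAD_MUTEX_INITIALIZER;

        /* Sketch only: same shape as the reworked add_memory() error handling. */
        static int guarded_add(int have_resource)
        {
                int ret;

                pthread_mutex_lock(&op_lock);    /* stands in for lock_system_sleep() */

                ret = -EEXIST;
                if (!have_resource)
                        goto out;                /* every error path still unlocks */

                ret = 0;                         /* the real work would happen here */
        out:
                pthread_mutex_unlock(&op_lock);  /* stands in for unlock_system_sleep() */
                return ret;
        }
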
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7dd9d9f80694..290fb5bf0440 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -85,10 +85,12 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
+#include <linux/mm_inline.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -412,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!page)
continue;
/*
- * The check for PageReserved here is important to avoid
- * handling zero pages and other pages that may have been
- * marked special by the system.
- *
- * If the PageReserved would not be checked here then f.e.
- * the location of the zero page could have an influence
- * on MPOL_MF_STRICT, zero pages would be counted for
- * the per node stats, and there would be useless attempts
- * to put zero pages on the migration list.
+ * vm_normal_page() filters out zero pages, but there might
+ * still be PageReserved pages to skip, perhaps in a VDSO.
+ * And we cannot move PageKsm pages sensibly or safely yet.
*/
- if (PageReserved(page))
+ if (PageReserved(page) || PageKsm(page))
continue;
nid = page_to_nid(page);
if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -809,6 +805,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
if (!isolate_lru_page(page)) {
list_add_tail(&page->lru, pagelist);
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
}
}
}
@@ -836,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist))
- err = migrate_pages(&pagelist, new_node_page, dest);
+ err = migrate_pages(&pagelist, new_node_page, dest, 0);
return err;
}
@@ -1024,7 +1022,7 @@ static long do_mbind(unsigned long start, unsigned long len,
err = migrate_prep();
if (err)
- return err;
+ goto mpol_out;
}
{
NODEMASK_SCRATCH(scratch);
@@ -1039,10 +1037,9 @@ static long do_mbind(unsigned long start, unsigned long len,
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
}
- if (err) {
- mpol_put(new);
- return err;
- }
+ if (err)
+ goto mpol_out;
+
vma = check_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
@@ -1054,13 +1051,15 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist))
nr_failed = migrate_pages(&pagelist, new_vma_page,
- (unsigned long)vma);
+ (unsigned long)vma, 0);
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
- }
+ } else
+ putback_lru_pages(&pagelist);
up_write(&mm->mmap_sem);
+ mpol_out:
mpol_put(new);
return err;
}
@@ -1564,6 +1563,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
}
return zl;
}
+
+/*
+ * init_nodemask_of_mempolicy
+ *
+ * If the current task's mempolicy is "default" [NULL], return 'false'
+ * to indicate default policy. Otherwise, extract the policy nodemask
+ * for 'bind' or 'interleave' policy into the argument nodemask, or
+ * initialize the argument nodemask to contain the single node for
+ * 'preferred' or 'local' policy and return 'true' to indicate presence
+ * of non-default mempolicy.
+ *
+ * We don't bother with reference counting the mempolicy [mpol_get/put]
+ * because the current task is examining it's own mempolicy and a task's
+ * mempolicy is only ever changed by the task itself.
+ *
+ * N.B., it is the caller's responsibility to free a returned nodemask.
+ */
+bool init_nodemask_of_mempolicy(nodemask_t *mask)
+{
+ struct mempolicy *mempolicy;
+ int nid;
+
+ if (!(mask && current->mempolicy))
+ return false;
+
+ mempolicy = current->mempolicy;
+ switch (mempolicy->mode) {
+ case MPOL_PREFERRED:
+ if (mempolicy->flags & MPOL_F_LOCAL)
+ nid = numa_node_id();
+ else
+ nid = mempolicy->v.preferred_node;
+ init_nodemask_of_node(mask, nid);
+ break;
+
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+ *mask = mempolicy->v.nodes;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return true;
+}
#endif
/* Allocate a page in interleaved policy.
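
The new init_nodemask_of_mempolicy() helper lets a caller snapshot the current task's policy nodes without taking a mempolicy reference. A hypothetical kernel-context caller might look roughly like this; only the helper's contract comes from the patch, the surrounding function and fallback choice are illustrative:

        #include <linux/mempolicy.h>
        #include <linux/nodemask.h>

        /* Illustrative sketch: pick a node set to allocate from. */
        static void choose_nodes(nodemask_t *nodes)
        {
                if (init_nodemask_of_mempolicy(nodes)) {
                        /* non-default policy: 'nodes' now holds the bind/interleave
                         * mask, or the single preferred/local node */
                        return;
                }
                /* default (NULL) policy: fall back to every node with memory */
                *nodes = node_states[N_HIGH_MEMORY];
        }
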
diff --git a/mm/mempool.c b/mm/mempool.c
index 32e75d400503..1a3bc3d4d554 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -308,13 +308,6 @@ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
}
EXPORT_SYMBOL(mempool_kmalloc);
-void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
-{
- size_t size = (size_t)pool_data;
- return kzalloc(size, gfp_mask);
-}
-EXPORT_SYMBOL(mempool_kzalloc);
-
void mempool_kfree(void *element, void *pool_data)
{
kfree(element);
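
mempool_kzalloc() is dropped here; the remaining kmalloc-based helpers are typically wired up through mempool_create(), roughly as below. This is an illustrative snippet (pool size and element size are made up), not part of this patch:

        #include <linux/mempool.h>
        #include <linux/slab.h>

        /* Keep at least 16 elements of 256 bytes in reserve. */
        static mempool_t *make_pool(void)
        {
                return mempool_create(16, mempool_kmalloc, mempool_kfree,
                                      (void *)(unsigned long)256);
        }
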
diff --git a/mm/migrate.c b/mm/migrate.c
index 939888f9ddab..9a0db5bbabe4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -21,6 +21,7 @@
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
@@ -67,6 +68,8 @@ int putback_lru_pages(struct list_head *l)
list_for_each_entry_safe(page, page2, l, lru) {
list_del(&page->lru);
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
putback_lru_page(page);
count++;
}
@@ -76,8 +79,8 @@ int putback_lru_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
-static void remove_migration_pte(struct vm_area_struct *vma,
- struct page *old, struct page *new)
+static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ unsigned long addr, void *old)
{
struct mm_struct *mm = vma->vm_mm;
swp_entry_t entry;
@@ -86,40 +89,37 @@ static void remove_migration_pte(struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
- unsigned long addr = page_address_in_vma(new, vma);
-
- if (addr == -EFAULT)
- return;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
- return;
+ goto out;
pud = pud_offset(pgd, addr);
if (!pud_present(*pud))
- return;
+ goto out;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
- return;
+ goto out;
ptep = pte_offset_map(pmd, addr);
if (!is_swap_pte(*ptep)) {
pte_unmap(ptep);
- return;
+ goto out;
}
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
- goto out;
+ goto unlock;
entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
- goto out;
+ if (!is_migration_entry(entry) ||
+ migration_entry_to_page(entry) != old)
+ goto unlock;
get_page(new);
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
@@ -135,58 +135,10 @@ static void remove_migration_pte(struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, pte);
-
-out:
+unlock:
pte_unmap_unlock(ptep, ptl);
-}
-
-/*
- * Note that remove_file_migration_ptes will only work on regular mappings,
- * Nonlinear mappings do not use migration entries.
- */
-static void remove_file_migration_ptes(struct page *old, struct page *new)
-{
- struct vm_area_struct *vma;
- struct address_space *mapping = page_mapping(new);
- struct prio_tree_iter iter;
- pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- if (!mapping)
- return;
-
- spin_lock(&mapping->i_mmap_lock);
-
- vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
- remove_migration_pte(vma, old, new);
-
- spin_unlock(&mapping->i_mmap_lock);
-}
-
-/*
- * Must hold mmap_sem lock on at least one of the vmas containing
- * the page so that the anon_vma cannot vanish.
- */
-static void remove_anon_migration_ptes(struct page *old, struct page *new)
-{
- struct anon_vma *anon_vma;
- struct vm_area_struct *vma;
- unsigned long mapping;
-
- mapping = (unsigned long)new->mapping;
-
- if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
- return;
-
- /*
- * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
- */
- anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
- spin_lock(&anon_vma->lock);
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
- remove_migration_pte(vma, old, new);
-
- spin_unlock(&anon_vma->lock);
+out:
+ return SWAP_AGAIN;
}
/*
@@ -195,10 +147,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
*/
static void remove_migration_ptes(struct page *old, struct page *new)
{
- if (PageAnon(new))
- remove_anon_migration_ptes(old, new);
- else
- remove_file_migration_ptes(old, new);
+ rmap_walk(new, remove_migration_pte, old);
}
/*
@@ -270,7 +219,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
- expected_count = 2 + !!page_has_private(page);
+ expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
(struct page *)radix_tree_deref_slot(pslot) != page) {
spin_unlock_irq(&mapping->tree_lock);
@@ -312,7 +261,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
*/
__dec_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(newpage, NR_FILE_PAGES);
-
+ if (PageSwapBacked(page)) {
+ __dec_zone_page_state(page, NR_SHMEM);
+ __inc_zone_page_state(newpage, NR_SHMEM);
+ }
spin_unlock_irq(&mapping->tree_lock);
return 0;
@@ -336,8 +288,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
if (TestClearPageActive(page)) {
VM_BUG_ON(PageUnevictable(page));
SetPageActive(newpage);
- } else
- unevictable_migrate_page(newpage, page);
+ } else if (TestClearPageUnevictable(page))
+ SetPageUnevictable(newpage);
if (PageChecked(page))
SetPageChecked(newpage);
if (PageMappedToDisk(page))
@@ -356,6 +308,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
}
mlock_migrate_page(newpage, page);
+ ksm_migrate_page(newpage, page);
ClearPageSwapCache(page);
ClearPagePrivate(page);
@@ -575,9 +528,9 @@ static int move_to_new_page(struct page *newpage, struct page *page)
else
rc = fallback_migrate_page(mapping, newpage, page);
- if (!rc) {
+ if (!rc)
remove_migration_ptes(page, newpage);
- } else
+ else
newpage->mapping = NULL;
unlock_page(newpage);
@@ -590,14 +543,14 @@ static int move_to_new_page(struct page *newpage, struct page *page)
* to the newly allocated page in newpage.
*/
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force)
+ struct page *page, int force, int offlining)
{
int rc = 0;
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
int rcu_locked = 0;
int charge = 0;
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem = NULL;
if (!newpage)
return -ENOMEM;
@@ -616,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
lock_page(page);
}
+ /*
+ * Only memory hotplug's offline_pages() caller has locked out KSM,
+ * and can safely migrate a KSM page. The other cases have skipped
+ * PageKsm along with PageReserved - but it is only now when we have
+ * the page lock that we can be certain it will not go KSM beneath us
+ * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
+ * its pagecount raised, but only here do we take the page lock which
+ * serializes that).
+ */
+ if (PageKsm(page) && !offlining) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
/* charge against new page */
charge = mem_cgroup_prepare_migration(page, &mem);
if (charge == -ENOMEM) {
@@ -664,13 +631,15 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* needs to be effective.
*/
try_to_free_buffers(page);
+ goto rcu_unlock;
}
- goto rcu_unlock;
+ goto skip_unmap;
}
/* Establish migration ptes or remove ptes */
- try_to_unmap(page, 1);
+ try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+skip_unmap:
if (!page_mapped(page))
rc = move_to_new_page(newpage, page);
@@ -693,6 +662,8 @@ unlock:
* restored.
*/
list_del(&page->lru);
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
putback_lru_page(page);
}
@@ -728,7 +699,7 @@ move_newpage:
* Return: Number of pages not migrated or error code.
*/
int migrate_pages(struct list_head *from,
- new_page_t get_new_page, unsigned long private)
+ new_page_t get_new_page, unsigned long private, int offlining)
{
int retry = 1;
int nr_failed = 0;
@@ -748,7 +719,7 @@ int migrate_pages(struct list_head *from,
cond_resched();
rc = unmap_and_move(get_new_page, private,
- page, pass > 2);
+ page, pass > 2, offlining);
switch(rc) {
case -ENOMEM:
@@ -844,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
if (!page)
goto set_status;
- if (PageReserved(page)) /* Check for zero page */
+ /* Use PageReserved to check for zero page */
+ if (PageReserved(page) || PageKsm(page))
goto put_and_set;
pp->page = page;
@@ -862,8 +834,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto put_and_set;
err = isolate_lru_page(page);
- if (!err)
+ if (!err) {
list_add_tail(&page->lru, &pagelist);
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ }
put_and_set:
/*
* Either remove the duplicate refcount from
@@ -878,7 +853,7 @@ set_status:
err = 0;
if (!list_empty(&pagelist))
err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm);
+ (unsigned long)pm, 0);
up_read(&mm->mmap_sem);
return err;
@@ -937,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
goto out_pm;
err = -ENODEV;
+ if (node < 0 || node >= MAX_NUMNODES)
+ goto out_pm;
+
if (!node_state(node, N_HIGH_MEMORY))
goto out_pm;
@@ -999,7 +977,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
err = -ENOENT;
/* Use PageReserved to check for zero page */
- if (!page || PageReserved(page))
+ if (!page || PageReserved(page) || PageKsm(page))
goto set_status;
err = page_to_nid(page);
@@ -1028,7 +1006,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
int err;
for (i = 0; i < nr_pages; i += chunk_nr) {
- if (chunk_nr + i > nr_pages)
+ if (chunk_nr > nr_pages - i)
chunk_nr = nr_pages - i;
err = copy_from_user(chunk_pages, &pages[i],
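
The do_pages_stat() hunk replaces the bound check "chunk_nr + i > nr_pages" with "chunk_nr > nr_pages - i". Inside the loop i is always below nr_pages, so the subtraction cannot underflow, whereas the addition can wrap to a small value for a huge nr_pages and skip the clamp on the final chunk. A stand-alone demonstration of the difference:

        #include <stdio.h>
        #include <limits.h>

        int main(void)
        {
                unsigned long nr_pages = ULONG_MAX - 5;  /* huge user-supplied count */
                unsigned long i = ULONG_MAX - 10;        /* loop index, i < nr_pages */
                unsigned long chunk_nr = 16;

                /* old test: 16 + i wraps around and the clamp is skipped (prints 0) */
                printf("chunk_nr + i > nr_pages : %d\n", chunk_nr + i > nr_pages);
                /* new test: nr_pages - i == 5, so the chunk is clamped (prints 1) */
                printf("chunk_nr > nr_pages - i : %d\n", chunk_nr > nr_pages - i);
                return 0;
        }
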
diff --git a/mm/mincore.c b/mm/mincore.c
index 8cb508f84ea4..7a3436ef39eb 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,6 +14,7 @@
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -72,6 +73,42 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
if (!vma || addr < vma->vm_start)
return -ENOMEM;
+#ifdef CONFIG_HUGETLB_PAGE
+ if (is_vm_hugetlb_page(vma)) {
+ struct hstate *h;
+ unsigned long nr_huge;
+ unsigned char present;
+
+ i = 0;
+ nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
+ h = hstate_vma(vma);
+ nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
+ - (addr >> huge_page_shift(h)) + 1;
+ nr_huge = min(nr_huge,
+ (vma->vm_end - addr) >> huge_page_shift(h));
+ while (1) {
+ /* hugepage always in RAM for now,
+			 * but generally it needs to be checked */
+ ptep = huge_pte_offset(current->mm,
+ addr & huge_page_mask(h));
+ present = !!(ptep &&
+ !huge_pte_none(huge_ptep_get(ptep)));
+ while (1) {
+ vec[i++] = present;
+ addr += PAGE_SIZE;
+ /* reach buffer limit */
+ if (i == nr)
+ return nr;
+ /* check hugepage border */
+ if (!((addr & ~huge_page_mask(h))
+ >> PAGE_SHIFT))
+ break;
+ }
+ }
+ return nr;
+ }
+#endif
+
/*
* Calculate how many pages there are left in the last level of the
* PTE array for our address.
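
The hugetlb branch added to do_mincore() fills the user vector one byte per small page, which is exactly what callers of mincore(2) expect. For reference, a minimal user-space residency probe looks like this (ordinary anonymous memory, nothing hugetlb-specific; not taken from the patch):

        #define _GNU_SOURCE
        #include <sys/mman.h>
        #include <unistd.h>
        #include <stdio.h>

        int main(void)
        {
                long psz = sysconf(_SC_PAGESIZE);
                char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                unsigned char vec;

                if (p == MAP_FAILED)
                        return 1;
                p[0] = 1;                        /* fault the page in */
                if (mincore(p, psz, &vec) == 0)
                        printf("resident: %d\n", vec & 1);
                munmap(p, psz);
                return 0;
        }
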
diff --git a/mm/mlock.c b/mm/mlock.c
index 45eb650b9654..2b8335a89400 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page)
}
}
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page - page to be unlocked
*
- * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page. We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock. However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked. If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page locked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it. However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU. If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear the PageMlocked as we might not get another chance. If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
*/
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
{
BUG_ON(!PageLocked(page));
@@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page)
/*
	 * did try_to_munlock() succeed or punt?
*/
- if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+ if (ret != SWAP_MLOCK)
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
putback_lru_page(page);
} else {
/*
- * We lost the race. let try_to_unmap() deal
- * with it. At least we get the page state and
- * mlock stats right. However, page is still on
- * the noreclaim list. We'll fix that up when
- * the page is eventually freed or we scan the
- * noreclaim list.
+ * Some other task has removed the page from the LRU.
+ * putback_lru_page() will take care of removing the
+ * page from the unevictable list, if necessary.
+ * vmscan [page_referenced()] will move the page back
+ * to the unevictable list if some other vma has it
+ * mlocked.
*/
if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
@@ -139,49 +136,36 @@ static void munlock_vma_page(struct page *page)
}
/**
- * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
- * @mlock: 0 indicate munlock, otherwise mlock.
*
- * If @mlock == 0, unlock an mlocked range;
- * else mlock the range of pages. This takes care of making the pages present ,
- * too.
+ * This takes care of making the pages present too.
*
* return 0 on success, negative error code on error.
*
* vma->vm_mm->mmap_sem must be held for at least read.
*/
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- int mlock)
+ unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
int nr_pages = (end - start) / PAGE_SIZE;
int ret = 0;
- int gup_flags = 0;
+ int gup_flags;
VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON(start < vma->vm_start);
VM_BUG_ON(end > vma->vm_end);
- VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
- (atomic_read(&mm->mm_users) != 0));
-
- /*
- * mlock: don't page populate if vma has PROT_NONE permission.
- * munlock: always do munlock although the vma has PROT_NONE
- * permission, or SIGKILL is pending.
- */
- if (!mlock)
- gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
- GUP_FLAGS_IGNORE_SIGKILL;
+ VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ gup_flags = FOLL_TOUCH | FOLL_GET;
if (vma->vm_flags & VM_WRITE)
- gup_flags |= GUP_FLAGS_WRITE;
+ gup_flags |= FOLL_WRITE;
while (nr_pages > 0) {
int i;
@@ -201,51 +185,45 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
* This can happen for, e.g., VM_NONLINEAR regions before
* a page has been allocated and mapped at a given offset,
* or for addresses that map beyond end of a file.
- * We'll mlock the the pages if/when they get faulted in.
+ * We'll mlock the pages if/when they get faulted in.
*/
if (ret < 0)
break;
- if (ret == 0) {
- /*
- * We know the vma is there, so the only time
- * we cannot get a single page should be an
- * error (ret < 0) case.
- */
- WARN_ON(1);
- break;
- }
lru_add_drain(); /* push cached pages to LRU */
for (i = 0; i < ret; i++) {
struct page *page = pages[i];
- lock_page(page);
- /*
- * Because we lock page here and migration is blocked
- * by the elevated reference, we need only check for
- * page truncation (file-cache only).
- */
if (page->mapping) {
- if (mlock)
+ /*
+ * That preliminary check is mainly to avoid
+ * the pointless overhead of lock_page on the
+ * ZERO_PAGE: which might bounce very badly if
+ * there is contention. However, we're still
+ * dirtying its cacheline with get/put_page:
+ * we'll add another __get_user_pages flag to
+ * avoid it if that case turns out to matter.
+ */
+ lock_page(page);
+ /*
+ * Because we lock page here and migration is
+ * blocked by the elevated reference, we need
+ * only check for file-cache page truncation.
+ */
+ if (page->mapping)
mlock_vma_page(page);
- else
- munlock_vma_page(page);
+ unlock_page(page);
}
- unlock_page(page);
- put_page(page); /* ref from get_user_pages() */
-
- /*
- * here we assume that get_user_pages() has given us
- * a list of virtually contiguous pages.
- */
- addr += PAGE_SIZE; /* for next get_user_pages() */
- nr_pages--;
+ put_page(page); /* ref from get_user_pages() */
}
+
+ addr += ret * PAGE_SIZE;
+ nr_pages -= ret;
ret = 0;
}
- return ret; /* count entire vma as locked_vm */
+ return ret; /* 0 or negative error code */
}
/*
@@ -289,7 +267,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {
- __mlock_vma_pages_range(vma, start, end, 1);
+ __mlock_vma_pages_range(vma, start, end);
/* Hide errors from mmap() and other callers */
return 0;
@@ -310,7 +288,6 @@ no_mlock:
return nr_pages; /* error or pages NOT mlocked */
}
-
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
* @vma - vma containing range to be munlock()ed.
@@ -330,10 +307,38 @@ no_mlock:
* free them. This will result in freeing mlocked pages.
*/
void munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end)
{
+ unsigned long addr;
+
+ lru_add_drain();
vma->vm_flags &= ~VM_LOCKED;
- __mlock_vma_pages_range(vma, start, end, 0);
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+ /*
+ * Although FOLL_DUMP is intended for get_dump_page(),
+ * it just so happens that its special treatment of the
+ * ZERO_PAGE (returning an error instead of doing get_page)
+ * suits munlock very well (and if somehow an abnormal page
+ * has sneaked into the range, we won't oops here: great).
+ */
+ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+ if (page && !IS_ERR(page)) {
+ lock_page(page);
+ /*
+ * Like in __mlock_vma_pages_range(),
+ * because we lock page here and migration is
+ * blocked by the elevated reference, we need
+ * only check for file-cache page truncation.
+ */
+ if (page->mapping)
+ munlock_vma_page(page);
+ unlock_page(page);
+ put_page(page);
+ }
+ cond_resched();
+ }
}
/*
@@ -400,18 +405,14 @@ success:
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED, __mlock_vma_pages_range will bring it back.
*/
- vma->vm_flags = newflags;
if (lock) {
- ret = __mlock_vma_pages_range(vma, start, end, 1);
-
- if (ret > 0) {
- mm->locked_vm -= ret;
- ret = 0;
- } else
- ret = __mlock_posix_error_return(ret); /* translate if needed */
+ vma->vm_flags = newflags;
+ ret = __mlock_vma_pages_range(vma, start, end);
+ if (ret < 0)
+ ret = __mlock_posix_error_return(ret);
} else {
- __mlock_vma_pages_range(vma, start, end, 0);
+ munlock_vma_pages_range(vma, start, end);
}
out:
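
These mlock.c changes only alter how the kernel populates and later releases VM_LOCKED ranges; the user-visible contract of mlock(2)/munlock(2) is unchanged. As a reminder of that contract, plain user-space usage (nothing here comes from the patch):

        #include <sys/mman.h>
        #include <stdlib.h>

        int main(void)
        {
                size_t len = 4096;
                void *buf = malloc(len);

                if (!buf || mlock(buf, len) != 0)   /* pin: pages stay resident */
                        return 1;
                /* ... work on memory that must not be paged out ... */
                munlock(buf, len);                  /* unpin again */
                free(buf);
                return 0;
        }
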
diff --git a/mm/mmap.c b/mm/mmap.c
index 1a0f65299162..73ab63e40cdf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,7 +20,6 @@
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
-#include <linux/ima.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
@@ -28,7 +27,7 @@
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -570,9 +569,9 @@ again: remove_next = 1 + (end > next->vm_end);
/*
* When changing only vma->vm_end, we don't really need
- * anon_vma lock: but is that case worth optimizing out?
+ * anon_vma lock.
*/
- if (vma->anon_vma)
+ if (vma->anon_vma && (insert || importer || start != vma->vm_start))
anon_vma = vma->anon_vma;
if (anon_vma) {
spin_lock(&anon_vma->lock);
@@ -656,9 +655,6 @@ again: remove_next = 1 + (end > next->vm_end);
validate_mm(mm);
}
-/* Flags that can be inherited from an existing mapping when merging */
-#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
-
/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
@@ -666,7 +662,8 @@ again: remove_next = 1 + (end > next->vm_end);
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
{
- if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
+ /* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
+ if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
return 0;
if (vma->vm_file != file)
return 0;
@@ -905,7 +902,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
#endif /* CONFIG_PROC_FS */
/*
- * The caller must hold down_write(current->mm->mmap_sem).
+ * The caller must hold down_write(&current->mm->mmap_sem).
*/
unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
@@ -934,13 +931,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
- error = arch_mmap_check(addr, len, flags);
- if (error)
- return error;
-
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
- if (!len || len > TASK_SIZE)
+ if (!len)
return -ENOMEM;
/* offset overflow? */
@@ -965,11 +958,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
- if (flags & MAP_LOCKED) {
+ if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
- vm_flags |= VM_LOCKED;
- }
/* mlock MCL_FUTURE? */
if (vm_flags & VM_LOCKED) {
@@ -1047,14 +1038,51 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
if (error)
return error;
- error = ima_file_mmap(file, prot);
- if (error)
- return error;
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, pgoff)
+{
+ struct file *file = NULL;
+ unsigned long retval = -EBADF;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (unlikely(flags & MAP_HUGETLB))
+ return -EINVAL;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ } else if (flags & MAP_HUGETLB) {
+ struct user_struct *user = NULL;
+ /*
+ * VM_NORESERVE is used because the reservations will be
+		 * taken when vm_ops->mmap() is called.
+		 * A dummy user value is used because we are not locking
+		 * memory, so no accounting is necessary.
+ */
+ len = ALIGN(len, huge_page_size(&default_hstate));
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+ &user, HUGETLB_ANONHUGE_INODE);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ }
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return retval;
+}
+
/*
 * Some shared mappings will want the pages marked read-only
* to track write events. If so, we'll downgrade vm_page_prot
@@ -1195,23 +1223,35 @@ munmap_back:
goto unmap_and_free_vma;
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
+
+ /* Can addr have changed??
+ *
+ * Answer: Yes, several device drivers can do it in their
+ * f_op->mmap method. -DaveM
+ */
+ addr = vma->vm_start;
+ pgoff = vma->vm_pgoff;
+ vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
}
- /* Can addr have changed??
- *
- * Answer: Yes, several device drivers can do it in their
- * f_op->mmap method. -DaveM
- */
- addr = vma->vm_start;
- pgoff = vma->vm_pgoff;
- vm_flags = vma->vm_flags;
+ if (vma_wants_writenotify(vma)) {
+ pgprot_t pprot = vma->vm_page_prot;
- if (vma_wants_writenotify(vma))
+ /* Can vma->vm_page_prot have changed??
+ *
+ * Answer: Yes, drivers may have changed it in their
+ * f_op->mmap method.
+ *
+ * Ensures that vmas marked as uncached stay that way.
+ */
vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
+ if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ }
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -1220,7 +1260,7 @@ munmap_back:
if (correct_wcount)
atomic_inc(&inode->i_writecount);
out:
- perf_counter_mmap(vma);
+ perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -1445,6 +1485,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
+ unsigned long error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
+ /* Careful about overflows.. */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
get_area = current->mm->get_unmapped_area;
if (file && file->f_op && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
@@ -1815,10 +1863,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+ * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
+ * munmap path where it doesn't make sense to fail.
*/
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below)
{
struct mempolicy *pol;
@@ -1828,9 +1876,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
~(huge_page_mask(hstate_vma(vma)))))
return -EINVAL;
- if (mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
-
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new)
return -ENOMEM;
@@ -1870,6 +1915,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
return 0;
}
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ return __split_vma(mm, vma, addr, new_below);
+}
+
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
@@ -1905,7 +1963,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
* places tmp vma above, and higher split_vma places tmp vma below.
*/
if (start > vma->vm_start) {
- int error = split_vma(mm, vma, start, 0);
+ int error;
+
+ /*
+ * Make sure that map_count on return from munmap() will
+ * not exceed its limit; but let map_count go just above
+ * its limit temporarily, to help free resources as expected.
+ */
+ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ error = __split_vma(mm, vma, start, 0);
if (error)
return error;
prev = vma;
@@ -1914,7 +1982,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
- int error = split_vma(mm, last, end, 1);
+ int error = __split_vma(mm, last, end, 1);
if (error)
return error;
}
@@ -1995,20 +2063,14 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
if (!len)
return addr;
- if ((addr + len) > TASK_SIZE || (addr + len) < addr)
- return -EINVAL;
-
- if (is_hugepage_only_range(mm, addr, len))
- return -EINVAL;
-
error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
if (error)
return error;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- error = arch_mmap_check(addr, len, flags);
- if (error)
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (error & ~PAGE_MASK)
return error;
/*
@@ -2117,6 +2179,7 @@ void exit_mmap(struct mm_struct *mm)
/* Use -1 here to ensure all VMAs in the mm are unmapped */
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
+
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(&tlb, 0, end);
@@ -2273,7 +2336,7 @@ static void special_mapping_close(struct vm_area_struct *vma)
{
}
-static struct vm_operations_struct special_mapping_vmops = {
+static const struct vm_operations_struct special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
};
@@ -2314,7 +2377,7 @@ int install_special_mapping(struct mm_struct *mm,
mm->total_vm += len >> PAGE_SHIFT;
- perf_counter_mmap(vma);
+ perf_event_mmap(vma);
return 0;
}
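
The new sys_mmap_pgoff() wrapper above handles MAP_ANONYMOUS | MAP_HUGETLB by backing the mapping with an anonymous hugetlbfs file. From user space that looks like the snippet below; it assumes the x86 MAP_HUGETLB constant for old headers, a 2 MiB default huge page size, and that huge pages have already been reserved:

        #define _GNU_SOURCE
        #include <sys/mman.h>
        #include <stdio.h>

        #ifndef MAP_HUGETLB
        #define MAP_HUGETLB 0x40000              /* x86 value; absent from older headers */
        #endif

        int main(void)
        {
                size_t len = 2UL * 1024 * 1024;  /* one default-sized huge page */
                void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

                if (p == MAP_FAILED) {
                        perror("mmap(MAP_HUGETLB)");   /* e.g. no huge pages reserved */
                        return 1;
                }
                munmap(p, len);
                return 0;
        }
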
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
new file mode 100644
index 000000000000..fa3d6930ee56
--- /dev/null
+++ b/mm/mmu_context.c
@@ -0,0 +1,60 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ *
+ * See ../COPYING for licensing terms.
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * use_mm
+ * Makes the calling kernel thread take on the specified
+ * mm context.
+ *	Called by the retry thread to execute retries within the
+ * iocb issuer's mm context, so that copy_from/to_user
+ * operations work seamlessly for aio.
+ * (Note: this routine is intended to be called only
+ * from a kernel thread context)
+ */
+void use_mm(struct mm_struct *mm)
+{
+ struct mm_struct *active_mm;
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+ preempt_disable();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
+ preempt_enable();
+ task_unlock(tsk);
+
+ if (active_mm != mm)
+ mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ * Reverses the effect of use_mm, i.e. releases the
+ * specified mm context which was earlier taken on
+ * by the calling kernel thread
+ * (Note: this routine is intended to be called only
+ * from a kernel thread context)
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
+ task_unlock(tsk);
+}
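
use_mm()/unuse_mm() exist so a kernel thread can temporarily adopt another task's address space, typically around copy_to_user()/copy_from_user() done on that task's behalf (the aio retry path mentioned in the comment). A hypothetical worker sketch, kernel context only; the function name and arguments are invented for illustration:

        #include <linux/mmu_context.h>
        #include <linux/uaccess.h>
        #include <linux/kernel.h>

        /* Illustrative only: 'mm' is the issuing task's mm_struct. */
        static void complete_request(struct mm_struct *mm, void __user *ubuf,
                                     const void *data, size_t len)
        {
                use_mm(mm);                        /* adopt the issuer's mm */
                if (copy_to_user(ubuf, data, len)) /* user copies now target that mm */
                        printk(KERN_WARNING "completion copy failed\n");
                unuse_mm(mm);                      /* drop it again */
        }
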
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 5f4ef0250bee..7e33f2cb3c77 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -99,6 +99,26 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
return young;
}
+void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ pte_t pte)
+{
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->change_pte)
+ mn->ops->change_pte(mn, mm, address, pte);
+ /*
+ * Some drivers don't have change_pte,
+ * so we must call invalidate_page in that case.
+ */
+ else if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+ rcu_read_unlock();
+}
+
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
diff --git a/mm/mprotect.c b/mm/mprotect.c
index d80311baeb2d..8bc969d8112d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,7 +23,7 @@
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
@@ -300,7 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
if (error)
goto out;
- perf_counter_mmap(vma);
+ perf_event_mmap(vma);
nstart = tmp;
if (nstart < prev->vm_end)
diff --git a/mm/mremap.c b/mm/mremap.c
index a39b7b91be46..845190898d59 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -11,6 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
+#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
@@ -85,8 +86,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
if (vma->vm_file) {
/*
* Subtle point from Rajesh Venkatasubramanian: before
- * moving file-based ptes, we must lock vmtruncate out,
- * since it might clean the dst vma before the src vma,
+ * moving file-based ptes, we must lock truncate_pagecache
+ * out, since it might clean the dst vma before the src vma,
* and we propagate stale pages into the dst afterward.
*/
mapping = vma->vm_file->f_mapping;
@@ -174,6 +175,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long excess = 0;
unsigned long hiwater_vm;
int split = 0;
+ int err;
/*
* We'd prefer to avoid failure later on in do_munmap:
@@ -182,6 +184,18 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (mm->map_count >= sysctl_max_map_count - 3)
return -ENOMEM;
+ /*
+ * Advise KSM to break any KSM pages in the area to be moved:
+ * it would be confusing if they were to turn up at the new
+ * location, where they happen to coincide with different KSM
+ * pages recently unmapped. But leave vma->vm_flags as it was,
+ * so KSM can come around to merge on vma and new_vma afterwards.
+ */
+ err = ksm_madvise(vma, old_addr, old_addr + old_len,
+ MADV_UNMERGEABLE, &vm_flags);
+ if (err)
+ return err;
+
new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
if (!new_vma)
@@ -247,6 +261,137 @@ static unsigned long move_vma(struct vm_area_struct *vma,
return new_addr;
}
+static struct vm_area_struct *vma_to_resize(unsigned long addr,
+ unsigned long old_len, unsigned long new_len, unsigned long *p)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = find_vma(mm, addr);
+
+ if (!vma || vma->vm_start > addr)
+ goto Efault;
+
+ if (is_vm_hugetlb_page(vma))
+ goto Einval;
+
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto Efault;
+
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+ if (new_len > old_len)
+ goto Efault;
+ }
+
+ if (vma->vm_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ locked += new_len - old_len;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ goto Eagain;
+ }
+
+ if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+ goto Enomem;
+
+ if (vma->vm_flags & VM_ACCOUNT) {
+ unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
+ if (security_vm_enough_memory(charged))
+ goto Efault;
+ *p = charged;
+ }
+
+ return vma;
+
+Efault: /* very odd choice for most of the cases, but... */
+ return ERR_PTR(-EFAULT);
+Einval:
+ return ERR_PTR(-EINVAL);
+Enomem:
+ return ERR_PTR(-ENOMEM);
+Eagain:
+ return ERR_PTR(-EAGAIN);
+}
+
+static unsigned long mremap_to(unsigned long addr,
+ unsigned long old_len, unsigned long new_addr,
+ unsigned long new_len)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
+ unsigned long map_flags;
+
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+ if ((new_addr <= addr) && (new_addr+new_len) > addr)
+ goto out;
+
+ if ((addr <= new_addr) && (addr+old_len) > new_addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
+
+ ret = do_munmap(mm, new_addr, new_len);
+ if (ret)
+ goto out;
+
+ if (old_len >= new_len) {
+ ret = do_munmap(mm, addr+new_len, old_len - new_len);
+ if (ret && old_len != new_len)
+ goto out;
+ old_len = new_len;
+ }
+
+ vma = vma_to_resize(addr, old_len, new_len, &charged);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ map_flags = MAP_FIXED;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+ ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+ ((addr - vma->vm_start) >> PAGE_SHIFT),
+ map_flags);
+ if (ret & ~PAGE_MASK)
+ goto out1;
+
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
+ if (!(ret & ~PAGE_MASK))
+ goto out;
+out1:
+ vm_unacct_memory(charged);
+
+out:
+ return ret;
+}
+
+static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
+{
+ unsigned long end = vma->vm_end + delta;
+ if (end < vma->vm_end) /* overflow */
+ return 0;
+ if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
+ return 0;
+ if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
+ 0, MAP_FIXED) & ~PAGE_MASK)
+ return 0;
+ return 1;
+}
+
/*
* Expand (or shrink) an existing mapping, potentially moving it at the
* same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
@@ -280,32 +425,10 @@ unsigned long do_mremap(unsigned long addr,
if (!new_len)
goto out;
- /* new_addr is only valid if MREMAP_FIXED is specified */
if (flags & MREMAP_FIXED) {
- if (new_addr & ~PAGE_MASK)
- goto out;
- if (!(flags & MREMAP_MAYMOVE))
- goto out;
-
- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
- goto out;
-
- /* Check if the location we're moving into overlaps the
- * old location at all, and fail if it does.
- */
- if ((new_addr <= addr) && (new_addr+new_len) > addr)
- goto out;
-
- if ((addr <= new_addr) && (addr+old_len) > new_addr)
- goto out;
-
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
-
- ret = do_munmap(mm, new_addr, new_len);
- if (ret)
- goto out;
+ if (flags & MREMAP_MAYMOVE)
+ ret = mremap_to(addr, old_len, new_addr, new_len);
+ goto out;
}
/*
@@ -318,60 +441,23 @@ unsigned long do_mremap(unsigned long addr,
if (ret && old_len != new_len)
goto out;
ret = addr;
- if (!(flags & MREMAP_FIXED) || (new_addr == addr))
- goto out;
- old_len = new_len;
+ goto out;
}
/*
- * Ok, we need to grow.. or relocate.
+ * Ok, we need to grow..
*/
- ret = -EFAULT;
- vma = find_vma(mm, addr);
- if (!vma || vma->vm_start > addr)
- goto out;
- if (is_vm_hugetlb_page(vma)) {
- ret = -EINVAL;
- goto out;
- }
- /* We can't remap across vm area boundaries */
- if (old_len > vma->vm_end - addr)
- goto out;
- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
- if (new_len > old_len)
- goto out;
- }
- if (vma->vm_flags & VM_LOCKED) {
- unsigned long locked, lock_limit;
- locked = mm->locked_vm << PAGE_SHIFT;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
- locked += new_len - old_len;
- ret = -EAGAIN;
- if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- goto out;
- }
- if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
- ret = -ENOMEM;
+ vma = vma_to_resize(addr, old_len, new_len, &charged);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto out;
}
- if (vma->vm_flags & VM_ACCOUNT) {
- charged = (new_len - old_len) >> PAGE_SHIFT;
- if (security_vm_enough_memory(charged))
- goto out_nc;
- }
-
/* old_len exactly to the end of the area..
- * And we're not relocating the area.
*/
- if (old_len == vma->vm_end - addr &&
- !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
- (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
- unsigned long max_addr = TASK_SIZE;
- if (vma->vm_next)
- max_addr = vma->vm_next->vm_start;
+ if (old_len == vma->vm_end - addr) {
/* can we just expand the current mapping? */
- if (max_addr - addr >= new_len) {
+ if (vma_expandable(vma, new_len - old_len)) {
int pages = (new_len - old_len) >> PAGE_SHIFT;
vma_adjust(vma, vma->vm_start,
@@ -395,28 +481,27 @@ unsigned long do_mremap(unsigned long addr,
*/
ret = -ENOMEM;
if (flags & MREMAP_MAYMOVE) {
- if (!(flags & MREMAP_FIXED)) {
- unsigned long map_flags = 0;
- if (vma->vm_flags & VM_MAYSHARE)
- map_flags |= MAP_SHARED;
-
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
- vma->vm_pgoff, map_flags);
- if (new_addr & ~PAGE_MASK) {
- ret = new_addr;
- goto out;
- }
-
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
+ unsigned long map_flags = 0;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+ new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+ vma->vm_pgoff +
+ ((addr - vma->vm_start) >> PAGE_SHIFT),
+ map_flags);
+ if (new_addr & ~PAGE_MASK) {
+ ret = new_addr;
+ goto out;
}
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
ret = move_vma(vma, addr, old_len, new_len, new_addr);
}
out:
if (ret & ~PAGE_MASK)
vm_unacct_memory(charged);
-out_nc:
return ret;
}
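
After this restructuring, the MREMAP_FIXED case goes through mremap_to() and the in-place grow case through vma_expandable(), but the user-visible mremap(2) semantics are unchanged. A minimal user-space illustration of the common MREMAP_MAYMOVE case (not derived from the patch):

        #define _GNU_SOURCE
        #include <sys/mman.h>
        #include <stdio.h>

        int main(void)
        {
                size_t old_len = 4096, new_len = 8192;
                void *old = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                void *new;

                if (old == MAP_FAILED)
                        return 1;
                /* MREMAP_MAYMOVE: the kernel may relocate the mapping to grow it */
                new = mremap(old, old_len, new_len, MREMAP_MAYMOVE);
                if (new == MAP_FAILED) {
                        perror("mremap");
                        return 1;
                }
                munmap(new, new_len);
                return 0;
        }
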
diff --git a/mm/nommu.c b/mm/nommu.c
index 66e81e7e9fe9..48a2ecfaf059 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -33,6 +33,7 @@
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
#include "internal.h"
static inline __attribute__((format(printf, 1, 2)))
@@ -56,12 +57,11 @@ void no_printk(const char *fmt, ...)
no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
-#include "internal.h"
-
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
+unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
@@ -79,50 +79,10 @@ static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);
-struct vm_operations_struct generic_file_vm_ops = {
+const struct vm_operations_struct generic_file_vm_ops = {
};
/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode *inode, loff_t offset)
-{
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
-
- if (inode->i_size < offset)
- goto do_expand;
- i_size_write(inode, offset);
-
- truncate_inode_pages(mapping, offset);
- goto out_truncate;
-
-do_expand:
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out;
- i_size_write(inode, offset);
-
-out_truncate:
- if (inode->i_op->truncate)
- inode->i_op->truncate(inode);
- return 0;
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out:
- return -EFBIG;
-}
-
-EXPORT_SYMBOL(vmtruncate);
-
-/*
* Return the total memory allocated for this pointer, not
* just what the caller asked for.
*
@@ -170,21 +130,20 @@ unsigned int kobjsize(const void *objp)
}
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int nr_pages, int flags,
+ unsigned long start, int nr_pages, unsigned int foll_flags,
struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
int i;
- int write = !!(flags & GUP_FLAGS_WRITE);
- int force = !!(flags & GUP_FLAGS_FORCE);
- int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
/* calculate required read or write permissions.
- * - if 'force' is set, we only require the "MAY" flags.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
*/
- vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ vm_flags = (foll_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (foll_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
@@ -192,8 +151,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto finish_or_fault;
/* protect what we can, including chardevs */
- if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
- (!ignore && !(vm_flags & vma->vm_flags)))
+ if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
goto finish_or_fault;
if (pages) {
@@ -212,7 +171,6 @@ finish_or_fault:
return i ? : -EFAULT;
}
-
/*
* get a list of pages in an address range belonging to the specified process
* and indicate the VMA that covers each page
@@ -227,9 +185,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int flags = 0;
if (write)
- flags |= GUP_FLAGS_WRITE;
+ flags |= FOLL_WRITE;
if (force)
- flags |= GUP_FLAGS_FORCE;
+ flags |= FOLL_FORCE;
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
@@ -474,6 +432,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/*
* Ok, looks good - let it rip.
*/
+ flush_icache_range(mm->brk, brk);
return mm->brk = brk;
}
@@ -593,11 +552,11 @@ static void free_page_series(unsigned long from, unsigned long to)
static void __put_nommu_region(struct vm_region *region)
__releases(nommu_region_sem)
{
- kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+ kenter("%p{%d}", region, region->vm_usage);
BUG_ON(!nommu_region_tree.rb_node);
- if (atomic_dec_and_test(&region->vm_usage)) {
+ if (--region->vm_usage == 0) {
if (region->vm_top > region->vm_start)
delete_nommu_region(region);
up_write(&nommu_region_sem);
@@ -627,6 +586,22 @@ static void put_nommu_region(struct vm_region *region)
}
/*
+ * update protection on a vma
+ */
+static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+{
+#ifdef CONFIG_MPU
+ struct mm_struct *mm = vma->vm_mm;
+ long start = vma->vm_start & PAGE_MASK;
+ while (start < vma->vm_end) {
+ protect_page(mm, start, flags);
+ start += PAGE_SIZE;
+ }
+ update_protections(mm);
+#endif
+}
+
+/*
* add a VMA into a process's mm_struct in the appropriate place in the list
* and tree and add to the address space's page tree also if not an anonymous
* page
@@ -645,6 +620,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
mm->map_count++;
vma->vm_mm = mm;
+ protect_vma(vma, vma->vm_flags);
+
/* add the VMA to the mapping */
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
@@ -707,6 +684,8 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
kenter("%p", vma);
+ protect_vma(vma, 0);
+
mm->map_count--;
if (mm->mmap_cache == vma)
mm->mmap_cache = NULL;
@@ -848,7 +827,7 @@ static int validate_mmap_request(struct file *file,
int ret;
/* do the simple checks first */
- if (flags & MAP_FIXED || addr) {
+ if (flags & MAP_FIXED) {
printk(KERN_DEBUG
"%d: Can't do fixed-address/overlay mmap of RAM\n",
current->pid);
@@ -1056,7 +1035,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
if (ret == 0) {
vma->vm_region->vm_top = vma->vm_region->vm_end;
- return ret;
+ return 0;
}
if (ret != -ENOSYS)
return ret;
@@ -1073,7 +1052,8 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
*/
static int do_mmap_private(struct vm_area_struct *vma,
struct vm_region *region,
- unsigned long len)
+ unsigned long len,
+ unsigned long capabilities)
{
struct page *pages;
unsigned long total, point, n, rlen;
@@ -1084,13 +1064,13 @@ static int do_mmap_private(struct vm_area_struct *vma,
* shared mappings on devices or memory
* - VM_MAYSHARE will be set if it may attempt to share
*/
- if (vma->vm_file) {
+ if (capabilities & BDI_CAP_MAP_DIRECT) {
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
if (ret == 0) {
/* shouldn't return success if we're not sharing */
BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
vma->vm_region->vm_top = vma->vm_region->vm_end;
- return ret;
+ return 0;
}
if (ret != -ENOSYS)
return ret;
@@ -1164,9 +1144,6 @@ static int do_mmap_private(struct vm_area_struct *vma,
if (ret < rlen)
memset(base + ret, 0, rlen - ret);
- } else {
- /* if it's an anonymous mapping, then just clear it */
- memset(base, 0, rlen);
}
return 0;
@@ -1203,9 +1180,6 @@ unsigned long do_mmap_pgoff(struct file *file,
kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
- if (!(flags & MAP_FIXED))
- addr = round_hint_to_min(addr);
-
/* decide whether we should attempt the mapping, and if so what sort of
* mapping */
ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
@@ -1215,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file *file,
return ret;
}
+ /* we ignore the address hint */
+ addr = 0;
+
/* we've determined that we can make the mapping, now translate what we
* now know into VMA flags */
vm_flags = determine_vm_flags(file, prot, flags, capabilities);
@@ -1228,7 +1205,7 @@ unsigned long do_mmap_pgoff(struct file *file,
if (!vma)
goto error_getting_vma;
- atomic_set(&region->vm_usage, 1);
+ region->vm_usage = 1;
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
@@ -1295,7 +1272,7 @@ unsigned long do_mmap_pgoff(struct file *file,
}
/* we've found a region we can share */
- atomic_inc(&pregion->vm_usage);
+ pregion->vm_usage++;
vma->vm_region = pregion;
start = pregion->vm_start;
start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
@@ -1312,7 +1289,7 @@ unsigned long do_mmap_pgoff(struct file *file,
vma->vm_region = NULL;
vma->vm_start = 0;
vma->vm_end = 0;
- atomic_dec(&pregion->vm_usage);
+ pregion->vm_usage--;
pregion = NULL;
goto error_just_free;
}
@@ -1328,7 +1305,7 @@ unsigned long do_mmap_pgoff(struct file *file,
* - this is the hook for quasi-memory character devices to
* tell us the location of a shared mapping
*/
- if (file && file->f_op->get_unmapped_area) {
+ if (capabilities & BDI_CAP_MAP_DIRECT) {
addr = file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
if (IS_ERR((void *) addr)) {
@@ -1352,15 +1329,22 @@ unsigned long do_mmap_pgoff(struct file *file,
}
vma->vm_region = region;
- add_nommu_region(region);
- /* set up the mapping */
+ /* set up the mapping
+ * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
+ */
if (file && vma->vm_flags & VM_SHARED)
ret = do_mmap_shared_file(vma);
else
- ret = do_mmap_private(vma, region, len);
+ ret = do_mmap_private(vma, region, len, capabilities);
if (ret < 0)
- goto error_put_region;
+ goto error_just_free;
+ add_nommu_region(region);
+
+ /* clear anonymous mappings that don't ask for uninitialized data */
+ if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
+ memset((void *)region->vm_start, 0,
+ region->vm_end - region->vm_start);
/* okay... we have a mapping; now we have to register it */
result = vma->vm_start;
@@ -1370,33 +1354,26 @@ unsigned long do_mmap_pgoff(struct file *file,
share:
add_vma_to_mm(current->mm, vma);
- up_write(&nommu_region_sem);
+ /* we flush the region from the icache only when the first executable
+ * mapping of it is made */
+ if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
+ flush_icache_range(region->vm_start, region->vm_end);
+ region->vm_icache_flushed = true;
+ }
- if (prot & PROT_EXEC)
- flush_icache_range(result, result + len);
+ up_write(&nommu_region_sem);
kleave(" = %lx", result);
return result;
-error_put_region:
- __put_nommu_region(region);
- if (vma) {
- if (vma->vm_file) {
- fput(vma->vm_file);
- if (vma->vm_flags & VM_EXECUTABLE)
- removed_exe_file_vma(vma->vm_mm);
- }
- kmem_cache_free(vm_area_cachep, vma);
- }
- kleave(" = %d [pr]", ret);
- return ret;
-
error_just_free:
up_write(&nommu_region_sem);
error:
- fput(region->vm_file);
+ if (region->vm_file)
+ fput(region->vm_file);
kmem_cache_free(vm_region_jar, region);
- fput(vma->vm_file);
+ if (vma->vm_file)
+ fput(vma->vm_file);
if (vma->vm_flags & VM_EXECUTABLE)
removed_exe_file_vma(vma->vm_mm);
kmem_cache_free(vm_area_cachep, vma);
@@ -1426,6 +1403,31 @@ error_getting_region:
}
EXPORT_SYMBOL(do_mmap_pgoff);
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, pgoff)
+{
+ struct file *file = NULL;
+ unsigned long retval = -EBADF;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return retval;
+}
+
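
For illustration, a minimal user-space sketch of how this entry point is normally reached: the C library's mmap() takes a byte offset and hands the kernel a page-granular pgoff. The file name and sizes below are made up, and 4KiB pages are assumed.

/* Illustrative user-space sketch only. Assumes 4KiB pages and a scratch
 * file named "data.bin"; not part of the patch. */
#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDONLY);
	if (fd < 0)
		return 1;

	/* map 4 pages starting at the third page of the file */
	void *p = mmap(NULL, 4 * page, PROT_READ, MAP_SHARED, fd, 2 * page);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}
	printf("mapped at %p (pgoff passed to the kernel: 2)\n", p);
	munmap(p, 4 * page);
	close(fd);
	return 0;
}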
/*
* split a vma into two pieces at address 'addr', a new vma is allocated either
* for the first part or the tail.
@@ -1439,10 +1441,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
kenter("");
- /* we're only permitted to split anonymous regions that have a single
- * owner */
- if (vma->vm_file ||
- atomic_read(&vma->vm_region->vm_usage) != 1)
+ /* we're only permitted to split anonymous regions (these should have
+ * only a single usage on the region) */
+ if (vma->vm_file)
return -ENOMEM;
if (mm->map_count >= sysctl_max_map_count)
@@ -1516,7 +1517,7 @@ static int shrink_vma(struct mm_struct *mm,
/* cut the backing region down to size */
region = vma->vm_region;
- BUG_ON(atomic_read(&region->vm_usage) != 1);
+ BUG_ON(region->vm_usage != 1);
down_write(&nommu_region_sem);
delete_nommu_region(region);
@@ -1760,27 +1761,6 @@ void unmap_mapping_range(struct address_space *mapping,
EXPORT_SYMBOL(unmap_mapping_range);
/*
- * ask for an unmapped area at which to create a mapping on a file
- */
-unsigned long get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-
- get_area = current->mm->get_unmapped_area;
- if (file && file->f_op && file->f_op->get_unmapped_area)
- get_area = file->f_op->get_unmapped_area;
-
- if (!get_area)
- return -ENOSYS;
-
- return get_area(file, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL(get_unmapped_area);
-
-/*
* Check that a process has enough memory to allocate a new virtual
* mapping. 0 means there is enough memory for the allocation to
* succeed and -ENOMEM implies there is not.
@@ -1919,9 +1899,11 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
/* only read or write mappings where it is permitted */
if (write && vma->vm_flags & VM_MAYWRITE)
- len -= copy_to_user((void *) addr, buf, len);
+ copy_to_user_page(vma, NULL, addr,
+ (void *) addr, buf, len);
else if (!write && vma->vm_flags & VM_MAYREAD)
- len -= copy_from_user(buf, (void *) addr, len);
+ copy_from_user_page(vma, NULL, addr,
+ buf, (void *) addr, len);
else
len = 0;
} else {
@@ -1932,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
mmput(mm);
return len;
}
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that any outstanding VMAs aren't broken, then shrink the
+ * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+ size_t newsize)
+{
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ struct vm_region *region;
+ pgoff_t low, high;
+ size_t r_size, r_top;
+
+ low = newsize >> PAGE_SHIFT;
+ high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ down_write(&nommu_region_sem);
+
+ /* search for VMAs that fall within the dead zone */
+ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+ low, high) {
+ /* found one - only interested if it's shared out of the page
+ * cache */
+ if (vma->vm_flags & VM_SHARED) {
+ up_write(&nommu_region_sem);
+ return -ETXTBSY; /* not quite true, but near enough */
+ }
+ }
+
+ /* reduce any regions that overlap the dead zone - if in existence,
+ * these will be pointed to by VMAs that don't overlap the dead zone
+ *
+ * we don't check for any regions that start beyond the EOF as there
+ * shouldn't be any
+ */
+ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+ 0, ULONG_MAX) {
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+
+ region = vma->vm_region;
+ r_size = region->vm_top - region->vm_start;
+ r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+ if (r_top > newsize) {
+ region->vm_top -= r_top - newsize;
+ if (region->vm_end > region->vm_top)
+ region->vm_end = region->vm_top;
+ }
+ }
+
+ up_write(&nommu_region_sem);
+ return 0;
+}
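
For illustration, a stand-alone sketch of the shrink arithmetic above, with made-up sizes and PAGE_SHIFT assumed to be 12: a region whose top extends past the proposed file size is pulled back by exactly the excess.

/* Illustrative sketch only, not kernel code. Assumes PAGE_SHIFT == 12 and
 * made-up sizes: a region mapping the file from offset 0 for 64KiB while
 * the file is being truncated down to 40KiB. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_pgoff = 0;              /* region starts at file offset 0 */
	unsigned long r_size   = 64UL << 10;     /* bytes currently backed by the region */
	unsigned long newsize  = 40UL << 10;     /* proposed file size */
	unsigned long r_top    = (vm_pgoff << PAGE_SHIFT) + r_size;

	if (r_top > newsize)
		r_size -= r_top - newsize;       /* vm_top is pulled back by the excess */

	printf("region now backs %lu bytes\n", r_size);   /* 40960 */
	return 0;
}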
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7b2460e922b..f52481b1c1e5 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -34,6 +34,23 @@ int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */
+/*
+ * Do any of the target task's threads have mems_allowed that intersect ours?
+ */
+static int has_intersects_mems_allowed(struct task_struct *tsk)
+{
+ struct task_struct *t;
+
+ t = tsk;
+ do {
+ if (cpuset_mems_allowed_intersects(current, t))
+ return 1;
+ t = next_thread(t);
+ } while (t != tsk);
+
+ return 0;
+}
+
/**
* badness - calculate a numeric value for how bad this task has been
* @p: task struct of which task we should calculate
@@ -58,6 +75,13 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
unsigned long points, cpu_time, run_time;
struct mm_struct *mm;
struct task_struct *child;
+ int oom_adj = p->signal->oom_adj;
+ struct task_cputime task_time;
+ unsigned long utime;
+ unsigned long stime;
+
+ if (oom_adj == OOM_DISABLE)
+ return 0;
task_lock(p);
mm = p->mm;
@@ -79,7 +103,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
/*
* swapoff can easily use up all memory, so kill those first.
*/
- if (p->flags & PF_SWAPOFF)
+ if (p->flags & PF_OOM_ORIGIN)
return ULONG_MAX;
/*
@@ -102,8 +126,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* of seconds. There is no particular reason for this other than
* that it turned out to work very well in practice.
*/
- cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
- >> (SHIFT_HZ + 3);
+ thread_group_cputime(p, &task_time);
+ utime = cputime_to_jiffies(task_time.utime);
+ stime = cputime_to_jiffies(task_time.stime);
+ cpu_time = (utime + stime) >> (SHIFT_HZ + 3);
+
if (uptime >= p->start_time.tv_sec)
run_time = (uptime - p->start_time.tv_sec) >> 10;
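
As a rough worked example of the new scaling (assuming HZ == 1000, so SHIFT_HZ == 10; the usage figures are made up): cpu_time ends up in roughly 8-second units and run_time in roughly 17-minute units, which is all the heuristic needs.

/* Illustrative sketch of the cpu_time/run_time buckets, not kernel code.
 * Assumes HZ == 1000 (SHIFT_HZ == 10); the figures below are made up. */
#include <stdio.h>

#define SHIFT_HZ 10

int main(void)
{
	unsigned long utime = 400UL * 1000;      /* 400s of user time, in jiffies */
	unsigned long stime = 112UL * 1000;      /* 112s of system time, in jiffies */
	unsigned long uptime = 100000, start_time = 36400;   /* seconds */

	unsigned long cpu_time = (utime + stime) >> (SHIFT_HZ + 3);  /* ~8s units */
	unsigned long run_time = (uptime - start_time) >> 10;        /* ~17min units */

	printf("cpu_time=%lu run_time=%lu\n", cpu_time, run_time);   /* 62 and 62 */
	return 0;
}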
@@ -144,19 +171,19 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* because p may have allocated or otherwise mapped memory on
* this node before. However it will be less likely.
*/
- if (!cpuset_mems_allowed_intersects(current, p))
+ if (!has_intersects_mems_allowed(p))
points /= 8;
/*
- * Adjust the score by oomkilladj.
+ * Adjust the score by oom_adj.
*/
- if (p->oomkilladj) {
- if (p->oomkilladj > 0) {
+ if (oom_adj) {
+ if (oom_adj > 0) {
if (!points)
points = 1;
- points <<= p->oomkilladj;
+ points <<= oom_adj;
} else
- points >>= -(p->oomkilladj);
+ points >>= -(oom_adj);
}
#ifdef DEBUG
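
For a quick sense of scale, a sketch of the shift applied above: each positive oom_adj step doubles the badness score, each negative step halves it (the starting score of 1000 is made up).

/* Illustrative sketch of the oom_adj shift, not kernel code. */
#include <stdio.h>

static unsigned long adjust(unsigned long points, int oom_adj)
{
	if (oom_adj > 0) {
		if (!points)
			points = 1;
		points <<= oom_adj;     /* each +1 doubles the badness */
	} else if (oom_adj < 0) {
		points >>= -oom_adj;    /* each -1 halves it */
	}
	return points;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       adjust(1000, 0),    /* 1000 */
	       adjust(1000, 3),    /* 8000 */
	       adjust(1000, -4));  /* 62   */
	return 0;
}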
@@ -169,27 +196,46 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
/*
* Determine the type of allocation constraint.
*/
-static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
- gfp_t gfp_mask)
-{
#ifdef CONFIG_NUMA
+static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
+ gfp_t gfp_mask, nodemask_t *nodemask)
+{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
- nodemask_t nodes = node_states[N_HIGH_MEMORY];
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
- if (cpuset_zone_allowed_softwall(zone, gfp_mask))
- node_clear(zone_to_nid(zone), nodes);
- else
- return CONSTRAINT_CPUSET;
+ /*
+	 * We only reach here when __GFP_NOFAIL is used, so we should avoid
+	 * killing current; we have to kill a random task in this case.
+	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
+	 * handle that yet.
+ */
+ if (gfp_mask & __GFP_THISNODE)
+ return CONSTRAINT_NONE;
- if (!nodes_empty(nodes))
+ /*
+	 * The nodemask here is the nodemask passed to alloc_pages(). Cpuset
+	 * doesn't use this nodemask for its hardwall/softwall/hierarchy
+	 * features; mempolicy is the only user of the nodemask here.
+	 * Check whether mempolicy's nodemask contains all of N_HIGH_MEMORY.
+ */
+ if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
return CONSTRAINT_MEMORY_POLICY;
-#endif
+ /* Check this allocation failure is caused by cpuset's wall function */
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ high_zoneidx, nodemask)
+ if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ return CONSTRAINT_CPUSET;
+
+ return CONSTRAINT_NONE;
+}
+#else
+static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
+ gfp_t gfp_mask, nodemask_t *nodemask)
+{
return CONSTRAINT_NONE;
}
+#endif
/*
* Simple selection loop. We chose the process with the highest
@@ -200,13 +246,13 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
static struct task_struct *select_bad_process(unsigned long *ppoints,
struct mem_cgroup *mem)
{
- struct task_struct *g, *p;
+ struct task_struct *p;
struct task_struct *chosen = NULL;
struct timespec uptime;
*ppoints = 0;
do_posix_clock_monotonic_gettime(&uptime);
- do_each_thread(g, p) {
+ for_each_process(p) {
unsigned long points;
/*
@@ -251,7 +297,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
*ppoints = ULONG_MAX;
}
- if (p->oomkilladj == OOM_DISABLE)
+ if (p->signal->oom_adj == OOM_DISABLE)
continue;
points = badness(p, uptime.tv_sec);
@@ -259,7 +305,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
chosen = p;
*ppoints = points;
}
- } while_each_thread(g, p);
+ }
return chosen;
}
@@ -304,12 +350,30 @@ static void dump_tasks(const struct mem_cgroup *mem)
}
printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
- get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+ get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj,
p->comm);
task_unlock(p);
} while_each_thread(g, p);
}
+static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ struct mem_cgroup *mem)
+{
+ pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
+ "oom_adj=%d\n",
+ current->comm, gfp_mask, order, current->signal->oom_adj);
+ task_lock(current);
+ cpuset_print_task_mems_allowed(current);
+ task_unlock(current);
+ dump_stack();
+ mem_cgroup_print_oom_info(mem, p);
+ show_mem();
+ if (sysctl_oom_dump_tasks)
+ dump_tasks(mem);
+}
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
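
The K() helper added here just converts a page count into kilobytes for the log message; a tiny worked example, assuming 4KiB pages (PAGE_SHIFT == 12) and a made-up VM size:

/* Illustrative only. With PAGE_SHIFT == 12, K(x) == x << 2, i.e. pages * 4. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long total_vm = 2560;                 /* pages */
	printf("vsz:%lukB\n", K(total_vm));            /* prints vsz:10240kB */
	return 0;
}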
/*
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
@@ -323,15 +387,23 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
return;
}
+ task_lock(p);
if (!p->mm) {
WARN_ON(1);
- printk(KERN_WARNING "tried to kill an mm-less task!\n");
+ printk(KERN_WARNING "tried to kill an mm-less task %d (%s)!\n",
+ task_pid_nr(p), p->comm);
+ task_unlock(p);
return;
}
if (verbose)
- printk(KERN_ERR "Killed process %d (%s)\n",
- task_pid_nr(p), p->comm);
+ printk(KERN_ERR "Killed process %d (%s) "
+ "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+ task_pid_nr(p), p->comm,
+ K(p->mm->total_vm),
+ K(get_mm_counter(p->mm, anon_rss)),
+ K(get_mm_counter(p->mm, file_rss)));
+ task_unlock(p);
/*
* We give our sacrificial lamb high priority and access to
@@ -346,11 +418,6 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
static int oom_kill_task(struct task_struct *p)
{
- struct mm_struct *mm;
- struct task_struct *g, *q;
-
- mm = p->mm;
-
/* WARNING: mm may not be dereferenced since we did not obtain its
* value from get_task_mm(p). This is OK since all we need to do is
* compare mm to q->mm below.
@@ -359,30 +426,11 @@ static int oom_kill_task(struct task_struct *p)
* change to NULL at any time since we do not hold task_lock(p).
* However, this is of no concern to us.
*/
-
- if (mm == NULL)
+ if (!p->mm || p->signal->oom_adj == OOM_DISABLE)
return 1;
- /*
- * Don't kill the process if any threads are set to OOM_DISABLE
- */
- do_each_thread(g, q) {
- if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
- return 1;
- } while_each_thread(g, q);
-
__oom_kill_task(p, 1);
- /*
- * kill all processes that share the ->mm (i.e. all threads),
- * but are in a different thread group. Don't let them have access
- * to memory reserves though, otherwise we might deplete all memory.
- */
- do_each_thread(g, q) {
- if (q->mm == mm && !same_thread_group(q, p))
- force_sig(SIGKILL, q);
- } while_each_thread(g, q);
-
return 0;
}
@@ -392,19 +440,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
{
struct task_struct *c;
- if (printk_ratelimit()) {
- printk(KERN_WARNING "%s invoked oom-killer: "
- "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
- current->comm, gfp_mask, order, current->oomkilladj);
- task_lock(current);
- cpuset_print_task_mems_allowed(current);
- task_unlock(current);
- dump_stack();
- mem_cgroup_print_oom_info(mem, current);
- show_mem();
- if (sysctl_oom_dump_tasks)
- dump_tasks(mem);
- }
+ if (printk_ratelimit())
+ dump_header(p, gfp_mask, order, mem);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
@@ -540,6 +577,7 @@ retry:
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
read_unlock(&tasklist_lock);
+ dump_header(NULL, gfp_mask, order, NULL);
panic("Out of memory and no killable processes...\n");
}
@@ -595,7 +633,8 @@ rest_and_return:
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
+void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *nodemask)
{
unsigned long freed = 0;
enum oom_constraint constraint;
@@ -605,14 +644,16 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
/* Got some memory back in the last second. */
return;
- if (sysctl_panic_on_oom == 2)
+ if (sysctl_panic_on_oom == 2) {
+ dump_header(NULL, gfp_mask, order, NULL);
panic("out of memory. Compulsory panic_on_oom is selected.\n");
+ }
/*
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
- constraint = constrained_alloc(zonelist, gfp_mask);
+ constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
read_lock(&tasklist_lock);
switch (constraint) {
@@ -622,8 +663,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
break;
case CONSTRAINT_NONE:
- if (sysctl_panic_on_oom)
+ if (sysctl_panic_on_oom) {
+ dump_header(NULL, gfp_mask, order, NULL);
panic("out of memory. panic_on_oom is selected\n");
+ }
/* Fall-through */
case CONSTRAINT_CPUSET:
__out_of_memory(gfp_mask, order);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 81627ebcd313..0b19943ecf8b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,15 +36,6 @@
#include <linux/pagevec.h>
/*
- * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES 1024
-
-/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* will look to see if it needs to force writeback or throttling.
*/
@@ -53,18 +44,21 @@ static long ratelimit_pages = 32;
/*
* When balance_dirty_pages decides that the caller needs to perform some
* non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
* large amounts of I/O are submitted.
*/
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
{
- return ratelimit_pages + ratelimit_pages / 2;
+ if (dirtied < ratelimit_pages)
+ dirtied = ratelimit_pages;
+
+ return dirtied + dirtied / 2;
}
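
A stand-alone sketch of the effect of this change, assuming the default ratelimit_pages of 32: light dirtiers still write a minimum chunk, while heavy dirtiers are asked to write back one and a half times what they dirtied.

/* Illustrative sketch of sync_writeback_pages(), not kernel code.
 * Assumes ratelimit_pages == 32 as in the default above. */
#include <stdio.h>

static long ratelimit_pages = 32;

static long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;
	return dirtied + dirtied / 2;    /* write back 1.5x what was dirtied */
}

int main(void)
{
	printf("%ld %ld\n",
	       sync_writeback_pages(8),     /* small dirtier still writes 48 */
	       sync_writeback_pages(1024)); /* heavy dirtier writes 1536 */
	return 0;
}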
/* The following parameters are exported via /proc/sys/vm */
/*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
*/
int dirty_background_ratio = 10;
@@ -117,8 +111,6 @@ EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
-static void background_writeout(unsigned long _min_pages);
-
/*
* Scale the writeback cache size proportional to the relative writeout speeds.
*
@@ -166,37 +158,37 @@ static void update_completion_period(void)
}
int dirty_background_ratio_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
- ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
dirty_background_bytes = 0;
return ret;
}
int dirty_background_bytes_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
dirty_background_ratio = 0;
return ret;
}
int dirty_ratio_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int old_ratio = vm_dirty_ratio;
int ret;
- ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
update_completion_period();
vm_dirty_bytes = 0;
@@ -206,13 +198,13 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
int dirty_bytes_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
unsigned long old_bytes = vm_dirty_bytes;
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
update_completion_period();
vm_dirty_ratio = 0;
@@ -320,15 +312,13 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
/*
*
*/
-static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&bdi_lock, flags);
+ spin_lock_bh(&bdi_lock);
if (min_ratio > bdi->max_ratio) {
ret = -EINVAL;
} else {
@@ -340,27 +330,26 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
ret = -EINVAL;
}
}
- spin_unlock_irqrestore(&bdi_lock, flags);
+ spin_unlock_bh(&bdi_lock);
return ret;
}
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
- unsigned long flags;
int ret = 0;
if (max_ratio > 100)
return -EINVAL;
- spin_lock_irqsave(&bdi_lock, flags);
+ spin_lock_bh(&bdi_lock);
if (bdi->min_ratio > max_ratio) {
ret = -EINVAL;
} else {
bdi->max_ratio = max_ratio;
bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
}
- spin_unlock_irqrestore(&bdi_lock, flags);
+ spin_unlock_bh(&bdi_lock);
return ret;
}
@@ -394,7 +383,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
struct zone *z =
&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
- x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+ x += zone_page_state(z, NR_FREE_PAGES) +
+ zone_reclaimable_pages(z);
}
/*
* Make sure that the number of highmem pages is never larger
@@ -418,7 +408,7 @@ unsigned long determine_dirtyable_memory(void)
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
@@ -487,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
* balance_dirty_pages() must be called by processes which are generating dirty
* data. It looks at the number of dirty pages in the machine and will force
* the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
*/
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+ unsigned long write_chunk)
{
long nr_reclaimable, bdi_nr_reclaimable;
long nr_writeback, bdi_nr_writeback;
@@ -498,7 +489,7 @@ static void balance_dirty_pages(struct address_space *mapping)
unsigned long dirty_thresh;
unsigned long bdi_thresh;
unsigned long pages_written = 0;
- unsigned long write_chunk = sync_writeback_pages();
+ unsigned long pause = 1;
struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -546,7 +537,7 @@ static void balance_dirty_pages(struct address_space *mapping)
* up.
*/
if (bdi_nr_reclaimable > bdi_thresh) {
- writeback_inodes(&wbc);
+ writeback_inodes_wbc(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
&bdi_thresh, bdi);
@@ -575,7 +566,16 @@ static void balance_dirty_pages(struct address_space *mapping)
if (pages_written >= write_chunk)
break; /* We've done our duty */
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ __set_current_state(TASK_INTERRUPTIBLE);
+ io_schedule_timeout(pause);
+
+ /*
+ * Increase the delay for each loop, up to our previous
+ * default of taking a 100ms nap.
+ */
+ pause <<= 1;
+ if (pause > HZ / 10)
+ pause = HZ / 10;
}
if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
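
A minimal sketch of the back-off that replaces the fixed congestion_wait() above, assuming HZ == 1000: the pause doubles on each trip around the loop and is capped at the old 100ms value.

/* Illustrative sketch of the back-off, not kernel code.
 * Assumes HZ == 1000, so the cap HZ/10 is 100 ticks (100ms). */
#include <stdio.h>

#define HZ 1000

int main(void)
{
	unsigned long pause = 1;
	int loop;

	for (loop = 1; loop <= 8; loop++) {
		printf("loop %d: sleep %lu ticks\n", loop, pause);
		pause <<= 1;             /* double the nap each time around */
		if (pause > HZ / 10)
			pause = HZ / 10; /* ...but never more than 100ms */
	}
	return 0;
}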
@@ -583,7 +583,7 @@ static void balance_dirty_pages(struct address_space *mapping)
bdi->dirty_exceeded = 0;
if (writeback_in_progress(bdi))
- return; /* pdflush is already working this queue */
+ return;
/*
* In laptop mode, we wait until hitting the higher threshold before
@@ -594,10 +594,10 @@ static void balance_dirty_pages(struct address_space *mapping)
* background_thresh, to keep the amount of dirty memory low.
*/
if ((laptop_mode && pages_written) ||
- (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
- + global_page_state(NR_UNSTABLE_NFS)
+ (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
+ + global_page_state(NR_UNSTABLE_NFS))
> background_thresh)))
- pdflush_operation(background_writeout, 0);
+ bdi_start_writeback(bdi, NULL, 0);
}
void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -610,6 +610,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
}
}
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
/**
* balance_dirty_pages_ratelimited_nr - balance dirty memory state
* @mapping: address_space which was dirtied
@@ -627,7 +629,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
unsigned long nr_pages_dirtied)
{
- static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
unsigned long ratelimit;
unsigned long *p;
@@ -640,12 +641,13 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
* tasks in balance_dirty_pages(). Period.
*/
preempt_disable();
- p = &__get_cpu_var(ratelimits);
+ p = &__get_cpu_var(bdp_ratelimits);
*p += nr_pages_dirtied;
if (unlikely(*p >= ratelimit)) {
+ ratelimit = sync_writeback_pages(*p);
*p = 0;
preempt_enable();
- balance_dirty_pages(mapping);
+ balance_dirty_pages(mapping, ratelimit);
return;
}
preempt_enable();
@@ -681,153 +683,35 @@ void throttle_vm_writeout(gfp_t gfp_mask)
}
}
-/*
- * writeback at least _min_pages, and keep writing until the amount of dirty
- * memory is less than the background threshold, or until we're all clean.
- */
-static void background_writeout(unsigned long _min_pages)
-{
- long min_pages = _min_pages;
- struct writeback_control wbc = {
- .bdi = NULL,
- .sync_mode = WB_SYNC_NONE,
- .older_than_this = NULL,
- .nr_to_write = 0,
- .nonblocking = 1,
- .range_cyclic = 1,
- };
-
- for ( ; ; ) {
- unsigned long background_thresh;
- unsigned long dirty_thresh;
-
- get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
- if (global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) < background_thresh
- && min_pages <= 0)
- break;
- wbc.more_io = 0;
- wbc.encountered_congestion = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- wbc.pages_skipped = 0;
- writeback_inodes(&wbc);
- min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
- /* Wrote less than expected */
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- else
- break;
- }
- }
-}
-
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world. Returns 0 if a pdflush thread was dispatched. Returns
- * -1 if all pdflush threads were busy.
- */
-int wakeup_pdflush(long nr_pages)
-{
- if (nr_pages == 0)
- nr_pages = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS);
- return pdflush_operation(background_writeout, nr_pages);
-}
-
-static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);
-static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
/*
- * Periodic writeback of "old" data.
- *
- * Define "old": the first time one of an inode's pages is dirtied, we mark the
- * dirtying-time in the inode's address_space. So this periodic writeback code
- * just walks the superblock inode list, writing back any inodes which are
- * older than a specific point in time.
- *
- * Try to run once per dirty_writeback_interval. But if a writeback event
- * takes longer than a dirty_writeback_interval interval, then leave a
- * one-second gap.
- *
- * older_than_this takes precedence over nr_to_write. So we'll only write back
- * all dirty pages if they are all attached to "old" mappings.
- */
-static void wb_kupdate(unsigned long arg)
-{
- unsigned long oldest_jif;
- unsigned long start_jif;
- unsigned long next_jif;
- long nr_to_write;
- struct writeback_control wbc = {
- .bdi = NULL,
- .sync_mode = WB_SYNC_NONE,
- .older_than_this = &oldest_jif,
- .nr_to_write = 0,
- .nonblocking = 1,
- .for_kupdate = 1,
- .range_cyclic = 1,
- };
-
- sync_supers();
-
- oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
- start_jif = jiffies;
- next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
- nr_to_write = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
- while (nr_to_write > 0) {
- wbc.more_io = 0;
- wbc.encountered_congestion = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- writeback_inodes(&wbc);
- if (wbc.nr_to_write > 0) {
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- else
- break; /* All the old data is written */
- }
- nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- }
- if (time_before(next_jif, jiffies + HZ))
- next_jif = jiffies + HZ;
- if (dirty_writeback_interval)
- mod_timer(&wb_timer, next_jif);
-}
-
-/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
- if (dirty_writeback_interval)
- mod_timer(&wb_timer, jiffies +
- msecs_to_jiffies(dirty_writeback_interval * 10));
- else
- del_timer(&wb_timer);
+ proc_dointvec(table, write, buffer, length, ppos);
return 0;
}
-static void wb_timer_fn(unsigned long unused)
+static void do_laptop_sync(struct work_struct *work)
{
- if (pdflush_operation(wb_kupdate, 0) < 0)
- mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
-}
-
-static void laptop_flush(unsigned long unused)
-{
- sys_sync();
+ wakeup_flusher_threads(0);
+ kfree(work);
}
static void laptop_timer_fn(unsigned long unused)
{
- pdflush_operation(laptop_flush, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_laptop_sync);
+ schedule_work(work);
+ }
}
/*
@@ -910,8 +794,6 @@ void __init page_writeback_init(void)
{
int shift;
- mod_timer(&wb_timer,
- jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
@@ -939,7 +821,6 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
int ret = 0;
int done = 0;
struct pagevec pvec;
@@ -952,11 +833,6 @@ int write_cache_pages(struct address_space *mapping,
int range_whole = 0;
long nr_to_write = wbc->nr_to_write;
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- return 0;
- }
-
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
@@ -1075,12 +951,6 @@ continue_unlock:
break;
}
}
-
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- done = 1;
- break;
- }
}
pagevec_release(&pvec);
cond_resched();
@@ -1145,12 +1015,10 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->nr_to_write <= 0)
return 0;
- wbc->for_writepages = 1;
if (mapping->a_ops->writepages)
ret = mapping->a_ops->writepages(mapping, wbc);
else
ret = generic_writepages(mapping, wbc);
- wbc->for_writepages = 0;
return ret;
}
@@ -1274,6 +1142,13 @@ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
EXPORT_SYMBOL(redirty_page_for_writepage);
/*
+ * Dirty a page.
+ *
+ * For pages with a mapping this should be done under the page lock
+ * for the benefit of asynchronous memory errors, which prefer a consistent
+ * dirty state. This rule can be broken in some special cases,
+ * but it is better not to.
+ *
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
*/
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b3ebbd9915cb..bde9ea1ab26f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,6 +48,8 @@
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
+#include <linux/memory.h>
+#include <trace/events/kmem.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -71,7 +73,6 @@ EXPORT_SYMBOL(node_states);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -123,8 +124,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
int min_free_kbytes = 1024;
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -169,7 +170,7 @@ static inline void __lock_cpu_pcp(unsigned long *flags, int cpu)
{
#ifdef CONFIG_PREEMPT_RT
spin_lock(&__get_cpu_lock(pcp_locks, cpu));
- flags = 0;
+ *flags = 0;
#else
local_irq_save(*flags);
#endif
@@ -281,6 +282,12 @@ static void bad_page(struct page *page)
static unsigned long nr_shown;
static unsigned long nr_unshown;
+ /* Don't complain about poisoned pages */
+ if (PageHWPoison(page)) {
+ __ClearPageBuddy(page);
+ return;
+ }
+
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
@@ -527,7 +534,6 @@ static inline void __free_one_page(struct page *page,
zone->free_area[order].nr_free++;
}
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* free_page_mlock() -- clean up attempts to free and mlocked() page.
* Page should not be on lru, so no need to fix that up.
@@ -538,9 +544,6 @@ static inline void free_page_mlock(struct page *page)
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
static inline int free_pages_check(struct page *page)
{
@@ -557,7 +560,7 @@ static inline int free_pages_check(struct page *page)
}
/*
- * Frees a list of pages.
+ * Frees a number of pages from the PCP lists
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
@@ -567,31 +570,76 @@ static inline int free_pages_check(struct page *page)
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
-static void free_pages_bulk(struct zone *zone, int count,
- struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+ struct per_cpu_pages *pcp)
{
+ int migratetype = 0;
unsigned long flags;
spin_lock_irqsave(&zone->lock, flags);
zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
- while (count--) {
- struct page *page;
+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
- VM_BUG_ON(list_empty(list));
- page = list_entry(list->prev, struct page, lru);
- /* have to delete it as __free_one_page list manipulates */
- list_del(&page->lru);
- __free_one_page(page, zone, order, page_private(page));
+ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) {
+ struct list_head *list = &pcp->lists[migratetype];
+
+ while (!list_empty(list)) {
+ struct page *page;
+
+ page = list_first_entry(list, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, zone, 0, page_private(page));
+ trace_mm_page_pcpu_drain(page, 0, page_private(page));
#ifdef CONFIG_PREEMPT_RT
- cond_resched_lock(&zone->lock);
+ cond_resched_lock(&zone->lock);
#endif
+ count--;
+ }
}
+ WARN_ON(count != 0);
spin_unlock_irqrestore(&zone->lock, flags);
}
+static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
+ struct per_cpu_pages *dst)
+{
+ int migratetype, batch_free = 0;
+
+ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+ INIT_LIST_HEAD(&dst->lists[migratetype]);
+ migratetype = 0;
+
+ while (count) {
+ struct page *page;
+ struct list_head *list;
+
+ /*
+ * Remove pages from lists in a round-robin fashion. A
+ * batch_free count is maintained that is incremented when an
+ * empty list is encountered. This is so more pages are freed
+ * off fuller lists instead of spinning excessively around empty
+ * lists
+ */
+ do {
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+ list = &src->lists[migratetype];
+ } while (list_empty(list));
+
+ do {
+ page = list_last_entry(list, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+ list_add(&page->lru, &dst->lists[migratetype]);
+ } while (--count && --batch_free && !list_empty(list));
+ }
+}
+
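
A stand-alone sketch of the round-robin draining used by isolate_pcp_pages()/free_pcppages_bulk(), with plain counters standing in for the per-migratetype lists (MIGRATE_PCPTYPES assumed to be 3, list sizes made up): empty lists are skipped, and the batch taken from a list grows with the number of lists skipped to reach it, so fuller lists are drained harder.

/* Illustrative sketch of the round-robin draining above, not kernel code.
 * Uses plain counters instead of list_heads; assumes MIGRATE_PCPTYPES == 3. */
#include <stdio.h>

#define MIGRATE_PCPTYPES 3

int main(void)
{
	int lists[MIGRATE_PCPTYPES] = { 10, 0, 4 };  /* pages per pcp list */
	int count = 8;                               /* pages to drain */
	int migratetype = 0;

	while (count) {
		int batch_free = 0;

		/* skip empty lists, remembering how many we advanced past */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
		} while (!lists[migratetype]);

		/* free up to batch_free pages from the list we landed on */
		do {
			lists[migratetype]--;
			printf("freed one page from list %d\n", migratetype);
		} while (--count && --batch_free && lists[migratetype]);
	}
	return 0;
}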
static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
@@ -610,7 +658,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
{
unsigned long flags;
int i, this_cpu, bad = 0;
- int wasMlocked = TestClearPageMlocked(page);
+ int wasMlocked = __TestClearPageMlocked(page);
kmemcheck_free_shadow(page, order);
@@ -699,7 +747,7 @@ static inline void expand(struct zone *zone, struct page *page,
/*
* This page is about to be returned from the page allocator
*/
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static inline int check_new_page(struct page *page)
{
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
@@ -708,6 +756,18 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
bad_page(page);
return 1;
}
+ return 0;
+}
+
+static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+{
+ int i;
+
+ for (i = 0; i < (1 << order); i++) {
+ struct page *p = page + i;
+ if (unlikely(check_new_page(p)))
+ return 1;
+ }
set_page_private(page, 0);
set_page_refcounted(page);
@@ -836,6 +896,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
return move_freepages(zone, start_page, end_page, migratetype);
}
+static void change_pageblock_range(struct page *pageblock_page,
+ int start_order, int migratetype)
+{
+ int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+ while (nr_pageblocks--) {
+ set_pageblock_migratetype(pageblock_page, migratetype);
+ pageblock_page += pageblock_nr_pages;
+ }
+}
+
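
A worked example of how many pageblocks the helper above walks, assuming pageblock_order == 9 (512 pages per pageblock): an order-9 free area re-types one pageblock, order-10 two, order-11 four.

/* Illustrative only: pageblocks walked by change_pageblock_range().
 * Assumes pageblock_order == 9. */
#include <stdio.h>

#define PAGEBLOCK_ORDER 9

int main(void)
{
	int start_order;

	for (start_order = 9; start_order <= 11; start_order++)
		printf("order %d -> %d pageblock(s) re-typed\n",
		       start_order, 1 << (start_order - PAGEBLOCK_ORDER));
	return 0;
}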
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -889,11 +960,16 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
list_del(&page->lru);
rmv_page_order(page);
- if (current_order == pageblock_order)
- set_pageblock_migratetype(page,
+ /* Take ownership for orders >= pageblock_order */
+ if (current_order >= pageblock_order)
+ change_pageblock_range(page, current_order,
start_migratetype);
expand(zone, page, order, current_order, area, migratetype);
+
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+ start_migratetype, migratetype);
+
return page;
}
}
@@ -927,6 +1003,7 @@ retry_reserve:
}
}
+ trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
@@ -968,16 +1045,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
-static void
-isolate_pcp_pages(int count, struct list_head *src, struct list_head *dst)
-{
- while (count--) {
- struct page *page = list_last_entry(src, struct page, lru);
- list_move(&page->lru, dst);
- }
-}
-
-
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -989,20 +1056,19 @@ isolate_pcp_pages(int count, struct list_head *src, struct list_head *dst)
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
- LIST_HEAD(free_list);
+ struct per_cpu_pages dst;
unsigned long flags;
- int to_drain;
- int this_cpu;
+ int to_drain, this_cpu;
lock_cpu_pcp(&flags, &this_cpu);
if (pcp->count >= pcp->batch)
to_drain = pcp->batch;
else
to_drain = pcp->count;
- isolate_pcp_pages(to_drain, &pcp->list, &free_list);
+ isolate_pcp_pages(to_drain, pcp, &dst);
pcp->count -= to_drain;
unlock_cpu_pcp(flags, this_cpu);
- free_pages_bulk(zone, to_drain, &free_list, 0);
+ free_pcppages_bulk(zone, to_drain, &dst);
}
#endif
@@ -1020,8 +1086,7 @@ static void drain_pages(unsigned int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
- LIST_HEAD(free_list);
+ struct per_cpu_pages *pcp, dst;
int count;
__lock_cpu_pcp(&flags, cpu);
@@ -1031,12 +1096,13 @@ static void drain_pages(unsigned int cpu)
WARN_ON(1);
continue;
}
+
pcp = &pset->pcp;
- isolate_pcp_pages(pcp->count, &pcp->list, &free_list);
+ isolate_pcp_pages(pcp->count, pcp, &dst);
count = pcp->count;
pcp->count = 0;
unlock_cpu_pcp(flags, cpu);
- free_pages_bulk(zone, count, &free_list, 0);
+ free_pcppages_bulk(zone, count, &dst);
}
}
@@ -1141,7 +1207,8 @@ static void free_hot_cold_page(struct page *page, int cold)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
unsigned long flags;
- int count, this_cpu, wasMlocked = TestClearPageMlocked(page);
+ int migratetype, this_cpu, count;
+ int wasMlocked = __TestClearPageMlocked(page);
kmemcheck_free_shadow(page, 0);
@@ -1159,38 +1226,52 @@ static void free_hot_cold_page(struct page *page, int cold)
pset = get_zone_pcp(zone, &flags, &this_cpu);
pcp = &pset->pcp;
- set_page_private(page, get_pageblock_migratetype(page));
+ migratetype = get_pageblock_migratetype(page);
+ set_page_private(page, migratetype);
if (unlikely(wasMlocked))
free_page_mlock(page);
count_vm_event(PGFREE);
+ /*
+ * We only track unmovable, reclaimable and movable on pcp lists.
+	 * Free ISOLATE pages back to the allocator because they are being
+	 * offlined, but treat RESERVE as movable pages so that we can get those
+	 * areas back if necessary. Otherwise, we may have to free
+	 * excessively into the page allocator.
+ */
+ if (migratetype >= MIGRATE_PCPTYPES) {
+ if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+ free_one_page(zone, page, 0, migratetype);
+ goto out;
+ }
+ migratetype = MIGRATE_MOVABLE;
+ }
+
if (cold)
- list_add_tail(&page->lru, &pcp->list);
+ list_add_tail(&page->lru, &pcp->lists[migratetype]);
else
- list_add(&page->lru, &pcp->list);
+ list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
- LIST_HEAD(free_list);
+ struct per_cpu_pages dst;
- isolate_pcp_pages(pcp->batch, &pcp->list, &free_list);
+ isolate_pcp_pages(pcp->batch, pcp, &dst);
pcp->count -= pcp->batch;
count = pcp->batch;
put_zone_pcp(zone, flags, this_cpu);
- free_pages_bulk(zone, count, &free_list, 0);
- } else
- put_zone_pcp(zone, flags, this_cpu);
+ free_pcppages_bulk(zone, count, &dst);
+ return;
+ }
+out:
+ put_zone_pcp(zone, flags, this_cpu);
}
void free_hot_page(struct page *page)
{
+ trace_mm_page_free_direct(page, 0);
free_hot_cold_page(page, 0);
}
-void free_cold_page(struct page *page)
-{
- free_hot_cold_page(page, 1);
-}
-
/*
* split_page takes a non-compound higher-order page, and splits it into
* n (1<<order) sub-pages: page[0..n]
@@ -1240,33 +1321,21 @@ again:
if (likely(order == 0)) {
struct per_cpu_pages *pcp = &pset->pcp;
+ struct list_head *list;
- if (!pcp->count) {
- pcp->count = rmqueue_bulk(zone, 0,
- pcp->batch, &pcp->list,
+ list = &pcp->lists[migratetype];
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, 0,
+ pcp->batch, list,
migratetype, cold);
- if (unlikely(!pcp->count))
+ if (unlikely(list_empty(list)))
goto failed;
}
- /* Find a page of the appropriate migrate type */
- if (cold) {
- list_for_each_entry_reverse(page, &pcp->list, lru)
- if (page_private(page) == migratetype)
- break;
- } else {
- list_for_each_entry(page, &pcp->list, lru)
- if (page_private(page) == migratetype)
- break;
- }
-
- /* Allocate more to the pcp list if necessary */
- if (unlikely(&page->lru == &pcp->list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, &pcp->list,
- migratetype, cold);
- page = list_entry(pcp->list.next, struct page, lru);
- }
+ if (cold)
+ page = list_entry(list->prev, struct page, lru);
+ else
+ page = list_entry(list->next, struct page, lru);
list_del(&page->lru);
pcp->count--;
@@ -1286,10 +1355,10 @@ again:
}
spin_lock(&zone->lock);
page = __rmqueue(zone, order, migratetype);
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
spin_unlock(&zone->lock);
if (!page)
goto failed;
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
@@ -1717,12 +1786,22 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
if (page)
goto out;
- /* The OOM killer will not help higher order allocs */
- if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
- goto out;
-
+ if (!(gfp_mask & __GFP_NOFAIL)) {
+ /* The OOM killer will not help higher order allocs */
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ goto out;
+ /*
+ * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+ * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+ * The caller should handle page allocation failure by itself if
+ * it specifies __GFP_THISNODE.
+ * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+ */
+ if (gfp_mask & __GFP_THISNODE)
+ goto out;
+ }
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order);
+ out_of_memory(zonelist, gfp_mask, order, nodemask);
out:
clear_zonelist_oom(zonelist, gfp_mask);
@@ -1744,10 +1823,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
-
- /*
- * The task's cpuset might have expanded its set of allowable nodes
- */
p->flags |= PF_MEMALLOC;
lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
@@ -1832,7 +1907,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* See also cpuset_zone_allowed() comment in kernel/cpuset.c.
*/
alloc_flags &= ~ALLOC_CPUSET;
- } else if (unlikely(rt_task(p)))
+ } else if (unlikely(rt_task(p)) && !in_interrupt())
alloc_flags |= ALLOC_HARDER;
if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
@@ -1880,6 +1955,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
+restart:
wake_all_kswapd(order, zonelist, high_zoneidx);
/*
@@ -1889,7 +1965,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
-restart:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2024,6 +2099,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
+ trace_mm_page_alloc(page, order, gfp_mask, migratetype);
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2033,44 +2109,41 @@ EXPORT_SYMBOL(__alloc_pages_nodemask);
*/
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
- struct page * page;
+ struct page *page;
+
+ /*
+ * __get_free_pages() returns a 32-bit address, which cannot represent
+ * a highmem page
+ */
+ VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
page = alloc_pages(gfp_mask, order);
if (!page)
return 0;
return (unsigned long) page_address(page);
}
-
EXPORT_SYMBOL(__get_free_pages);
unsigned long get_zeroed_page(gfp_t gfp_mask)
{
- struct page * page;
-
- /*
- * get_zeroed_page() returns a 32-bit address, which cannot represent
- * a highmem page
- */
- VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
- page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
- if (page)
- return (unsigned long) page_address(page);
- return 0;
+ return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
-
EXPORT_SYMBOL(get_zeroed_page);
void __pagevec_free(struct pagevec *pvec)
{
int i = pagevec_count(pvec);
- while (--i >= 0)
+ while (--i >= 0) {
+ trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
free_hot_cold_page(pvec->pages[i], pvec->cold);
+ }
}
void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
+ trace_mm_page_free_direct(page, order);
if (order == 0)
free_hot_page(page);
else
@@ -2245,23 +2318,27 @@ void show_free_areas(void)
}
}
- printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
- " inactive_file:%lu"
+ printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+ " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
" unevictable:%lu"
" dirty:%lu writeback:%lu unstable:%lu\n"
- " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+ " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+ " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
global_page_state(NR_ACTIVE_ANON),
- global_page_state(NR_ACTIVE_FILE),
global_page_state(NR_INACTIVE_ANON),
+ global_page_state(NR_ISOLATED_ANON),
+ global_page_state(NR_ACTIVE_FILE),
global_page_state(NR_INACTIVE_FILE),
+ global_page_state(NR_ISOLATED_FILE),
global_page_state(NR_UNEVICTABLE),
global_page_state(NR_FILE_DIRTY),
global_page_state(NR_WRITEBACK),
global_page_state(NR_UNSTABLE_NFS),
global_page_state(NR_FREE_PAGES),
- global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE),
+ global_page_state(NR_SLAB_RECLAIMABLE),
+ global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
+ global_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE));
@@ -2279,7 +2356,21 @@ void show_free_areas(void)
" active_file:%lukB"
" inactive_file:%lukB"
" unevictable:%lukB"
+ " isolated(anon):%lukB"
+ " isolated(file):%lukB"
" present:%lukB"
+ " mlocked:%lukB"
+ " dirty:%lukB"
+ " writeback:%lukB"
+ " mapped:%lukB"
+ " shmem:%lukB"
+ " slab_reclaimable:%lukB"
+ " slab_unreclaimable:%lukB"
+ " kernel_stack:%lukB"
+ " pagetables:%lukB"
+ " unstable:%lukB"
+ " bounce:%lukB"
+ " writeback_tmp:%lukB"
" pages_scanned:%lu"
" all_unreclaimable? %s"
"\n",
@@ -2293,7 +2384,22 @@ void show_free_areas(void)
K(zone_page_state(zone, NR_ACTIVE_FILE)),
K(zone_page_state(zone, NR_INACTIVE_FILE)),
K(zone_page_state(zone, NR_UNEVICTABLE)),
+ K(zone_page_state(zone, NR_ISOLATED_ANON)),
+ K(zone_page_state(zone, NR_ISOLATED_FILE)),
K(zone->present_pages),
+ K(zone_page_state(zone, NR_MLOCK)),
+ K(zone_page_state(zone, NR_FILE_DIRTY)),
+ K(zone_page_state(zone, NR_WRITEBACK)),
+ K(zone_page_state(zone, NR_FILE_MAPPED)),
+ K(zone_page_state(zone, NR_SHMEM)),
+ K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
+ K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
+ zone_page_state(zone, NR_KERNEL_STACK) *
+ THREAD_SIZE / 1024,
+ K(zone_page_state(zone, NR_PAGETABLE)),
+ K(zone_page_state(zone, NR_UNSTABLE_NFS)),
+ K(zone_page_state(zone, NR_BOUNCE)),
+ K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
(zone_is_all_unreclaimable(zone) ? "yes" : "no")
);
@@ -2422,18 +2528,19 @@ early_param("numa_zonelist_order", setup_numa_zonelist_order);
* sysctl handler for numa_zonelist_order
*/
int numa_zonelist_order_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length,
+ void __user *buffer, size_t *length,
loff_t *ppos)
{
char saved_string[NUMA_ZONELIST_ORDER_LEN];
int ret;
+ static DEFINE_MUTEX(zl_order_mutex);
+ mutex_lock(&zl_order_mutex);
if (write)
- strncpy(saved_string, (char*)table->data,
- NUMA_ZONELIST_ORDER_LEN);
- ret = proc_dostring(table, write, file, buffer, length, ppos);
+ strcpy(saved_string, (char*)table->data);
+ ret = proc_dostring(table, write, buffer, length, ppos);
if (ret)
- return ret;
+ goto out;
if (write) {
int oldval = user_zonelist_order;
if (__parse_numa_zonelist_order((char*)table->data)) {
@@ -2446,7 +2553,9 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
} else if (oldval != user_zonelist_order)
build_all_zonelists();
}
- return 0;
+out:
+ mutex_unlock(&zl_order_mutex);
+ return ret;
}
@@ -2900,7 +3009,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
{
unsigned long start_pfn, pfn, end_pfn;
struct page *page;
- unsigned long reserve, block_migratetype;
+ unsigned long block_migratetype;
+ int reserve;
/* Get the start pfn, end pfn and the number of blocks to reserve */
start_pfn = zone->zone_start_pfn;
@@ -2908,6 +3018,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
+ /*
+ * Reserve blocks are generally in place to help high-order atomic
+ * allocations that are short-lived. A min_free_kbytes value that
+ * would result in more than 2 reserve blocks for atomic allocations
+ * is assumed to be in place to help anti-fragmentation for the
+ * future allocation of hugepages at runtime.
+ */
+ reserve = min(2, reserve);
+
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
if (!pfn_valid(pfn))
continue;
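
With the clamp above, the number of reserved pageblocks is derived from the zone's min watermark and then capped at two. A toy calculation with assumed values (4K pages, order-10 pageblocks, a hypothetical watermark), not taken from the patch:

#include <stdio.h>

#define PAGEBLOCK_ORDER		10
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	unsigned long min_wmark_pages = 16384;	/* hypothetical watermark */
	int reserve;

	reserve = roundup_ul(min_wmark_pages, PAGEBLOCK_NR_PAGES) >>
			PAGEBLOCK_ORDER;
	/* as in the patch: more than two reserved blocks only helps
	 * anti-fragmentation, not short-lived atomic allocations */
	if (reserve > 2)
		reserve = 2;
	printf("reserve blocks: %d\n", reserve);	/* prints 2 here */
	return 0;
}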
@@ -3078,6 +3197,7 @@ static int zone_batchsize(struct zone *zone)
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
+ int migratetype;
memset(p, 0, sizeof(*p));
@@ -3085,7 +3205,8 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
pcp->count = 0;
pcp->high = 6 * batch;
pcp->batch = max(1UL, 1 * batch);
- INIT_LIST_HEAD(&pcp->list);
+ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+ INIT_LIST_HEAD(&pcp->lists[migratetype]);
}
/*
@@ -3147,7 +3268,7 @@ static int __cpuinit process_zones(int cpu)
if (percpu_pagelist_fraction)
setup_pagelist_highmark(zone_pcp(zone, cpu),
- (zone->present_pages / percpu_pagelist_fraction));
+ (zone->present_pages / percpu_pagelist_fraction));
}
return 0;
@@ -3279,6 +3400,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
return 0;
}
+static int __zone_pcp_update(void *data)
+{
+ struct zone *zone = data;
+ int cpu;
+ unsigned long batch = zone_batchsize(zone), flags;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
+
+ pset = zone_pcp(zone, cpu);
+ pcp = &pset->pcp;
+
+ local_irq_save(flags);
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ setup_pageset(pset, batch);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+ stop_machine(__zone_pcp_update, zone, NULL);
+}
+
static __meminit void zone_pcp_init(struct zone *zone)
{
int cpu;
@@ -3583,7 +3730,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-static unsigned long __meminit __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -3853,7 +4000,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone_pcp_init(zone);
for_each_lru(l) {
INIT_LIST_HEAD(&zone->lru[l].list);
- zone->lru[l].nr_saved_scan = 0;
+ zone->reclaim_stat.nr_saved_scan[l] = 0;
}
zone->reclaim_stat.recent_rotated[0] = 0;
zone->reclaim_stat.recent_rotated[1] = 0;
@@ -3998,7 +4145,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
}
/* Merge backward if suitable */
- if (start_pfn < early_node_map[i].end_pfn &&
+ if (start_pfn < early_node_map[i].start_pfn &&
end_pfn >= early_node_map[i].start_pfn) {
early_node_map[i].start_pfn = start_pfn;
return;
@@ -4112,7 +4259,7 @@ static int __init cmp_node_active_region(const void *a, const void *b)
}
/* sort the node_map by start_pfn */
-static void __init sort_node_map(void)
+void __init sort_node_map(void)
{
sort(early_node_map, (size_t)nr_nodemap_entries,
sizeof(struct node_active_region),
@@ -4642,7 +4789,7 @@ void setup_per_zone_wmarks(void)
calculate_totalreserve_pages();
}
-/**
+/*
* The inactive anon list should be small enough that the VM never has to
* do too much work, but large enough that each inactive page has a chance
* to be referenced again before it is swapped out.
@@ -4733,9 +4880,9 @@ module_init(init_per_zone_wmark_min)
* changes.
*/
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, buffer, length, ppos);
if (write)
setup_per_zone_wmarks();
return 0;
@@ -4743,12 +4890,12 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int rc;
- rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
@@ -4759,12 +4906,12 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
}
int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int rc;
- rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
@@ -4785,9 +4932,9 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
* if in function of the boot time zone sizes.
*/
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ proc_dointvec_minmax(table, write, buffer, length, ppos);
setup_per_zone_lowmem_reserve();
return 0;
}
@@ -4799,13 +4946,13 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
*/
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
unsigned int cpu;
int ret;
- ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (!write || (ret == -EINVAL))
return ret;
for_each_populated_zone(zone) {
@@ -4865,7 +5012,14 @@ void *__init alloc_large_system_hash(const char *tablename,
numentries <<= (PAGE_SHIFT - scale);
/* Make sure we've got at least a 0-order allocation.. */
- if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+ if (unlikely(flags & HASH_SMALL)) {
+ /* Makes no sense without HASH_EARLY */
+ WARN_ON(!(flags & HASH_EARLY));
+ if (!(numentries >> *_hash_shift)) {
+ numentries = 1UL << *_hash_shift;
+ BUG_ON(!numentries);
+ }
+ } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
numentries = PAGE_SIZE / bucketsize;
}
numentries = roundup_pow_of_two(numentries);
@@ -5005,20 +5159,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
int set_migratetype_isolate(struct page *page)
{
struct zone *zone;
- unsigned long flags;
+ struct page *curr_page;
+ unsigned long flags, pfn, iter;
+ unsigned long immobile = 0;
+ struct memory_isolate_notify arg;
+ int notifier_ret;
int ret = -EBUSY;
+ int zone_idx;
zone = page_zone(page);
+ zone_idx = zone_idx(zone);
+
spin_lock_irqsave(&zone->lock, flags);
+ if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
+ zone_idx == ZONE_MOVABLE) {
+ ret = 0;
+ goto out;
+ }
+
+ pfn = page_to_pfn(page);
+ arg.start_pfn = pfn;
+ arg.nr_pages = pageblock_nr_pages;
+ arg.pages_found = 0;
+
/*
- * In future, more migrate types will be able to be isolation target.
+ * It may be possible to isolate a pageblock even if the
+ * migratetype is not MIGRATE_MOVABLE. The memory isolation
+ * notifier chain is used by balloon drivers to return the
+ * number of pages in a range that are held by the balloon
+ * driver to shrink memory. If all the pages are accounted for
+ * by balloons, are free, or on the LRU, isolation can continue.
+	 * Later, for example, when the memory hotplug notifier runs, these
+	 * pages reported as "can be isolated" should be isolated (freed)
+ * by the balloon driver through the memory notifier chain.
*/
- if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+ notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+ notifier_ret = notifier_to_errno(notifier_ret);
+ if (notifier_ret || !arg.pages_found)
goto out;
- set_pageblock_migratetype(page, MIGRATE_ISOLATE);
- move_freepages_block(zone, page, MIGRATE_ISOLATE);
- ret = 0;
+
+ for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ curr_page = pfn_to_page(iter);
+ if (!page_count(curr_page) || PageLRU(curr_page))
+ continue;
+
+ immobile++;
+ }
+
+ if (arg.pages_found == immobile)
+ ret = 0;
+
out:
+ if (!ret) {
+ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+ move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ }
+
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
drain_all_pages();
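
The isolation decision now compares the notifier's pages_found against the count of pages in the block that are neither free nor on the LRU. A userspace model of that comparison, with an invented page array (illustrative only, not the kernel data structures):

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	int count;	/* stand-in for page_count() */
	bool on_lru;	/* stand-in for PageLRU() */
};

static bool can_isolate(const struct toy_page *blk, int nr_pages,
			long pages_found_by_notifier)
{
	long immobile = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (!blk[i].count || blk[i].on_lru)
			continue;	/* free or reclaimable: not a blocker */
		immobile++;
	}
	/* isolate only if the notifier (e.g. a balloon driver) claims
	 * every remaining pinned page */
	return immobile == pages_found_by_notifier;
}

int main(void)
{
	struct toy_page block[4] = {
		{ .count = 0 }, { .count = 1, .on_lru = true },
		{ .count = 2 }, { .count = 1 },
	};
	printf("%d\n", can_isolate(block, 4, 2));	/* prints 1 */
	return 0;
}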
@@ -5085,3 +5284,24 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+bool is_free_buddy_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long flags;
+ int order;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
+ struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+ if (PageBuddy(page_head) && page_order(page_head) >= order)
+ break;
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ return order < MAX_ORDER;
+}
+#endif
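
The new helper only needs the PFN arithmetic above: for each order it steps back to the aligned head of the would-be buddy block and checks PageBuddy()/page_order() on that head. A standalone sketch of just the head computation, with an arbitrary example PFN (nothing here is part of the patch):

#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
	unsigned long pfn = 0x12345;	/* example page frame number */
	int order;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long head_pfn = pfn & ~((1UL << order) - 1);

		printf("order %2d: head pfn 0x%lx\n", order, head_pfn);
		/* the real loop stops when PageBuddy(head) &&
		 * page_order(head) >= order */
	}
	return 0;
}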
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 835674efb23b..54facf0a0aa3 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -117,10 +117,16 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
VM_BUG_ON(!slab_is_available());
- base = kmalloc_node(table_size,
+ if (node_state(nid, N_HIGH_MEMORY)) {
+ base = kmalloc_node(table_size,
GFP_KERNEL | __GFP_NOWARN, nid);
- if (!base)
- base = vmalloc_node(table_size, nid);
+ if (!base)
+ base = vmalloc_node(table_size, nid);
+ } else {
+ base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
+ if (!base)
+ base = vmalloc(table_size);
+ }
} else {
/*
* We don't have to allocate page_cgroup again, but

diff --git a/mm/page_io.c b/mm/page_io.c
index c6f3e5071de3..a19af956ee1b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,20 +19,15 @@
#include <linux/writeback.h>
#include <asm/pgtable.h>
-static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags,
struct page *page, bio_end_io_t end_io)
{
struct bio *bio;
bio = bio_alloc(gfp_flags, 1);
if (bio) {
- struct swap_info_struct *sis;
- swp_entry_t entry = { .val = index, };
-
- sis = get_swap_info_struct(swp_type(entry));
- bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
- (PAGE_SIZE >> 9);
- bio->bi_bdev = sis->bdev;
+ bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
+ bio->bi_sector <<= PAGE_SHIFT - 9;
bio->bi_io_vec[0].bv_page = page;
bio->bi_io_vec[0].bv_len = PAGE_SIZE;
bio->bi_io_vec[0].bv_offset = 0;
@@ -102,8 +97,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
unlock_page(page);
goto out;
}
- bio = get_swap_bio(GFP_NOIO, page_private(page), page,
- end_swap_bio_write);
+ bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
if (bio == NULL) {
set_page_dirty(page);
unlock_page(page);
@@ -127,8 +121,7 @@ int swap_readpage(struct page *page)
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageUptodate(page));
- bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
- end_swap_bio_read);
+ bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
if (bio == NULL) {
unlock_page(page);
ret = -ENOMEM;
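
get_swap_bio() no longer looks up the swap_info_struct itself; map_swap_page() fills in the block device and returns a page-granular offset, which the caller converts to 512-byte sectors. The conversion, with an assumed 4K page size (values invented):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages */

int main(void)
{
	unsigned long long page_offset = 42;	/* hypothetical swap slot */
	unsigned long long sector = page_offset << (PAGE_SHIFT - 9);

	printf("page %llu -> sector %llu\n", page_offset, sector);
	return 0;
}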
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index d5878bed7841..7b47a57b6646 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1,6 +1,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
+#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -107,6 +108,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
pgd_t *pgd;
unsigned long next;
int err = 0;
+ struct vm_area_struct *vma;
if (addr >= end)
return err;
@@ -117,11 +119,38 @@ int walk_page_range(unsigned long addr, unsigned long end,
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
+
+ /*
+ * handle hugetlb vma individually because pagetable walk for
+ * the hugetlb page is dependent on the architecture and
+ * we can't handled it in the same manner as non-huge pages.
+ */
+ vma = find_vma(walk->mm, addr);
+#ifdef CONFIG_HUGETLB_PAGE
+ if (vma && is_vm_hugetlb_page(vma)) {
+ pte_t *pte;
+ struct hstate *hs;
+
+ if (vma->vm_end < next)
+ next = vma->vm_end;
+ hs = hstate_vma(vma);
+ pte = huge_pte_offset(walk->mm,
+ addr & huge_page_mask(hs));
+ if (pte && !huge_pte_none(huge_ptep_get(pte))
+ && walk->hugetlb_entry)
+ err = walk->hugetlb_entry(pte, addr,
+ next, walk);
+ if (err)
+ break;
+ continue;
+ }
+#endif
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
+ pgd++;
continue;
}
if (walk->pgd_entry)
@@ -131,7 +160,8 @@ int walk_page_range(unsigned long addr, unsigned long end,
err = walk_pud_range(pgd, addr, next, walk);
if (err)
break;
- } while (pgd++, addr = next, addr != end);
+ pgd++;
+ } while (addr = next, addr != end);
return err;
}
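
walk_page_range() now looks up the covering VMA first and, for hugetlb mappings, clamps the step to vm_end and calls the new hugetlb_entry callback instead of descending the page tables. A userspace sketch of that dispatch, with a toy VMA table (names and layout invented for illustration):

#include <stdio.h>
#include <stdbool.h>

struct toy_vma { unsigned long start, end; bool huge; };

static const struct toy_vma vmas[] = {
	{ 0x0000, 0x4000, false },
	{ 0x4000, 0x8000, true },	/* hugetlb-like region */
};

static const struct toy_vma *find_vma(unsigned long addr)
{
	for (unsigned i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
		if (addr >= vmas[i].start && addr < vmas[i].end)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	unsigned long addr = 0x0000, end = 0x8000, step = 0x1000;

	while (addr < end) {
		unsigned long next = addr + step;
		const struct toy_vma *vma = find_vma(addr);

		if (vma && vma->huge) {
			if (vma->end < next)
				next = vma->end;	/* clamp to vm_end */
			printf("huge handler: %#lx-%#lx\n", addr, next);
		} else {
			printf("normal walk:  %#lx-%#lx\n", addr, next);
		}
		addr = next;
	}
	return 0;
}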
diff --git a/mm/pdflush.c b/mm/pdflush.c
deleted file mode 100644
index 235ac440c44e..000000000000
--- a/mm/pdflush.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * mm/pdflush.c - worker threads for writing back filesystem data
- *
- * Copyright (C) 2002, Linus Torvalds.
- *
- * 09Apr2002 Andrew Morton
- * Initial version
- * 29Feb2004 kaos@sgi.com
- * Move worker thread creation to kthread to avoid chewing
- * up stack space with nested calls to kernel_thread.
- */
-
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/signal.h>
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h> /* Needed by writeback.h */
-#include <linux/writeback.h> /* Prototypes pdflush_operation() */
-#include <linux/kthread.h>
-#include <linux/cpuset.h>
-#include <linux/freezer.h>
-
-
-/*
- * Minimum and maximum number of pdflush instances
- */
-#define MIN_PDFLUSH_THREADS 2
-#define MAX_PDFLUSH_THREADS 8
-
-static void start_one_pdflush_thread(void);
-
-
-/*
- * The pdflush threads are worker threads for writing back dirty data.
- * Ideally, we'd like one thread per active disk spindle. But the disk
- * topology is very hard to divine at this level. Instead, we take
- * care in various places to prevent more than one pdflush thread from
- * performing writeback against a single filesystem. pdflush threads
- * have the PF_FLUSHER flag set in current->flags to aid in this.
- */
-
-/*
- * All the pdflush threads. Protected by pdflush_lock
- */
-static LIST_HEAD(pdflush_list);
-static DEFINE_SPINLOCK(pdflush_lock);
-
-/*
- * The count of currently-running pdflush threads. Protected
- * by pdflush_lock.
- *
- * Readable by sysctl, but not writable. Published to userspace at
- * /proc/sys/vm/nr_pdflush_threads.
- */
-int nr_pdflush_threads = 0;
-
-/*
- * The time at which the pdflush thread pool last went empty
- */
-static unsigned long last_empty_jifs;
-
-/*
- * The pdflush thread.
- *
- * Thread pool management algorithm:
- *
- * - The minimum and maximum number of pdflush instances are bound
- * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
- *
- * - If there have been no idle pdflush instances for 1 second, create
- * a new one.
- *
- * - If the least-recently-went-to-sleep pdflush thread has been asleep
- * for more than one second, terminate a thread.
- */
-
-/*
- * A structure for passing work to a pdflush thread. Also for passing
- * state information between pdflush threads. Protected by pdflush_lock.
- */
-struct pdflush_work {
- struct task_struct *who; /* The thread */
- void (*fn)(unsigned long); /* A callback function */
- unsigned long arg0; /* An argument to the callback */
- struct list_head list; /* On pdflush_list, when idle */
- unsigned long when_i_went_to_sleep;
-};
-
-static int __pdflush(struct pdflush_work *my_work)
-{
- current->flags |= PF_FLUSHER | PF_SWAPWRITE;
- set_freezable();
- my_work->fn = NULL;
- my_work->who = current;
- INIT_LIST_HEAD(&my_work->list);
-
- spin_lock_irq(&pdflush_lock);
- for ( ; ; ) {
- struct pdflush_work *pdf;
-
- set_current_state(TASK_INTERRUPTIBLE);
- list_move(&my_work->list, &pdflush_list);
- my_work->when_i_went_to_sleep = jiffies;
- spin_unlock_irq(&pdflush_lock);
- schedule();
- try_to_freeze();
- spin_lock_irq(&pdflush_lock);
- if (!list_empty(&my_work->list)) {
- /*
- * Someone woke us up, but without removing our control
- * structure from the global list. swsusp will do this
- * in try_to_freeze()->refrigerator(). Handle it.
- */
- my_work->fn = NULL;
- continue;
- }
- if (my_work->fn == NULL) {
- printk("pdflush: bogus wakeup\n");
- continue;
- }
- spin_unlock_irq(&pdflush_lock);
-
- (*my_work->fn)(my_work->arg0);
-
- spin_lock_irq(&pdflush_lock);
-
- /*
- * Thread creation: For how long have there been zero
- * available threads?
- *
- * To throttle creation, we reset last_empty_jifs.
- */
- if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
- if (list_empty(&pdflush_list)) {
- if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
- last_empty_jifs = jiffies;
- nr_pdflush_threads++;
- spin_unlock_irq(&pdflush_lock);
- start_one_pdflush_thread();
- spin_lock_irq(&pdflush_lock);
- }
- }
- }
-
- my_work->fn = NULL;
-
- /*
- * Thread destruction: For how long has the sleepiest
- * thread slept?
- */
- if (list_empty(&pdflush_list))
- continue;
- if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
- continue;
- pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
- if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
- /* Limit exit rate */
- pdf->when_i_went_to_sleep = jiffies;
- break; /* exeunt */
- }
- }
- nr_pdflush_threads--;
- spin_unlock_irq(&pdflush_lock);
- return 0;
-}
-
-/*
- * Of course, my_work wants to be just a local in __pdflush(). It is
- * separated out in this manner to hopefully prevent the compiler from
- * performing unfortunate optimisations against the auto variables. Because
- * these are visible to other tasks and CPUs. (No problem has actually
- * been observed. This is just paranoia).
- */
-static int pdflush(void *dummy)
-{
- struct pdflush_work my_work;
- cpumask_var_t cpus_allowed;
-
- /*
- * Since the caller doesn't even check kthread_run() worked, let's not
- * freak out too much if this fails.
- */
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
- printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
- return 0;
- }
-
- /*
- * pdflush can spend a lot of time doing encryption via dm-crypt. We
- * don't want to do that at keventd's priority.
- */
- set_user_nice(current, 0);
-
- /*
- * Some configs put our parent kthread in a limited cpuset,
- * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
- * Our needs are more modest - cut back to our cpusets cpus_allowed.
- * This is needed as pdflush's are dynamically created and destroyed.
- * The boottime pdflush's are easily placed w/o these 2 lines.
- */
- cpuset_cpus_allowed(current, cpus_allowed);
- set_cpus_allowed_ptr(current, cpus_allowed);
- free_cpumask_var(cpus_allowed);
-
- return __pdflush(&my_work);
-}
-
-/*
- * Attempt to wake up a pdflush thread, and get it to do some work for you.
- * Returns zero if it indeed managed to find a worker thread, and passed your
- * payload to it.
- */
-int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
-{
- unsigned long flags;
- int ret = 0;
-
- BUG_ON(fn == NULL); /* Hard to diagnose if it's deferred */
-
- spin_lock_irqsave(&pdflush_lock, flags);
- if (list_empty(&pdflush_list)) {
- ret = -1;
- } else {
- struct pdflush_work *pdf;
-
- pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
- list_del_init(&pdf->list);
- if (list_empty(&pdflush_list))
- last_empty_jifs = jiffies;
- pdf->fn = fn;
- pdf->arg0 = arg0;
- wake_up_process(pdf->who);
- }
- spin_unlock_irqrestore(&pdflush_lock, flags);
-
- return ret;
-}
-
-static void start_one_pdflush_thread(void)
-{
- struct task_struct *k;
-
- k = kthread_run(pdflush, NULL, "pdflush");
- if (unlikely(IS_ERR(k))) {
- spin_lock_irq(&pdflush_lock);
- nr_pdflush_threads--;
- spin_unlock_irq(&pdflush_lock);
- }
-}
-
-static int __init pdflush_init(void)
-{
- int i;
-
- /*
- * Pre-set nr_pdflush_threads... If we fail to create,
- * the count will be decremented.
- */
- nr_pdflush_threads = MIN_PDFLUSH_THREADS;
-
- for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
- start_one_pdflush_thread();
- return 0;
-}
-
-module_init(pdflush_init);
diff --git a/mm/percpu.c b/mm/percpu.c
index 3311c8919f37..083e7c91e5f6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -8,12 +8,13 @@
*
* This is percpu allocator which can handle both static and dynamic
* areas. Percpu areas are allocated in chunks in vmalloc area. Each
- * chunk is consisted of nr_cpu_ids units and the first chunk is used
- * for static percpu variables in the kernel image (special boot time
- * alloc/init handling necessary as these areas need to be brought up
- * before allocation services are running). Unit grows as necessary
- * and all units grow or shrink in unison. When a chunk is filled up,
- * another chunk is allocated. ie. in vmalloc area
+ * chunk consists of a boot-time determined number of units and the
+ * first chunk is used for static percpu variables in the kernel image
+ * (special boot time alloc/init handling necessary as these areas
+ * need to be brought up before allocation services are running).
+ * Unit grows as necessary and all units grow or shrink in unison.
+ * When a chunk is filled up, another chunk is allocated, i.e. in the
+ * vmalloc area.
*
* c0 c1 c2
* ------------------- ------------------- ------------
@@ -22,11 +23,13 @@
*
* Allocation is done in offset-size areas of single unit space. Ie,
* an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
- * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
- * percpu base registers pcpu_unit_size apart.
+ * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
+ * cpus. On NUMA, the mapping can be non-linear and even sparse.
+ * Percpu access can be done by configuring percpu base registers
+ * according to cpu to unit mapping and pcpu_unit_size.
*
- * There are usually many small percpu allocations many of them as
- * small as 4 bytes. The allocator organizes chunks into lists
+ * There are usually many small percpu allocations, many of them being
+ * as small as 4 bytes. The allocator organizes chunks into lists
* according to free size and tries to allocate from the fullest one.
* Each chunk keeps the maximum contiguous area size hint which is
* guaranteed to be eqaul to or larger than the maximum contiguous
@@ -43,8 +46,6 @@
*
* To use this allocator, arch code should do the followings.
*
- * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
- *
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
* regular address to percpu pointer and back if they need to be
* different from the default
@@ -55,7 +56,9 @@
#include <linux/bitmap.h>
#include <linux/bootmem.h>
+#include <linux/err.h>
#include <linux/list.h>
+#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -69,6 +72,7 @@
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
+#include <asm/io.h>
#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
@@ -89,25 +93,38 @@ struct pcpu_chunk {
struct list_head list; /* linked to pcpu_slot lists */
int free_size; /* free bytes in the chunk */
int contig_hint; /* max contiguous size hint */
- struct vm_struct *vm; /* mapped vmalloc region */
+ void *base_addr; /* base address of this chunk */
int map_used; /* # of map entries used */
int map_alloc; /* # of map entries allocated */
int *map; /* allocation map */
+ struct vm_struct **vms; /* mapped vmalloc regions */
bool immutable; /* no [de]population allowed */
- struct page **page; /* points to page array */
- struct page *page_ar[]; /* #cpus * UNIT_PAGES */
+ unsigned long populated[]; /* populated bitmap */
};
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
-static int pcpu_chunk_size __read_mostly;
+static int pcpu_nr_units __read_mostly;
+static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;
+/* cpus with the lowest and highest unit numbers */
+static unsigned int pcpu_first_unit_cpu __read_mostly;
+static unsigned int pcpu_last_unit_cpu __read_mostly;
+
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);
+static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
+
+/* group information, used for vm allocation */
+static int pcpu_nr_groups __read_mostly;
+static const unsigned long *pcpu_group_offsets __read_mostly;
+static const size_t *pcpu_group_sizes __read_mostly;
+
/*
* The first chunk which always exists. Note that unlike other
* chunks, this one can be allocated and mapped in several different
@@ -129,13 +146,16 @@ static int pcpu_reserved_chunk_limit;
* Synchronization rules.
*
* There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
- * protects allocation/reclaim paths, chunks and chunk->page arrays.
- * The latter is a spinlock and protects the index data structures -
- * chunk slots, chunks and area maps in chunks.
+ * protects allocation/reclaim paths, chunks, populated bitmap and
+ * vmalloc mapping. The latter is a spinlock and protects the index
+ * data structures - chunk slots, chunks and area maps in chunks.
*
* During allocation, pcpu_alloc_mutex is kept locked all the time and
* pcpu_lock is grabbed and released as necessary. All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released. In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
*
* Free path accesses and alters only the index data structures, so it
* can be safely called from atomic context. When memory needs to be
@@ -178,31 +198,23 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
- return cpu * pcpu_unit_pages + page_idx;
-}
-
-static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
- unsigned int cpu, int page_idx)
-{
- return &chunk->page[pcpu_page_idx(cpu, page_idx)];
+ return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
unsigned int cpu, int page_idx)
{
- return (unsigned long)chunk->vm->addr +
- (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+ return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
+ (page_idx << PAGE_SHIFT);
}
-static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
- int page_idx)
+static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
+ unsigned int cpu, int page_idx)
{
- /*
- * Any possible cpu id can be used here, so there's no need to
- * worry about preemption or cpu hotplug.
- */
- return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
- page_idx) != NULL;
+ /* must not be used on pre-mapped chunk */
+ WARN_ON(chunk->immutable);
+
+ return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
/* set the pointer to a chunk in a page struct */
@@ -217,6 +229,34 @@ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
return (struct pcpu_chunk *)page->index;
}
+static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+{
+ *rs = find_next_zero_bit(chunk->populated, end, *rs);
+ *re = find_next_bit(chunk->populated, end, *rs + 1);
+}
+
+static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+{
+ *rs = find_next_bit(chunk->populated, end, *rs);
+ *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
+}
+
+/*
+ * (Un)populated page region iterators. Iterate over (un)populated
+ * page regions between @start and @end in @chunk. @rs and @re should
+ * be integer variables and will be set to start and end page index of
+ * the current region.
+ */
+#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
+ for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
+ (rs) < (re); \
+ (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
+
+#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
+ for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
+ (rs) < (re); \
+ (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
+
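
The two macros iterate over runs of clear or set bits in chunk->populated. A userspace model of the same scan, with a hand-rolled stand-in for find_next_bit()/find_next_zero_bit() and a made-up bitmap, may help when reading the loops that use them below:

#include <stdio.h>

/* return first bit index >= start whose value equals want_set, or size */
static int next_bit(unsigned long map, int size, int start, int want_set)
{
	for (int i = start; i < size; i++)
		if (((map >> i) & 1UL) == (unsigned long)want_set)
			return i;
	return size;
}

int main(void)
{
	unsigned long populated = 0xdcUL;	/* bits 2-4 and 6-7 set */
	int size = 10, rs, re;

	/* same shape as pcpu_for_each_unpop_region(chunk, rs, re, 0, size) */
	for (rs = next_bit(populated, size, 0, 0),
	     re = next_bit(populated, size, rs + 1, 1);
	     rs < re;
	     rs = next_bit(populated, size, re + 1, 0),
	     re = next_bit(populated, size, rs + 1, 1))
		printf("unpopulated pages [%d, %d)\n", rs, re);
	/* prints [0, 2), [5, 6), [8, 10) */
	return 0;
}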
/**
* pcpu_mem_alloc - allocate memory
* @size: bytes to allocate
@@ -292,10 +332,10 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
*/
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
- void *first_start = pcpu_first_chunk->vm->addr;
+ void *first_start = pcpu_first_chunk->base_addr;
/* is it in the first chunk? */
- if (addr >= first_start && addr < first_start + pcpu_chunk_size) {
+ if (addr >= first_start && addr < first_start + pcpu_unit_size) {
/* is it in the reserved area? */
if (addr < first_start + pcpu_reserved_chunk_limit)
return pcpu_reserved_chunk;
@@ -309,67 +349,91 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
* space. Note that any possible cpu id can be used here, so
* there's no need to worry about preemption or cpu hotplug.
*/
- addr += raw_smp_processor_id() * pcpu_unit_size;
+ addr += pcpu_unit_offsets[raw_smp_processor_id()];
return pcpu_get_page_chunk(vmalloc_to_page(addr));
}
/**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
*
- * Extend area map of @chunk so that it can accomodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accommodate a new allocation.
*
* CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
*
* RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
*/
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
int new_alloc;
- int *new;
- size_t size;
- /* has enough? */
if (chunk->map_alloc >= chunk->map_used + 2)
return 0;
- spin_unlock_irq(&pcpu_lock);
-
new_alloc = PCPU_DFL_MAP_ALLOC;
while (new_alloc < chunk->map_used + 2)
new_alloc *= 2;
- new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
- if (!new) {
- spin_lock_irq(&pcpu_lock);
+ return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+ int *old = NULL, *new = NULL;
+ size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+ unsigned long flags;
+
+ new = pcpu_mem_alloc(new_size);
+ if (!new)
return -ENOMEM;
- }
- /*
- * Acquire pcpu_lock and switch to new area map. Only free
- * could have happened inbetween, so map_used couldn't have
- * grown.
- */
- spin_lock_irq(&pcpu_lock);
- BUG_ON(new_alloc < chunk->map_used + 2);
+ /* acquire pcpu_lock and switch to new area map */
+ spin_lock_irqsave(&pcpu_lock, flags);
+
+ if (new_alloc <= chunk->map_alloc)
+ goto out_unlock;
- size = chunk->map_alloc * sizeof(chunk->map[0]);
- memcpy(new, chunk->map, size);
+ old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+ memcpy(new, chunk->map, old_size);
/*
* map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
* one of the first chunks and still using static map.
*/
if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
- pcpu_mem_free(chunk->map, size);
+ old = chunk->map;
chunk->map_alloc = new_alloc;
chunk->map = new;
+ new = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+
+ /*
+ * pcpu_mem_free() might end up calling vfree() which uses
+ * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+ */
+ pcpu_mem_free(old, old_size);
+ pcpu_mem_free(new, new_size);
+
return 0;
}
@@ -558,125 +622,327 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
}
/**
- * pcpu_unmap - unmap pages out of a pcpu_chunk
+ * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
* @chunk: chunk of interest
- * @page_start: page index of the first page to unmap
- * @page_end: page index of the last page to unmap + 1
- * @flush_tlb: whether to flush tlb or not
+ * @bitmapp: output parameter for bitmap
+ * @may_alloc: may allocate the array
*
- * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
- * If @flush is true, vcache is flushed before unmapping and tlb
- * after.
+ * Returns pointer to array of pointers to struct page and bitmap,
+ * both of which can be indexed with pcpu_page_idx(). The returned
+ * array is cleared to zero and *@bitmapp is copied from
+ * @chunk->populated. Note that there is only one array and bitmap
+ * and access exclusion is the caller's responsibility.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
+ * Otherwise, don't care.
+ *
+ * RETURNS:
+ * Pointer to temp pages array on success, NULL on failure.
*/
-static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
- bool flush_tlb)
+static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
+ unsigned long **bitmapp,
+ bool may_alloc)
{
- unsigned int last = nr_cpu_ids - 1;
- unsigned int cpu;
+ static struct page **pages;
+ static unsigned long *bitmap;
+ size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
+ size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
+ sizeof(unsigned long);
+
+ if (!pages || !bitmap) {
+ if (may_alloc && !pages)
+ pages = pcpu_mem_alloc(pages_size);
+ if (may_alloc && !bitmap)
+ bitmap = pcpu_mem_alloc(bitmap_size);
+ if (!pages || !bitmap)
+ return NULL;
+ }
- /* unmap must not be done on immutable chunk */
- WARN_ON(chunk->immutable);
+ memset(pages, 0, pages_size);
+ bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
- /*
- * Each flushing trial can be very expensive, issue flush on
- * the whole region at once rather than doing it for each cpu.
- * This could be an overkill but is more scalable.
- */
- flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
+ *bitmapp = bitmap;
+ return pages;
+}
- for_each_possible_cpu(cpu)
- unmap_kernel_range_noflush(
- pcpu_chunk_addr(chunk, cpu, page_start),
- (page_end - page_start) << PAGE_SHIFT);
-
- /* ditto as flush_cache_vunmap() */
- if (flush_tlb)
- flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
+/**
+ * pcpu_free_pages - free pages which were allocated for @chunk
+ * @chunk: chunk pages were allocated for
+ * @pages: array of pages to be freed, indexed by pcpu_page_idx()
+ * @populated: populated bitmap
+ * @page_start: page index of the first page to be freed
+ * @page_end: page index of the last page to be freed + 1
+ *
+ * Free pages [@page_start and @page_end) in @pages for all units.
+ * The pages were allocated for @chunk.
+ */
+static void pcpu_free_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
+{
+ unsigned int cpu;
+ int i;
+
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page *page = pages[pcpu_page_idx(cpu, i)];
+
+ if (page)
+ __free_page(page);
+ }
+ }
}
/**
- * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
- * @chunk: chunk to depopulate
- * @off: offset to the area to depopulate
- * @size: size of the area to depopulate in bytes
- * @flush: whether to flush cache and tlb or not
- *
- * For each cpu, depopulate and unmap pages [@page_start,@page_end)
- * from @chunk. If @flush is true, vcache is flushed before unmapping
- * and tlb after.
- *
- * CONTEXT:
- * pcpu_alloc_mutex.
+ * pcpu_alloc_pages - allocates pages for @chunk
+ * @chunk: target chunk
+ * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
+ * @populated: populated bitmap
+ * @page_start: page index of the first page to be allocated
+ * @page_end: page index of the last page to be allocated + 1
+ *
+ * Allocate pages [@page_start,@page_end) into @pages for all units.
+ * The allocation is for @chunk. Percpu core doesn't care about the
+ * content of @pages and will pass it verbatim to pcpu_map_pages().
*/
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
- bool flush)
+static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
{
- int page_start = PFN_DOWN(off);
- int page_end = PFN_UP(off + size);
- int unmap_start = -1;
- int uninitialized_var(unmap_end);
+ const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
unsigned int cpu;
int i;
- for (i = page_start; i < page_end; i++) {
- for_each_possible_cpu(cpu) {
- struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
+
+ *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
+ if (!*pagep) {
+ pcpu_free_pages(chunk, pages, populated,
+ page_start, page_end);
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
- if (!*pagep)
- continue;
+/**
+ * pcpu_pre_unmap_flush - flush cache prior to unmapping
+ * @chunk: chunk the regions to be flushed belongs to
+ * @page_start: page index of the first page to be flushed
+ * @page_end: page index of the last page to be flushed + 1
+ *
+ * Pages in [@page_start,@page_end) of @chunk are about to be
+ * unmapped. Flush cache. As each flushing trial can be very
+ * expensive, issue flush on the whole region at once rather than
+ * doing it for each cpu. This could be overkill but is more
+ * scalable.
+ */
+static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ flush_cache_vunmap(
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+}
- __free_page(*pagep);
+static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
+{
+ unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
+}
+
+/**
+ * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @pages: pages array which can be used to pass information to free
+ * @populated: populated bitmap
+ * @page_start: page index of the first page to unmap
+ * @page_end: page index of the last page to unmap + 1
+ *
+ * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
+ * Corresponding elements in @pages were cleared by the caller and can
+ * be used to carry information to pcpu_free_pages() which will be
+ * called after all unmaps are finished. The caller should call
+ * proper pre/post flush functions.
+ */
+static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
+{
+ unsigned int cpu;
+ int i;
- /*
- * If it's partial depopulation, it might get
- * populated or depopulated again. Mark the
- * page gone.
- */
- *pagep = NULL;
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page *page;
- unmap_start = unmap_start < 0 ? i : unmap_start;
- unmap_end = i + 1;
+ page = pcpu_chunk_page(chunk, cpu, i);
+ WARN_ON(!page);
+ pages[pcpu_page_idx(cpu, i)] = page;
}
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
+ page_end - page_start);
}
- if (unmap_start >= 0)
- pcpu_unmap(chunk, unmap_start, unmap_end, flush);
+ for (i = page_start; i < page_end; i++)
+ __clear_bit(i, populated);
}
/**
- * pcpu_map - map pages into a pcpu_chunk
+ * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
+ * @chunk: pcpu_chunk the regions to be flushed belong to
+ * @page_start: page index of the first page to be flushed
+ * @page_end: page index of the last page to be flushed + 1
+ *
+ * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
+ * TLB for the regions. This can be skipped if the area is to be
+ * returned to vmalloc as vmalloc will handle TLB flushing lazily.
+ *
+ * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
+ * for the whole region.
+ */
+static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ flush_tlb_kernel_range(
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+}
+
+static int __pcpu_map_pages(unsigned long addr, struct page **pages,
+ int nr_pages)
+{
+ return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
+ PAGE_KERNEL, pages);
+}
+
+/**
+ * pcpu_map_pages - map pages into a pcpu_chunk
* @chunk: chunk of interest
+ * @pages: pages array containing pages to be mapped
+ * @populated: populated bitmap
* @page_start: page index of the first page to map
* @page_end: page index of the last page to map + 1
*
- * For each cpu, map pages [@page_start,@page_end) into @chunk.
- * vcache is flushed afterwards.
+ * For each cpu, map pages [@page_start,@page_end) into @chunk. The
+ * caller is responsible for calling pcpu_post_map_flush() after all
+ * mappings are complete.
+ *
+ * This function is responsible for setting corresponding bits in
+ * @chunk->populated bitmap and whatever is necessary for reverse
+ * lookup (addr -> chunk).
*/
-static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
+static int pcpu_map_pages(struct pcpu_chunk *chunk,
+ struct page **pages, unsigned long *populated,
+ int page_start, int page_end)
{
- unsigned int last = nr_cpu_ids - 1;
- unsigned int cpu;
- int err;
-
- /* map must not be done on immutable chunk */
- WARN_ON(chunk->immutable);
+ unsigned int cpu, tcpu;
+ int i, err;
for_each_possible_cpu(cpu) {
- err = map_kernel_range_noflush(
- pcpu_chunk_addr(chunk, cpu, page_start),
- (page_end - page_start) << PAGE_SHIFT,
- PAGE_KERNEL,
- pcpu_chunk_pagep(chunk, cpu, page_start));
+ err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
+ &pages[pcpu_page_idx(cpu, page_start)],
+ page_end - page_start);
if (err < 0)
- return err;
+ goto err;
+ }
+
+ /* mapping successful, link chunk and mark populated */
+ for (i = page_start; i < page_end; i++) {
+ for_each_possible_cpu(cpu)
+ pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
+ chunk);
+ __set_bit(i, populated);
}
- /* flush at once, please read comments in pcpu_unmap() */
- flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
return 0;
+
+err:
+ for_each_possible_cpu(tcpu) {
+ if (tcpu == cpu)
+ break;
+ __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
+ page_end - page_start);
+ }
+ return err;
+}
+
+/**
+ * pcpu_post_map_flush - flush cache after mapping
+ * @chunk: pcpu_chunk the regions to be flushed belong to
+ * @page_start: page index of the first page to be flushed
+ * @page_end: page index of the last page to be flushed + 1
+ *
+ * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
+ * cache.
+ *
+ * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
+ * for the whole region.
+ */
+static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
+ int page_start, int page_end)
+{
+ flush_cache_vmap(
+ pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+}
+
+/**
+ * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
+ * @chunk: chunk to depopulate
+ * @off: offset to the area to depopulate
+ * @size: size of the area to depopulate in bytes
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, depopulate and unmap pages [@page_start,@page_end)
+ * from @chunk. If @flush is true, vcache is flushed before unmapping
+ * and tlb after.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex.
+ */
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
+{
+ int page_start = PFN_DOWN(off);
+ int page_end = PFN_UP(off + size);
+ struct page **pages;
+ unsigned long *populated;
+ int rs, re;
+
+ /* quick path, check whether it's empty already */
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ if (rs == page_start && re == page_end)
+ return;
+ break;
+ }
+
+ /* immutable chunks can't be depopulated */
+ WARN_ON(chunk->immutable);
+
+ /*
+ * If control reaches here, there must have been at least one
+ * successful population attempt so the temp pages array must
+ * be available now.
+ */
+ pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
+ BUG_ON(!pages);
+
+ /* unmap and free */
+ pcpu_pre_unmap_flush(chunk, page_start, page_end);
+
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
+ pcpu_unmap_pages(chunk, pages, populated, rs, re);
+
+ /* no need to flush tlb, vmalloc will handle it lazily */
+
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
+ pcpu_free_pages(chunk, pages, populated, rs, re);
+
+ /* commit new bitmap */
+ bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
/**
@@ -693,58 +959,68 @@ static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
*/
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
- const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
int page_start = PFN_DOWN(off);
int page_end = PFN_UP(off + size);
- int map_start = -1;
- int uninitialized_var(map_end);
+ int free_end = page_start, unmap_end = page_start;
+ struct page **pages;
+ unsigned long *populated;
unsigned int cpu;
- int i;
+ int rs, re, rc;
- for (i = page_start; i < page_end; i++) {
- if (pcpu_chunk_page_occupied(chunk, i)) {
- if (map_start >= 0) {
- if (pcpu_map(chunk, map_start, map_end))
- goto err;
- map_start = -1;
- }
- continue;
- }
+ /* quick path, check whether all pages are already there */
+ pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
+ if (rs == page_start && re == page_end)
+ goto clear;
+ break;
+ }
- map_start = map_start < 0 ? i : map_start;
- map_end = i + 1;
+ /* need to allocate and map pages, this chunk can't be immutable */
+ WARN_ON(chunk->immutable);
- for_each_possible_cpu(cpu) {
- struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+ pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
+ if (!pages)
+ return -ENOMEM;
- *pagep = alloc_pages_node(cpu_to_node(cpu),
- alloc_mask, 0);
- if (!*pagep)
- goto err;
- pcpu_set_page_chunk(*pagep, chunk);
- }
+ /* alloc and map */
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
+ if (rc)
+ goto err_free;
+ free_end = re;
}
- if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
- goto err;
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+ rc = pcpu_map_pages(chunk, pages, populated, rs, re);
+ if (rc)
+ goto err_unmap;
+ unmap_end = re;
+ }
+ pcpu_post_map_flush(chunk, page_start, page_end);
+ /* commit new bitmap */
+ bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
+clear:
for_each_possible_cpu(cpu)
- memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
- size);
-
+ memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
return 0;
-err:
- /* likely under heavy memory pressure, give memory back */
- pcpu_depopulate_chunk(chunk, off, size, true);
- return -ENOMEM;
+
+err_unmap:
+ pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
+ pcpu_unmap_pages(chunk, pages, populated, rs, re);
+ pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
+err_free:
+ pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
+ pcpu_free_pages(chunk, pages, populated, rs, re);
+ return rc;
}
static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
if (!chunk)
return;
- if (chunk->vm)
- free_vm_area(chunk->vm);
+ if (chunk->vms)
+ pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
kfree(chunk);
}
@@ -760,10 +1036,11 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
chunk->map[chunk->map_used++] = pcpu_unit_size;
- chunk->page = chunk->page_ar;
- chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
- if (!chunk->vm) {
+ chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
+ pcpu_nr_groups, pcpu_atom_size,
+ GFP_KERNEL);
+ if (!chunk->vms) {
free_pcpu_chunk(chunk);
return NULL;
}
@@ -771,6 +1048,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
INIT_LIST_HEAD(&chunk->list);
chunk->free_size = pcpu_unit_size;
chunk->contig_hint = pcpu_unit_size;
+ chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
return chunk;
}
@@ -791,8 +1069,11 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
*/
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
+ static int warn_limit = 10;
struct pcpu_chunk *chunk;
- int slot, off;
+ const char *err;
+ int slot, off, new_alloc;
+ unsigned long flags;
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -801,17 +1082,31 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
}
mutex_lock(&pcpu_alloc_mutex);
- spin_lock_irq(&pcpu_lock);
+ spin_lock_irqsave(&pcpu_lock, flags);
/* serve reserved allocations from the reserved chunk if available */
if (reserved && pcpu_reserved_chunk) {
chunk = pcpu_reserved_chunk;
- if (size > chunk->contig_hint ||
- pcpu_extend_area_map(chunk) < 0)
+
+ if (size > chunk->contig_hint) {
+ err = "alloc from reserved chunk failed";
goto fail_unlock;
+ }
+
+ while ((new_alloc = pcpu_need_to_extend(chunk))) {
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+ if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+ err = "failed to extend area map of reserved chunk";
+ goto fail_unlock_mutex;
+ }
+ spin_lock_irqsave(&pcpu_lock, flags);
+ }
+
off = pcpu_alloc_area(chunk, size, align);
if (off >= 0)
goto area_found;
+
+ err = "alloc from reserved chunk failed";
goto fail_unlock;
}
@@ -822,13 +1117,20 @@ restart:
if (size > chunk->contig_hint)
continue;
- switch (pcpu_extend_area_map(chunk)) {
- case 0:
- break;
- case 1:
- goto restart; /* pcpu_lock dropped, restart */
- default:
- goto fail_unlock;
+ new_alloc = pcpu_need_to_extend(chunk);
+ if (new_alloc) {
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+ if (pcpu_extend_area_map(chunk,
+ new_alloc) < 0) {
+ err = "failed to extend area map";
+ goto fail_unlock_mutex;
+ }
+ spin_lock_irqsave(&pcpu_lock, flags);
+ /*
+ * pcpu_lock has been dropped, need to
+ * restart cpu_slot list walking.
+ */
+ goto restart;
}
off = pcpu_alloc_area(chunk, size, align);
@@ -838,34 +1140,45 @@ restart:
}
/* hmmm... no space left, create a new chunk */
- spin_unlock_irq(&pcpu_lock);
+ spin_unlock_irqrestore(&pcpu_lock, flags);
chunk = alloc_pcpu_chunk();
- if (!chunk)
+ if (!chunk) {
+ err = "failed to allocate new chunk";
goto fail_unlock_mutex;
+ }
- spin_lock_irq(&pcpu_lock);
+ spin_lock_irqsave(&pcpu_lock, flags);
pcpu_chunk_relocate(chunk, -1);
goto restart;
area_found:
- spin_unlock_irq(&pcpu_lock);
+ spin_unlock_irqrestore(&pcpu_lock, flags);
/* populate, map and clear the area */
if (pcpu_populate_chunk(chunk, off, size)) {
- spin_lock_irq(&pcpu_lock);
+ spin_lock_irqsave(&pcpu_lock, flags);
pcpu_free_area(chunk, off);
+ err = "failed to populate";
goto fail_unlock;
}
mutex_unlock(&pcpu_alloc_mutex);
- return __addr_to_pcpu_ptr(chunk->vm->addr + off);
+ /* return address relative to base address */
+ return __addr_to_pcpu_ptr(chunk->base_addr + off);
fail_unlock:
- spin_unlock_irq(&pcpu_lock);
+ spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
mutex_unlock(&pcpu_alloc_mutex);
+ if (warn_limit) {
+ pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
+ "%s\n", size, align, err);
+ dump_stack();
+ if (!--warn_limit)
+ pr_info("PERCPU: limit reached, disable warning\n");
+ }
return NULL;
}
@@ -938,12 +1251,13 @@ static void pcpu_reclaim(struct work_struct *work)
}
spin_unlock_irq(&pcpu_lock);
- mutex_unlock(&pcpu_alloc_mutex);
list_for_each_entry_safe(chunk, next, &todo, list) {
- pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+ pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
free_pcpu_chunk(chunk);
}
+
+ mutex_unlock(&pcpu_alloc_mutex);
}
/**
@@ -957,7 +1271,7 @@ static void pcpu_reclaim(struct work_struct *work)
*/
void free_percpu(void *ptr)
{
- void *addr = __pcpu_ptr_to_addr(ptr);
+ void *addr;
struct pcpu_chunk *chunk;
unsigned long flags;
int off;
@@ -965,10 +1279,12 @@ void free_percpu(void *ptr)
if (!ptr)
return;
+ addr = __pcpu_ptr_to_addr(ptr);
+
spin_lock_irqsave(&pcpu_lock, flags);
chunk = pcpu_chunk_addr_search(addr);
- off = addr - chunk->vm->addr;
+ off = addr - chunk->base_addr;
pcpu_free_area(chunk, off);
@@ -988,29 +1304,319 @@ void free_percpu(void *ptr)
EXPORT_SYMBOL_GPL(free_percpu);
/**
- * pcpu_setup_first_chunk - initialize the first percpu chunk
- * @get_page_fn: callback to fetch page pointer
- * @static_size: the size of static percpu area in bytes
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address. The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ if ((unsigned long)addr < VMALLOC_START ||
+ (unsigned long)addr >= VMALLOC_END)
+ return __pa(addr);
+ else
+ return page_to_phys(vmalloc_to_page(addr));
+}
+
+static inline size_t pcpu_calc_fc_sizes(size_t static_size,
+ size_t reserved_size,
+ ssize_t *dyn_sizep)
+{
+ size_t size_sum;
+
+ size_sum = PFN_ALIGN(static_size + reserved_size +
+ (*dyn_sizep >= 0 ? *dyn_sizep : 0));
+ if (*dyn_sizep != 0)
+ *dyn_sizep = size_sum - static_size - reserved_size;
+
+ return size_sum;
+}
+
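
pcpu_calc_fc_sizes() page-aligns the static + reserved + dynamic sum and, unless the dynamic size is explicitly zero, lets the dynamic area absorb the alignment padding. A worked example with made-up sizes (4K pages), purely illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t static_size = 45000;	/* hypothetical __per_cpu size */
	size_t reserved_size = 8192;
	long dyn_size = -1;		/* -1 means "auto" */
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (dyn_size >= 0 ? (size_t)dyn_size : 0));
	if (dyn_size != 0)
		dyn_size = (long)(size_sum - static_size - reserved_size);

	/* prints size_sum=53248 dyn_size=56 for these inputs */
	printf("size_sum=%zu dyn_size=%ld\n", size_sum, dyn_size);
	return 0;
}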
+/**
+ * pcpu_alloc_alloc_info - allocate percpu allocation info
+ * @nr_groups: the number of groups
+ * @nr_units: the number of units
+ *
+ * Allocate ai which is large enough for @nr_groups groups containing
+ * @nr_units units. The returned ai's groups[0].cpu_map points to the
+ * cpu_map array which is long enough for @nr_units and filled with
+ * NR_CPUS. It's the caller's responsibility to initialize cpu_map
+ * pointer of other groups.
+ *
+ * RETURNS:
+ * Pointer to the allocated pcpu_alloc_info on success, NULL on
+ * failure.
+ */
+struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
+ int nr_units)
+{
+ struct pcpu_alloc_info *ai;
+ size_t base_size, ai_size;
+ void *ptr;
+ int unit;
+
+ base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
+ __alignof__(ai->groups[0].cpu_map[0]));
+ ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
+
+ ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+ if (!ptr)
+ return NULL;
+ ai = ptr;
+ ptr += base_size;
+
+ ai->groups[0].cpu_map = ptr;
+
+ for (unit = 0; unit < nr_units; unit++)
+ ai->groups[0].cpu_map[unit] = NR_CPUS;
+
+ ai->nr_groups = nr_groups;
+ ai->__ai_size = PFN_ALIGN(ai_size);
+
+ return ai;
+}
+
+/**
+ * pcpu_free_alloc_info - free percpu allocation info
+ * @ai: pcpu_alloc_info to free
+ *
+ * Free @ai which was allocated by pcpu_alloc_alloc_info().
+ */
+void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
+{
+ free_bootmem(__pa(ai), ai->__ai_size);
+}
+
+/**
+ * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
* @reserved_size: the size of reserved percpu area in bytes
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
- * @base_addr: mapped address, NULL for auto
- * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
+ * @atom_size: allocation atom size
+ * @cpu_distance_fn: callback to determine distance between cpus, optional
+ *
+ * This function determines grouping of units, their mappings to cpus
+ * and other parameters considering needed percpu size, allocation
+ * atom size and distances between CPUs.
+ *
+ * Groups are always multiples of atom size and CPUs which are of
+ * LOCAL_DISTANCE both ways are grouped together and share space for
+ * units in the same group. The returned configuration is guaranteed
+ * to have CPUs on different nodes on different groups and >=75% usage
+ * of allocated virtual address space.
+ *
+ * RETURNS:
+ * On success, pointer to the new allocation_info is returned. On
+ * failure, ERR_PTR value is returned.
+ */
+struct pcpu_alloc_info * __init pcpu_build_alloc_info(
+ size_t reserved_size, ssize_t dyn_size,
+ size_t atom_size,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
+{
+ static int group_map[NR_CPUS] __initdata;
+ static int group_cnt[NR_CPUS] __initdata;
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
+ int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
+ size_t size_sum, min_unit_size, alloc_size;
+ int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
+ int last_allocs, group, unit;
+ unsigned int cpu, tcpu;
+ struct pcpu_alloc_info *ai;
+ unsigned int *cpu_map;
+
+ /* this function may be called multiple times */
+ memset(group_map, 0, sizeof(group_map));
+ memset(group_cnt, 0, sizeof(group_map));
+
+ /*
+ * Determine min_unit_size, alloc_size and max_upa such that
+ * alloc_size is multiple of atom_size and is the smallest
+	 * which can accommodate 4k aligned segments which are equal to
+ * or larger than min_unit_size.
+ */
+ size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
+ min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
+
+ alloc_size = roundup(min_unit_size, atom_size);
+ upa = alloc_size / min_unit_size;
+ while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
+ upa--;
+ max_upa = upa;
+
+ /* group cpus according to their proximity */
+ for_each_possible_cpu(cpu) {
+ group = 0;
+ next_group:
+ for_each_possible_cpu(tcpu) {
+ if (cpu == tcpu)
+ break;
+ if (group_map[tcpu] == group && cpu_distance_fn &&
+ (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
+ cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
+ group++;
+ nr_groups = max(nr_groups, group + 1);
+ goto next_group;
+ }
+ }
+ group_map[cpu] = group;
+ group_cnt[group]++;
+ group_cnt_max = max(group_cnt_max, group_cnt[group]);
+ }
+
+ /*
+ * Expand unit size until address space usage goes over 75%
+ * and then as much as possible without using more address
+ * space.
+ */
+ last_allocs = INT_MAX;
+ for (upa = max_upa; upa; upa--) {
+ int allocs = 0, wasted = 0;
+
+ if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
+ continue;
+
+ for (group = 0; group < nr_groups; group++) {
+ int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
+ allocs += this_allocs;
+ wasted += this_allocs * upa - group_cnt[group];
+ }
+
+ /*
+	 * Don't accept if wastage is over 1/3. The
+ * greater-than comparison ensures upa==1 always
+ * passes the following check.
+ */
+ if (wasted > num_possible_cpus() / 3)
+ continue;
+
+ /* and then don't consume more memory */
+ if (allocs > last_allocs)
+ break;
+ last_allocs = allocs;
+ best_upa = upa;
+ }
+ upa = best_upa;
+
+ /* allocate and fill alloc_info */
+ for (group = 0; group < nr_groups; group++)
+ nr_units += roundup(group_cnt[group], upa);
+
+ ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
+ if (!ai)
+ return ERR_PTR(-ENOMEM);
+ cpu_map = ai->groups[0].cpu_map;
+
+ for (group = 0; group < nr_groups; group++) {
+ ai->groups[group].cpu_map = cpu_map;
+ cpu_map += roundup(group_cnt[group], upa);
+ }
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = alloc_size / upa;
+ ai->atom_size = atom_size;
+ ai->alloc_size = alloc_size;
+
+ for (group = 0, unit = 0; group_cnt[group]; group++) {
+ struct pcpu_group_info *gi = &ai->groups[group];
+
+ /*
+ * Initialize base_offset as if all groups are located
+ * back-to-back. The caller should update this to
+ * reflect actual allocation.
+ */
+ gi->base_offset = unit * ai->unit_size;
+
+ for_each_possible_cpu(cpu)
+ if (group_map[cpu] == group)
+ gi->cpu_map[gi->nr_units++] = cpu;
+ gi->nr_units = roundup(gi->nr_units, upa);
+ unit += gi->nr_units;
+ }
+ BUG_ON(unit != nr_units);
+
+ return ai;
+}
+
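To make the upa arithmetic above concrete (hypothetical numbers): with size_sum = 86016 (84K) and atom_size = 2M, min_unit_size = 86016 and alloc_size = 2097152, so upa starts at 2097152 / 86016 = 24 and is walked down until both conditions hold; since 2M is a power of two, that first happens at upa = 16, giving max_upa = 16 and a page-aligned 131072-byte unit. The wastage loop then settles on the largest upa at or below 16 that keeps per-group waste under a third of the possible CPUs.
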
+/**
+ * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
+ * @lvl: loglevel
+ * @ai: allocation info to dump
+ *
+ * Print out information about @ai using loglevel @lvl.
+ */
+static void pcpu_dump_alloc_info(const char *lvl,
+ const struct pcpu_alloc_info *ai)
+{
+ int group_width = 1, cpu_width = 1, width;
+ char empty_str[] = "--------";
+ int alloc = 0, alloc_end = 0;
+ int group, v;
+ int upa, apl; /* units per alloc, allocs per line */
+
+ v = ai->nr_groups;
+ while (v /= 10)
+ group_width++;
+
+ v = num_possible_cpus();
+ while (v /= 10)
+ cpu_width++;
+ empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
+
+ upa = ai->alloc_size / ai->unit_size;
+ width = upa * (cpu_width + 1) + group_width + 3;
+ apl = rounddown_pow_of_two(max(60 / width, 1));
+
+ printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
+ lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
+ ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
+
+ for (group = 0; group < ai->nr_groups; group++) {
+ const struct pcpu_group_info *gi = &ai->groups[group];
+ int unit = 0, unit_end = 0;
+
+ BUG_ON(gi->nr_units % upa);
+ for (alloc_end += gi->nr_units / upa;
+ alloc < alloc_end; alloc++) {
+ if (!(alloc % apl)) {
+ printk("\n");
+ printk("%spcpu-alloc: ", lvl);
+ }
+ printk("[%0*d] ", group_width, group);
+
+ for (unit_end += upa; unit < unit_end; unit++)
+ if (gi->cpu_map[unit] != NR_CPUS)
+ printk("%0*d ", cpu_width,
+ gi->cpu_map[unit]);
+ else
+ printk("%s ", empty_str);
+ }
+ }
+ printk("\n");
+}
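
With a configuration like the example above, the dump produces lines of the following shape. The values and CPU numbering here are purely hypothetical and only meant to illustrate the format strings; they are not captured output.

	pcpu-alloc: s45056 r8192 d20480 u131072 alloc=1*2097152
	pcpu-alloc: [0] 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
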
+
+/**
+ * pcpu_setup_first_chunk - initialize the first percpu chunk
+ * @ai: pcpu_alloc_info describing how the percpu area is shaped
+ * @base_addr: mapped address
*
* Initialize the first percpu chunk which contains the kernel static
  * percpu area. This function is to be called from arch percpu area
- * setup path. The first two parameters are mandatory. The rest are
- * optional.
- *
- * @get_page_fn() should return pointer to percpu page given cpu
- * number and page number. It should at least return enough pages to
- * cover the static area. The returned pages for static area should
- * have been initialized with valid data. If @unit_size is specified,
- * it can also return pages after the static area. NULL return
- * indicates end of pages for the cpu. Note that @get_page_fn() must
- * return the same number of pages for all cpus.
- *
- * @reserved_size, if non-zero, specifies the amount of bytes to
+ * setup path.
+ *
+ * @ai contains all information necessary to initialize the first
+ * chunk and prime the dynamic percpu allocator.
+ *
+ * @ai->static_size is the size of static percpu area.
+ *
+ * @ai->reserved_size, if non-zero, specifies the amount of bytes to
* reserve after the static area in the first chunk. This reserves
* the first chunk such that it's available only through reserved
* percpu allocation. This is primarily used to serve module percpu
@@ -1018,22 +1624,29 @@ EXPORT_SYMBOL_GPL(free_percpu);
* limited offset range for symbol relocations to guarantee module
* percpu symbols fall inside the relocatable range.
*
- * @dyn_size, if non-negative, determines the number of bytes
- * available for dynamic allocation in the first chunk. Specifying
- * non-negative value makes percpu leave alone the area beyond
- * @static_size + @reserved_size + @dyn_size.
+ * @ai->dyn_size determines the number of bytes available for dynamic
+ * allocation in the first chunk. The area between @ai->static_size +
+ * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
+ *
+ * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
+ * and equal to or larger than @ai->static_size + @ai->reserved_size +
+ * @ai->dyn_size.
*
- * @unit_size, if non-negative, specifies unit size and must be
- * aligned to PAGE_SIZE and equal to or larger than @static_size +
- * @reserved_size + if non-negative, @dyn_size.
+ * @ai->atom_size is the allocation atom size and used as alignment
+ * for vm areas.
*
- * Non-null @base_addr means that the caller already allocated virtual
- * region for the first chunk and mapped it. percpu must not mess
- * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL
- * @populate_pte_fn doesn't make any sense.
+ * @ai->alloc_size is the allocation size and always multiple of
+ * @ai->atom_size. This is larger than @ai->atom_size if
+ * @ai->unit_size is larger than @ai->atom_size.
*
- * @populate_pte_fn is used to populate the pagetable. NULL means the
- * caller already populated the pagetable.
+ * @ai->nr_groups and @ai->groups describe virtual memory layout of
+ * percpu areas. Units which should be colocated are put into the
+ * same group. Dynamic VM areas will be allocated according to these
+ * groupings. If @ai->nr_groups is zero, a single group containing
+ * all units is assumed.
+ *
+ * The caller should have mapped the first chunk at @base_addr and
+ * copied static data to each unit.
*
* If the first chunk ends up with both reserved and dynamic areas, it
* is served by two chunks - one to serve the core static and reserved
@@ -1043,49 +1656,98 @@ EXPORT_SYMBOL_GPL(free_percpu);
* and available for dynamic allocation like any other chunks.
*
* RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access.
+ * 0 on success, -errno on failure.
*/
-size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
- size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size,
- void *base_addr,
- pcpu_populate_pte_fn_t populate_pte_fn)
+int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ void *base_addr)
{
- static struct vm_struct first_vm;
+ static char cpus_buf[4096] __initdata;
static int smap[2], dmap[2];
- size_t size_sum = static_size + reserved_size +
- (dyn_size >= 0 ? dyn_size : 0);
+ size_t dyn_size = ai->dyn_size;
+ size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
struct pcpu_chunk *schunk, *dchunk = NULL;
+ unsigned long *group_offsets;
+ size_t *group_sizes;
+ unsigned long *unit_off;
unsigned int cpu;
- int nr_pages;
- int err, i;
+ int *unit_map;
+ int group, unit, i;
+
+ cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
+
+#define PCPU_SETUP_BUG_ON(cond) do { \
+ if (unlikely(cond)) { \
+ pr_emerg("PERCPU: failed to initialize, %s", #cond); \
+ pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
+ pcpu_dump_alloc_info(KERN_EMERG, ai); \
+ BUG(); \
+ } \
+} while (0)
- /* santiy checks */
+ /* sanity checks */
BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
- BUG_ON(!static_size);
- if (unit_size >= 0) {
- BUG_ON(unit_size < size_sum);
- BUG_ON(unit_size & ~PAGE_MASK);
- BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
- } else
- BUG_ON(base_addr);
- BUG_ON(base_addr && populate_pte_fn);
-
- if (unit_size >= 0)
- pcpu_unit_pages = unit_size >> PAGE_SHIFT;
- else
- pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
- PFN_UP(size_sum));
+ PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
+ PCPU_SETUP_BUG_ON(!ai->static_size);
+ PCPU_SETUP_BUG_ON(!base_addr);
+ PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
+ PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
+ PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
+
+ /* process group information and build config tables accordingly */
+ group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
+ group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
+ unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
+ unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ unit_map[cpu] = UINT_MAX;
+ pcpu_first_unit_cpu = NR_CPUS;
+
+ for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
+ const struct pcpu_group_info *gi = &ai->groups[group];
+
+ group_offsets[group] = gi->base_offset;
+ group_sizes[group] = gi->nr_units * ai->unit_size;
+
+ for (i = 0; i < gi->nr_units; i++) {
+ cpu = gi->cpu_map[i];
+ if (cpu == NR_CPUS)
+ continue;
- pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
- pcpu_chunk_size = nr_cpu_ids * pcpu_unit_size;
- pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
- + nr_cpu_ids * pcpu_unit_pages * sizeof(struct page *);
+ PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+ PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
+ PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
- if (dyn_size < 0)
- dyn_size = pcpu_unit_size - static_size - reserved_size;
+ unit_map[cpu] = unit + i;
+ unit_off[cpu] = gi->base_offset + i * ai->unit_size;
+
+ if (pcpu_first_unit_cpu == NR_CPUS)
+ pcpu_first_unit_cpu = cpu;
+ }
+ }
+ pcpu_last_unit_cpu = cpu;
+ pcpu_nr_units = unit;
+
+ for_each_possible_cpu(cpu)
+ PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
+
+ /* we're done parsing the input, undefine BUG macro and dump config */
+#undef PCPU_SETUP_BUG_ON
+ pcpu_dump_alloc_info(KERN_INFO, ai);
+
+ pcpu_nr_groups = ai->nr_groups;
+ pcpu_group_offsets = group_offsets;
+ pcpu_group_sizes = group_sizes;
+ pcpu_unit_map = unit_map;
+ pcpu_unit_offsets = unit_off;
+
+ /* determine basic parameters */
+ pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
+ pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
+ pcpu_atom_size = ai->atom_size;
+ pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
+ BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
/*
* Allocate chunk slots. The additional last slot is for
@@ -1105,189 +1767,368 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
*/
schunk = alloc_bootmem(pcpu_chunk_struct_size);
INIT_LIST_HEAD(&schunk->list);
- schunk->vm = &first_vm;
+ schunk->base_addr = base_addr;
schunk->map = smap;
schunk->map_alloc = ARRAY_SIZE(smap);
- schunk->page = schunk->page_ar;
+ schunk->immutable = true;
+ bitmap_fill(schunk->populated, pcpu_unit_pages);
- if (reserved_size) {
- schunk->free_size = reserved_size;
+ if (ai->reserved_size) {
+ schunk->free_size = ai->reserved_size;
pcpu_reserved_chunk = schunk;
- pcpu_reserved_chunk_limit = static_size + reserved_size;
+ pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
} else {
schunk->free_size = dyn_size;
dyn_size = 0; /* dynamic area covered */
}
schunk->contig_hint = schunk->free_size;
- schunk->map[schunk->map_used++] = -static_size;
+ schunk->map[schunk->map_used++] = -ai->static_size;
if (schunk->free_size)
schunk->map[schunk->map_used++] = schunk->free_size;
/* init dynamic chunk if necessary */
if (dyn_size) {
- dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
+ dchunk = alloc_bootmem(pcpu_chunk_struct_size);
INIT_LIST_HEAD(&dchunk->list);
- dchunk->vm = &first_vm;
+ dchunk->base_addr = base_addr;
dchunk->map = dmap;
dchunk->map_alloc = ARRAY_SIZE(dmap);
- dchunk->page = schunk->page_ar; /* share page map with schunk */
+ dchunk->immutable = true;
+ bitmap_fill(dchunk->populated, pcpu_unit_pages);
dchunk->contig_hint = dchunk->free_size = dyn_size;
dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
dchunk->map[dchunk->map_used++] = dchunk->free_size;
}
- /* allocate vm address */
- first_vm.flags = VM_ALLOC;
- first_vm.size = pcpu_chunk_size;
-
- if (!base_addr)
- vm_area_register_early(&first_vm, PAGE_SIZE);
- else {
- /*
- * Pages already mapped. No need to remap into
- * vmalloc area. In this case the first chunks can't
- * be mapped or unmapped by percpu and are marked
- * immutable.
- */
- first_vm.addr = base_addr;
- schunk->immutable = true;
- if (dchunk)
- dchunk->immutable = true;
- }
-
- /* assign pages */
- nr_pages = -1;
- for_each_possible_cpu(cpu) {
- for (i = 0; i < pcpu_unit_pages; i++) {
- struct page *page = get_page_fn(cpu, i);
-
- if (!page)
- break;
- *pcpu_chunk_pagep(schunk, cpu, i) = page;
- }
-
- BUG_ON(i < PFN_UP(static_size));
-
- if (nr_pages < 0)
- nr_pages = i;
- else
- BUG_ON(nr_pages != i);
- }
-
- /* map them */
- if (populate_pte_fn) {
- for_each_possible_cpu(cpu)
- for (i = 0; i < nr_pages; i++)
- populate_pte_fn(pcpu_chunk_addr(schunk,
- cpu, i));
-
- err = pcpu_map(schunk, 0, nr_pages);
- if (err)
- panic("failed to setup static percpu area, err=%d\n",
- err);
- }
-
/* link the first chunk in */
pcpu_first_chunk = dchunk ?: schunk;
pcpu_chunk_relocate(pcpu_first_chunk, -1);
/* we're done */
- pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
- return pcpu_unit_size;
+ pcpu_base_addr = base_addr;
+ return 0;
}
-/*
- * Embedding first chunk setup helper.
- */
-static void *pcpue_ptr __initdata;
-static size_t pcpue_size __initdata;
-static size_t pcpue_unit_size __initdata;
+const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
+ [PCPU_FC_AUTO] = "auto",
+ [PCPU_FC_EMBED] = "embed",
+ [PCPU_FC_PAGE] = "page",
+};
-static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
-{
- size_t off = (size_t)pageno << PAGE_SHIFT;
+enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
- if (off >= pcpue_size)
- return NULL;
+static int __init percpu_alloc_setup(char *str)
+{
+ if (0)
+ /* nada */;
+#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
+ else if (!strcmp(str, "embed"))
+ pcpu_chosen_fc = PCPU_FC_EMBED;
+#endif
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+ else if (!strcmp(str, "page"))
+ pcpu_chosen_fc = PCPU_FC_PAGE;
+#endif
+ else
+ pr_warning("PERCPU: unknown allocator %s specified\n", str);
- return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+ return 0;
}
+early_param("percpu_alloc", percpu_alloc_setup);
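
Usage note (not part of the diff): on a kernel built with both first-chunk implementations, the allocator can be forced from the boot command line, e.g.

	percpu_alloc=page

Any other value hits the warning above and leaves pcpu_chosen_fc at PCPU_FC_AUTO.
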
+#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
+ !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
/**
* pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
- * @static_size: the size of static percpu area in bytes
* @reserved_size: the size of reserved percpu area in bytes
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ * @atom_size: allocation atom size
+ * @cpu_distance_fn: callback to determine distance between cpus, optional
+ * @alloc_fn: function to allocate percpu page
+ * @free_fn: function to free percpu page
*
* This is a helper to ease setting up embedded first percpu chunk and
* can be called where pcpu_setup_first_chunk() is expected.
*
* If this function is used to setup the first chunk, it is allocated
- * as a contiguous area using bootmem allocator and used as-is without
- * being mapped into vmalloc area. This enables the first chunk to
- * piggy back on the linear physical mapping which often uses larger
- * page size.
+ * by calling @alloc_fn and used as-is without being mapped into
+ * vmalloc area. Allocations are always whole multiples of @atom_size
+ * aligned to @atom_size.
+ *
+ * This enables the first chunk to piggy back on the linear physical
+ * mapping which often uses larger page size. Please note that this
+ * can result in very sparse cpu->unit mapping on NUMA machines thus
+ * requiring large vmalloc address space. Don't use this allocator if
+ * vmalloc space is not orders of magnitude larger than distances
+ * between node memory addresses (ie. 32bit NUMA machines).
*
* When @dyn_size is positive, dynamic area might be larger than
- * specified to fill page alignment. Also, when @dyn_size is auto,
- * @dyn_size does not fill the whole first chunk but only what's
- * necessary for page alignment after static and reserved areas.
+ * specified to fill page alignment. When @dyn_size is auto,
+ * @dyn_size is just big enough to fill page alignment after static
+ * and reserved areas.
*
* If the needed size is smaller than the minimum or specified unit
- * size, the leftover is returned to the bootmem allocator.
+ * size, the leftover is returned using @free_fn.
*
* RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
*/
-ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
- ssize_t dyn_size, ssize_t unit_size)
+int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+ size_t atom_size,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn)
{
- size_t chunk_size;
- unsigned int cpu;
+ void *base = (void *)ULONG_MAX;
+ void **areas = NULL;
+ struct pcpu_alloc_info *ai;
+ size_t size_sum, areas_size, max_distance;
+ int group, i, rc;
+
+ ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
+ cpu_distance_fn);
+ if (IS_ERR(ai))
+ return PTR_ERR(ai);
+
+ size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
+ areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
+
+ areas = alloc_bootmem_nopanic(areas_size);
+ if (!areas) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
- /* determine parameters and allocate */
- pcpue_size = PFN_ALIGN(static_size + reserved_size +
- (dyn_size >= 0 ? dyn_size : 0));
- if (dyn_size != 0)
- dyn_size = pcpue_size - static_size - reserved_size;
-
- if (unit_size >= 0) {
- BUG_ON(unit_size < pcpue_size);
- pcpue_unit_size = unit_size;
- } else
- pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-
- chunk_size = pcpue_unit_size * nr_cpu_ids;
-
- pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
- __pa(MAX_DMA_ADDRESS));
- if (!pcpue_ptr) {
- pr_warning("PERCPU: failed to allocate %zu bytes for "
- "embedding\n", chunk_size);
- return -ENOMEM;
+ /* allocate, copy and determine base address */
+ for (group = 0; group < ai->nr_groups; group++) {
+ struct pcpu_group_info *gi = &ai->groups[group];
+ unsigned int cpu = NR_CPUS;
+ void *ptr;
+
+ for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
+ cpu = gi->cpu_map[i];
+ BUG_ON(cpu == NR_CPUS);
+
+ /* allocate space for the whole group */
+ ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
+ if (!ptr) {
+ rc = -ENOMEM;
+ goto out_free_areas;
+ }
+ areas[group] = ptr;
+
+ base = min(ptr, base);
+
+ for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
+ if (gi->cpu_map[i] == NR_CPUS) {
+ /* unused unit, free whole */
+ free_fn(ptr, ai->unit_size);
+ continue;
+ }
+ /* copy and return the unused part */
+ memcpy(ptr, __per_cpu_load, ai->static_size);
+ free_fn(ptr + size_sum, ai->unit_size - size_sum);
+ }
}
- /* return the leftover and copy */
- for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
- void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+ /* base address is now known, determine group base offsets */
+ max_distance = 0;
+ for (group = 0; group < ai->nr_groups; group++) {
+ ai->groups[group].base_offset = areas[group] - base;
+ max_distance = max_t(size_t, max_distance,
+ ai->groups[group].base_offset);
+ }
+ max_distance += ai->unit_size;
+
+ /* warn if maximum distance is further than 75% of vmalloc space */
+ if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+ pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
+ "space 0x%lx\n",
+ max_distance, VMALLOC_END - VMALLOC_START);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+ /* and fail if we have fallback */
+ rc = -EINVAL;
+ goto out_free;
+#endif
+ }
+
+ pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
+ PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+ ai->dyn_size, ai->unit_size);
+
+ rc = pcpu_setup_first_chunk(ai, base);
+ goto out_free;
+
+out_free_areas:
+ for (group = 0; group < ai->nr_groups; group++)
+ free_fn(areas[group],
+ ai->groups[group].nr_units * ai->unit_size);
+out_free:
+ pcpu_free_alloc_info(ai);
+ if (areas)
+ free_bootmem(__pa(areas), areas_size);
+ return rc;
+}
+#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
+ !CONFIG_HAVE_SETUP_PER_CPU_AREA */
+
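Illustrative only (not part of this patch): the shape of a cpu_distance_fn an arch could hand to pcpu_embed_first_chunk() so that CPUs on different NUMA nodes end up in different groups. Whether cpu_to_node() is usable this early is an assumption; x86, for example, uses its own early node lookup.

	static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
	{
		/* same node: group together; different nodes: keep apart */
		if (cpu_to_node(from) == cpu_to_node(to))
			return LOCAL_DISTANCE;
		return REMOTE_DISTANCE;
	}
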
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+/**
+ * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
+ * @free_fn: function to free percpu page, always called with PAGE_SIZE
+ * @populate_pte_fn: function to populate pte
+ *
+ * This is a helper to ease setting up page-remapped first percpu
+ * chunk and can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * This is the basic allocator. Static percpu area is allocated
+ * page-by-page into vmalloc area.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init pcpu_page_first_chunk(size_t reserved_size,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_populate_pte_fn_t populate_pte_fn)
+{
+ static struct vm_struct vm;
+ struct pcpu_alloc_info *ai;
+ char psize_str[16];
+ int unit_pages;
+ size_t pages_size;
+ struct page **pages;
+ int unit, i, j, rc;
+
+ snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
+
+ ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
+ if (IS_ERR(ai))
+ return PTR_ERR(ai);
+ BUG_ON(ai->nr_groups != 1);
+ BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
+
+ unit_pages = ai->unit_size >> PAGE_SHIFT;
+
+ /* unaligned allocations can't be freed, round up to page size */
+ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
+ sizeof(pages[0]));
+ pages = alloc_bootmem(pages_size);
+
+ /* allocate pages */
+ j = 0;
+ for (unit = 0; unit < num_possible_cpus(); unit++)
+ for (i = 0; i < unit_pages; i++) {
+ unsigned int cpu = ai->groups[0].cpu_map[unit];
+ void *ptr;
+
+ ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
+ if (!ptr) {
+ pr_warning("PERCPU: failed to allocate %s page "
+ "for cpu%u\n", psize_str, cpu);
+ goto enomem;
+ }
+ pages[j++] = virt_to_page(ptr);
+ }
+
+ /* allocate vm area, map the pages and copy static data */
+ vm.flags = VM_ALLOC;
+ vm.size = num_possible_cpus() * ai->unit_size;
+ vm_area_register_early(&vm, PAGE_SIZE);
+
+ for (unit = 0; unit < num_possible_cpus(); unit++) {
+ unsigned long unit_addr =
+ (unsigned long)vm.addr + unit * ai->unit_size;
+
+ for (i = 0; i < unit_pages; i++)
+ populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
- if (cpu_possible(cpu)) {
- free_bootmem(__pa(ptr + pcpue_size),
- pcpue_unit_size - pcpue_size);
- memcpy(ptr, __per_cpu_load, static_size);
- } else
- free_bootmem(__pa(ptr), pcpue_unit_size);
+ /* pte already populated, the following shouldn't fail */
+ rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
+ unit_pages);
+ if (rc < 0)
+ panic("failed to map percpu area, err=%d\n", rc);
+
+ /*
+ * FIXME: Archs with virtual cache should flush local
+ * cache for the linear mapping here - something
+ * equivalent to flush_cache_vmap() on the local cpu.
+ * flush_cache_vmap() can't be used as most supporting
+ * data structures are not set up yet.
+ */
+
+ /* copy static data */
+ memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
}
/* we're ready, commit */
- pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
- pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+ pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
+ unit_pages, psize_str, vm.addr, ai->static_size,
+ ai->reserved_size, ai->dyn_size);
+
+ rc = pcpu_setup_first_chunk(ai, vm.addr);
+ goto out_free_ar;
+
+enomem:
+ while (--j >= 0)
+ free_fn(page_address(pages[j]), PAGE_SIZE);
+ rc = -ENOMEM;
+out_free_ar:
+ free_bootmem(__pa(pages), pages_size);
+ pcpu_free_alloc_info(ai);
+ return rc;
+}
+#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
+
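Illustrative only: an arch that cannot use the embedding helper (for instance because its vmalloc window is too small) would wire the page allocator up roughly as below; the three callbacks are hypothetical arch-provided helpers, not functions defined in this patch.

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				   pcpu_fc_alloc, pcpu_fc_free,
				   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);
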
+/*
+ * Generic percpu area setup.
+ *
+ * The embedding helper is used because its behavior closely resembles
+ * the original non-dynamic generic percpu area setup. This is
+ * important because many archs have addressing restrictions and might
+ * fail if the percpu area is located far away from the previous
+ * location. As an added bonus, in non-NUMA cases, embedding is
+ * generally a good idea TLB-wise because percpu area can piggy back
+ * on the physical linear memory mapping which uses large page
+ * mappings on applicable archs.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
- return pcpu_setup_first_chunk(pcpue_get_page, static_size,
- reserved_size, dyn_size,
- pcpue_unit_size, pcpue_ptr, NULL);
+static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
+ size_t align)
+{
+ return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+}
+
+static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
+{
+ free_bootmem(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
+ pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
+ if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
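
For context: the delta computed above is what makes the usual percpu accessors line up. Conceptually (glossing over the symbol-prefixing details of this era), for a static percpu variable var:

	&per_cpu(var, cpu) == (void *)&var + __per_cpu_offset[cpu]
	__per_cpu_offset[cpu] == (pcpu_base_addr - __per_cpu_start)
	                         + pcpu_unit_offsets[cpu]

so each CPU sees its own copy at a fixed offset from the section image linked between __per_cpu_start and __per_cpu_end.
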
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 03341b014c2b..b6fb02348693 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/quicklist.h>
-DEFINE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DEFINE_PER_CPU_LOCKED(struct quicklist [CONFIG_NR_QUICK], quicklist);
#define FRACTION_OF_NODE_MEM 16
@@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned long min_pages)
int node = numa_node_id();
struct zone *zones = NODE_DATA(node)->node_zones;
int num_cpus_on_node;
- const struct cpumask *cpumask_on_node = cpumask_of_node(node);
node_free_pages =
#ifdef CONFIG_ZONE_DMA
@@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned long min_pages)
max = node_free_pages / FRACTION_OF_NODE_MEM;
- num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+ num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
max /= num_cpus_on_node;
return max(max, min_pages);
diff --git a/mm/readahead.c b/mm/readahead.c
index aa1aa2345235..033bc135a41f 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -547,5 +547,17 @@ page_cache_async_readahead(struct address_space *mapping,
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
+
+#ifdef CONFIG_BLOCK
+ /*
+ * Normally the current page is !uptodate and lock_page() will be
+ * immediately called to implicitly unplug the device. However this
+	 * is not always true for RAID configurations, where data arrives
+ * not strictly in their submission order. In this case we need to
+ * explicitly kick off the IO.
+ */
+ if (PageUptodate(page))
+ blk_run_backing_dev(mapping->backing_dev_info, NULL);
+#endif
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
diff --git a/mm/rmap.c b/mm/rmap.c
index 0895b5c7cbff..278cd277bdec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,6 +36,11 @@
* mapping->tree_lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within inode_lock in __sync_single_inode)
+ *
+ * (code doesn't rely on that order so it could be switched around)
+ * ->tasklist_lock
+ * anon_vma->lock (memory_failure, collect_procs_anon)
+ * pte map lock
*/
#include <linux/mm.h>
@@ -44,6 +49,7 @@
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
@@ -62,7 +68,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}
-static inline void anon_vma_free(struct anon_vma *anon_vma)
+void anon_vma_free(struct anon_vma *anon_vma)
{
kmem_cache_free(anon_vma_cachep, anon_vma);
}
@@ -166,7 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
list_del(&vma->anon_vma_node);
/* We must garbage collect the anon_vma if it's empty */
- empty = list_empty(&anon_vma->head);
+ empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
spin_unlock(&anon_vma->lock);
if (empty)
@@ -178,6 +184,7 @@ static void anon_vma_ctor(void *data)
struct anon_vma *anon_vma = data;
spin_lock_init(&anon_vma->lock);
+ ksm_refcount_init(anon_vma);
INIT_LIST_HEAD(&anon_vma->head);
}
@@ -191,14 +198,14 @@ void __init anon_vma_init(void)
* Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma relies on RCU to guard against the races.
*/
-static struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma(struct page *page)
{
struct anon_vma *anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
- anon_mapping = (unsigned long) page->mapping;
- if (!(anon_mapping & PAGE_MAPPING_ANON))
+ anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+ if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
@@ -211,7 +218,7 @@ out:
return NULL;
}
-static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
spin_unlock(&anon_vma->lock);
rcu_read_unlock();
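
Usage sketch (illustrative, not from the patch): with the pair now exported, a caller outside rmap.c can bracket a walk of the vma list like this; the loop body is a placeholder.

	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;

	anon_vma = page_lock_anon_vma(page);	/* NULL if not anon or unmapped */
	if (anon_vma) {
		/* anon_vma->lock and rcu_read_lock() are held here */
		list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
			pr_debug("page mapped by mm %p\n", vma->vm_mm);
		page_unlock_anon_vma(anon_vma);
	}
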
@@ -237,14 +244,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
}
/*
- * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used on anon pages, by unuse_vma;
+ * At what user virtual address is page expected in vma?
+ * checking that the page matches the vma.
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
if (PageAnon(page)) {
- if ((void *)vma->anon_vma !=
- (void *)page->mapping - PAGE_MAPPING_ANON)
+ if (vma->anon_vma != page_anon_vma(page))
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
if (!vma->vm_file ||
@@ -311,7 +317,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
* if the page is not mapped into the page tables of this VMA. Only
* valid for normal file or anonymous VMAs.
*/
-static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
unsigned long address;
pte_t *pte;
@@ -332,21 +338,15 @@ static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
* Subfunctions of page_referenced: page_referenced_one called
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
-static int page_referenced_one(struct page *page,
- struct vm_area_struct *vma,
- unsigned int *mapcount,
- unsigned long *vm_flags)
+int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, unsigned int *mapcount,
+ unsigned long *vm_flags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
spinlock_t *ptl;
int referenced = 0;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -383,9 +383,10 @@ static int page_referenced_one(struct page *page,
out_unmap:
(*mapcount)--;
pte_unmap_unlock(pte, ptl);
-out:
+
if (referenced)
*vm_flags |= vma->vm_flags;
+out:
return referenced;
}
@@ -404,6 +405,9 @@ static int page_referenced_anon(struct page *page,
mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
@@ -411,7 +415,7 @@ static int page_referenced_anon(struct page *page,
*/
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
- referenced += page_referenced_one(page, vma,
+ referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
@@ -469,6 +473,9 @@ static int page_referenced_file(struct page *page,
mapcount = page_mapcount(page);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
@@ -476,7 +483,7 @@ static int page_referenced_file(struct page *page,
*/
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
- referenced += page_referenced_one(page, vma,
+ referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
@@ -502,46 +509,47 @@ int page_referenced(struct page *page,
unsigned long *vm_flags)
{
int referenced = 0;
+ int we_locked = 0;
if (TestClearPageReferenced(page))
referenced++;
*vm_flags = 0;
- if (page_mapped(page) && page->mapping) {
- if (PageAnon(page))
+ if (page_mapped(page) && page_rmapping(page)) {
+ if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+ we_locked = trylock_page(page);
+ if (!we_locked) {
+ referenced++;
+ goto out;
+ }
+ }
+ if (unlikely(PageKsm(page)))
+ referenced += page_referenced_ksm(page, mem_cont,
+ vm_flags);
+ else if (PageAnon(page))
referenced += page_referenced_anon(page, mem_cont,
vm_flags);
- else if (is_locked)
+ else if (page->mapping)
referenced += page_referenced_file(page, mem_cont,
vm_flags);
- else if (!trylock_page(page))
- referenced++;
- else {
- if (page->mapping)
- referenced += page_referenced_file(page,
- mem_cont, vm_flags);
+ if (we_locked)
unlock_page(page);
- }
}
-
+out:
if (page_test_and_clear_young(page))
referenced++;
return referenced;
}
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
goto out;
@@ -573,8 +581,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (vma->vm_flags & VM_SHARED)
- ret += page_mkclean_one(page, vma);
+ if (vma->vm_flags & VM_SHARED) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret += page_mkclean_one(page, vma, address);
+ }
}
spin_unlock(&mapping->i_mmap_lock);
return ret;
@@ -615,14 +627,7 @@ static void __page_set_anon_rmap(struct page *page,
BUG_ON(!anon_vma);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
-
page->index = linear_page_index(vma, address);
-
- /*
- * nr_mapped state can be updated without turning off
- * interrupts because it is not modified via interrupt.
- */
- __inc_zone_page_state(page, NR_ANON_PAGES);
}
/**
@@ -660,14 +665,23 @@ static void __page_check_anon_rmap(struct page *page,
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that PageAnon is not being upgraded racily to PageKsm
+ * (but PageKsm is never downgraded to PageAnon).
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ int first = atomic_inc_and_test(&page->_mapcount);
+ if (first)
+ __inc_zone_page_state(page, NR_ANON_PAGES);
+ if (unlikely(PageKsm(page)))
+ return;
+
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- if (atomic_inc_and_test(&page->_mapcount))
+ if (first)
__page_set_anon_rmap(page, vma, address);
else
__page_check_anon_rmap(page, vma, address);
@@ -689,6 +703,7 @@ void page_add_new_anon_rmap(struct page *page,
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
+ __inc_zone_page_state(page, NR_ANON_PAGES);
__page_set_anon_rmap(page, vma, address);
if (page_evictable(page, vma))
lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -706,31 +721,10 @@ void page_add_file_rmap(struct page *page)
{
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_update_mapped_file_stat(page, 1);
+ mem_cgroup_update_file_mapped(page, 1);
}
}
-#ifdef CONFIG_DEBUG_VM
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- * @vma: the vm area being duplicated
- * @address: the user virtual address mapped
- *
- * For copy_page_range only: minimal extract from page_add_file_rmap /
- * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
- * quicker.
- *
- * The caller needs to hold the pte lock.
- */
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
-{
- if (PageAnon(page))
- __page_check_anon_rmap(page, vma, address);
- atomic_inc(&page->_mapcount);
-}
-#endif
-
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
@@ -739,54 +733,52 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
*/
void page_remove_rmap(struct page *page)
{
- if (atomic_add_negative(-1, &page->_mapcount)) {
- /*
- * Now that the last pte has gone, s390 must transfer dirty
- * flag from storage key to struct page. We can usually skip
- * this if the page is anon, so about to be freed; but perhaps
- * not if it's in swapcache - there might be another pte slot
- * containing the swap entry, but page not yet written to swap.
- */
- if ((!PageAnon(page) || PageSwapCache(page)) &&
- page_test_dirty(page)) {
- page_clear_dirty(page);
- set_page_dirty(page);
- }
- if (PageAnon(page))
- mem_cgroup_uncharge_page(page);
- __dec_zone_page_state(page,
- PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
- mem_cgroup_update_mapped_file_stat(page, -1);
- /*
- * It would be tidy to reset the PageAnon mapping here,
- * but that might overwrite a racing page_add_anon_rmap
- * which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
- * and remember that it's only reliable while mapped.
- * Leaving it set also helps swapoff to reinstate ptes
- * faster for those pages still in swapcache.
- */
+ /* page still mapped by someone else? */
+ if (!atomic_add_negative(-1, &page->_mapcount))
+ return;
+
+ /*
+ * Now that the last pte has gone, s390 must transfer dirty
+ * flag from storage key to struct page. We can usually skip
+ * this if the page is anon, so about to be freed; but perhaps
+ * not if it's in swapcache - there might be another pte slot
+ * containing the swap entry, but page not yet written to swap.
+ */
+ if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+ page_clear_dirty(page);
+ set_page_dirty(page);
}
+ if (PageAnon(page)) {
+ mem_cgroup_uncharge_page(page);
+ __dec_zone_page_state(page, NR_ANON_PAGES);
+ } else {
+ __dec_zone_page_state(page, NR_FILE_MAPPED);
+ mem_cgroup_update_file_mapped(page, -1);
+ }
+ /*
+ * It would be tidy to reset the PageAnon mapping here,
+ * but that might overwrite a racing page_add_anon_rmap
+ * which increments mapcount after us but sets mapping
+ * before us: so leave the reset to free_hot_cold_page,
+ * and remember that it's only reliable while mapped.
+ * Leaving it set also helps swapoff to reinstate ptes
+ * faster for those pages still in swapcache.
+ */
}
/*
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- int migration)
+int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, enum ttu_flags flags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -796,11 +788,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
*/
- if (!migration) {
- if (vma->vm_flags & VM_LOCKED) {
- ret = SWAP_MLOCK;
+ if (!(flags & TTU_IGNORE_MLOCK)) {
+ if (vma->vm_flags & VM_LOCKED)
+ goto out_mlock;
+
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out_unmap;
- }
+ }
+ if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address, pte)) {
ret = SWAP_FAIL;
goto out_unmap;
@@ -818,7 +813,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (PageAnon(page)) {
+ if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+ if (PageAnon(page))
+ dec_mm_counter(mm, anon_rss);
+ else
+ dec_mm_counter(mm, file_rss);
+ set_pte_at(mm, address, pte,
+ swp_entry_to_pte(make_hwpoison_entry(page)));
+ } else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
if (PageSwapCache(page)) {
@@ -826,7 +828,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
- swap_duplicate(entry);
+ if (swap_duplicate(entry) < 0) {
+ set_pte_at(mm, address, pte, pteval);
+ ret = SWAP_FAIL;
+ goto out_unmap;
+ }
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
@@ -840,12 +846,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* pte. do_swap_page() will wait until the migration
* pte is removed and then restart fault handling.
*/
- BUG_ON(!migration);
+ BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- } else if (PAGE_MIGRATION && migration) {
+ } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
/* Establish migration entry for a file page */
swp_entry_t entry;
entry = make_migration_entry(page, pte_write(pteval));
@@ -853,7 +859,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
} else
dec_mm_counter(mm, file_rss);
-
page_remove_rmap(page);
page_cache_release(page);
@@ -861,6 +866,27 @@ out_unmap:
pte_unmap_unlock(pte, ptl);
out:
return ret;
+
+out_mlock:
+ pte_unmap_unlock(pte, ptl);
+
+
+ /*
+	 * We need mmap_sem locking. Otherwise the VM_LOCKED check gives
+	 * an unstable result and races. Plus, we can't wait here because
+	 * we now hold anon_vma->lock or mapping->i_mmap_lock.
+	 * If the trylock fails, the page remains on the evictable LRU and
+	 * vmscan may later retry moving it to the unevictable LRU if the
+	 * page is actually mlocked.
+ */
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (vma->vm_flags & VM_LOCKED) {
+ mlock_vma_page(page);
+ ret = SWAP_MLOCK;
+ }
+ up_read(&vma->vm_mm->mmap_sem);
+ }
+ return ret;
}
/*
@@ -926,11 +952,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
/*
- * MLOCK_PAGES => feature is configured.
- * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+ * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
- if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
@@ -980,29 +1005,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
}
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
- int mlocked = 0;
-
- if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
- if (vma->vm_flags & VM_LOCKED) {
- mlock_vma_page(page);
- mlocked++; /* really mlocked the page */
- }
- up_read(&vma->vm_mm->mmap_sem);
- }
- return mlocked;
-}
-
/**
* try_to_unmap_anon - unmap or unlock anonymous page using the object-based
* rmap method
* @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
@@ -1014,53 +1021,33 @@ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* 'LOCKED.
*/
-static int try_to_unmap_anon(struct page *page, int unlock, int migration)
+static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
- unsigned int mlocked = 0;
int ret = SWAP_AGAIN;
- if (MLOCK_PAGES && unlikely(unlock))
- ret = SWAP_SUCCESS; /* default for try_to_munlock() */
-
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!((vma->vm_flags & VM_LOCKED) &&
- page_mapped_in_vma(page, vma)))
- continue; /* must visit all unlocked vmas */
- ret = SWAP_MLOCK; /* saw at least one mlocked vma */
- } else {
- ret = try_to_unmap_one(page, vma, migration);
- if (ret == SWAP_FAIL || !page_mapped(page))
- break;
- }
- if (ret == SWAP_MLOCK) {
- mlocked = try_to_mlock_page(page, vma);
- if (mlocked)
- break; /* stop if actually mlocked page */
- }
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = try_to_unmap_one(page, vma, address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page))
+ break;
}
page_unlock_anon_vma(anon_vma);
-
- if (mlocked)
- ret = SWAP_MLOCK; /* actually mlocked the page */
- else if (ret == SWAP_MLOCK)
- ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
-
return ret;
}
/**
* try_to_unmap_file - unmap/unlock file page using the object-based rmap method
* @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
@@ -1072,7 +1059,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* 'LOCKED.
*/
-static int try_to_unmap_file(struct page *page, int unlock, int migration)
+static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1083,46 +1070,30 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
unsigned int mapcount;
- unsigned int mlocked = 0;
-
- if (MLOCK_PAGES && unlikely(unlock))
- ret = SWAP_SUCCESS; /* default for try_to_munlock() */
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!((vma->vm_flags & VM_LOCKED) &&
- page_mapped_in_vma(page, vma)))
- continue; /* must visit all vmas */
- ret = SWAP_MLOCK;
- } else {
- ret = try_to_unmap_one(page, vma, migration);
- if (ret == SWAP_FAIL || !page_mapped(page))
- goto out;
- }
- if (ret == SWAP_MLOCK) {
- mlocked = try_to_mlock_page(page, vma);
- if (mlocked)
- break; /* stop if actually mlocked page */
- }
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = try_to_unmap_one(page, vma, address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page))
+ goto out;
}
- if (mlocked)
+ if (list_empty(&mapping->i_mmap_nonlinear))
goto out;
- if (list_empty(&mapping->i_mmap_nonlinear))
+ /*
+ * We don't bother to try to find the munlocked page in nonlinears.
+ * It's costly. Instead, later, page reclaim logic may call
+ * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+ */
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out;
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!(vma->vm_flags & VM_LOCKED))
- continue; /* must visit all vmas */
- ret = SWAP_MLOCK; /* leave mlocked == 0 */
- goto out; /* no need to look further */
- }
- if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
@@ -1155,16 +1126,12 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (!MLOCK_PAGES && !migration &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
- ret = try_to_unmap_cluster(cursor, &mapcount,
- vma, page);
- if (ret == SWAP_MLOCK)
- mlocked = 2; /* to return below */
+ if (try_to_unmap_cluster(cursor, &mapcount,
+ vma, page) == SWAP_MLOCK)
+ ret = SWAP_MLOCK;
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
@@ -1185,17 +1152,13 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
vma->vm_private_data = NULL;
out:
spin_unlock(&mapping->i_mmap_lock);
- if (mlocked)
- ret = SWAP_MLOCK; /* actually mlocked the page */
- else if (ret == SWAP_MLOCK)
- ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
return ret;
}
/**
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
- * @migration: migration flag
+ * @flags: action and flags
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
@@ -1206,16 +1169,18 @@ out:
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
-int try_to_unmap(struct page *page, int migration)
+int try_to_unmap(struct page *page, enum ttu_flags flags)
{
int ret;
BUG_ON(!PageLocked(page));
- if (PageAnon(page))
- ret = try_to_unmap_anon(page, 0, migration);
+ if (unlikely(PageKsm(page)))
+ ret = try_to_unmap_ksm(page, flags);
+ else if (PageAnon(page))
+ ret = try_to_unmap_anon(page, flags);
else
- ret = try_to_unmap_file(page, 0, migration);
+ ret = try_to_unmap_file(page, flags);
if (ret != SWAP_MLOCK && !page_mapped(page))
ret = SWAP_SUCCESS;
return ret;
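
For context (not in this hunk): callers now state their intent through ttu_flags instead of the old 0/1 migration argument. The combinations below are indicative of typical callers, not an exhaustive list:

	ret = try_to_unmap(page, TTU_UNMAP);		/* plain reclaim */
	ret = try_to_unmap(page, TTU_MIGRATION |
				 TTU_IGNORE_MLOCK |
				 TTU_IGNORE_ACCESS);	/* page migration */
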
@@ -1231,17 +1196,98 @@ int try_to_unmap(struct page *page, int migration)
*
* Return values are:
*
- * SWAP_SUCCESS - no vma's holding page mlocked.
+ * SWAP_AGAIN - no vma is holding page mlocked, or,
* SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_FAIL - page cannot be located at present
* SWAP_MLOCK - page is now mlocked.
*/
int try_to_munlock(struct page *page)
{
VM_BUG_ON(!PageLocked(page) || PageLRU(page));
- if (PageAnon(page))
- return try_to_unmap_anon(page, 1, 0);
+ if (unlikely(PageKsm(page)))
+ return try_to_unmap_ksm(page, TTU_MUNLOCK);
+ else if (PageAnon(page))
+ return try_to_unmap_anon(page, TTU_MUNLOCK);
else
- return try_to_unmap_file(page, 1, 0);
+ return try_to_unmap_file(page, TTU_MUNLOCK);
}
+#ifdef CONFIG_MIGRATION
+/*
+ * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+ int ret = SWAP_AGAIN;
+
+ /*
+ * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+ * because that depends on page_mapped(); but not all its usages
+ * are holding mmap_sem, which also gave the necessary guarantee
+ * (that this anon_vma's slab has not already been destroyed).
+ * This needs to be reviewed later: avoiding page_lock_anon_vma()
+ * is risky, and currently limits the usefulness of rmap_walk().
+ */
+ anon_vma = page_anon_vma(page);
+ if (!anon_vma)
+ return ret;
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = rmap_one(page, vma, address, arg);
+ if (ret != SWAP_AGAIN)
+ break;
+ }
+ spin_unlock(&anon_vma->lock);
+ return ret;
+}
+
+static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct address_space *mapping = page->mapping;
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ int ret = SWAP_AGAIN;
+
+ if (!mapping)
+ return ret;
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = rmap_one(page, vma, address, arg);
+ if (ret != SWAP_AGAIN)
+ break;
+ }
+ /*
+ * No nonlinear handling: being always shared, nonlinear vmas
+ * never contain migration ptes. Decide what to do about this
+ * limitation to linear when we need rmap_walk() on nonlinear.
+ */
+ spin_unlock(&mapping->i_mmap_lock);
+ return ret;
+}
+
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ VM_BUG_ON(!PageLocked(page));
+
+ if (unlikely(PageKsm(page)))
+ return rmap_walk_ksm(page, rmap_one, arg);
+ else if (PageAnon(page))
+ return rmap_walk_anon(page, rmap_one, arg);
+ else
+ return rmap_walk_file(page, rmap_one, arg);
+}
+#endif /* CONFIG_MIGRATION */
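
The rmap_walk() entry point added above dispatches to the KSM, anon or file variant and invokes the caller-supplied rmap_one() callback once per (vma, address) pair that maps the page, walking on for as long as the callback returns SWAP_AGAIN. A minimal sketch of a caller follows; only the callback signature and the SWAP_AGAIN convention come from the hunk itself, and the example_ names are invented for illustration.

/*
 * Illustrative sketch of an rmap_walk() caller; "example_" names are
 * made up, the calling convention is the one visible above.
 */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	/* per-mapping work (e.g. fixing up a pte) would go here */
	return SWAP_AGAIN;		/* keep walking the other vmas */
}

static void example_walk_mappings(struct page *page)
{
	/* the VM_BUG_ON in rmap_walk() insists on a locked page */
	rmap_walk(page, example_rmap_one, NULL);
}

This is the shape in which migrate.c uses the interface to remove migration ptes, as the comment above notes.
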
diff --git a/mm/shmem.c b/mm/shmem.c
index d713239ce2ce..eef4ebea5158 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -29,7 +29,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
-#include <linux/ima.h>
static struct vfsmount *shm_mnt;
@@ -42,6 +41,7 @@ static struct vfsmount *shm_mnt;
#include <linux/xattr.h>
#include <linux/exportfs.h>
+#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
@@ -49,7 +49,6 @@ static struct vfsmount *shm_mnt;
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
-#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
@@ -219,7 +218,7 @@ static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
-static struct vm_operations_struct shmem_vm_ops;
+static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
.ra_pages = 0, /* No readahead */
@@ -811,7 +810,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
if (!error && (attr->ia_valid & ATTR_MODE))
- error = generic_acl_chmod(inode, &shmem_acl_ops);
+ error = generic_acl_chmod(inode);
#endif
if (page)
page_cache_release(page);
@@ -1018,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
goto out;
}
mutex_unlock(&shmem_swaplist_mutex);
-out: return found; /* 0 or 1 or -ENOMEM */
+ /*
+ * Can some race bring us here? We've been holding page lock,
+ * so I think not; but would rather try again later than BUG()
+ */
+ unlock_page(page);
+ page_cache_release(page);
+out:
+ return (found < 0) ? found : 0;
}
/*
@@ -1047,8 +1053,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
* sync from ever calling shmem_writepage; but a stacking filesystem
* may use the ->writepage of its underlying filesystem, in which case
* tmpfs should write out to swap only in response to memory pressure,
- * and not for pdflush or sync. However, in those cases, we do still
- * want to check if there's a redundant swappage to be discarded.
+ * and not for the writeback threads or sync. However, in those cases,
+ * we do still want to check if there's a redundant swappage to be
+ * discarded.
*/
if (wbc->for_reclaim)
swap = get_swap_page();
@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
else
inode = NULL;
spin_unlock(&info->lock);
- swap_duplicate(swap);
+ swap_shmem_alloc(swap);
BUG_ON(page_mapped(page));
page_cache_release(page); /* pagecache ref */
swap_writepage(page, wbc);
@@ -1097,6 +1104,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
shmem_swp_unmap(entry);
unlock:
spin_unlock(&info->lock);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
@@ -1630,8 +1641,8 @@ shmem_write_end(struct file *file, struct address_space *mapping,
if (pos + copied > inode->i_size)
i_size_write(inode, pos + copied);
- unlock_page(page);
set_page_dirty(page);
+ unlock_page(page);
page_cache_release(page);
return copied;
@@ -1813,11 +1824,15 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
return error;
}
}
- error = shmem_acl_init(inode, dir);
+#ifdef CONFIG_TMPFS_POSIX_ACL
+ error = generic_acl_init(inode, dir);
if (error) {
iput(inode);
return error;
}
+#else
+ error = 0;
+#endif
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
if (S_ISDIR(mode))
@@ -1968,13 +1983,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
iput(inode);
return error;
}
- unlock_page(page);
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page, KM_USER0);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
+ unlock_page(page);
page_cache_release(page);
}
if (dir->i_mode & S_ISGID)
@@ -2032,27 +2047,28 @@ static const struct inode_operations shmem_symlink_inode_operations = {
* filesystem level, though.
*/
-static size_t shmem_xattr_security_list(struct inode *inode, char *list,
+static size_t shmem_xattr_security_list(struct dentry *dentry, char *list,
size_t list_len, const char *name,
- size_t name_len)
+ size_t name_len, int handler_flags)
{
- return security_inode_listsecurity(inode, list, list_len);
+ return security_inode_listsecurity(dentry->d_inode, list, list_len);
}
-static int shmem_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int shmem_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int handler_flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return xattr_getsecurity(inode, name, buffer, size);
+ return xattr_getsecurity(dentry->d_inode, name, buffer, size);
}
-static int shmem_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int shmem_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int handler_flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_setsecurity(inode, name, value, size, flags);
+ return security_inode_setsecurity(dentry->d_inode, name, value,
+ size, flags);
}
static struct xattr_handler shmem_xattr_security_handler = {
@@ -2063,8 +2079,8 @@ static struct xattr_handler shmem_xattr_security_handler = {
};
static struct xattr_handler *shmem_xattr_handlers[] = {
- &shmem_xattr_acl_access_handler,
- &shmem_xattr_acl_default_handler,
+ &generic_acl_access_handler,
+ &generic_acl_default_handler,
&shmem_xattr_security_handler,
NULL
};
@@ -2298,8 +2314,7 @@ static void shmem_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-static int shmem_fill_super(struct super_block *sb,
- void *data, int silent)
+int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct dentry *root;
@@ -2307,17 +2322,14 @@ static int shmem_fill_super(struct super_block *sb,
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
- sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
+ sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
L1_CACHE_BYTES), GFP_KERNEL);
if (!sbinfo)
return -ENOMEM;
- sbinfo->max_blocks = 0;
- sbinfo->max_inodes = 0;
sbinfo->mode = S_IRWXUGO | S_ISVTX;
sbinfo->uid = current_fsuid();
sbinfo->gid = current_fsgid();
- sbinfo->mpol = NULL;
sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
@@ -2421,6 +2433,7 @@ static const struct address_space_operations shmem_aops = {
.write_end = shmem_write_end,
#endif
.migratepage = migrate_page,
+ .error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
@@ -2446,7 +2459,7 @@ static const struct inode_operations shmem_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2469,7 +2482,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2480,7 +2493,7 @@ static const struct inode_operations shmem_special_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2497,7 +2510,7 @@ static const struct super_operations shmem_ops = {
.put_super = shmem_put_super,
};
-static struct vm_operations_struct shmem_vm_ops = {
+static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
#ifdef CONFIG_NUMA
.set_policy = shmem_set_policy,
@@ -2519,7 +2532,7 @@ static struct file_system_type tmpfs_fs_type = {
.kill_sb = kill_litter_super,
};
-static int __init init_tmpfs(void)
+int __init init_tmpfs(void)
{
int error;
@@ -2576,7 +2589,7 @@ static struct file_system_type tmpfs_fs_type = {
.kill_sb = kill_litter_super,
};
-static int __init init_tmpfs(void)
+int __init init_tmpfs(void)
{
BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
@@ -2591,6 +2604,11 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
return 0;
}
+int shmem_lock(struct file *file, int lock, struct user_struct *user)
+{
+ return 0;
+}
+
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
@@ -2613,7 +2631,8 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
int error;
struct file *file;
struct inode *inode;
- struct dentry *dentry, *root;
+ struct path path;
+ struct dentry *root;
struct qstr this;
if (IS_ERR(shm_mnt))
@@ -2630,38 +2649,35 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
this.len = strlen(name);
this.hash = 0; /* will go */
root = shm_mnt->mnt_root;
- dentry = d_alloc(root, &this);
- if (!dentry)
+ path.dentry = d_alloc(root, &this);
+ if (!path.dentry)
goto put_memory;
-
- error = -ENFILE;
- file = get_empty_filp();
- if (!file)
- goto put_dentry;
+ path.mnt = mntget(shm_mnt);
error = -ENOSPC;
inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
- goto close_file;
+ goto put_dentry;
- d_instantiate(dentry, inode);
+ d_instantiate(path.dentry, inode);
inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
- init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
- &shmem_file_operations);
-
#ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping(inode, size);
if (error)
- goto close_file;
+ goto put_dentry;
#endif
- ima_counts_get(file);
+
+ error = -ENFILE;
+ file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+ &shmem_file_operations);
+ if (!file)
+ goto put_dentry;
+
return file;
-close_file:
- put_filp(file);
put_dentry:
- dput(dentry);
+ path_put(&path);
put_memory:
shmem_unacct_size(flags, size);
return ERR_PTR(error);
@@ -2687,5 +2703,3 @@ int shmem_zero_setup(struct vm_area_struct *vma)
vma->vm_ops = &shmem_vm_ops;
return 0;
}
-
-module_init(init_tmpfs)
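
The shmem_file_setup() rework above replaces the old get_empty_filp()/init_file() sequence with building a struct path (dentry plus vfsmount) and calling alloc_file() as the last step, so the earlier failure paths only need path_put() rather than put_filp(). A minimal sketch of a caller follows, along the lines of shmem_zero_setup() at the end of this file; the name and flags passed are invented for the sketch.

/*
 * Illustrative caller of shmem_file_setup(); the "dev/example" name
 * and the VM_NORESERVE flag are made up for this sketch.
 */
static struct file *example_shmem_backing_file(loff_t size)
{
	struct file *file;

	file = shmem_file_setup("dev/example", size, VM_NORESERVE);
	if (IS_ERR(file))
		return file;	/* propagate the ERR_PTR from above */
	return file;
}
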
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
deleted file mode 100644
index 606a8e757a42..000000000000
--- a/mm/shmem_acl.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * mm/shmem_acl.c
- *
- * (C) 2005 Andreas Gruenbacher <agruen@suse.de>
- *
- * This file is released under the GPL.
- */
-
-#include <linux/fs.h>
-#include <linux/shmem_fs.h>
-#include <linux/xattr.h>
-#include <linux/generic_acl.h>
-
-/**
- * shmem_get_acl - generic_acl_operations->getacl() operation
- */
-static struct posix_acl *
-shmem_get_acl(struct inode *inode, int type)
-{
- struct posix_acl *acl = NULL;
-
- spin_lock(&inode->i_lock);
- switch(type) {
- case ACL_TYPE_ACCESS:
- acl = posix_acl_dup(inode->i_acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- acl = posix_acl_dup(inode->i_default_acl);
- break;
- }
- spin_unlock(&inode->i_lock);
-
- return acl;
-}
-
-/**
- * shmem_set_acl - generic_acl_operations->setacl() operation
- */
-static void
-shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
-{
- struct posix_acl *free = NULL;
-
- spin_lock(&inode->i_lock);
- switch(type) {
- case ACL_TYPE_ACCESS:
- free = inode->i_acl;
- inode->i_acl = posix_acl_dup(acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- free = inode->i_default_acl;
- inode->i_default_acl = posix_acl_dup(acl);
- break;
- }
- spin_unlock(&inode->i_lock);
- posix_acl_release(free);
-}
-
-struct generic_acl_operations shmem_acl_ops = {
- .getacl = shmem_get_acl,
- .setacl = shmem_set_acl,
-};
-
-/**
- * shmem_list_acl_access, shmem_get_acl_access, shmem_set_acl_access,
- * shmem_xattr_acl_access_handler - plumbing code to implement the
- * system.posix_acl_access xattr using the generic acl functions.
- */
-
-static size_t
-shmem_list_acl_access(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
-{
- return generic_acl_list(inode, &shmem_acl_ops, ACL_TYPE_ACCESS,
- list, list_size);
-}
-
-static int
-shmem_get_acl_access(struct inode *inode, const char *name, void *buffer,
- size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_get(inode, &shmem_acl_ops, ACL_TYPE_ACCESS, buffer,
- size);
-}
-
-static int
-shmem_set_acl_access(struct inode *inode, const char *name, const void *value,
- size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_set(inode, &shmem_acl_ops, ACL_TYPE_ACCESS, value,
- size);
-}
-
-struct xattr_handler shmem_xattr_acl_access_handler = {
- .prefix = POSIX_ACL_XATTR_ACCESS,
- .list = shmem_list_acl_access,
- .get = shmem_get_acl_access,
- .set = shmem_set_acl_access,
-};
-
-/**
- * shmem_list_acl_default, shmem_get_acl_default, shmem_set_acl_default,
- * shmem_xattr_acl_default_handler - plumbing code to implement the
- * system.posix_acl_default xattr using the generic acl functions.
- */
-
-static size_t
-shmem_list_acl_default(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
-{
- return generic_acl_list(inode, &shmem_acl_ops, ACL_TYPE_DEFAULT,
- list, list_size);
-}
-
-static int
-shmem_get_acl_default(struct inode *inode, const char *name, void *buffer,
- size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_get(inode, &shmem_acl_ops, ACL_TYPE_DEFAULT, buffer,
- size);
-}
-
-static int
-shmem_set_acl_default(struct inode *inode, const char *name, const void *value,
- size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_set(inode, &shmem_acl_ops, ACL_TYPE_DEFAULT, value,
- size);
-}
-
-struct xattr_handler shmem_xattr_acl_default_handler = {
- .prefix = POSIX_ACL_XATTR_DEFAULT,
- .list = shmem_list_acl_default,
- .get = shmem_get_acl_default,
- .set = shmem_set_acl_default,
-};
-
-/**
- * shmem_acl_init - Inizialize the acl(s) of a new inode
- */
-int
-shmem_acl_init(struct inode *inode, struct inode *dir)
-{
- return generic_acl_init(inode, dir, &shmem_acl_ops);
-}
-
-/**
- * shmem_check_acl - check_acl() callback for generic_permission()
- */
-static int
-shmem_check_acl(struct inode *inode, int mask)
-{
- struct posix_acl *acl = shmem_get_acl(inode, ACL_TYPE_ACCESS);
-
- if (acl) {
- int error = posix_acl_permission(inode, acl, mask);
- posix_acl_release(acl);
- return error;
- }
- return -EAGAIN;
-}
-
-/**
- * shmem_permission - permission() inode operation
- */
-int
-shmem_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, shmem_check_acl);
-}
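
Everything this deleted file provided is now covered by the generic POSIX ACL helpers, as the mm/shmem.c hunks above wire up: generic_acl_init() when an inode is created, generic_acl_chmod() after a mode change, generic_check_acl as the ->check_acl callback, and the generic xattr handlers in shmem_xattr_handlers[]. The sketch below only restates that wiring in one place; the example_ array name is invented.

#ifdef CONFIG_TMPFS_POSIX_ACL
/* Replacement wiring, as done in mm/shmem.c above (sketch only). */
static struct xattr_handler *example_xattr_handlers[] = {
	&generic_acl_access_handler,	/* system.posix_acl_access */
	&generic_acl_default_handler,	/* system.posix_acl_default */
	NULL
};
#endif
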
diff --git a/mm/slab.c b/mm/slab.c
index f70b3260d962..985c67bcb952 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -244,7 +244,7 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
slab_irq_disable(*cpu);
}
-static int _slab_spin_trylock_irq(spinlock_t *lock, int *cpu)
+static inline int _slab_spin_trylock_irq(spinlock_t *lock, int *cpu)
{
int locked;
@@ -641,7 +641,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
size_t slab_buffer_size(struct kmem_cache *cachep)
{
return cachep->buffer_size;
@@ -755,6 +755,26 @@ static struct kmem_cache cache_cache = {
#define BAD_ALIEN_MAGIC 0x01020304ul
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+ NONE,
+ PARTIAL_AC,
+ PARTIAL_L3,
+ EARLY,
+ FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+ return g_cpucache_up >= EARLY;
+}
+
#ifdef CONFIG_LOCKDEP
/*
@@ -771,40 +791,52 @@ static struct kmem_cache cache_cache = {
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
{
- int q;
struct cache_sizes *s = malloc_sizes;
- while (s->cs_size != ULONG_MAX) {
- for_each_node(q) {
- struct array_cache **alc;
- int r;
- struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
- continue;
- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
- alc = l3->alien;
- /*
- * FIXME: This check for BAD_ALIEN_MAGIC
- * should go away when common slab code is taught to
- * work even without alien caches.
- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
- * for alloc_alien_cache,
- */
- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
- continue;
- for_each_node(r) {
- if (alc[r])
- lockdep_set_class(&alc[r]->lock,
- &on_slab_alc_key);
- }
+ if (g_cpucache_up != FULL)
+ return;
+
+ for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+ struct array_cache **alc;
+ struct kmem_list3 *l3;
+ int r;
+
+ l3 = s->cs_cachep->nodelists[q];
+ if (!l3 || OFF_SLAB(s->cs_cachep))
+ continue;
+ lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+ alc = l3->alien;
+ /*
+ * FIXME: This check for BAD_ALIEN_MAGIC
+ * should go away when common slab code is taught to
+ * work even without alien caches.
+ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+ * for alloc_alien_cache,
+ */
+ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+ continue;
+ for_each_node(r) {
+ if (alc[r])
+ lockdep_set_class(&alc[r]->lock,
+ &on_slab_alc_key);
}
- s++;
}
}
+
+static inline void init_lock_keys(void)
+{
+ int node;
+
+ for_each_node(node)
+ init_node_lock_keys(node);
+}
#else
+static void init_node_lock_keys(int q)
+{
+}
+
static inline void init_lock_keys(void)
{
}
@@ -816,27 +848,7 @@ static inline void init_lock_keys(void)
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
- NONE,
- PARTIAL_AC,
- PARTIAL_L3,
- EARLY,
- FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
- return g_cpucache_up >= EARLY;
-}
-
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *
cpu_cache_get(struct kmem_cache *cachep, int this_cpu)
@@ -978,7 +990,7 @@ __setup("noaliencache", noaliencache_setup);
* objects freed on different nodes from which they were allocated) and the
* flushing of remote pcps by calling drain_node_pages.
*/
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
static void init_reap_node(int cpu)
{
@@ -988,17 +1000,17 @@ static void init_reap_node(int cpu)
if (node == MAX_NUMNODES)
node = first_node(node_online_map);
- per_cpu(reap_node, cpu) = node;
+ per_cpu(slab_reap_node, cpu) = node;
}
static void next_reap_node(void)
{
- int node = __get_cpu_var(reap_node);
+ int node = __get_cpu_var(slab_reap_node);
node = next_node(node, node_online_map);
if (unlikely(node >= MAX_NUMNODES))
node = first_node(node_online_map);
- __get_cpu_var(reap_node) = node;
+ __get_cpu_var(slab_reap_node) = node;
}
#else
@@ -1015,7 +1027,7 @@ static void next_reap_node(void)
*/
static void __cpuinit start_cpu_timer(int cpu)
{
- struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+ struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
@@ -1179,16 +1191,16 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
/*
* Called from cache_reap() to regularly drain alien caches round robin.
*/
-static int
-reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static int reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
- int node = __get_cpu_var(reap_node);
+ int node = __get_cpu_var(slab_reap_node);
int this_cpu;
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
- if (ac && ac->avail && slab_spin_trylock_irq(&ac->lock, this_cpu)) {
+ if (ac && ac->avail &&
+ slab_spin_trylock_irq(&ac->lock, this_cpu)) {
__drain_alien_cache(cachep, ac, node, &this_cpu);
slab_spin_unlock_irq(&ac->lock, this_cpu);
return 1;
@@ -1200,7 +1212,7 @@ reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
static void drain_alien_cache(struct kmem_cache *cachep,
struct array_cache **alien)
{
- int i = 0, this_cpu;
+ int this_cpu, i = 0;
struct array_cache *ac;
unsigned long flags;
@@ -1278,10 +1290,9 @@ static void __cpuinit cpuup_canceled(int cpu)
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
if (nc)
- free_block(cachep, nc->entry, nc->avail, node,
- &cpu);
+ free_block(cachep, nc->entry, nc->avail, node, &cpu);
- if (!cpus_empty(*mask)) {
+ if (!cpumask_empty(mask)) {
spin_unlock_irq(&l3->list_lock);
goto free_array_cache;
}
@@ -1416,6 +1427,8 @@ static int __cpuinit cpuup_prepare(int cpu)
kfree(shared);
free_alien_cache(alien);
}
+ init_node_lock_keys(node);
+
return 0;
bad:
cpuup_canceled(cpu);
@@ -1457,9 +1470,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
* anything expensive but will only modify reap_work
* and reschedule the timer.
*/
- cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+ cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
/* Now the cache_reaper is guaranteed to be not running. */
- per_cpu(reap_work, cpu).work.func = NULL;
+ per_cpu(slab_reap_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
@@ -1565,7 +1578,7 @@ void __init kmem_cache_init(void)
* Fragmentation resistance on low memory - only use bigger
* page orders on machines with more than 32MB of memory.
*/
- if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+ if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
slab_break_gfp_order = BREAK_GFP_ORDER_HI;
/* Bootstrap is tricky, because several objects are allocated
@@ -2467,9 +2480,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/*
* Determine if the slab management is 'on' or 'off' slab.
* (bootstrapping cannot cope with offslab caches so don't do
- * it too early on.)
+ * it too early on. Always use on-slab management when
+ * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak)
*/
- if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+ if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+ !(flags & SLAB_NOLEAKTRACE))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
@@ -2817,8 +2832,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
* kmemleak does not treat the ->s_mem pointer as a reference
* to the object. Otherwise we will not report the leak.
*/
- kmemleak_scan_area(slabp, offsetof(struct slab, list),
- sizeof(struct list_head), local_flags);
+ kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+ local_flags);
if (!slabp)
return NULL;
} else {
@@ -3339,13 +3354,19 @@ ____cache_alloc(struct kmem_cache *cachep, gfp_t flags, int *this_cpu)
} else {
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags, this_cpu);
+ /*
+ * the 'ac' may be updated by cache_alloc_refill(),
+ * and kmemleak_erase() requires its correct value.
+ */
+ ac = cpu_cache_get(cachep, *this_cpu);
}
/*
* To avoid a false negative, if an object that is in one of the
* per-CPU caches is leaked, we need to make sure kmemleak doesn't
* treat the array pointers as a reference to the object.
*/
- kmemleak_erase(&ac->entry[ac->avail]);
+ if (objp)
+ kmemleak_erase(&ac->entry[ac->avail]);
return objp;
}
@@ -3544,11 +3565,10 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
-
slab_irq_save(save_flags, this_cpu);
this_node = cpu_to_node(this_cpu);
- if (unlikely(nodeid == -1))
+ if (nodeid == -1)
nodeid = this_node;
if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3752,8 +3772,7 @@ free_done:
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
*/
-static inline void
-__cache_free(struct kmem_cache *cachep, void *objp, int *this_cpu)
+static void __cache_free(struct kmem_cache *cachep, void *objp, int *this_cpu)
{
struct array_cache *ac = cpu_cache_get(cachep, *this_cpu);
@@ -3803,7 +3822,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_alloc);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3866,7 +3885,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid)
@@ -3894,7 +3913,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
return ret;
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node,
@@ -3914,7 +3933,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return __do_kmalloc_node(size, flags, node, NULL);
}
EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
#endif /* CONFIG_NUMA */
/**
@@ -3946,7 +3965,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, __builtin_return_address(0));
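
Among the smaller changes above, the reap_work and reap_node per-cpu variables gain a slab_ prefix so their per-cpu symbol names stay unambiguous kernel-wide. A minimal sketch of the DEFINE_PER_CPU() declaration and access pattern those hunks use follows; the example_ names and the one-second delay are invented.

/*
 * Sketch of the per-cpu pattern used above; "example_" names and the
 * HZ delay are made up.
 */
static DEFINE_PER_CPU(struct delayed_work, example_subsys_reap_work);

static void example_reap(struct work_struct *unused)
{
	/* periodic per-cpu housekeeping would go here */
}

static void example_start_timer(int cpu)
{
	struct delayed_work *w = &per_cpu(example_subsys_reap_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(w, example_reap);
	schedule_delayed_work_on(cpu, w, HZ);
}
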
diff --git a/mm/slob.c b/mm/slob.c
index 9641da3d5e58..837ebd64cc34 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -692,3 +692,8 @@ void __init kmem_cache_init(void)
{
slob_ready = 1;
}
+
+void __init kmem_cache_init_late(void)
+{
+ /* Nothing to do */
+}
diff --git a/mm/slub.c b/mm/slub.c
index b6276753626e..8d71aaf888d7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -141,6 +141,13 @@
SLAB_POISON | SLAB_STORE_USER)
/*
+ * Debugging flags that require metadata to be stored in the slab. These get
+ * disabled when slub_debug=O is used and a cache's min order increases with
+ * metadata.
+ */
+#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+
+/*
* Set of flags that will prevent slab merging
*/
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
@@ -325,6 +332,7 @@ static int slub_debug;
#endif
static char *slub_debug_slabs;
+static int disable_higher_order_debug;
/*
* Object debugging
@@ -646,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
print_section("Padding", end - remainder, remainder);
- restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+ restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
return 0;
}
@@ -976,6 +984,15 @@ static int __init setup_slub_debug(char *str)
*/
goto check_slabs;
+ if (tolower(*str) == 'o') {
+ /*
+ * Avoid enabling debugging on caches if their minimum order
+ * would increase as a result.
+ */
+ disable_higher_order_debug = 1;
+ goto out;
+ }
+
slub_debug = 0;
if (*str == '-')
/*
@@ -1026,8 +1043,8 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
* Enable debugging if selected on the kernel commandline.
*/
if (slub_debug && (!slub_debug_slabs ||
- strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
- flags |= slub_debug;
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+ flags |= slub_debug;
return flags;
}
@@ -1054,6 +1071,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
}
#define slub_debug 0
+#define disable_higher_order_debug 0
+
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
@@ -1109,8 +1128,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
}
if (kmemcheck_enabled
- && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
- {
+ && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1560,6 +1578,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
"default order: %d, min order: %d\n", s->name, s->objsize,
s->size, oo_order(s->oo), oo_order(s->min));
+ if (oo_order(s->min) > get_order(s->objsize))
+ printk(KERN_WARNING " %s debugging increased min order, use "
+ "slub_debug=O to disable.\n", s->name);
+
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
unsigned long nr_slabs;
@@ -1713,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
}
local_irq_restore(flags);
- if (unlikely((gfpflags & __GFP_ZERO) && object))
+ if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, objsize);
kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
@@ -1732,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
}
EXPORT_SYMBOL(kmem_cache_alloc);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
return slab_alloc(s, gfpflags, -1, _RET_IP_);
@@ -1753,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node)
@@ -2001,7 +2023,7 @@ static inline int calculate_order(int size)
return order;
fraction /= 2;
}
- min_objects --;
+ min_objects--;
}
/*
@@ -2091,8 +2113,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
*/
#define NR_KMEM_CACHE_CPU 100
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
- kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+ kmem_cache_cpu);
static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
@@ -2400,6 +2422,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* on bootup.
*/
align = calculate_alignment(flags, align, s->objsize);
+ s->align = align;
/*
* SLUB stores one object immediately after another beginning from
@@ -2452,6 +2475,18 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1))
goto error;
+ if (disable_higher_order_debug) {
+ /*
+ * Disable debugging flags that store metadata if the min slab
+ * order increased.
+ */
+ if (get_order(s->size) > get_order(s->objsize)) {
+ s->flags &= ~DEBUG_METADATA_FLAGS;
+ s->offset = 0;
+ if (!calculate_sizes(s, -1))
+ goto error;
+ }
+ }
/*
* The larger the object size is, the more pages we want on the partial
@@ -2790,6 +2825,11 @@ static s8 size_index[24] = {
2 /* 192 */
};
+static inline int size_index_elem(size_t bytes)
+{
+ return (bytes - 1) / 8;
+}
+
static struct kmem_cache *get_slab(size_t size, gfp_t flags)
{
int index;
@@ -2798,7 +2838,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
if (!size)
return ZERO_SIZE_PTR;
- index = size_index[(size - 1) / 8];
+ index = size_index[size_index_elem(size)];
} else
index = fls(size - 1);
@@ -3156,10 +3196,12 @@ void __init kmem_cache_init(void)
slab_state = PARTIAL;
/* Caches that are not of the two-to-the-power-of size */
- if (KMALLOC_MIN_SIZE <= 64) {
+ if (KMALLOC_MIN_SIZE <= 32) {
create_kmalloc_cache(&kmalloc_caches[1],
"kmalloc-96", 96, GFP_NOWAIT);
caches++;
+ }
+ if (KMALLOC_MIN_SIZE <= 64) {
create_kmalloc_cache(&kmalloc_caches[2],
"kmalloc-192", 192, GFP_NOWAIT);
caches++;
@@ -3186,17 +3228,28 @@ void __init kmem_cache_init(void)
BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
- for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
- size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+ for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+ int elem = size_index_elem(i);
+ if (elem >= ARRAY_SIZE(size_index))
+ break;
+ size_index[elem] = KMALLOC_SHIFT_LOW;
+ }
- if (KMALLOC_MIN_SIZE == 128) {
+ if (KMALLOC_MIN_SIZE == 64) {
+ /*
+ * The 96 byte size cache is not used if the alignment
+ * is 64 byte.
+ */
+ for (i = 64 + 8; i <= 96; i += 8)
+ size_index[size_index_elem(i)] = 7;
+ } else if (KMALLOC_MIN_SIZE == 128) {
/*
* The 192 byte sized cache is not used if the alignment
* is 128 byte. Redirect kmalloc to use the 256 byte cache
* instead.
*/
for (i = 128 + 8; i <= 192; i += 8)
- size_index[(i - 1) / 8] = 8;
+ size_index[size_index_elem(i)] = 8;
}
slab_state = UP;
@@ -3292,6 +3345,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *s;
+ if (WARN_ON(!name))
+ return NULL;
+
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -4315,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
return len + sprintf(buf + len, "\n");
}
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
#define STAT_ATTR(si, text) \
static ssize_t text##_show(struct kmem_cache *s, char *buf) \
{ \
return show_stat(s, buf, si); \
} \
-SLAB_ATTR_RO(text); \
+static ssize_t text##_store(struct kmem_cache *s, \
+ const char *buf, size_t length) \
+{ \
+ if (buf[0] != '0') \
+ return -EINVAL; \
+ clear_stat(s, si); \
+ return length; \
+} \
+SLAB_ATTR(text); \
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
@@ -4543,8 +4615,11 @@ static int sysfs_slab_add(struct kmem_cache *s)
}
err = sysfs_create_group(&s->kobj, &slab_attr_group);
- if (err)
+ if (err) {
+ kobject_del(&s->kobj);
+ kobject_put(&s->kobj);
return err;
+ }
kobject_uevent(&s->kobj, KOBJ_ADD);
if (!unmergeable) {
/* Setup first alias */
@@ -4726,7 +4801,7 @@ static const struct file_operations proc_slabinfo_operations = {
static int __init slab_proc_init(void)
{
- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
+ proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
return 0;
}
module_init(slab_proc_init);
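
A quick worked check of the size_index_elem() helper introduced above: kmalloc sizes up to 192 bytes are looked up in 8-byte steps, so

	size_index_elem(8)   == (8 - 1) / 8   == 0
	size_index_elem(96)  == (96 - 1) / 8  == 11
	size_index_elem(192) == (192 - 1) / 8 == 23

which lines up with the 24-entry size_index[] table and with the new ARRAY_SIZE(size_index) bounds check in the kmem_cache_init() loop above.
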
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index a13ea6401ae7..d9714bdcb4a3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -48,8 +48,14 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
/* If the main allocator is up use that, fallback to bootmem. */
if (slab_is_available()) {
- struct page *page = alloc_pages_node(node,
+ struct page *page;
+
+ if (node_state(node, N_HIGH_MEMORY))
+ page = alloc_pages_node(node,
GFP_KERNEL | __GFP_ZERO, get_order(size));
+ else
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(size));
if (page)
return page_address(page);
return NULL;
diff --git a/mm/sparse.c b/mm/sparse.c
index da432d9f0ae8..6ce4aab69e99 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -62,9 +62,12 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- if (slab_is_available())
- section = kmalloc_node(array_size, GFP_KERNEL, nid);
- else
+ if (slab_is_available()) {
+ if (node_state(nid, N_HIGH_MEMORY))
+ section = kmalloc_node(array_size, GFP_KERNEL, nid);
+ else
+ section = kmalloc(array_size, GFP_KERNEL);
+ } else
section = alloc_bootmem_node(NODE_DATA(nid), array_size);
if (section)
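
Both sparse hunks above follow the same pattern: once the slab allocator is up, request node-local memory only when the node actually has memory (node_state(nid, N_HIGH_MEMORY)), and otherwise fall back to an unconstrained allocation for nodes without local pages. A condensed sketch, with an invented helper name:

/*
 * Sketch of the allocation fallback used in the two hunks above;
 * "example_" is an invented name.
 */
static void * __meminit example_alloc_near_node(unsigned long size, int nid)
{
	if (!slab_is_available())
		return alloc_bootmem_node(NODE_DATA(nid), size);
	if (node_state(nid, N_HIGH_MEMORY))
		return kmalloc_node(size, GFP_KERNEL, nid);
	return kmalloc(size, GFP_KERNEL);	/* memoryless node */
}
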
diff --git a/mm/swap.c b/mm/swap.c
index c2dfe5f08b5d..a4de467273ec 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -196,7 +196,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
spin_lock(&zone->lru_lock);
}
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int lru = page_is_file_cache(page);
+ int lru = page_lru_base_type(page);
list_move_tail(&page->lru, &zone->lru[lru].list);
pgmoved++;
}
@@ -259,7 +259,7 @@ void activate_page(struct page *page)
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int file = page_is_file_cache(page);
- int lru = LRU_BASE + file;
+ int lru = page_lru_base_type(page);
del_page_from_lru_list(zone, page, lru);
SetPageActive(page);
@@ -267,7 +267,7 @@ void activate_page(struct page *page)
add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE);
- update_page_reclaim_stat(zone, page, !!file, 1);
+ update_page_reclaim_stat(zone, page, file, 1);
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -578,7 +578,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
*/
void __init swap_setup(void)
{
- unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+ unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
bdi_init(swapper_space.backing_dev_info);
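
The swap_setup() change above sizes its heuristic from totalram_pages, the pages actually handed to the page allocator, rather than num_physpages, which on many configurations also counts memory the kernel never manages. A quick worked check with 4 KiB pages (PAGE_SHIFT == 12):

	megs = totalram_pages >> (20 - PAGE_SHIFT)
	     = 262144 >> 8		/* 1 GiB of managed RAM */
	     = 1024

so the page_cluster value swap_setup() goes on to derive from megs is now based on usable memory.
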
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 42cd38eba79f..6d1daeb1cb4a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -34,6 +34,7 @@ static const struct address_space_operations swap_aops = {
};
static struct backing_dev_info swap_backing_dev_info = {
+ .name = "swap",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
.unplug_io_fn = swap_unplug_io_fn,
};
@@ -66,10 +67,10 @@ void show_swap_cache_info(void)
}
/*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
int error;
@@ -77,28 +78,43 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
VM_BUG_ON(PageSwapCache(page));
VM_BUG_ON(!PageSwapBacked(page));
+ page_cache_get(page);
+ SetPageSwapCache(page);
+ set_page_private(page, entry.val);
+
+ spin_lock_irq(&swapper_space.tree_lock);
+ error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
+ if (likely(!error)) {
+ total_swapcache_pages++;
+ __inc_zone_page_state(page, NR_FILE_PAGES);
+ INC_CACHE_INFO(add_total);
+ }
+ spin_unlock_irq(&swapper_space.tree_lock);
+
+ if (unlikely(error)) {
+ /*
+ * Only a context which has set the SWAP_HAS_CACHE flag
+ * would call add_to_swap_cache(),
+ * so add_to_swap_cache() doesn't return -EEXIST.
+ */
+ VM_BUG_ON(error == -EEXIST);
+ set_page_private(page, 0UL);
+ ClearPageSwapCache(page);
+ page_cache_release(page);
+ }
+
+ return error;
+}
+
+
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+{
+ int error;
+
error = radix_tree_preload(gfp_mask);
if (!error) {
- page_cache_get(page);
- SetPageSwapCache(page);
- set_page_private(page, entry.val);
-
- spin_lock_irq(&swapper_space.tree_lock);
- error = radix_tree_insert(&swapper_space.page_tree,
- entry.val, page);
- if (likely(!error)) {
- total_swapcache_pages++;
- __inc_zone_page_state(page, NR_FILE_PAGES);
- INC_CACHE_INFO(add_total);
- }
- spin_unlock_irq(&swapper_space.tree_lock);
+ error = __add_to_swap_cache(page, entry);
radix_tree_preload_end();
-
- if (unlikely(error)) {
- set_page_private(page, 0UL);
- ClearPageSwapCache(page);
- page_cache_release(page);
- }
}
return error;
}
@@ -136,38 +152,34 @@ int add_to_swap(struct page *page)
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageUptodate(page));
- for (;;) {
- entry = get_swap_page();
- if (!entry.val)
- return 0;
+ entry = get_swap_page();
+ if (!entry.val)
+ return 0;
+ /*
+ * Radix-tree node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
+ *
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
+ */
+ /*
+ * Add it to the swap cache and mark it dirty
+ */
+ err = add_to_swap_cache(page, entry,
+ __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+
+ if (!err) { /* Success */
+ SetPageDirty(page);
+ return 1;
+ } else { /* -ENOMEM radix-tree allocation failure */
/*
- * Radix-tree node allocations from PF_MEMALLOC contexts could
- * completely exhaust the page allocator. __GFP_NOMEMALLOC
- * stops emergency reserves from being allocated.
- *
- * TODO: this could cause a theoretical memory reclaim
- * deadlock in the swap out path.
- */
- /*
- * Add it to the swap cache and mark it dirty
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
*/
- err = add_to_swap_cache(page, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-
- switch (err) {
- case 0: /* Success */
- SetPageDirty(page);
- return 1;
- case -EEXIST:
- /* Raced with "speculative" read_swap_cache_async */
- swapcache_free(entry, NULL);
- continue;
- default:
- /* -ENOMEM radix-tree allocation failure */
- swapcache_free(entry, NULL);
- return 0;
- }
+ swapcache_free(entry, NULL);
+ return 0;
}
}
@@ -289,26 +301,31 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
}
/*
+ * call radix_tree_preload() while we can wait.
+ */
+ err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+ if (err)
+ break;
+
+ /*
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry);
- if (err == -EEXIST) /* seems racy */
+ if (err == -EEXIST) { /* seems racy */
+ radix_tree_preload_end();
continue;
- if (err) /* swp entry is obsolete ? */
+ }
+ if (err) { /* swp entry is obsolete ? */
+ radix_tree_preload_end();
break;
+ }
- /*
- * Associate the page with swap entry in the swap cache.
- * May fail (-EEXIST) if there is already a page associated
- * with this entry in the swap cache: added by a racing
- * read_swap_cache_async, or add_to_swap or shmem_writepage
- * re-using the just freed swap entry for an existing page.
- * May fail (-ENOMEM) if radix-tree node allocation failed.
- */
+ /* May fail (-ENOMEM) if radix-tree node allocation failed. */
__set_page_locked(new_page);
SetPageSwapBacked(new_page);
- err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
+ err = __add_to_swap_cache(new_page, entry);
if (likely(!err)) {
+ radix_tree_preload_end();
/*
* Initiate read into locked page and return.
*/
@@ -316,8 +333,13 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
swap_readpage(new_page);
return new_page;
}
+ radix_tree_preload_end();
ClearPageSwapBacked(new_page);
__clear_page_locked(new_page);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
swapcache_free(entry, NULL);
} while (err != -ENOMEM);
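
read_swap_cache_async() above now calls radix_tree_preload() before swapcache_prepare(), so the radix-tree node allocation happens while sleeping is still allowed and the later insertion under the swapper_space tree lock can draw on preallocated nodes instead of atomic allocations. A minimal sketch of that preload/insert/end pattern follows; the lock and function names are invented.

/*
 * Sketch of the radix_tree_preload() pattern used above; the lock and
 * "example_" names are made up.
 */
static DEFINE_SPINLOCK(example_tree_lock);

static int example_tree_insert(struct radix_tree_root *root,
			       unsigned long index, void *item,
			       gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);	/* may sleep */
	if (error)
		return error;

	spin_lock_irq(&example_tree_lock);
	error = radix_tree_insert(root, index, item);
	spin_unlock_irq(&example_tree_lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return error;
}
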
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8ffdc0d23c53..6c0585b16418 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
@@ -35,11 +36,15 @@
#include <linux/swapops.h>
#include <linux/page_cgroup.h>
+static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
+ unsigned char);
+static void free_swap_count_continuations(struct swap_info_struct *);
+static sector_t map_swap_entry(swp_entry_t, struct block_device**);
+
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
-static int swap_overflow;
static int least_priority;
static const char Bad_file[] = "Bad swap file entry ";
@@ -49,42 +54,20 @@ static const char Unused_offset[] = "Unused swap offset entry ";
static struct swap_list_t swap_list = {-1, -1};
-static struct swap_info_struct swap_info[MAX_SWAPFILES];
+static struct swap_info_struct *swap_info[MAX_SWAPFILES];
static DEFINE_MUTEX(swapon_mutex);
-/* For reference count accounting in swap_map */
-/* enum for swap_map[] handling. internal use only */
-enum {
- SWAP_MAP = 0, /* ops for reference from swap users */
- SWAP_CACHE, /* ops for reference from swap cache */
-};
-
-static inline int swap_count(unsigned short ent)
-{
- return ent & SWAP_COUNT_MASK;
-}
-
-static inline bool swap_has_cache(unsigned short ent)
+static inline unsigned char swap_count(unsigned char ent)
{
- return !!(ent & SWAP_HAS_CACHE);
+ return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
-static inline unsigned short encode_swapmap(int count, bool has_cache)
-{
- unsigned short ret = count;
-
- if (has_cache)
- return SWAP_HAS_CACHE | ret;
- return ret;
-}
-
-/* returnes 1 if swap entry is freed */
+/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
- int type = si - swap_info;
- swp_entry_t entry = swp_entry(type, offset);
+ swp_entry_t entry = swp_entry(si->type, offset);
struct page *page;
int ret = 0;
@@ -120,7 +103,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
down_read(&swap_unplug_sem);
entry.val = page_private(page);
if (PageSwapCache(page)) {
- struct block_device *bdev = swap_info[swp_type(entry)].bdev;
+ struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
struct backing_dev_info *bdi;
/*
@@ -146,22 +129,28 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
static int discard_swap(struct swap_info_struct *si)
{
struct swap_extent *se;
+ sector_t start_block;
+ sector_t nr_blocks;
int err = 0;
- list_for_each_entry(se, &si->extent_list, list) {
- sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
- sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
+ /* Do not discard the swap header page! */
+ se = &si->first_swap_extent;
+ start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
+ nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
+ if (nr_blocks) {
+ err = blkdev_issue_discard(si->bdev, start_block,
+ nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+ if (err)
+ return err;
+ cond_resched();
+ }
- if (se->start_page == 0) {
- /* Do not discard the swap header page! */
- start_block += 1 << (PAGE_SHIFT - 9);
- nr_blocks -= 1 << (PAGE_SHIFT - 9);
- if (!nr_blocks)
- continue;
- }
+ list_for_each_entry(se, &si->first_swap_extent.list, list) {
+ start_block = se->start_block << (PAGE_SHIFT - 9);
+ nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL);
+ nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
if (err)
break;
@@ -200,13 +189,11 @@ static void discard_swap_cluster(struct swap_info_struct *si,
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO))
+ nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
break;
}
lh = se->list.next;
- if (lh == &si->extent_list)
- lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
}
}
@@ -221,7 +208,7 @@ static int wait_for_discard(void *word)
#define LATENCY_LIMIT 256
static inline unsigned long scan_swap_map(struct swap_info_struct *si,
- int cache)
+ unsigned char usage)
{
unsigned long offset;
unsigned long scan_base;
@@ -352,10 +339,7 @@ checks:
si->lowest_bit = si->max;
si->highest_bit = 0;
}
- if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
- si->swap_map[offset] = encode_swapmap(0, true);
- else /* at suspend */
- si->swap_map[offset] = encode_swapmap(1, false);
+ si->swap_map[offset] = usage;
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
@@ -465,10 +449,10 @@ swp_entry_t get_swap_page(void)
nr_swap_pages--;
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
- si = swap_info + type;
+ si = swap_info[type];
next = si->next;
if (next < 0 ||
- (!wrapped && si->prio != swap_info[next].prio)) {
+ (!wrapped && si->prio != swap_info[next]->prio)) {
next = swap_list.head;
wrapped++;
}
@@ -480,7 +464,7 @@ swp_entry_t get_swap_page(void)
swap_list.next = next;
/* This is called for allocating swap entry for cache */
- offset = scan_swap_map(si, SWAP_CACHE);
+ offset = scan_swap_map(si, SWAP_HAS_CACHE);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
@@ -501,11 +485,11 @@ swp_entry_t get_swap_page_of_type(int type)
pgoff_t offset;
spin_lock(&swap_lock);
- si = swap_info + type;
- if (si->flags & SWP_WRITEOK) {
+ si = swap_info[type];
+ if (si && (si->flags & SWP_WRITEOK)) {
nr_swap_pages--;
/* This is called for allocating swap entry, not cache */
- offset = scan_swap_map(si, SWAP_MAP);
+ offset = scan_swap_map(si, 1);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
@@ -516,9 +500,9 @@ swp_entry_t get_swap_page_of_type(int type)
return (swp_entry_t) {0};
}
-static struct swap_info_struct * swap_info_get(swp_entry_t entry)
+static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
unsigned long offset, type;
if (!entry.val)
@@ -526,7 +510,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry)
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_nofile;
- p = & swap_info[type];
+ p = swap_info[type];
if (!(p->flags & SWP_USED))
goto bad_device;
offset = swp_offset(entry);
@@ -552,41 +536,56 @@ out:
return NULL;
}
-static int swap_entry_free(struct swap_info_struct *p,
- swp_entry_t ent, int cache)
+static unsigned char swap_entry_free(struct swap_info_struct *p,
+ swp_entry_t entry, unsigned char usage)
{
- unsigned long offset = swp_offset(ent);
- int count = swap_count(p->swap_map[offset]);
- bool has_cache;
+ unsigned long offset = swp_offset(entry);
+ unsigned char count;
+ unsigned char has_cache;
- has_cache = swap_has_cache(p->swap_map[offset]);
+ count = p->swap_map[offset];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
- if (cache == SWAP_MAP) { /* dropping usage count of swap */
- if (count < SWAP_MAP_MAX) {
- count--;
- p->swap_map[offset] = encode_swapmap(count, has_cache);
- }
- } else { /* dropping swap cache flag */
+ if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
- p->swap_map[offset] = encode_swapmap(count, false);
-
+ has_cache = 0;
+ } else if (count == SWAP_MAP_SHMEM) {
+ /*
+ * Or we could insist on shmem.c using a special
+ * swap_shmem_free() and free_shmem_swap_and_cache()...
+ */
+ count = 0;
+ } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
+ if (count == COUNT_CONTINUED) {
+ if (swap_count_continued(p, offset, count))
+ count = SWAP_MAP_MAX | COUNT_CONTINUED;
+ else
+ count = SWAP_MAP_MAX;
+ } else
+ count--;
}
- /* return code. */
- count = p->swap_map[offset];
+
+ if (!count)
+ mem_cgroup_uncharge_swap(entry);
+
+ usage = count | has_cache;
+ p->swap_map[offset] = usage;
+
/* free if no reference */
- if (!count) {
+ if (!usage) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
- if (p->prio > swap_info[swap_list.next].prio)
- swap_list.next = p - swap_info;
+ if (swap_list.next >= 0 &&
+ p->prio > swap_info[swap_list.next]->prio)
+ swap_list.next = p->type;
nr_swap_pages++;
p->inuse_pages--;
}
- if (!swap_count(count))
- mem_cgroup_uncharge_swap(ent);
- return count;
+
+ return usage;
}
/*
@@ -595,11 +594,11 @@ static int swap_entry_free(struct swap_info_struct *p,
*/
void swap_free(swp_entry_t entry)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
p = swap_info_get(entry);
if (p) {
- swap_entry_free(p, entry, SWAP_MAP);
+ swap_entry_free(p, entry, 1);
spin_unlock(&swap_lock);
}
}
@@ -610,26 +609,21 @@ void swap_free(swp_entry_t entry)
void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
- int ret;
+ unsigned char count;
p = swap_info_get(entry);
if (p) {
- ret = swap_entry_free(p, entry, SWAP_CACHE);
- if (page) {
- bool swapout;
- if (ret)
- swapout = true; /* the end of swap out */
- else
- swapout = false; /* no more swap users! */
- mem_cgroup_uncharge_swapcache(page, entry, swapout);
- }
+ count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
+ if (page)
+ mem_cgroup_uncharge_swapcache(page, entry, count != 0);
spin_unlock(&swap_lock);
}
- return;
}
/*
* How many references to page are currently swapped out?
+ * This does not give an exact answer when swap count is continued,
+ * but does include the high COUNT_CONTINUED flag to allow for that.
*/
static inline int page_swapcount(struct page *page)
{
@@ -657,6 +651,8 @@ int reuse_swap_page(struct page *page)
int count;
VM_BUG_ON(!PageLocked(page));
+ if (unlikely(PageKsm(page)))
+ return 0;
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
@@ -665,7 +661,7 @@ int reuse_swap_page(struct page *page)
SetPageDirty(page);
}
}
- return count == 1;
+ return count <= 1;
}
/*
@@ -697,12 +693,12 @@ int free_swap_and_cache(swp_entry_t entry)
struct swap_info_struct *p;
struct page *page = NULL;
- if (is_migration_entry(entry))
+ if (non_swap_entry(entry))
return 1;
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
+ if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
@@ -739,14 +735,14 @@ int free_swap_and_cache(swp_entry_t entry)
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
- int i;
+ int type;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
- for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *sis = swap_info + i;
+ for (type = 0; type < nr_swapfiles; type++) {
+ struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
@@ -756,20 +752,18 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
- return i;
+ return type;
}
if (bdev == sis->bdev) {
- struct swap_extent *se;
+ struct swap_extent *se = &sis->first_swap_extent;
- se = list_entry(sis->extent_list.next,
- struct swap_extent, list);
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
- return i;
+ return type;
}
}
}
@@ -781,6 +775,21 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
}
/*
+ * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
+ * corresponding to given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int type, pgoff_t offset)
+{
+ struct block_device *bdev;
+
+ if ((unsigned int)type >= nr_swapfiles)
+ return 0;
+ if (!(swap_info[type]->flags & SWP_WRITEOK))
+ return 0;
+ return map_swap_entry(swp_entry(type, offset), &bdev);
+}
+
+/*
* Return either the total number of swap pages of given type, or the number
* of free pages of that type (depending on @free)
*
@@ -790,18 +799,20 @@ unsigned int count_swap_pages(int type, int free)
{
unsigned int n = 0;
- if (type < nr_swapfiles) {
- spin_lock(&swap_lock);
- if (swap_info[type].flags & SWP_WRITEOK) {
- n = swap_info[type].pages;
+ spin_lock(&swap_lock);
+ if ((unsigned int)type < nr_swapfiles) {
+ struct swap_info_struct *sis = swap_info[type];
+
+ if (sis->flags & SWP_WRITEOK) {
+ n = sis->pages;
if (free)
- n -= swap_info[type].inuse_pages;
+ n -= sis->inuse_pages;
}
- spin_unlock(&swap_lock);
}
+ spin_unlock(&swap_lock);
return n;
}
-#endif
+#endif /* CONFIG_HIBERNATION */
/*
* No need to decide whether this PTE shares the swap entry with others,
@@ -930,7 +941,7 @@ static int unuse_vma(struct vm_area_struct *vma,
unsigned long addr, end, next;
int ret;
- if (page->mapping) {
+ if (page_anon_vma(page)) {
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
return 0;
@@ -986,7 +997,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
{
unsigned int max = si->max;
unsigned int i = prev;
- int count;
+ unsigned char count;
/*
* No need for swap_lock here: we're just looking
@@ -1022,16 +1033,14 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
*/
static int try_to_unuse(unsigned int type)
{
- struct swap_info_struct * si = &swap_info[type];
+ struct swap_info_struct *si = swap_info[type];
struct mm_struct *start_mm;
- unsigned short *swap_map;
- unsigned short swcount;
+ unsigned char *swap_map;
+ unsigned char swcount;
struct page *page;
swp_entry_t entry;
unsigned int i = 0;
int retval = 0;
- int reset_overflow = 0;
- int shmem;
/*
* When searching mms for an entry, a good strategy is to
@@ -1045,8 +1054,7 @@ static int try_to_unuse(unsigned int type)
* together, child after parent. If we race with dup_mmap(), we
* prefer to resolve parent before child, lest we miss entries
* duplicated after we scanned child: using last mm would invert
- * that. Though it's only a serious concern when an overflowed
- * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
+ * that.
*/
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
@@ -1108,17 +1116,18 @@ static int try_to_unuse(unsigned int type)
/*
* Remove all references to entry.
- * Whenever we reach init_mm, there's no address space
- * to search, but use it as a reminder to search shmem.
*/
- shmem = 0;
swcount = *swap_map;
- if (swap_count(swcount)) {
- if (start_mm == &init_mm)
- shmem = shmem_unuse(entry, page);
- else
- retval = unuse_mm(start_mm, entry, page);
+ if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+ retval = shmem_unuse(entry, page);
+ /* page has already been unlocked and released */
+ if (retval < 0)
+ break;
+ continue;
}
+ if (swap_count(swcount) && start_mm != &init_mm)
+ retval = unuse_mm(start_mm, entry, page);
+
if (swap_count(*swap_map)) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
@@ -1129,7 +1138,7 @@ static int try_to_unuse(unsigned int type)
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
- while (swap_count(*swap_map) && !retval && !shmem &&
+ while (swap_count(*swap_map) && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1143,14 +1152,12 @@ static int try_to_unuse(unsigned int type)
swcount = *swap_map;
if (!swap_count(swcount)) /* any usage ? */
;
- else if (mm == &init_mm) {
+ else if (mm == &init_mm)
set_start_mm = 1;
- shmem = shmem_unuse(entry, page);
- } else
+ else
retval = unuse_mm(mm, entry, page);
- if (set_start_mm &&
- swap_count(*swap_map) < swcount) {
+ if (set_start_mm && *swap_map < swcount) {
mmput(new_start_mm);
atomic_inc(&mm->mm_users);
new_start_mm = mm;
@@ -1163,13 +1170,6 @@ static int try_to_unuse(unsigned int type)
mmput(start_mm);
start_mm = new_start_mm;
}
- if (shmem) {
- /* page has already been unlocked and released */
- if (shmem > 0)
- continue;
- retval = shmem;
- break;
- }
if (retval) {
unlock_page(page);
page_cache_release(page);
@@ -1177,30 +1177,6 @@ static int try_to_unuse(unsigned int type)
}
/*
- * How could swap count reach 0x7ffe ?
- * There's no way to repeat a swap page within an mm
- * (except in shmem, where it's the shared object which takes
- * the reference count)?
- * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned
- * short is too small....)
- * If that's wrong, then we should worry more about
- * exit_mmap() and do_munmap() cases described above:
- * we might be resetting SWAP_MAP_MAX too early here.
- * We know "Undead"s can happen, they're okay, so don't
- * report them; but do report if we reset SWAP_MAP_MAX.
- */
- /* We might release the lock_page() in unuse_mm(). */
- if (!PageSwapCache(page) || page_private(page) != entry.val)
- goto retry;
-
- if (swap_count(*swap_map) == SWAP_MAP_MAX) {
- spin_lock(&swap_lock);
- *swap_map = encode_swapmap(0, true);
- spin_unlock(&swap_lock);
- reset_overflow = 1;
- }
-
- /*
* If a reference remains (rare), we would like to leave
* the page in the swap cache; but try_to_unmap could
* then re-duplicate the entry once we drop page lock,
@@ -1212,6 +1188,12 @@ static int try_to_unuse(unsigned int type)
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
+ *
+ * Given how unuse_vma() targets one particular offset
+ * in an anon_vma, once the anon_vma has been determined,
+ * this splitting happens to be just what is needed to
+ * handle where KSM pages have been swapped out: re-reading
+ * is unnecessarily slow, but we can fix that later on.
*/
if (swap_count(*swap_map) &&
PageDirty(page) && PageSwapCache(page)) {
@@ -1241,7 +1223,6 @@ static int try_to_unuse(unsigned int type)
* mark page dirty so shrink_page_list will preserve it.
*/
SetPageDirty(page);
-retry:
unlock_page(page);
page_cache_release(page);
@@ -1253,10 +1234,6 @@ retry:
}
mmput(start_mm);
- if (reset_overflow) {
- printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
- swap_overflow = 0;
- }
return retval;
}
@@ -1269,10 +1246,10 @@ retry:
static void drain_mmlist(void)
{
struct list_head *p, *next;
- unsigned int i;
+ unsigned int type;
- for (i = 0; i < nr_swapfiles; i++)
- if (swap_info[i].inuse_pages)
+ for (type = 0; type < nr_swapfiles; type++)
+ if (swap_info[type]->inuse_pages)
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
@@ -1282,12 +1259,23 @@ static void drain_mmlist(void)
/*
* Use this swapdev's extent info to locate the (PAGE_SIZE) block which
- * corresponds to page offset `offset'.
+ * corresponds to page offset for the specified swap entry.
+ * Note that the return type of this function is sector_t, but it returns
+ * a page offset into the bdev, not a sector offset.
*/
-sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
+static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
- struct swap_extent *se = sis->curr_swap_extent;
- struct swap_extent *start_se = se;
+ struct swap_info_struct *sis;
+ struct swap_extent *start_se;
+ struct swap_extent *se;
+ pgoff_t offset;
+
+ sis = swap_info[swp_type(entry)];
+ *bdev = sis->bdev;
+
+ offset = swp_offset(entry);
+ start_se = sis->curr_swap_extent;
+ se = start_se;
for ( ; ; ) {
struct list_head *lh;
@@ -1297,40 +1285,31 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
return se->start_block + (offset - se->start_page);
}
lh = se->list.next;
- if (lh == &sis->extent_list)
- lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
sis->curr_swap_extent = se;
BUG_ON(se == start_se); /* It *must* be present */
}
}
-#ifdef CONFIG_HIBERNATION
/*
- * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
- * corresponding to given index in swap_info (swap type).
+ * Returns the page offset into bdev for the specified page's swap entry.
*/
-sector_t swapdev_block(int swap_type, pgoff_t offset)
+sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
- struct swap_info_struct *sis;
-
- if (swap_type >= nr_swapfiles)
- return 0;
-
- sis = swap_info + swap_type;
- return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+ swp_entry_t entry;
+ entry.val = page_private(page);
+ return map_swap_entry(entry, bdev);
}
-#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
- while (!list_empty(&sis->extent_list)) {
+ while (!list_empty(&sis->first_swap_extent.list)) {
struct swap_extent *se;
- se = list_entry(sis->extent_list.next,
+ se = list_entry(sis->first_swap_extent.list.next,
struct swap_extent, list);
list_del(&se->list);
kfree(se);
@@ -1351,8 +1330,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
struct swap_extent *new_se;
struct list_head *lh;
- lh = sis->extent_list.prev; /* The highest page extent */
- if (lh != &sis->extent_list) {
+ if (start_page == 0) {
+ se = &sis->first_swap_extent;
+ sis->curr_swap_extent = se;
+ se->start_page = 0;
+ se->nr_pages = nr_pages;
+ se->start_block = start_block;
+ return 1;
+ } else {
+ lh = sis->first_swap_extent.list.prev; /* Highest extent */
se = list_entry(lh, struct swap_extent, list);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
@@ -1372,7 +1358,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
- list_add_tail(&new_se->list, &sis->extent_list);
+ list_add_tail(&new_se->list, &sis->first_swap_extent.list);
return 1;
}
@@ -1424,7 +1410,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
- goto done;
+ goto out;
}
blkbits = inode->i_blkbits;
@@ -1495,25 +1481,22 @@ reprobe:
sis->max = page_no;
sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
-done:
- sis->curr_swap_extent = list_entry(sis->extent_list.prev,
- struct swap_extent, list);
- goto out;
+out:
+ return ret;
bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n");
ret = -EINVAL;
-out:
- return ret;
+ goto out;
}
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
- struct swap_info_struct * p = NULL;
- unsigned short *swap_map;
+ struct swap_info_struct *p = NULL;
+ unsigned char *swap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
- char * pathname;
+ char *pathname;
int i, type, prev;
int err;
@@ -1534,8 +1517,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mapping = victim->f_mapping;
prev = -1;
spin_lock(&swap_lock);
- for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
- p = swap_info + type;
+ for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
+ p = swap_info[type];
if (p->flags & SWP_WRITEOK) {
if (p->swap_file->f_mapping == mapping)
break;
@@ -1554,18 +1537,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
- if (prev < 0) {
+ if (prev < 0)
swap_list.head = p->next;
- } else {
- swap_info[prev].next = p->next;
- }
+ else
+ swap_info[prev]->next = p->next;
if (type == swap_list.next) {
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
if (p->prio < 0) {
- for (i = p->next; i >= 0; i = swap_info[i].next)
- swap_info[i].prio = p->prio--;
+ for (i = p->next; i >= 0; i = swap_info[i]->next)
+ swap_info[i]->prio = p->prio--;
least_priority++;
}
nr_swap_pages -= p->pages;
@@ -1573,9 +1555,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->flags &= ~SWP_WRITEOK;
spin_unlock(&swap_lock);
- current->flags |= PF_SWAPOFF;
+ current->flags |= PF_OOM_ORIGIN;
err = try_to_unuse(type);
- current->flags &= ~PF_SWAPOFF;
+ current->flags &= ~PF_OOM_ORIGIN;
if (err) {
/* re-insert swap space back into swap_list */
@@ -1583,16 +1565,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
if (p->prio < 0)
p->prio = --least_priority;
prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio)
+ for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+ if (p->prio >= swap_info[i]->prio)
break;
prev = i;
}
p->next = i;
if (prev < 0)
- swap_list.head = swap_list.next = p - swap_info;
+ swap_list.head = swap_list.next = type;
else
- swap_info[prev].next = p - swap_info;
+ swap_info[prev]->next = type;
nr_swap_pages += p->pages;
total_swap_pages += p->pages;
p->flags |= SWP_WRITEOK;
@@ -1605,6 +1587,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
up_write(&swap_unplug_sem);
destroy_swap_extents(p);
+ if (p->flags & SWP_CONTINUED)
+ free_swap_count_continuations(p);
+
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
drain_mmlist();
@@ -1652,8 +1637,8 @@ out:
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
- struct swap_info_struct *ptr = swap_info;
- int i;
+ struct swap_info_struct *si;
+ int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
@@ -1661,11 +1646,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
if (!l)
return SEQ_START_TOKEN;
- for (i = 0; i < nr_swapfiles; i++, ptr++) {
- if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
+ for (type = 0; type < nr_swapfiles; type++) {
+ smp_rmb(); /* read nr_swapfiles before swap_info[type] */
+ si = swap_info[type];
+ if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
- return ptr;
+ return si;
}
return NULL;
@@ -1673,21 +1660,21 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
- struct swap_info_struct *ptr;
- struct swap_info_struct *endptr = swap_info + nr_swapfiles;
+ struct swap_info_struct *si = v;
+ int type;
if (v == SEQ_START_TOKEN)
- ptr = swap_info;
- else {
- ptr = v;
- ptr++;
- }
+ type = 0;
+ else
+ type = si->type + 1;
- for (; ptr < endptr; ptr++) {
- if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
+ for (; type < nr_swapfiles; type++) {
+ smp_rmb(); /* read nr_swapfiles before swap_info[type] */
+ si = swap_info[type];
+ if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
- return ptr;
+ return si;
}
return NULL;
@@ -1700,24 +1687,24 @@ static void swap_stop(struct seq_file *swap, void *v)
static int swap_show(struct seq_file *swap, void *v)
{
- struct swap_info_struct *ptr = v;
+ struct swap_info_struct *si = v;
struct file *file;
int len;
- if (ptr == SEQ_START_TOKEN) {
+ if (si == SEQ_START_TOKEN) {
seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
- file = ptr->swap_file;
+ file = si->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
- ptr->pages << (PAGE_SHIFT - 10),
- ptr->inuse_pages << (PAGE_SHIFT - 10),
- ptr->prio);
+ si->pages << (PAGE_SHIFT - 10),
+ si->inuse_pages << (PAGE_SHIFT - 10),
+ si->prio);
return 0;
}
@@ -1764,7 +1751,7 @@ late_initcall(max_swapfiles_check);
*/
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
char *name = NULL;
struct block_device *bdev = NULL;
struct file *swap_file = NULL;
@@ -1778,30 +1765,52 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages = 1;
unsigned long swapfilepages;
- unsigned short *swap_map = NULL;
+ unsigned char *swap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
int did_down = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
spin_lock(&swap_lock);
- p = swap_info;
- for (type = 0 ; type < nr_swapfiles ; type++,p++)
- if (!(p->flags & SWP_USED))
+ for (type = 0; type < nr_swapfiles; type++) {
+ if (!(swap_info[type]->flags & SWP_USED))
break;
+ }
error = -EPERM;
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
+ kfree(p);
goto out;
}
- if (type >= nr_swapfiles)
- nr_swapfiles = type+1;
- memset(p, 0, sizeof(*p));
- INIT_LIST_HEAD(&p->extent_list);
+ if (type >= nr_swapfiles) {
+ p->type = type;
+ swap_info[type] = p;
+ /*
+ * Write swap_info[type] before nr_swapfiles, in case a
+ * racing procfs swap_start() or swap_next() is reading them.
+ * (We never shrink nr_swapfiles, we never free this entry.)
+ */
+ smp_wmb();
+ nr_swapfiles++;
+ } else {
+ kfree(p);
+ p = swap_info[type];
+ /*
+ * Do not memset this entry: a racing procfs swap_next()
+ * would be relying on p->type to remain valid.
+ */
+ }
+ INIT_LIST_HEAD(&p->first_swap_extent.list);
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
+
name = getname(specialfile);
error = PTR_ERR(name);
if (IS_ERR(name)) {
@@ -1821,7 +1830,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EBUSY;
for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *q = &swap_info[i];
+ struct swap_info_struct *q = swap_info[i];
if (i == type || !q->swap_file)
continue;
@@ -1896,6 +1905,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
p->lowest_bit = 1;
p->cluster_next = 1;
+ p->cluster_nr = 0;
/*
* Find out how many pages are allowed for a single swap
@@ -1931,13 +1941,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap;
/* OK, set up the swap map and apply the bad block list */
- swap_map = vmalloc(maxpages * sizeof(short));
+ swap_map = vmalloc(maxpages);
if (!swap_map) {
error = -ENOMEM;
goto bad_swap;
}
- memset(swap_map, 0, maxpages * sizeof(short));
+ memset(swap_map, 0, maxpages);
for (i = 0; i < swap_header->info.nr_badpages; i++) {
int page_nr = swap_header->info.badpages[i];
if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
@@ -1972,12 +1982,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap;
}
- if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
- p->flags |= SWP_SOLIDSTATE;
- p->cluster_next = 1 + (random32() % p->highest_bit);
+ if (p->bdev) {
+ if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+ p->flags |= SWP_SOLIDSTATE;
+ p->cluster_next = 1 + (random32() % p->highest_bit);
+ }
+ if (discard_swap(p) == 0)
+ p->flags |= SWP_DISCARDABLE;
}
- if (discard_swap(p) == 0)
- p->flags |= SWP_DISCARDABLE;
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
@@ -2000,18 +2012,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
/* insert swap space into swap_list: */
prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio) {
+ for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+ if (p->prio >= swap_info[i]->prio)
break;
- }
prev = i;
}
p->next = i;
- if (prev < 0) {
- swap_list.head = swap_list.next = p - swap_info;
- } else {
- swap_info[prev].next = p - swap_info;
- }
+ if (prev < 0)
+ swap_list.head = swap_list.next = type;
+ else
+ swap_info[prev]->next = type;
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
error = 0;
@@ -2048,15 +2058,15 @@ out:
void si_swapinfo(struct sysinfo *val)
{
- unsigned int i;
+ unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
- for (i = 0; i < nr_swapfiles; i++) {
- if (!(swap_info[i].flags & SWP_USED) ||
- (swap_info[i].flags & SWP_WRITEOK))
- continue;
- nr_to_be_unused += swap_info[i].inuse_pages;
+ for (type = 0; type < nr_swapfiles; type++) {
+ struct swap_info_struct *si = swap_info[type];
+
+ if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
+ nr_to_be_unused += si->inuse_pages;
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
@@ -2066,101 +2076,107 @@ void si_swapinfo(struct sysinfo *val)
/*
* Verify that a swap entry is valid and increment its swap map count.
*
- * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
- * "permanent", but will be reclaimed by the next swapoff.
* Returns error code in following case.
* - success -> 0
* - swp_entry is invalid -> EINVAL
* - swp_entry is migration entry -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
+ * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
-static int __swap_duplicate(swp_entry_t entry, bool cache)
+static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
unsigned long offset, type;
- int result = -EINVAL;
- int count;
- bool has_cache;
+ unsigned char count;
+ unsigned char has_cache;
+ int err = -EINVAL;
- if (is_migration_entry(entry))
- return -EINVAL;
+ if (non_swap_entry(entry))
+ goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
- p = type + swap_info;
+ p = swap_info[type];
offset = swp_offset(entry);
spin_lock(&swap_lock);
-
if (unlikely(offset >= p->max))
goto unlock_out;
- count = swap_count(p->swap_map[offset]);
- has_cache = swap_has_cache(p->swap_map[offset]);
+ count = p->swap_map[offset];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+ err = 0;
- if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
+ if (usage == SWAP_HAS_CACHE) {
/* set SWAP_HAS_CACHE if there is no cache and entry is used */
- if (!has_cache && count) {
- p->swap_map[offset] = encode_swapmap(count, true);
- result = 0;
- } else if (has_cache) /* someone added cache */
- result = -EEXIST;
- else if (!count) /* no users */
- result = -ENOENT;
+ if (!has_cache && count)
+ has_cache = SWAP_HAS_CACHE;
+ else if (has_cache) /* someone else added cache */
+ err = -EEXIST;
+ else /* no users remaining */
+ err = -ENOENT;
} else if (count || has_cache) {
- if (count < SWAP_MAP_MAX - 1) {
- p->swap_map[offset] = encode_swapmap(count + 1,
- has_cache);
- result = 0;
- } else if (count <= SWAP_MAP_MAX) {
- if (swap_overflow++ < 5)
- printk(KERN_WARNING
- "swap_dup: swap entry overflow\n");
- p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
- has_cache);
- result = 0;
- }
+
+ if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
+ count += usage;
+ else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
+ err = -EINVAL;
+ else if (swap_count_continued(p, offset, count))
+ count = COUNT_CONTINUED;
+ else
+ err = -ENOMEM;
} else
- result = -ENOENT; /* unused swap entry */
+ err = -ENOENT; /* unused swap entry */
+
+ p->swap_map[offset] = count | has_cache;
+
unlock_out:
spin_unlock(&swap_lock);
out:
- return result;
+ return err;
bad_file:
printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
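
As a side note on the hunk above: swap_map entries are now single unsigned chars, with SWAP_HAS_CACHE recording a swapcache reference and the remaining bits holding the map count, COUNT_CONTINUED flagging overflow into a continuation page. The standalone sketch below only models that packing; the constant values are assumptions patterned on this series, not copied from <linux/swap.h>.

    #include <assert.h>
    #include <stdio.h>

    /* Assumed values, patterned on the flags used by this series */
    #define SWAP_HAS_CACHE  0x40    /* entry has a swapcache reference */
    #define COUNT_CONTINUED 0x80    /* count continues in a continuation page */
    #define SWAP_MAP_MAX    0x3e    /* largest count held in swap_map itself */

    int main(void)
    {
            unsigned char entry = 5 | SWAP_HAS_CACHE;   /* 5 mappers + swapcache */
            unsigned char has_cache = entry & SWAP_HAS_CACHE;
            unsigned char count = entry & ~SWAP_HAS_CACHE;

            assert(has_cache && count == 5);

            /* what __swap_duplicate(entry, 1) does: bump count, keep cache bit */
            if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
                    count += 1;
            entry = count | has_cache;

            printf("entry=0x%02x count=%d cached=%d\n", entry,
                   entry & ~(SWAP_HAS_CACHE | COUNT_CONTINUED),
                   !!(entry & SWAP_HAS_CACHE));
            return 0;
    }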
+
+/*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+ __swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
/*
* increase reference count of swap entry by 1.
*/
-void swap_duplicate(swp_entry_t entry)
+int swap_duplicate(swp_entry_t entry)
{
- __swap_duplicate(entry, SWAP_MAP);
+ int err = 0;
+
+ while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
+ err = add_swap_count_continuation(entry, GFP_ATOMIC);
+ return err;
}
/*
* @entry: swap entry for which we allocate swap cache.
*
- * Called when allocating swap cache for exising swap entry,
+ * Called when allocating swap cache for existing swap entry,
* This can return error codes. Returns 0 at success.
* -EBUSY means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry)
{
- return __swap_duplicate(entry, SWAP_CACHE);
-}
-
-
-struct swap_info_struct *
-get_swap_info_struct(unsigned type)
-{
- return &swap_info[type];
+ return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
/*
@@ -2178,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
if (!our_page_cluster) /* no readahead */
return 0;
- si = &swap_info[swp_type(entry)];
+ si = swap_info[swp_type(entry)];
target = swp_offset(entry);
base = (target >> our_page_cluster) << our_page_cluster;
end = base + (1 << our_page_cluster);
@@ -2214,3 +2230,219 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
*offset = ++toff;
return nr_pages? ++nr_pages: 0;
}
+
+/*
+ * add_swap_count_continuation - called when a swap count is duplicated
+ * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
+ * page of the original vmalloc'ed swap_map, to hold the continuation count
+ * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
+ * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
+ *
+ * These continuation pages are seldom referenced: the common paths all work
+ * on the original swap_map, only referring to a continuation page when the
+ * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
+ *
+ * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
+ * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
+ * can be called after dropping locks.
+ */
+int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+{
+ struct swap_info_struct *si;
+ struct page *head;
+ struct page *page;
+ struct page *list_page;
+ pgoff_t offset;
+ unsigned char count;
+
+ /*
+ * When debugging, it's easier to use __GFP_ZERO here; but it's better
+ * for latency not to zero a page while GFP_ATOMIC and holding locks.
+ */
+ page = alloc_page(gfp_mask | __GFP_HIGHMEM);
+
+ si = swap_info_get(entry);
+ if (!si) {
+ /*
+ * An acceptable race has occurred since the failing
+ * __swap_duplicate(): the swap entry has been freed,
+ * perhaps even the whole swap_map cleared for swapoff.
+ */
+ goto outer;
+ }
+
+ offset = swp_offset(entry);
+ count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
+
+ if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
+ /*
+ * The higher the swap count, the more likely it is that tasks
+ * will race to add swap count continuation: we need to avoid
+ * over-provisioning.
+ */
+ goto out;
+ }
+
+ if (!page) {
+ spin_unlock(&swap_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * We are fortunate that although vmalloc_to_page uses pte_offset_map,
+ * no architecture is using highmem pages for kernel pagetables: so it
+ * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+ */
+ head = vmalloc_to_page(si->swap_map + offset);
+ offset &= ~PAGE_MASK;
+
+ /*
+ * Page allocation does not initialize the page's lru field,
+ * but it does always reset its private field.
+ */
+ if (!page_private(head)) {
+ BUG_ON(count & COUNT_CONTINUED);
+ INIT_LIST_HEAD(&head->lru);
+ set_page_private(head, SWP_CONTINUED);
+ si->flags |= SWP_CONTINUED;
+ }
+
+ list_for_each_entry(list_page, &head->lru, lru) {
+ unsigned char *map;
+
+ /*
+ * If the previous map said no continuation, but we've found
+ * a continuation page, free our allocation and use this one.
+ */
+ if (!(count & COUNT_CONTINUED))
+ goto out;
+
+ map = kmap_atomic(list_page, KM_USER0) + offset;
+ count = *map;
+ kunmap_atomic(map, KM_USER0);
+
+ /*
+ * If this continuation count now has some space in it,
+ * free our allocation and use this one.
+ */
+ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
+ goto out;
+ }
+
+ list_add_tail(&page->lru, &head->lru);
+ page = NULL; /* now it's attached, don't free it */
+out:
+ spin_unlock(&swap_lock);
+outer:
+ if (page)
+ __free_page(page);
+ return 0;
+}
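
The GFP_ATOMIC/GFP_KERNEL note in the comment above implies a two-phase pattern on the caller's side: try swap_duplicate() under the page table lock (which uses GFP_ATOMIC for any continuation page), and if that reports -ENOMEM, drop the lock and retry the continuation allocation with GFP_KERNEL. The fragment below is only a hedged sketch of that shape, not the exact mm/memory.c code; entry, ptl and ret are assumed to come from the surrounding copy loop.

    swp_entry_t pending;

    pending.val = 0;
    spin_lock(ptl);                         /* page table lock held */
    if (swap_duplicate(entry) < 0) {
            pending = entry;                /* GFP_ATOMIC continuation failed */
            ret = -ENOMEM;                  /* remember it, retry below */
    }
    spin_unlock(ptl);

    if (pending.val) {
            if (add_swap_count_continuation(pending, GFP_KERNEL) < 0)
                    return -ENOMEM;         /* genuinely out of memory */
            /* continuation now in place: redo the copy that failed */
    }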
+
+/*
+ * swap_count_continued - when the original swap_map count is incremented
+ * from SWAP_MAP_MAX, check if there is already a continuation page to carry
+ * into, carry if so, or else fail until a new continuation page is allocated;
+ * when the original swap_map count is decremented from 0 with continuation,
+ * borrow from the continuation and report whether it still holds more.
+ * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
+ */
+static bool swap_count_continued(struct swap_info_struct *si,
+ pgoff_t offset, unsigned char count)
+{
+ struct page *head;
+ struct page *page;
+ unsigned char *map;
+
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head) != SWP_CONTINUED) {
+ BUG_ON(count & COUNT_CONTINUED);
+ return false; /* need to add count continuation */
+ }
+
+ offset &= ~PAGE_MASK;
+ page = list_entry(head->lru.next, struct page, lru);
+ map = kmap_atomic(page, KM_USER0) + offset;
+
+ if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
+ goto init_map; /* jump over SWAP_CONT_MAX checks */
+
+ if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
+ /*
+ * Think of how you add 1 to 999
+ */
+ while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ BUG_ON(page == head);
+ map = kmap_atomic(page, KM_USER0) + offset;
+ }
+ if (*map == SWAP_CONT_MAX) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ if (page == head)
+ return false; /* add count continuation */
+ map = kmap_atomic(page, KM_USER0) + offset;
+init_map: *map = 0; /* we didn't zero the page */
+ }
+ *map += 1;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ while (page != head) {
+ map = kmap_atomic(page, KM_USER0) + offset;
+ *map = COUNT_CONTINUED;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+ return true; /* incremented */
+
+ } else { /* decrementing */
+ /*
+ * Think of how you subtract 1 from 1000
+ */
+ BUG_ON(count != COUNT_CONTINUED);
+ while (*map == COUNT_CONTINUED) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ BUG_ON(page == head);
+ map = kmap_atomic(page, KM_USER0) + offset;
+ }
+ BUG_ON(*map == 0);
+ *map -= 1;
+ if (*map == 0)
+ count = 0;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ while (page != head) {
+ map = kmap_atomic(page, KM_USER0) + offset;
+ *map = SWAP_CONT_MAX | count;
+ count = COUNT_CONTINUED;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+ return count == COUNT_CONTINUED;
+ }
+}
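
The "add 1 to 999" / "subtract 1 from 1000" comments above are literal: the swap_map byte is the low digit and each continuation page supplies one higher digit for the same offset, so incrementing past a full digit carries and decrementing past an empty one borrows. The toy program below models only that odometer arithmetic (base 10 for readability; the kernel's digits are SWAP_MAP_MAX and SWAP_CONT_MAX wide); it is an analogy, not the kernel representation itself.

    #include <stdio.h>

    #define DIGITS 4
    #define BASE   10

    static void inc(int *d)            /* think "add 1 to 999" */
    {
            int i = 0;
            while (i < DIGITS && d[i] == BASE - 1)
                    d[i++] = 0;        /* full digit: reset and carry */
            if (i < DIGITS)
                    d[i]++;
    }

    static void dec(int *d)            /* think "subtract 1 from 1000" */
    {
            int i = 0;
            while (i < DIGITS && d[i] == 0)
                    d[i++] = BASE - 1; /* empty digit: borrow from the next */
            if (i < DIGITS)
                    d[i]--;
    }

    int main(void)
    {
            int d[DIGITS] = { 9, 9, 9, 0 };          /* 999 */
            inc(d);                                  /* -> 1000 */
            printf("%d%d%d%d\n", d[3], d[2], d[1], d[0]);
            dec(d);                                  /* -> 999 again */
            printf("%d%d%d%d\n", d[3], d[2], d[1], d[0]);
            return 0;
    }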
+
+/*
+ * free_swap_count_continuations - swapoff free all the continuation pages
+ * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
+ */
+static void free_swap_count_continuations(struct swap_info_struct *si)
+{
+ pgoff_t offset;
+
+ for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
+ struct page *head;
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head)) {
+ struct list_head *this, *next;
+ list_for_each_safe(this, next, &head->lru) {
+ struct page *page;
+ page = list_entry(this, struct page, lru);
+ list_del(this);
+ __free_page(page);
+ }
+ }
+ }
+}
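
One theme running through the swapfile.c changes above: swap_info[] is now an array of pointers, published to the lock-free /proc/swaps iterator by filling swap_info[type] before raising nr_swapfiles (smp_wmb() in swapon, paired with the smp_rmb() calls in swap_start()/swap_next()). The standalone sketch below models that publish/observe ordering with C11 release/acquire atomics standing in for the kernel barriers; it illustrates the pattern only and is not kernel code.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct swap_info { int type; };

    #define MAX_SWAPFILES 32
    static struct swap_info *swap_info[MAX_SWAPFILES];
    static atomic_int nr_swapfiles;

    /* writer: fill the slot, then publish by growing the count (release) */
    static void publish(int type)
    {
            struct swap_info *p = malloc(sizeof(*p));
            p->type = type;
            swap_info[type] = p;
            atomic_store_explicit(&nr_swapfiles, type + 1, memory_order_release);
    }

    /* reader: load the count first (acquire); slots below it are then valid */
    static void iterate(void)
    {
            int n = atomic_load_explicit(&nr_swapfiles, memory_order_acquire);
            for (int type = 0; type < n; type++)
                    printf("slot %d -> type %d\n", type, swap_info[type]->type);
    }

    int main(void)
    {
            publish(0);
            publish(1);
            iterate();
            return 0;
    }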
diff --git a/mm/truncate.c b/mm/truncate.c
index ccc3ecf7cb98..e87e37244829 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page);
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/
-static void
+static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
if (page->mapping != mapping)
- return;
+ return -EIO;
if (page_has_private(page))
do_invalidatepage(page, 0);
@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
remove_from_page_cache(page);
ClearPageMappedToDisk(page);
page_cache_release(page); /* pagecache ref */
+ return 0;
}
/*
@@ -135,6 +136,51 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
return ret;
}
+int truncate_inode_page(struct address_space *mapping, struct page *page)
+{
+ if (page_mapped(page)) {
+ unmap_mapping_range(mapping,
+ (loff_t)page->index << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE, 0);
+ }
+ return truncate_complete_page(mapping, page);
+}
+
+/*
+ * Used to get rid of pages on hardware memory corruption.
+ */
+int generic_error_remove_page(struct address_space *mapping, struct page *page)
+{
+ if (!mapping)
+ return -EINVAL;
+ /*
+ * Only punch for normal data pages for now.
+ * Handling other types like directories would need more auditing.
+ */
+ if (!S_ISREG(mapping->host->i_mode))
+ return -EIO;
+ return truncate_inode_page(mapping, page);
+}
+EXPORT_SYMBOL(generic_error_remove_page);
+
+/*
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages. The page must be locked.
+ *
+ * Returns 1 if the page is successfully invalidated, otherwise 0.
+ */
+int invalidate_inode_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ if (!mapping)
+ return 0;
+ if (PageDirty(page) || PageWriteback(page))
+ return 0;
+ if (page_mapped(page))
+ return 0;
+ return invalidate_complete_page(mapping, page);
+}
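
invalidate_inode_page() above bundles the old clean/unmapped/not-under-writeback checks so that callers outside this file can try to drop a single page. A hedged sketch of the calling convention follows; the helper name and the trylock-based locking are assumptions, not code from this series.

    /*
     * Hedged sketch: try to drop one pagecache page if it is clean and idle.
     * The caller must already hold a reference on the page.
     */
    int try_drop_clean_page(struct page *page)
    {
            int dropped;

            if (!trylock_page(page))
                    return 0;               /* busy: leave it alone */
            dropped = invalidate_inode_page(page);  /* 1 on success, 0 if dirty,
                                                       mapped or under writeback */
            unlock_page(page);
            return dropped;
    }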
+
/**
* truncate_inode_pages - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
@@ -196,12 +242,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page);
continue;
}
- if (page_mapped(page)) {
- unmap_mapping_range(mapping,
- (loff_t)page_index<<PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE, 0);
- }
- truncate_complete_page(mapping, page);
+ truncate_inode_page(mapping, page);
unlock_page(page);
}
pagevec_release(&pvec);
@@ -231,6 +272,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
pagevec_release(&pvec);
break;
}
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -238,18 +280,14 @@ void truncate_inode_pages_range(struct address_space *mapping,
break;
lock_page(page);
wait_on_page_writeback(page);
- if (page_mapped(page)) {
- unmap_mapping_range(mapping,
- (loff_t)page->index<<PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE, 0);
- }
+ truncate_inode_page(mapping, page);
if (page->index > next)
next = page->index;
next++;
- truncate_complete_page(mapping, page);
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
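
The mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() pairs added above bracket each pagevec loop so the memcg uncharges performed while releasing those pages can be batched rather than issued one by one. A minimal sketch of the pattern in isolation; release_one_page() is a placeholder for the per-page work, and i/pvec are assumed from the surrounding loop.

    mem_cgroup_uncharge_start();
    for (i = 0; i < pagevec_count(&pvec); i++)
            release_one_page(pvec.pages[i]);        /* uncharges accumulate */
    pagevec_release(&pvec);
    mem_cgroup_uncharge_end();                      /* flush the batched uncharges */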
@@ -291,6 +329,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
pagevec_init(&pvec, 0);
while (next <= end &&
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t index;
@@ -311,17 +350,14 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
if (lock_failed)
continue;
- if (PageDirty(page) || PageWriteback(page))
- goto unlock;
- if (page_mapped(page))
- goto unlock;
- ret += invalidate_complete_page(mapping, page);
-unlock:
+ ret += invalidate_inode_page(page);
+
unlock_page(page);
if (next > end)
break;
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
return ret;
@@ -396,6 +432,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
while (next <= end && !wrapped &&
pagevec_lookup(&pvec, mapping, next,
min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
@@ -445,6 +482,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
return ret;
@@ -458,10 +496,72 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
* Any pages which are found to be mapped into pagetables are unmapped prior to
* invalidation.
*
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
*/
int invalidate_inode_pages2(struct address_space *mapping)
{
return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
+
+/**
+ * truncate_pagecache - unmap and remove pagecache that has been truncated
+ * @inode: inode
+ * @old: old file offset
+ * @new: new file offset
+ *
+ * inode's new i_size must already be written before truncate_pagecache
+ * is called.
+ *
+ * This function should typically be called before the filesystem
+ * releases resources associated with the freed range (eg. deallocates
+ * blocks). This way, pagecache will always stay logically coherent
+ * with on-disk format, and the filesystem would not have to deal with
+ * situations such as writepage being called for a page that has already
+ * had its underlying blocks deallocated.
+ */
+void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
+{
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * unmap_mapping_range is called twice, first simply for
+ * efficiency so that truncate_inode_pages does fewer
+ * single-page unmaps. However after this first call, and
+ * before truncate_inode_pages finishes, it is possible for
+ * private pages to be COWed, which remain after
+ * truncate_inode_pages finishes, hence the second
+ * unmap_mapping_range call must be made for correctness.
+ */
+ unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, new);
+ unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+}
+EXPORT_SYMBOL(truncate_pagecache);
+
+/**
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page. Ugly, but necessary.
+ */
+int vmtruncate(struct inode *inode, loff_t offset)
+{
+ loff_t oldsize;
+ int error;
+
+ error = inode_newsize_ok(inode, offset);
+ if (error)
+ return error;
+ oldsize = inode->i_size;
+ i_size_write(inode, offset);
+ truncate_pagecache(inode, oldsize, offset);
+ if (inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+
+ return error;
+}
+EXPORT_SYMBOL(vmtruncate);
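
truncate_pagecache() and vmtruncate() above pin down the ordering a filesystem is expected to follow when shrinking a file: write the new i_size first, strip the pagecache next, and only then release the on-disk blocks. The sketch below shows that order for a filesystem using truncate_pagecache() directly; myfs_truncate_to() and fs_free_blocks() are hypothetical names, and any i_mutex/size locking is assumed to be handled by the caller.

    static int myfs_truncate_to(struct inode *inode, loff_t newsize)
    {
            loff_t oldsize = inode->i_size;

            i_size_write(inode, newsize);                 /* new size first */
            truncate_pagecache(inode, oldsize, newsize);  /* then the pagecache */
            fs_free_blocks(inode, newsize);               /* finally the blocks, so no
                                                             writepage hits a freed range */
            return 0;
    }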
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f927..834db7be240f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -220,7 +220,7 @@ char *strndup_user(const char __user *s, long n)
}
EXPORT_SYMBOL(strndup_user);
-#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f8189a4b3e13..ae007462b7f6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -25,10 +26,10 @@
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
-
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
+#include <asm/shmparam.h>
/*** Page table manipulation functions ***/
@@ -168,11 +169,9 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
next = pgd_addr_end(addr, end);
err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
if (err)
- break;
+ return err;
} while (pgd++, addr = next, addr != end);
- if (unlikely(err))
- return err;
return nr;
}
@@ -186,7 +185,7 @@ static int vmap_page_range(unsigned long start, unsigned long end,
return ret;
}
-static inline int is_vmalloc_or_module_addr(const void *x)
+int is_vmalloc_or_module_addr(const void *x)
{
/*
* ARM, x86-64 and sparc64 put modules in a special place,
@@ -265,6 +264,7 @@ struct vmap_area {
static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
+static unsigned long vmap_area_pcpu_hole;
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
@@ -431,6 +431,15 @@ static void __free_vmap_area(struct vmap_area *va)
RB_CLEAR_NODE(&va->rb_node);
list_del_rcu(&va->list);
+ /*
+ * Track the highest possible candidate for pcpu area
+ * allocation. Areas outside of vmalloc area can be returned
+ * here too, consider only end addresses which fall inside
+ * vmalloc area proper.
+ */
+ if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
+ vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
+
call_rcu(&va->rcu_head, rcu_free_va);
}
@@ -500,6 +509,9 @@ static unsigned long lazy_max_pages(void)
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+/* for per-CPU blocks */
+static void purge_fragmented_blocks_allcpus(void);
+
/*
* Purges all lazily-freed vmap areas.
*
@@ -530,6 +542,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
} else
spin_lock(&purge_lock);
+ if (sync)
+ purge_fragmented_blocks_allcpus();
+
rcu_read_lock();
list_for_each_entry_rcu(va, &vmap_area_list, list) {
if (va->flags & VM_LAZY_FREE) {
@@ -546,10 +561,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
}
rcu_read_unlock();
- if (nr) {
- BUG_ON(nr > atomic_read(&vmap_lazy_nr));
+ if (nr)
atomic_sub(nr, &vmap_lazy_nr);
- }
if (nr || force_flush)
flush_tlb_kernel_range(*start, *end);
@@ -660,8 +673,6 @@ static bool vmap_initialized __read_mostly = false;
struct vmap_block_queue {
spinlock_t lock;
struct list_head free;
- struct list_head dirty;
- unsigned int nr_dirty;
};
struct vmap_block {
@@ -671,10 +682,9 @@ struct vmap_block {
unsigned long free, dirty;
DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
- union {
- struct list_head free_list;
- struct rcu_head rcu_head;
- };
+ struct list_head free_list;
+ struct rcu_head rcu_head;
+ struct list_head purge;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@@ -750,9 +760,9 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
vbq = &get_cpu_var(vmap_block_queue);
vb->vbq = vbq;
spin_lock(&vbq->lock);
- list_add(&vb->free_list, &vbq->free);
+ list_add_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
- put_cpu_var(vmap_cpu_blocks);
+ put_cpu_var(vmap_block_queue);
return vb;
}
@@ -769,8 +779,6 @@ static void free_vmap_block(struct vmap_block *vb)
struct vmap_block *tmp;
unsigned long vb_idx;
- BUG_ON(!list_empty(&vb->free_list));
-
vb_idx = addr_to_vb_idx(vb->va->va_start);
spin_lock(&vmap_block_tree_lock);
tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
@@ -781,12 +789,61 @@ static void free_vmap_block(struct vmap_block *vb)
call_rcu(&vb->rcu_head, rcu_free_vb);
}
+static void purge_fragmented_blocks(int cpu)
+{
+ LIST_HEAD(purge);
+ struct vmap_block *vb;
+ struct vmap_block *n_vb;
+ struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+
+ if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
+ continue;
+
+ spin_lock(&vb->lock);
+ if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
+ vb->free = 0; /* prevent further allocs after releasing lock */
+ vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
+ bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
+ bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
+ spin_lock(&vbq->lock);
+ list_del_rcu(&vb->free_list);
+ spin_unlock(&vbq->lock);
+ spin_unlock(&vb->lock);
+ list_add_tail(&vb->purge, &purge);
+ } else
+ spin_unlock(&vb->lock);
+ }
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(vb, n_vb, &purge, purge) {
+ list_del(&vb->purge);
+ free_vmap_block(vb);
+ }
+}
+
+static void purge_fragmented_blocks_thiscpu(void)
+{
+ purge_fragmented_blocks(smp_processor_id());
+}
+
+static void purge_fragmented_blocks_allcpus(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ purge_fragmented_blocks(cpu);
+}
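
The purge test above is written twice, once as an unlocked fast check and once again under vb->lock; spelled out, a block is purgeable when its free and dirty space together cover the whole block (no outstanding allocations) but the block is not already entirely dirty. A small sketch of that predicate, with VMAP_BBMAP_BITS assumed from the surrounding file:

    /* Sketch only: the condition purge_fragmented_blocks() checks twice. */
    static bool vmap_block_is_purgeable(unsigned long free, unsigned long dirty)
    {
            /* all space either free or dirty: no outstanding allocations ... */
            if (free + dirty != VMAP_BBMAP_BITS)
                    return false;
            /* ... but not already fully dirty, so there is something to reclaim */
            return dirty != VMAP_BBMAP_BITS;
    }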
+
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
struct vmap_block_queue *vbq;
struct vmap_block *vb;
unsigned long addr = 0;
unsigned int order;
+ int purge = 0;
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -799,25 +856,39 @@ again:
int i;
spin_lock(&vb->lock);
+ if (vb->free < 1UL << order)
+ goto next;
+
i = bitmap_find_free_region(vb->alloc_map,
VMAP_BBMAP_BITS, order);
- if (i >= 0) {
- addr = vb->va->va_start + (i << PAGE_SHIFT);
- BUG_ON(addr_to_vb_idx(addr) !=
- addr_to_vb_idx(vb->va->va_start));
- vb->free -= 1UL << order;
- if (vb->free == 0) {
- spin_lock(&vbq->lock);
- list_del_init(&vb->free_list);
- spin_unlock(&vbq->lock);
+ if (i < 0) {
+ if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
+ /* fragmented and no outstanding allocations */
+ BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
+ purge = 1;
}
- spin_unlock(&vb->lock);
- break;
+ goto next;
}
+ addr = vb->va->va_start + (i << PAGE_SHIFT);
+ BUG_ON(addr_to_vb_idx(addr) !=
+ addr_to_vb_idx(vb->va->va_start));
+ vb->free -= 1UL << order;
+ if (vb->free == 0) {
+ spin_lock(&vbq->lock);
+ list_del_rcu(&vb->free_list);
+ spin_unlock(&vbq->lock);
+ }
+ spin_unlock(&vb->lock);
+ break;
+next:
spin_unlock(&vb->lock);
}
- put_cpu_var(vmap_cpu_blocks);
+
+ if (purge)
+ purge_fragmented_blocks_thiscpu();
+
+ put_cpu_var(vmap_block_queue);
rcu_read_unlock();
if (!addr) {
@@ -853,11 +924,11 @@ static void vb_free(const void *addr, unsigned long size)
BUG_ON(!vb);
spin_lock(&vb->lock);
- bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
+ BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
vb->dirty += 1UL << order;
if (vb->dirty == VMAP_BBMAP_BITS) {
- BUG_ON(vb->free || !list_empty(&vb->free_list));
+ BUG_ON(vb->free);
spin_unlock(&vb->lock);
free_vmap_block(vb);
} else
@@ -1026,8 +1097,6 @@ void __init vmalloc_init(void)
vbq = &per_cpu(vmap_block_queue, i);
spin_lock_init(&vbq->lock);
INIT_LIST_HEAD(&vbq->free);
- INIT_LIST_HEAD(&vbq->dirty);
- vbq->nr_dirty = 0;
}
/* Import existing vmlist entries. */
@@ -1038,6 +1107,9 @@ void __init vmalloc_init(void)
va->va_end = va->va_start + tmp->size;
__insert_vmap_area(va);
}
+
+ vmap_area_pcpu_hole = VMALLOC_END;
+
vmap_initialized = true;
}
@@ -1122,14 +1194,34 @@ EXPORT_SYMBOL_GPL(map_vm_area);
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
+static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+ unsigned long flags, void *caller)
+{
+ struct vm_struct *tmp, **p;
+
+ vm->flags = flags;
+ vm->addr = (void *)va->va_start;
+ vm->size = va->va_end - va->va_start;
+ vm->caller = caller;
+ va->private = vm;
+ va->flags |= VM_VM_AREA;
+
+ write_lock(&vmlist_lock);
+ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+ if (tmp->addr >= vm->addr)
+ break;
+ }
+ vm->next = *p;
+ *p = vm;
+ write_unlock(&vmlist_lock);
+}
+
static struct vm_struct *__get_vm_area_node(unsigned long size,
- unsigned long flags, unsigned long start, unsigned long end,
- int node, gfp_t gfp_mask, void *caller)
+ unsigned long align, unsigned long flags, unsigned long start,
+ unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
static struct vmap_area *va;
struct vm_struct *area;
- struct vm_struct *tmp, **p;
- unsigned long align = 1;
BUG_ON(in_interrupt());
if (flags & VM_IOREMAP) {
@@ -1147,7 +1239,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
if (unlikely(!size))
return NULL;
- area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+ area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!area))
return NULL;
@@ -1162,32 +1254,14 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
}
- area->flags = flags;
- area->addr = (void *)va->va_start;
- area->size = size;
- area->pages = NULL;
- area->nr_pages = 0;
- area->phys_addr = 0;
- area->caller = caller;
- va->private = area;
- va->flags |= VM_VM_AREA;
-
- write_lock(&vmlist_lock);
- for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
- if (tmp->addr >= area->addr)
- break;
- }
- area->next = *p;
- *p = area;
- write_unlock(&vmlist_lock);
-
+ insert_vmalloc_vm(area, va, flags, caller);
return area;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1196,7 +1270,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
void *caller)
{
- return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
caller);
}
@@ -1211,22 +1285,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
- return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-1, GFP_KERNEL, __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
void *caller)
{
- return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-1, GFP_KERNEL, caller);
}
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
int node, gfp_t gfp_mask)
{
- return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
- gfp_mask, __builtin_return_address(0));
+ return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ node, gfp_mask, __builtin_return_address(0));
}
static struct vm_struct *find_vm_area(const void *addr)
@@ -1256,17 +1330,21 @@ struct vm_struct *remove_vm_area(const void *addr)
if (va && va->flags & VM_VM_AREA) {
struct vm_struct *vm = va->private;
struct vm_struct *tmp, **p;
-
- vmap_debug_free_range(va->va_start, va->va_end);
- free_unmap_vmap_area(va);
- vm->size -= PAGE_SIZE;
-
+ /*
+	 * Remove from the list and disallow access to this vm_struct
+	 * before unmapping. (Address range conflict avoidance is still
+	 * handled by vmap.)
+ */
write_lock(&vmlist_lock);
for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
;
*p = tmp->next;
write_unlock(&vmlist_lock);
+ vmap_debug_free_range(va->va_start, va->va_end);
+ free_unmap_vmap_area(va);
+ vm->size -= PAGE_SIZE;
+
return vm;
}
return NULL;
@@ -1368,7 +1446,7 @@ void *vmap(struct page **pages, unsigned int count,
might_sleep();
- if (count > num_physpages)
+ if (count > totalram_pages)
return NULL;
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
@@ -1385,13 +1463,15 @@ void *vmap(struct page **pages, unsigned int count,
}
EXPORT_SYMBOL(vmap);
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node, void *caller)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
+ gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
@@ -1399,13 +1479,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+ pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
PAGE_KERNEL, node, caller);
area->flags |= VM_VPAGES;
} else {
- pages = kmalloc_node(array_size,
- (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
- node);
+ pages = kmalloc_node(array_size, nested_gfp, node);
}
area->pages = pages;
area->caller = caller;
@@ -1458,6 +1536,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
/**
* __vmalloc_node - allocate virtually contiguous memory
* @size: allocation size
+ * @align: desired alignment
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
* @node: node to use for allocation or -1
@@ -1467,19 +1546,20 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
* allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot.
*/
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
+ int node, void *caller)
{
struct vm_struct *area;
void *addr;
unsigned long real_size = size;
size = PAGE_ALIGN(size);
- if (!size || (size >> PAGE_SHIFT) > num_physpages)
+ if (!size || (size >> PAGE_SHIFT) > totalram_pages)
return NULL;
- area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
- node, gfp_mask, caller);
+ area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+ VMALLOC_END, node, gfp_mask, caller);
if (!area)
return NULL;
@@ -1498,7 +1578,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_node(size, gfp_mask, prot, -1,
+ return __vmalloc_node(size, 1, gfp_mask, prot, -1,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
@@ -1514,7 +1594,7 @@ EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
@@ -1531,7 +1611,8 @@ void *vmalloc_user(unsigned long size)
struct vm_struct *area;
void *ret;
- ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ ret = __vmalloc_node(size, SHMLBA,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL, -1, __builtin_return_address(0));
if (ret) {
area = find_vm_area(ret);
@@ -1554,7 +1635,7 @@ EXPORT_SYMBOL(vmalloc_user);
*/
void *vmalloc_node(unsigned long size, int node)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
@@ -1577,7 +1658,7 @@ EXPORT_SYMBOL(vmalloc_node);
void *vmalloc_exec(unsigned long size)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-1, __builtin_return_address(0));
}
@@ -1598,7 +1679,7 @@ void *vmalloc_exec(unsigned long size)
*/
void *vmalloc_32(unsigned long size)
{
- return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+ return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
@@ -1615,7 +1696,7 @@ void *vmalloc_32_user(unsigned long size)
struct vm_struct *area;
void *ret;
- ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+ ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
-1, __builtin_return_address(0));
if (ret) {
area = find_vm_area(ret);
@@ -1625,10 +1706,120 @@ void *vmalloc_32_user(unsigned long size)
}
EXPORT_SYMBOL(vmalloc_32_user);
+/*
+ * small helper routine , copy contents to buf from addr.
+ * If the page is not present, fill zero.
+ */
+
+static int aligned_vread(char *buf, char *addr, unsigned long count)
+{
+ struct page *p;
+ int copied = 0;
+
+ while (count) {
+ unsigned long offset, length;
+
+ offset = (unsigned long)addr & ~PAGE_MASK;
+ length = PAGE_SIZE - offset;
+ if (length > count)
+ length = count;
+ p = vmalloc_to_page(addr);
+ /*
+		 * To access this _mapped_ area safely we would need a lock, but
+		 * taking a lock here adds vmalloc()/vfree() overhead to this
+		 * rarely used _debug_ interface. Instead, we use kmap() and
+		 * accept the small overhead in this access function.
+ */
+ if (p) {
+ /*
+ * we can expect USER0 is not used (see vread/vwrite's
+ * function description)
+ */
+ void *map = kmap_atomic(p, KM_USER0);
+ memcpy(buf, map + offset, length);
+ kunmap_atomic(map, KM_USER0);
+ } else
+ memset(buf, 0, length);
+
+ addr += length;
+ buf += length;
+ copied += length;
+ count -= length;
+ }
+ return copied;
+}
+
+static int aligned_vwrite(char *buf, char *addr, unsigned long count)
+{
+ struct page *p;
+ int copied = 0;
+
+ while (count) {
+ unsigned long offset, length;
+
+ offset = (unsigned long)addr & ~PAGE_MASK;
+ length = PAGE_SIZE - offset;
+ if (length > count)
+ length = count;
+ p = vmalloc_to_page(addr);
+ /*
+		 * To access this _mapped_ area safely we would need a lock, but
+		 * taking a lock here adds vmalloc()/vfree() overhead to this
+		 * rarely used _debug_ interface. Instead, we use kmap() and
+		 * accept the small overhead in this access function.
+ */
+ if (p) {
+ /*
+ * We can expect KM_USER0 to be unused here (see the
+ * function description of vread/vwrite).
+ */
+ void *map = kmap_atomic(p, KM_USER0);
+ memcpy(map + offset, buf, length);
+ kunmap_atomic(map, KM_USER0);
+ }
+ addr += length;
+ buf += length;
+ copied += length;
+ count -= length;
+ }
+ return copied;
+}
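
Both helpers above walk the range a page at a time: they take the offset of @addr within its page, clamp the chunk to what remains of @count, and either copy through the kernel mapping or zero-fill. A minimal user-space sketch of that walk, for illustration only (plain memcpy() stands in for the kmap_atomic() access, and the hypothetical page_present() predicate stands in for vmalloc_to_page()):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Hypothetical stand-in for vmalloc_to_page(): is this page mapped? */
static int page_present(const char *addr)
{
    (void)addr;
    return 1;
}

/* Same shape as aligned_vread(): copy page by page, zero-fill holes. */
static unsigned long vread_like(char *buf, const char *addr, unsigned long count)
{
    unsigned long copied = 0;

    while (count) {
        unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
        unsigned long length = PAGE_SIZE - offset;

        if (length > count)
            length = count;
        if (page_present(addr))
            memcpy(buf, addr, length);
        else
            memset(buf, 0, length);
        addr += length;
        buf += length;
        copied += length;
        count -= length;
    }
    return copied;
}

int main(void)
{
    char src[10000], dst[10000];

    memset(src, 'x', sizeof(src));
    printf("copied %lu bytes\n", vread_like(dst, src, sizeof(src)));
    return 0;
}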
+
+/**
+ * vread() - read vmalloc area in a safe way.
+ * @buf: buffer for reading data
+ * @addr: vm address.
+ * @count: number of bytes to be read.
+ *
+ * Returns the number of bytes by which @addr and @buf should be
+ * increased (the same number as @count). Returns 0 if
+ * [addr...addr+count) does not intersect any live vmalloc area.
+ *
+ * This function checks that @addr is a valid vmalloc'ed address and
+ * copies data from that area into the given buffer. If the range
+ * [addr...addr+count) includes some valid addresses, data is copied
+ * to the corresponding part of @buf. Memory holes are zero-filled,
+ * and an IOREMAP area is treated as a memory hole (no copy is done).
+ *
+ * If [addr...addr+count) does not intersect any live vm_struct area,
+ * this returns 0.
+ * @buf must be a kernel buffer. Because this function uses KM_USER0,
+ * the caller must guarantee that KM_USER0 is not otherwise in use.
+ *
+ * Note: vread() is normally unnecessary because the caller should
+ * know that the vmalloc() area is valid and can simply use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any prior information, such as /dev/kmem.
+ */
+
long vread(char *buf, char *addr, unsigned long count)
{
struct vm_struct *tmp;
char *vaddr, *buf_start = buf;
+ unsigned long buflen = count;
unsigned long n;
/* Don't allow overflow */
@@ -1636,7 +1827,7 @@ long vread(char *buf, char *addr, unsigned long count)
count = -(unsigned long) addr;
read_lock(&vmlist_lock);
- for (tmp = vmlist; tmp; tmp = tmp->next) {
+ for (tmp = vmlist; count && tmp; tmp = tmp->next) {
vaddr = (char *) tmp->addr;
if (addr >= vaddr + tmp->size - PAGE_SIZE)
continue;
@@ -1649,32 +1840,72 @@ long vread(char *buf, char *addr, unsigned long count)
count--;
}
n = vaddr + tmp->size - PAGE_SIZE - addr;
- do {
- if (count == 0)
- goto finished;
- *buf = *addr;
- buf++;
- addr++;
- count--;
- } while (--n > 0);
+ if (n > count)
+ n = count;
+ if (!(tmp->flags & VM_IOREMAP))
+ aligned_vread(buf, addr, n);
+ else /* IOREMAP area is treated as memory hole */
+ memset(buf, 0, n);
+ buf += n;
+ addr += n;
+ count -= n;
}
finished:
read_unlock(&vmlist_lock);
- return buf - buf_start;
+
+ if (buf == buf_start)
+ return 0;
+ /* zero-fill memory holes */
+ if (buf != buf_start + buflen)
+ memset(buf, 0, buflen - (buf - buf_start));
+
+ return buflen;
}
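
The kernel-doc above notes that vread() is meant for callers with no prior knowledge of the vmalloc area, such as /dev/kmem. A hypothetical sketch of such a caller; dump_vmalloc_region() is illustrative and not part of this patch, and it assumes a context in which KM_USER0 is free, as vread() requires:

/*
 * Hypothetical sketch: dump a vmalloc'ed region for debugging.
 * Holes come back zero-filled, and a return value of 0 means the
 * range did not touch any live vmalloc area.
 */
static void dump_vmalloc_region(char *vm_addr, unsigned long len)
{
        char *tmp = kmalloc(len, GFP_KERNEL);

        if (!tmp)
                return;
        if (vread(tmp, vm_addr, len))
                print_hex_dump(KERN_DEBUG, "vm: ", DUMP_PREFIX_OFFSET,
                               16, 1, tmp, len, false);
        kfree(tmp);
}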
+/**
+ * vwrite() - write vmalloc area in a safe way.
+ * @buf: buffer for source data
+ * @addr: vm address.
+ * @count: number of bytes to be written.
+ *
+ * Returns the number of bytes by which @addr and @buf should be
+ * increased (the same number as @count).
+ * If [addr...addr+count) does not intersect any live vmalloc area,
+ * this returns 0.
+ *
+ * This function checks that @addr is a valid vmalloc'ed address and
+ * copies data from the buffer to that area. If the range
+ * [addr...addr+count) includes some valid addresses, data is copied
+ * from the corresponding part of @buf. Nothing is copied into memory
+ * holes, and an IOREMAP area is treated as a memory hole (no copy is
+ * done).
+ *
+ * @buf must be a kernel buffer. Because this function uses KM_USER0,
+ * the caller must guarantee that KM_USER0 is not otherwise in use.
+ *
+ * Note: vwrite() is normally unnecessary because the caller should
+ * know that the vmalloc() area is valid and can simply use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any prior information, such as /dev/kmem.
+ */
+
long vwrite(char *buf, char *addr, unsigned long count)
{
struct vm_struct *tmp;
- char *vaddr, *buf_start = buf;
- unsigned long n;
+ char *vaddr;
+ unsigned long n, buflen;
+ int copied = 0;
/* Don't allow overflow */
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;
+ buflen = count;
read_lock(&vmlist_lock);
- for (tmp = vmlist; tmp; tmp = tmp->next) {
+ for (tmp = vmlist; count && tmp; tmp = tmp->next) {
vaddr = (char *) tmp->addr;
if (addr >= vaddr + tmp->size - PAGE_SIZE)
continue;
@@ -1686,18 +1917,21 @@ long vwrite(char *buf, char *addr, unsigned long count)
count--;
}
n = vaddr + tmp->size - PAGE_SIZE - addr;
- do {
- if (count == 0)
- goto finished;
- *addr = *buf;
- buf++;
- addr++;
- count--;
- } while (--n > 0);
+ if (n > count)
+ n = count;
+ if (!(tmp->flags & VM_IOREMAP)) {
+ aligned_vwrite(buf, addr, n);
+ copied++;
+ }
+ buf += n;
+ addr += n;
+ count -= n;
}
finished:
read_unlock(&vmlist_lock);
- return buf - buf_start;
+ if (!copied)
+ return 0;
+ return buflen;
}
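
The return-value convention shared by vread() and vwrite() is easiest to see in isolation: the walk over vmlist clamps each chunk to the current area, skips IOREMAP areas, and reports either the full request length or 0. A toy user-space model of vwrite()'s bookkeeping; struct area and vwrite_like() are illustrative, and the model ignores the guard page that the real code keeps at the end of each area:

#include <stdio.h>

struct area { unsigned long start, end; int ioremap; };

static unsigned long vwrite_like(const struct area *areas, int n_areas,
                                 unsigned long addr, unsigned long count)
{
    unsigned long buflen = count;
    int copied = 0;

    for (int i = 0; i < n_areas && count; i++) {
        if (addr >= areas[i].end)
            continue;                       /* area entirely below addr */
        if (addr < areas[i].start) {        /* hole before this area */
            unsigned long hole = areas[i].start - addr;

            if (hole >= count)
                break;
            addr += hole;
            count -= hole;
        }
        unsigned long n = areas[i].end - addr;

        if (n > count)
            n = count;
        if (!areas[i].ioremap)
            copied = 1;                     /* real code copies here */
        addr += n;
        count -= n;
    }
    return copied ? buflen : 0;
}

int main(void)
{
    struct area areas[] = { { 0x1000, 0x3000, 0 }, { 0x5000, 0x6000, 1 } };

    printf("0x%lx\n", vwrite_like(areas, 2, 0x2000, 0x4000)); /* -> 0x4000 */
    printf("0x%lx\n", vwrite_like(areas, 2, 0x8000, 0x1000)); /* -> 0      */
    return 0;
}

vread() differs only in that it also zero-fills holes into @buf, so it returns @count whenever the walk advanced the buffer at all.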
/**
@@ -1818,6 +2052,286 @@ void free_vm_area(struct vm_struct *area)
}
EXPORT_SYMBOL_GPL(free_vm_area);
+static struct vmap_area *node_to_va(struct rb_node *n)
+{
+ return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
+}
+
+/**
+ * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
+ * @end: target address
+ * @pnext: out arg for the next vmap_area
+ * @pprev: out arg for the previous vmap_area
+ *
+ * Returns: %true if either or both of next and prev are found,
+ * %false if no vmap_area exists
+ *
+ * Find the vmap_areas whose end addresses enclose @end, i.e. if not
+ * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
+ */
+static bool pvm_find_next_prev(unsigned long end,
+ struct vmap_area **pnext,
+ struct vmap_area **pprev)
+{
+ struct rb_node *n = vmap_area_root.rb_node;
+ struct vmap_area *va = NULL;
+
+ while (n) {
+ va = rb_entry(n, struct vmap_area, rb_node);
+ if (end < va->va_end)
+ n = n->rb_left;
+ else if (end > va->va_end)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ if (!va)
+ return false;
+
+ if (va->va_end > end) {
+ *pnext = va;
+ *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
+ } else {
+ *pprev = va;
+ *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
+ }
+ return true;
+}
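
Concretely, for areas sorted by end address, "prev" is the last area whose end is <= @end and "next" is the first area whose end is > @end. A user-space analogue over a sorted array; find_next_prev() is illustrative, whereas the kernel walks the vmap_area rbtree:

#include <stdio.h>

/* Areas sorted by end address; find prev (end <= target) and
 * next (end > target), mirroring pvm_find_next_prev() above. */
static void find_next_prev(const unsigned long *ends, int n,
                           unsigned long target, int *prev, int *next)
{
    int lo = 0, hi = n;             /* first index with ends[i] > target */

    while (lo < hi) {
        int mid = (lo + hi) / 2;

        if (ends[mid] > target)
            hi = mid;
        else
            lo = mid + 1;
    }
    *next = (lo < n) ? lo : -1;
    *prev = (lo > 0) ? lo - 1 : -1;
}

int main(void)
{
    unsigned long ends[] = { 0x1000, 0x4000, 0x9000 };
    int prev, next;

    find_next_prev(ends, 3, 0x4000, &prev, &next);
    printf("prev=%d next=%d\n", prev, next);    /* prev=1 next=2 */
    return 0;
}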
+
+/**
+ * pvm_determine_end - find the highest aligned address between two vmap_areas
+ * @pnext: in/out arg for the next vmap_area
+ * @pprev: in/out arg for the previous vmap_area
+ * @align: alignment
+ *
+ * Returns: determined end address
+ *
+ * Find the highest aligned address between *@pnext and *@pprev below
+ * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
+ * down address is between the end addresses of the two vmap_areas.
+ *
+ * Please note that the address returned by this function may fall
+ * inside *@pnext vmap_area. The caller is responsible for checking
+ * that.
+ */
+static unsigned long pvm_determine_end(struct vmap_area **pnext,
+ struct vmap_area **pprev,
+ unsigned long align)
+{
+ const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
+ unsigned long addr;
+
+ if (*pnext)
+ addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
+ else
+ addr = vmalloc_end;
+
+ while (*pprev && (*pprev)->va_end > addr) {
+ *pnext = *pprev;
+ *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
+ }
+
+ return addr;
+}
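
The `& ~(align - 1)` in pvm_determine_end() is ordinary align-down arithmetic, valid because @align is required to be a power of two. A worked example:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long align = 0x10000;              /* 64KiB, a power of two */
    unsigned long va_start = 0x12345678UL;

    /* Align down, as pvm_determine_end() does with (*pnext)->va_start. */
    unsigned long addr = va_start & ~(align - 1);

    assert(addr == 0x12340000UL);
    assert((addr & (align - 1)) == 0);          /* now align-aligned */
    printf("%#lx -> %#lx\n", va_start, addr);
    return 0;
}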
+
+/**
+ * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
+ * @offsets: array containing offset of each area
+ * @sizes: array containing size of each area
+ * @nr_vms: the number of areas to allocate
+ * @align: alignment, all entries in @offsets and @sizes must be aligned to this
+ * @gfp_mask: allocation mask
+ *
+ * Returns: kmalloc'd vm_struct pointer array pointing to allocated
+ * vm_structs on success, %NULL on failure
+ *
+ * Percpu allocator wants to use congruent vm areas so that it can
+ * maintain the offsets among percpu areas. This function allocates
+ * congruent vmalloc areas for it. These areas tend to be scattered
+ * pretty far, distance between two areas easily going up to
+ * gigabytes. To avoid interacting with regular vmallocs, these areas
+ * are allocated from top.
+ *
+ * Despite its complicated look, this allocator is rather simple. It
+ * does everything top-down and scans from the end of the address
+ * space looking for a matching slot. While scanning, if any of the
+ * areas overlaps with an existing vmap_area, the base address is
+ * pulled down to fit the area. Scanning is repeated until all the
+ * areas fit, and then all necessary data structures are inserted and
+ * the result is returned.
+ */
+struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ const size_t *sizes, int nr_vms,
+ size_t align, gfp_t gfp_mask)
+{
+ const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
+ const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
+ struct vmap_area **vas, *prev, *next;
+ struct vm_struct **vms;
+ int area, area2, last_area, term_area;
+ unsigned long base, start, end, last_end;
+ bool purged = false;
+
+ gfp_mask &= GFP_RECLAIM_MASK;
+
+ /* verify parameters and allocate data structures */
+ BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
+ for (last_area = 0, area = 0; area < nr_vms; area++) {
+ start = offsets[area];
+ end = start + sizes[area];
+
+ /* is everything aligned properly? */
+ BUG_ON(!IS_ALIGNED(offsets[area], align));
+ BUG_ON(!IS_ALIGNED(sizes[area], align));
+
+ /* detect the area with the highest address */
+ if (start > offsets[last_area])
+ last_area = area;
+
+ for (area2 = 0; area2 < nr_vms; area2++) {
+ unsigned long start2 = offsets[area2];
+ unsigned long end2 = start2 + sizes[area2];
+
+ if (area2 == area)
+ continue;
+
+ BUG_ON(start2 >= start && start2 < end);
+ BUG_ON(end2 <= end && end2 > start);
+ }
+ }
+ last_end = offsets[last_area] + sizes[last_area];
+
+ if (vmalloc_end - vmalloc_start < last_end) {
+ WARN_ON(true);
+ return NULL;
+ }
+
+ vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
+ vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+ if (!vas || !vms)
+ goto err_free;
+
+ for (area = 0; area < nr_vms; area++) {
+ vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
+ vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+ if (!vas[area] || !vms[area])
+ goto err_free;
+ }
+retry:
+ spin_lock(&vmap_area_lock);
+
+ /* start scanning - we scan from the top, begin with the last area */
+ area = term_area = last_area;
+ start = offsets[area];
+ end = start + sizes[area];
+
+ if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
+ base = vmalloc_end - last_end;
+ goto found;
+ }
+ base = pvm_determine_end(&next, &prev, align) - end;
+
+ while (true) {
+ BUG_ON(next && next->va_end <= base + end);
+ BUG_ON(prev && prev->va_end > base + end);
+
+ /*
+ * base might have underflowed, add last_end before
+ * comparing.
+ */
+ if (base + last_end < vmalloc_start + last_end) {
+ spin_unlock(&vmap_area_lock);
+ if (!purged) {
+ purge_vmap_area_lazy();
+ purged = true;
+ goto retry;
+ }
+ goto err_free;
+ }
+
+ /*
+ * If next overlaps, move base downwards so that it's
+ * right below next and then recheck.
+ */
+ if (next && next->va_start < base + end) {
+ base = pvm_determine_end(&next, &prev, align) - end;
+ term_area = area;
+ continue;
+ }
+
+ /*
+ * If prev overlaps, shift down next and prev and move
+ * base so that it's right below new next and then
+ * recheck.
+ */
+ if (prev && prev->va_end > base + start) {
+ next = prev;
+ prev = node_to_va(rb_prev(&next->rb_node));
+ base = pvm_determine_end(&next, &prev, align) - end;
+ term_area = area;
+ continue;
+ }
+
+ /*
+ * This area fits, move on to the previous one. If
+ * the previous one is the terminal one, we're done.
+ */
+ area = (area + nr_vms - 1) % nr_vms;
+ if (area == term_area)
+ break;
+ start = offsets[area];
+ end = start + sizes[area];
+ pvm_find_next_prev(base + end, &next, &prev);
+ }
+found:
+ /* we've found a fitting base, insert all va's */
+ for (area = 0; area < nr_vms; area++) {
+ struct vmap_area *va = vas[area];
+
+ va->va_start = base + offsets[area];
+ va->va_end = va->va_start + sizes[area];
+ __insert_vmap_area(va);
+ }
+
+ vmap_area_pcpu_hole = base + offsets[last_area];
+
+ spin_unlock(&vmap_area_lock);
+
+ /* insert all vm's */
+ for (area = 0; area < nr_vms; area++)
+ insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+ pcpu_get_vm_areas);
+
+ kfree(vas);
+ return vms;
+
+err_free:
+ for (area = 0; area < nr_vms; area++) {
+ if (vas)
+ kfree(vas[area]);
+ if (vms)
+ kfree(vms[area]);
+ }
+ kfree(vas);
+ kfree(vms);
+ return NULL;
+}
+
+/**
+ * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
+ * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
+ * @nr_vms: the number of allocated areas
+ *
+ * Free vm_structs and the array allocated by pcpu_get_vm_areas().
+ */
+void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+{
+ int i;
+
+ for (i = 0; i < nr_vms; i++)
+ free_vm_area(vms[i]);
+ kfree(vms);
+}
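
A hypothetical caller, sketching how the pair above is meant to be used. grab_two_areas() and the particular offsets are illustrative and not taken from the percpu allocator; the sketch assumes the requested span fits below VMALLOC_END:

/*
 * Hypothetical sketch: ask for two congruent areas two units apart,
 * mirroring how the percpu allocator keeps unit offsets constant
 * across chunks.  Offsets and sizes must be aligned to @align and
 * the [offset, offset + size) ranges must not overlap.
 */
static struct vm_struct **grab_two_areas(void)
{
        const unsigned long offsets[] = { 0, 2 * PMD_SIZE };
        const size_t sizes[] = { PMD_SIZE, PMD_SIZE };
        struct vm_struct **vms;

        vms = pcpu_get_vm_areas(offsets, sizes, 2, PMD_SIZE, GFP_KERNEL);
        if (!vms)
                return NULL;
        /* ... map pages into vms[0]->addr and vms[1]->addr ... */
        return vms;             /* later: pcpu_free_vm_areas(vms, 2); */
}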
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9360c3e6d124..f055abc3eae6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -23,7 +23,6 @@
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/interrupt.h>
#include <linux/buffer_head.h> /* for try_to_release_page(),
buffer_heads_over_limit */
#include <linux/mm_inline.h>
@@ -56,6 +55,11 @@ struct scan_control {
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
+ /* How many pages shrink_list() should reclaim */
+ unsigned long nr_to_reclaim;
+
+ unsigned long hibernation_mode;
+
/* This context's GFP mask */
gfp_t gfp_mask;
@@ -67,12 +71,6 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
int may_swap;
- /* This context's SWAP_CLUSTER_MAX. If freeing memory for
- * suspend, we effectively ignore SWAP_CLUSTER_MAX.
- * In this context, it doesn't matter that we scan the
- * whole list at once. */
- int swap_cluster_max;
-
int swappiness;
int all_unreclaimable;
@@ -149,8 +147,8 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
return &zone->reclaim_stat;
}
-static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
- enum lru_list lru)
+static unsigned long zone_nr_lru_pages(struct zone *zone,
+ struct scan_control *sc, enum lru_list lru)
{
if (!scanning_global_lru(sc))
return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
@@ -287,7 +285,12 @@ static inline int page_mapping_inuse(struct page *page)
static inline int is_page_cache_freeable(struct page *page)
{
- return page_count(page) - !!page_has_private(page) == 2;
+ /*
+ * A freeable page cache page is referenced only by the caller
+ * that isolated the page, the page cache radix tree and
+ * optional buffer heads at page->private.
+ */
+ return page_count(page) - page_has_private(page) == 2;
}
static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -354,7 +357,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* stalls if we need to run get_block(). We could test
* PagePrivate for that.
*
- * If this process is currently in generic_file_write() against
+ * If this process is currently in __generic_file_aio_write() against
* this page's queue, we can perform writeback even if that
* will block.
*
@@ -362,7 +365,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* block, for some throttling. This happens by accident, because
* swap_backing_dev_info is bust: it doesn't reflect the
* congestion state of the swapdevs. Easy to fix, if needed.
- * See swapfile.c:page_queue_congested().
*/
if (!is_page_cache_freeable(page))
return PAGE_KEEP;
@@ -532,7 +534,7 @@ redo:
* unevictable page on [in]active list.
* We know how to handle that.
*/
- lru = active + page_is_file_cache(page);
+ lru = active + page_lru_base_type(page);
lru_cache_add_lru(page, lru);
} else {
/*
@@ -541,6 +543,16 @@ redo:
*/
lru = LRU_UNEVICTABLE;
add_page_to_unevictable_list(page);
+ /*
+ * When racing with an mlock clearing (page is
+ * unlocked), make sure that if the other thread does
+ * not observe our setting of PG_lru and fails
+ * isolation, we see PG_mlocked cleared below and move
+ * the page back to the evictable list.
+ *
+ * The other side is TestClearPageMlocked().
+ */
+ smp_mb();
}
/*
@@ -660,7 +672,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* processes. Try to unmap it here.
*/
if (page_mapped(page) && mapping) {
- switch (try_to_unmap(page, 0)) {
+ switch (try_to_unmap(page, TTU_UNMAP)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -822,7 +834,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
return ret;
- if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
+ if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
return ret;
/*
@@ -936,6 +948,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
/* Check that we have not crossed a zone boundary. */
if (unlikely(page_zone_id(cursor_page) != zone_id))
continue;
+
+ /*
+ * If we don't have enough swap space, reclaiming of
+ * anon page which don't already have a swap slot is
+ * pointless.
+ */
+ if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+ !PageSwapCache(cursor_page))
+ continue;
+
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
mem_cgroup_del_lru(cursor_page);
@@ -962,7 +984,7 @@ static unsigned long isolate_pages_global(unsigned long nr,
if (file)
lru += LRU_FILE;
return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
- mode, !!file);
+ mode, file);
}
/*
@@ -977,7 +999,7 @@ static unsigned long clear_active_flags(struct list_head *page_list,
struct page *page;
list_for_each_entry(page, page_list, lru) {
- lru = page_is_file_cache(page);
+ lru = page_lru_base_type(page);
if (PageActive(page)) {
lru += LRU_ACTIVE;
ClearPageActive(page);
@@ -1035,6 +1057,31 @@ int isolate_lru_page(struct page *page)
}
/*
+ * Are there way too many processes in the direct reclaim path already?
+ */
+static int too_many_isolated(struct zone *zone, int file,
+ struct scan_control *sc)
+{
+ unsigned long inactive, isolated;
+
+ if (current_is_kswapd())
+ return 0;
+
+ if (!scanning_global_lru(sc))
+ return 0;
+
+ if (file) {
+ inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+ isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+ } else {
+ inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+ isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+ }
+
+ return isolated > inactive;
+}
+
+/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
@@ -1049,6 +1096,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
int lumpy_reclaim = 0;
+ while (unlikely(too_many_isolated(zone, file, sc))) {
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+ /* We are about to die and free our memory. Return now. */
+ if (fatal_signal_pending(current))
+ return SWAP_CLUSTER_MAX;
+ }
+
/*
* If we need a large contiguous chunk of memory, or have
* trouble getting a small set of contiguous pages, we
@@ -1073,10 +1128,26 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
unsigned long nr_active;
unsigned int count[NR_LRU_LISTS] = { 0, };
int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+ unsigned long nr_anon;
+ unsigned long nr_file;
- nr_taken = sc->isolate_pages(sc->swap_cluster_max,
+ nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
&page_list, &nr_scan, sc->order, mode,
zone, sc->mem_cgroup, 0, file);
+
+ if (scanning_global_lru(sc)) {
+ zone->pages_scanned += nr_scan;
+ if (current_is_kswapd())
+ __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+ nr_scan);
+ else
+ __count_zone_vm_events(PGSCAN_DIRECT, zone,
+ nr_scan);
+ }
+
+ if (nr_taken == 0)
+ goto done;
+
nr_active = clear_active_flags(&page_list, count);
__count_vm_events(PGDEACTIVATE, nr_active);
@@ -1089,13 +1160,13 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
__mod_zone_page_state(zone, NR_INACTIVE_ANON,
-count[LRU_INACTIVE_ANON]);
- if (scanning_global_lru(sc))
- zone->pages_scanned += nr_scan;
+ nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+ nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
- reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
- reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
- reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
- reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+ reclaim_stat->recent_scanned[0] += nr_anon;
+ reclaim_stat->recent_scanned[1] += nr_file;
spin_unlock_irq(&zone->lru_lock);
@@ -1124,18 +1195,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
}
nr_reclaimed += nr_freed;
+
local_irq_disable_nort();
- if (current_is_kswapd()) {
- __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+ if (current_is_kswapd())
__count_vm_events(KSWAPD_STEAL, nr_freed);
- } else if (scanning_global_lru(sc))
- __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
__count_zone_vm_events(PGSTEAL, zone, nr_freed);
- if (nr_taken == 0)
- goto done;
-
spin_lock(&zone->lru_lock);
/*
* Put back any unfreeable pages.
@@ -1154,8 +1219,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
SetPageLRU(page);
lru = page_lru(page);
add_page_to_lru_list(zone, page, lru);
- if (PageActive(page)) {
- int file = !!page_is_file_cache(page);
+ if (is_active_lru(lru)) {
+ int file = is_file_lru(lru);
reclaim_stat->recent_rotated[file]++;
}
if (!pagevec_add(&pvec, page)) {
@@ -1164,15 +1229,13 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
spin_lock_irq(&zone->lru_lock);
}
}
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
} while (nr_scanned < max_scan);
- /*
- * Non-PREEMPT_RT relies on IRQs-off protecting the page_states
- * per-CPU data. PREEMPT_RT has that data protected even in
- * __mod_page_state(), so no need to keep IRQs disabled.
- */
- spin_unlock(&zone->lru_lock);
+
done:
- local_irq_enable_nort();
+ spin_unlock_irq(&zone->lru_lock);
pagevec_release(&pvec);
return nr_reclaimed;
}
@@ -1221,15 +1284,10 @@ static void move_active_pages_to_lru(struct zone *zone,
while (!list_empty(list)) {
page = lru_to_page(list);
- prefetchw_prev_lru_page(page, list, flags);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- VM_BUG_ON(!PageActive(page));
- if (!is_active_lru(lru))
- ClearPageActive(page); /* we are de-activating */
-
list_move(&page->lru, &zone->lru[lru].list);
mem_cgroup_add_lru_list(page, lru);
pgmoved++;
@@ -1250,7 +1308,7 @@ static void move_active_pages_to_lru(struct zone *zone,
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct scan_control *sc, int priority, int file)
{
- unsigned long pgmoved;
+ unsigned long nr_taken;
unsigned long pgscanned;
unsigned long vm_flags;
LIST_HEAD(l_hold); /* The pages which were snipped off */
@@ -1258,10 +1316,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
LIST_HEAD(l_inactive);
struct page *page;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+ unsigned long nr_rotated = 0;
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
- pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+ nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
ISOLATE_ACTIVE, zone,
sc->mem_cgroup, 1, file);
/*
@@ -1271,16 +1330,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
if (scanning_global_lru(sc)) {
zone->pages_scanned += pgscanned;
}
- reclaim_stat->recent_scanned[!!file] += pgmoved;
+ reclaim_stat->recent_scanned[file] += nr_taken;
__count_zone_vm_events(PGREFILL, zone, pgscanned);
if (file)
- __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+ __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
else
- __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+ __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
spin_unlock_irq(&zone->lru_lock);
- pgmoved = 0; /* count referenced (mapping) mapped pages */
while (!list_empty(&l_hold)) {
cond_resched();
page = lru_to_page(&l_hold);
@@ -1294,7 +1353,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
/* page_referenced clears PageReferenced */
if (page_mapping_inuse(page) &&
page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
- pgmoved++;
+ nr_rotated++;
/*
* Identify referenced, file-backed active pages and
* give them one more trip around the active list. So
@@ -1304,12 +1363,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* IO, plus JVM can create lots of anon VM_EXEC pages,
* so we ignore them here.
*/
- if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+ if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
list_add(&page->lru, &l_active);
continue;
}
}
+ ClearPageActive(page); /* we are de-activating */
list_add(&page->lru, &l_inactive);
}
@@ -1323,13 +1383,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* helps balance scan pressure between file and anonymous pages in
* get_scan_ratio.
*/
- reclaim_stat->recent_rotated[!!file] += pgmoved;
+ reclaim_stat->recent_rotated[file] += nr_rotated;
move_active_pages_to_lru(zone, &l_active,
LRU_ACTIVE + file * LRU_FILE);
move_active_pages_to_lru(zone, &l_inactive,
LRU_BASE + file * LRU_FILE);
-
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
}
@@ -1401,20 +1461,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
return low;
}
+static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
+ int file)
+{
+ if (file)
+ return inactive_file_is_low(zone, sc);
+ else
+ return inactive_anon_is_low(zone, sc);
+}
+
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct zone *zone, struct scan_control *sc, int priority)
{
int file = is_file_lru(lru);
- if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ if (is_active_lru(lru)) {
+ if (inactive_list_is_low(zone, sc, file))
+ shrink_active_list(nr_to_scan, zone, sc, priority, file);
return 0;
}
- if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
- return 0;
- }
return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}
@@ -1435,10 +1501,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
unsigned long ap, fp;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
- anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
- zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
- file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
- zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
+ anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+ file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
if (scanning_global_lru(sc)) {
free = zone_page_state(zone, NR_FREE_PAGES);
@@ -1504,15 +1570,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
* until we collected @swap_cluster_max pages to scan.
*/
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
- unsigned long *nr_saved_scan,
- unsigned long swap_cluster_max)
+ unsigned long *nr_saved_scan)
{
unsigned long nr;
*nr_saved_scan += nr_to_scan;
nr = *nr_saved_scan;
- if (nr >= swap_cluster_max)
+ if (nr >= SWAP_CLUSTER_MAX)
*nr_saved_scan = 0;
else
nr = 0;
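
The batching above simply accumulates sub-threshold scan requests in *nr_saved_scan and releases them only once they add up to SWAP_CLUSTER_MAX. A user-space model of the same rule; batch() is illustrative:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Accumulate small requests in *saved and only release them once
 * they reach SWAP_CLUSTER_MAX, as nr_scan_try_batch() does. */
static unsigned long batch(unsigned long nr_to_scan, unsigned long *saved)
{
    *saved += nr_to_scan;
    if (*saved >= SWAP_CLUSTER_MAX) {
        unsigned long nr = *saved;

        *saved = 0;
        return nr;
    }
    return 0;
}

int main(void)
{
    unsigned long saved = 0;

    for (int i = 0; i < 5; i++) {
        unsigned long scan = batch(10, &saved);

        printf("request 10 -> scan %lu (saved %lu)\n", scan, saved);
    }
    return 0;
}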
@@ -1531,7 +1596,8 @@ static void shrink_zone(int priority, struct zone *zone,
unsigned long percent[2]; /* anon @ 0; file @ 1 */
enum lru_list l;
unsigned long nr_reclaimed = sc->nr_reclaimed;
- unsigned long swap_cluster_max = sc->swap_cluster_max;
+ unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+ struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
int noswap = 0;
/* If we have no swap space, do not bother scanning anon pages. */
@@ -1546,24 +1612,21 @@ static void shrink_zone(int priority, struct zone *zone,
int file = is_file_lru(l);
unsigned long scan;
- scan = zone_nr_pages(zone, sc, l);
+ scan = zone_nr_lru_pages(zone, sc, l);
if (priority || noswap) {
scan >>= priority;
scan = (scan * percent[file]) / 100;
}
- if (scanning_global_lru(sc))
- nr[l] = nr_scan_try_batch(scan,
- &zone->lru[l].nr_saved_scan,
- swap_cluster_max);
- else
- nr[l] = scan;
+ nr[l] = nr_scan_try_batch(scan,
+ &reclaim_stat->nr_saved_scan[l]);
}
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(l) {
if (nr[l]) {
- nr_to_scan = min(nr[l], swap_cluster_max);
+ nr_to_scan = min_t(unsigned long,
+ nr[l], SWAP_CLUSTER_MAX);
nr[l] -= nr_to_scan;
nr_reclaimed += shrink_list(l, nr_to_scan,
@@ -1578,8 +1641,7 @@ static void shrink_zone(int priority, struct zone *zone,
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed > swap_cluster_max &&
- priority < DEF_PRIORITY && !current_is_kswapd())
+ if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
break;
}
@@ -1658,10 +1720,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
*
* If the caller is !__GFP_FS then the probability of a failure is reasonably
* high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about. We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written. But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about. We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written. But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
*
* returns: 0, if no pages reclaimed
* else, the number of pages reclaimed
@@ -1677,6 +1739,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+ unsigned long writeback_threshold;
delayacct_freepages_start();
@@ -1691,7 +1754,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- lru_pages += zone_lru_pages(zone);
+ lru_pages += zone_reclaimable_pages(zone);
}
}
@@ -1712,7 +1775,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
}
}
total_scanned += sc->nr_scanned;
- if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+ if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
ret = sc->nr_reclaimed;
goto out;
}
@@ -1724,14 +1787,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
* that's undesirable in laptop mode, where we *want* lumpy
* writeout. So in laptop mode, write out the whole world.
*/
- if (total_scanned > sc->swap_cluster_max +
- sc->swap_cluster_max / 2) {
- wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+ writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
+ if (total_scanned > writeback_threshold) {
+ wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
sc->may_writepage = 1;
}
/* Take a nap, wait for some writeback to complete */
- if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+ if (!sc->hibernation_mode && sc->nr_scanned &&
+ priority < DEF_PRIORITY - 2)
congestion_wait(BLK_RW_ASYNC, HZ/10);
}
/* top priority shrink_zones still had more to do? don't OOM, then */
@@ -1770,7 +1834,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
struct scan_control sc = {
.gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
@@ -1785,23 +1849,55 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+ gfp_t gfp_mask, bool noswap,
+ unsigned int swappiness,
+ struct zone *zone, int nid)
+{
+ struct scan_control sc = {
+ .may_writepage = !laptop_mode,
+ .may_unmap = 1,
+ .may_swap = !noswap,
+ .swappiness = swappiness,
+ .order = 0,
+ .mem_cgroup = mem,
+ .isolate_pages = mem_cgroup_isolate_pages,
+ };
+ nodemask_t nm = nodemask_of_node(nid);
+
+ sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+ (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+ sc.nodemask = &nm;
+ sc.nr_reclaimed = 0;
+ sc.nr_scanned = 0;
+ /*
+ * NOTE: Although we can get the priority field, using it
+ * here is not a good idea, since it limits the pages we can scan.
+ * If we don't reclaim here, shrink_zone() from balance_pgdat()
+ * will pick up pages from other mem cgroups as well. We hack
+ * the priority and make it zero.
+ */
+ shrink_zone(0, zone, &sc);
+ return sc.nr_reclaimed;
+}
+
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
gfp_t gfp_mask,
bool noswap,
unsigned int swappiness)
{
+ struct zonelist *zonelist;
struct scan_control sc = {
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !noswap,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
.nodemask = NULL, /* we don't care the placement */
};
- struct zonelist *zonelist;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1810,6 +1906,33 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
}
#endif
+/* is kswapd sleeping prematurely? */
+static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+{
+ int i;
+
+ /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
+ if (remaining)
+ return 1;
+
+ /* If after HZ/10, a zone is below the high mark, it's premature */
+ for (i = 0; i < pgdat->nr_zones; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (zone_is_all_unreclaimable(zone))
+ continue;
+
+ if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+ 0, 0))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
@@ -1842,7 +1965,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
.may_swap = 1,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ /*
+ * kswapd doesn't want to be bailed out while reclaiming, because
+ * we want to put equal scanning pressure on each zone.
+ */
+ .nr_to_reclaim = ULONG_MAX,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
@@ -1867,6 +1994,7 @@ loop_again:
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
+ int has_under_min_watermark_zone = 0;
/* The swap token gets in the way of swapout... */
if (!priority)
@@ -1908,7 +2036,7 @@ loop_again:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
- lru_pages += zone_lru_pages(zone);
+ lru_pages += zone_reclaimable_pages(zone);
}
/*
@@ -1923,6 +2051,7 @@ loop_again:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
int nr_slab;
+ int nid, zid;
if (!populated_zone(zone))
continue;
@@ -1937,6 +2066,15 @@ loop_again:
temp_priority[i] = priority;
sc.nr_scanned = 0;
note_zone_scanning_priority(zone, priority);
+
+ nid = pgdat->node_id;
+ zid = zone_idx(zone);
+ /*
+ * Call soft limit reclaim before calling shrink_zone.
+ * For now we ignore the return value
+ */
+ mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
+ nid, zid);
/*
* We put equal pressure on every zone, unless one
* zone has way too many pages free already.
@@ -1952,7 +2090,7 @@ loop_again:
if (zone_is_all_unreclaimable(zone))
continue;
if (nr_slab == 0 && zone->pages_scanned >=
- (zone_lru_pages(zone) * 6))
+ (zone_reclaimable_pages(zone) * 6))
zone_set_flag(zone,
ZONE_ALL_UNRECLAIMABLE);
/*
@@ -1963,6 +2101,15 @@ loop_again:
if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
sc.may_writepage = 1;
+
+ /*
+ * We are still under the min watermark. This means we risk
+ * GFP_ATOMIC allocation failures. Hurry up!
+ */
+ if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
+ end_zone, 0))
+ has_under_min_watermark_zone = 1;
+
}
if (all_zones_ok)
break; /* kswapd: all done */
@@ -1970,8 +2117,12 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (has_under_min_watermark_zone)
+ count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
+ else
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+ }
/*
* We do this so kswapd doesn't build up large priorities for
@@ -2069,6 +2220,7 @@ static int kswapd(void *p)
order = 0;
for ( ; ; ) {
unsigned long new_order;
+ int ret;
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
new_order = pgdat->kswapd_max_order;
@@ -2080,19 +2232,45 @@ static int kswapd(void *p)
*/
order = new_order;
} else {
- if (!freezing(current))
- schedule();
+ if (!freezing(current) && !kthread_should_stop()) {
+ long remaining = 0;
+
+ /* Try to sleep for a short interval */
+ if (!sleeping_prematurely(pgdat, order, remaining)) {
+ remaining = schedule_timeout(HZ/10);
+ finish_wait(&pgdat->kswapd_wait, &wait);
+ prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+ }
+
+ /*
+ * After a short sleep, check if it was a
+ * premature sleep. If not, then go fully
+ * to sleep until explicitly woken up
+ */
+ if (!sleeping_prematurely(pgdat, order, remaining))
+ schedule();
+ else {
+ if (remaining)
+ count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+ else
+ count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+ }
+ }
order = pgdat->kswapd_max_order;
}
finish_wait(&pgdat->kswapd_wait, &wait);
- if (!try_to_freeze()) {
- /* We can speed up thawing tasks if we don't call
- * balance_pgdat after returning from the refrigerator
- */
+ ret = try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ /*
+ * We can speed up thawing tasks if we don't call balance_pgdat
+ * after returning from the refrigerator
+ */
+ if (!ret)
balance_pgdat(pgdat, order);
- }
}
return 0;
}
@@ -2119,153 +2297,80 @@ void wakeup_kswapd(struct zone *zone, int order)
wake_up_interruptible(&pgdat->kswapd_wait);
}
-unsigned long global_lru_pages(void)
-{
- return global_page_state(NR_ACTIVE_ANON)
- + global_page_state(NR_ACTIVE_FILE)
- + global_page_state(NR_INACTIVE_ANON)
- + global_page_state(NR_INACTIVE_FILE);
-}
-
-#ifdef CONFIG_HIBERNATION
/*
- * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority.
- *
- * For pass > 3 we also try to shrink the LRU lists that contain a few pages
+ * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be:
+ * - mlocked pages, which will be moved to the unevictable list when encountered
+ * - mapped pages, which may require several passes to be reclaimed
+ * - dirty pages, which are not "instantly" reclaimable
*/
-static void shrink_all_zones(unsigned long nr_pages, int prio,
- int pass, struct scan_control *sc)
+unsigned long global_reclaimable_pages(void)
{
- struct zone *zone;
- unsigned long nr_reclaimed = 0;
+ int nr;
- for_each_populated_zone(zone) {
- enum lru_list l;
+ nr = global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_FILE);
- if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
- continue;
+ if (nr_swap_pages > 0)
+ nr += global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_ANON);
- for_each_evictable_lru(l) {
- enum zone_stat_item ls = NR_LRU_BASE + l;
- unsigned long lru_pages = zone_page_state(zone, ls);
+ return nr;
+}
- /* For pass = 0, we don't shrink the active list */
- if (pass == 0 && (l == LRU_ACTIVE_ANON ||
- l == LRU_ACTIVE_FILE))
- continue;
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+ int nr;
- zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
- if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
- unsigned long nr_to_scan;
-
- zone->lru[l].nr_saved_scan = 0;
- nr_to_scan = min(nr_pages, lru_pages);
- nr_reclaimed += shrink_list(l, nr_to_scan, zone,
- sc, prio);
- if (nr_reclaimed >= nr_pages) {
- sc->nr_reclaimed += nr_reclaimed;
- return;
- }
- }
- }
- }
- sc->nr_reclaimed += nr_reclaimed;
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (nr_swap_pages > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
}
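
A toy model of the accounting above, showing that file LRU pages always count as reclaimable while anon LRU pages count only while swap space remains; reclaimable() is illustrative:

#include <stdio.h>

/* File LRU pages always count; anon LRU pages only while swap is left. */
static long reclaimable(long active_file, long inactive_file,
                        long active_anon, long inactive_anon,
                        long nr_swap_pages)
{
    long nr = active_file + inactive_file;

    if (nr_swap_pages > 0)
        nr += active_anon + inactive_anon;
    return nr;
}

int main(void)
{
    printf("%ld\n", reclaimable(100, 200, 300, 400, 1));  /* 1000 */
    printf("%ld\n", reclaimable(100, 200, 300, 400, 0));  /* 300  */
    return 0;
}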
+#ifdef CONFIG_HIBERNATION
/*
- * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
* freed pages.
*
* Rather than trying to age LRUs the aim is to preserve the overall
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
-unsigned long shrink_all_memory(unsigned long nr_pages)
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
- unsigned long lru_pages, nr_slab;
- int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
- .gfp_mask = GFP_KERNEL,
- .may_unmap = 0,
+ .gfp_mask = GFP_HIGHUSER_MOVABLE,
+ .may_swap = 1,
+ .may_unmap = 1,
.may_writepage = 1,
+ .nr_to_reclaim = nr_to_reclaim,
+ .hibernation_mode = 1,
+ .swappiness = vm_swappiness,
+ .order = 0,
.isolate_pages = isolate_pages_global,
- .nr_reclaimed = 0,
};
+ struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+ struct task_struct *p = current;
+ unsigned long nr_reclaimed;
- current->reclaim_state = &reclaim_state;
-
- lru_pages = global_lru_pages();
- nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
- /* If slab caches are huge, it's better to hit them first */
- while (nr_slab >= lru_pages) {
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
- if (!reclaim_state.reclaimed_slab)
- break;
-
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- nr_slab -= reclaim_state.reclaimed_slab;
- }
-
- /*
- * We try to shrink LRUs in 5 passes:
- * 0 = Reclaim from inactive_list only
- * 1 = Reclaim from active list but don't reclaim mapped
- * 2 = 2nd pass of type 1
- * 3 = Reclaim mapped (normal reclaim)
- * 4 = 2nd pass of type 3
- */
- for (pass = 0; pass < 5; pass++) {
- int prio;
-
- /* Force reclaiming mapped pages in the passes #3 and #4 */
- if (pass > 2)
- sc.may_unmap = 1;
-
- for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
-
- sc.nr_scanned = 0;
- sc.swap_cluster_max = nr_to_scan;
- shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(sc.nr_scanned, sc.gfp_mask,
- global_lru_pages());
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ / 10);
- }
- }
-
- /*
- * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
- * something in slab caches
- */
- if (!sc.nr_reclaimed) {
- do {
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- } while (sc.nr_reclaimed < nr_pages &&
- reclaim_state.reclaimed_slab > 0);
- }
+ p->flags |= PF_MEMALLOC;
+ lockdep_set_current_reclaim_state(sc.gfp_mask);
+ reclaim_state.reclaimed_slab = 0;
+ p->reclaim_state = &reclaim_state;
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
-out:
- current->reclaim_state = NULL;
+ p->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
+ p->flags &= ~PF_MEMALLOC;
- return sc.nr_reclaimed;
+ return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
@@ -2315,6 +2420,17 @@ int kswapd_run(int nid)
return ret;
}
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+ struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+ if (kswapd)
+ kthread_stop(kswapd);
+}
+
static int __init kswapd_init(void)
{
int nid;
@@ -2417,8 +2533,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.may_swap = 1,
- .swap_cluster_max = max_t(unsigned long, nr_pages,
- SWAP_CLUSTER_MAX),
+ .nr_to_reclaim = max_t(unsigned long, nr_pages,
+ SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
.order = order,
@@ -2575,7 +2691,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
retry:
ClearPageUnevictable(page);
if (page_evictable(page, NULL)) {
- enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+ enum lru_list l = page_lru_base_type(page);
__dec_zone_state(zone, NR_UNEVICTABLE);
list_move(&page->lru, &zone->lru[l].list);
@@ -2718,10 +2834,10 @@ static void scan_all_zones_unevictable_pages(void)
unsigned long scan_unevictable_pages;
int scan_unevictable_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (write && *(unsigned long *)table->data)
scan_all_zones_unevictable_pages();
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9f7c001f1820..ea832b5015e1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -650,11 +650,14 @@ static const char * const vmstat_text[] = {
"nr_slab_reclaimable",
"nr_slab_unreclaimable",
"nr_page_table_pages",
+ "nr_kernel_stack",
"nr_unstable",
"nr_bounce",
"nr_vmscan_write",
"nr_writeback_temp",
-
+ "nr_isolated_anon",
+ "nr_isolated_file",
+ "nr_shmem",
#ifdef CONFIG_NUMA
"numa_hit",
"numa_miss",
@@ -691,6 +694,9 @@ static const char * const vmstat_text[] = {
"slabs_scanned",
"kswapd_steal",
"kswapd_inodesteal",
+ "kswapd_low_wmark_hit_quickly",
+ "kswapd_high_wmark_hit_quickly",
+ "kswapd_skip_congestion_wait",
"pageoutrun",
"allocstall",
@@ -891,11 +897,10 @@ static void vmstat_update(struct work_struct *w)
static void __cpuinit start_cpu_timer(int cpu)
{
- struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+ struct delayed_work *work = &per_cpu(vmstat_work, cpu);
- INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
- schedule_delayed_work_on(cpu, vmstat_work,
- __round_jiffies_relative(HZ, cpu));
+ INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+ schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
/*