author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 20:42:54 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 20:42:54 -0800
commit	099469502f62fbe0d7e4f0b83a2f22538367f734 (patch)
tree	5229c3818b2e6e09d35026d49314047121130536 /include
parent	7c17d86a8502c2e30c2eea777ed1b830aa3b447b (diff)
parent	35f1526845a9d804206883e19bd257d3dcef758f (diff)
Merge branch 'akpm' (aka "Andrew's patch-bomb, take two")
Andrew explains:

 - various misc stuff

 - Most of the rest of MM: memcg, threaded hugepages, others.

 - cpumask

 - kexec

 - kdump

 - some direct-io performance tweaking

 - radix-tree optimisations

 - new selftests code

   A note on this: often people will develop a new userspace-visible
   feature and will develop userspace code to exercise/test that feature.
   Then they merge the patch and the selftest code dies.  Sometimes we
   paste it into the changelog.  Sometimes the code gets thrown into
   Documentation/(!).

   This saddens me.  So this patch creates a bare-bones framework which
   will henceforth allow me to ask people to include their test apps in
   the kernel tree so we can keep them alive.  Then when people enhance
   or fix the feature, I can ask them to update the test app too.

   The infrastructure is terribly trivial at present - let's see how it
   evolves.

 - checkpoint/restart feature work.

   A note on this: this is a project by various mad Russians to perform
   c/r mainly from userspace, with various oddball helper code added into
   the kernel where the need is demonstrated.

   So rather than some large central lump of code, what we have is little
   bits and pieces popping up in various places which either expose
   something new or which permit something which is normally
   kernel-private to be modified.

   The overall project is an ongoing thing.  I've judged that the size
   and scope of the thing means that we're more likely to be successful
   with it if we integrate the support into mainline piecemeal rather
   than allowing it all to develop out-of-tree.

   However I'm less confident than the developers that it will all
   eventually work!  So what I'm asking them to do is to wrap each piece
   of new code inside CONFIG_CHECKPOINT_RESTORE.  So if it all eventually
   comes to tears and the project as a whole fails, it should be a simple
   matter to go through and delete all trace of it.

This lot pretty much wraps up the -rc1 merge for me.

* akpm: (96 commits)
  unlzo: fix input buffer free
  ramoops: update parameters only after successful init
  ramoops: fix use of rounddown_pow_of_two()
  c/r: prctl: add PR_SET_MM codes to set up mm_struct entries
  c/r: procfs: add start_data, end_data, start_brk members to /proc/$pid/stat v4
  c/r: introduce CHECKPOINT_RESTORE symbol
  selftests: new x86 breakpoints selftest
  selftests: new very basic kernel selftests directory
  radix_tree: take radix_tree_path off stack
  radix_tree: remove radix_tree_indirect_to_ptr()
  dio: optimize cache misses in the submission path
  vfs: cache request_queue in struct block_device
  fs/direct-io.c: calculate fs_count correctly in get_more_blocks()
  drivers/parport/parport_pc.c: fix warnings
  panic: don't print redundant backtraces on oops
  sysctl: add the kernel.ns_last_pid control
  kdump: add udev events for memory online/offline
  include/linux/crash_dump.h needs elf.h
  kdump: fix crash_kexec()/smp_send_stop() race in panic()
  kdump: crashk_res init check for /sys/kernel/kexec_crash_size
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/tlb.h	14
-rw-r--r--	include/linux/crash_dump.h	1
-rw-r--r--	include/linux/eventpoll.h	1
-rw-r--r--	include/linux/fs.h	14
-rw-r--r--	include/linux/huge_mm.h	2
-rw-r--r--	include/linux/kernel.h	13
-rw-r--r--	include/linux/kmsg_dump.h	1
-rw-r--r--	include/linux/linkage.h	4
-rw-r--r--	include/linux/memcontrol.h	105
-rw-r--r--	include/linux/migrate.h	23
-rw-r--r--	include/linux/mm_inline.h	44
-rw-r--r--	include/linux/mm_types.h	9
-rw-r--r--	include/linux/mmzone.h	28
-rw-r--r--	include/linux/oom.h	2
-rw-r--r--	include/linux/page_cgroup.h	46
-rw-r--r--	include/linux/pagevec.h	12
-rw-r--r--	include/linux/prctl.h	12
-rw-r--r--	include/linux/radix-tree.h	3
-rw-r--r--	include/linux/rmap.h	4
-rw-r--r--	include/linux/sched.h	2
-rw-r--r--	include/trace/events/vmscan.h	22
21 files changed, 197 insertions, 165 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e58fa777fa0..f96a5b58a97 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -139,6 +139,20 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
+ } while (0)
+
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
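The pattern mirrors tlb_remove_tlb_entry() one level up the page table. A
minimal sketch of a caller, loosely modeled on the huge-pmd zap path (the
function name and the pmd-clearing step are assumptions for illustration,
not the patched code):

	/* After clearing a huge pmd, record it so the flush at
	 * tlb_finish_mmu() time covers the huge mapping too. */
	static inline void example_zap_one_huge_pmd(struct mmu_gather *tlb,
						    struct vm_area_struct *vma,
						    pmd_t *pmd,
						    unsigned long addr)
	{
		pmd_clear(pmd);					  /* drop the mapping */
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr); /* sets tlb->need_flush */
	}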
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 5c4abce94ad..b936763f223 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,6 +5,7 @@
#include <linux/kexec.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
+#include <linux/elf.h>
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index f362733186a..657ab55beda 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,7 @@ struct file;
static inline void eventpoll_init_file(struct file *file)
{
INIT_LIST_HEAD(&file->f_ep_links);
+ INIT_LIST_HEAD(&file->f_tfile_llink);
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7aacf31418f..4bc8169fb5a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -525,6 +525,7 @@ enum positive_aop_returns {
struct page;
struct address_space;
struct writeback_control;
+enum migrate_mode;
struct iov_iter {
const struct iovec *iov;
@@ -609,9 +610,12 @@ struct address_space_operations {
loff_t offset, unsigned long nr_segs);
int (*get_xip_mem)(struct address_space *, pgoff_t, int,
void **, unsigned long *);
- /* migrate the contents of a page to the specified target */
+ /*
+ * migrate the contents of a page to the specified target. If
+ * migrate_mode is MIGRATE_ASYNC, it must not block.
+ */
int (*migratepage) (struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *, enum migrate_mode);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
@@ -656,6 +660,7 @@ struct address_space {
* must be enforced here for CRIS, to let the least significant bit
* of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
*/
+struct request_queue;
struct block_device {
dev_t bd_dev; /* not a kdev_t - it's a search key */
@@ -678,6 +683,7 @@ struct block_device {
unsigned bd_part_count;
int bd_invalidated;
struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
struct list_head bd_list;
/*
* Private data. You must have bd_claim'ed the block_device
@@ -1001,6 +1007,7 @@ struct file {
#ifdef CONFIG_EPOLL
/* Used by fs/eventpoll.c to link all the hooks to this file */
struct list_head f_ep_links;
+ struct list_head f_tfile_llink;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
#ifdef CONFIG_DEBUG_WRITECOUNT
@@ -2536,7 +2543,8 @@ extern int generic_check_addressable(unsigned, u64);
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *,
+ enum migrate_mode);
#else
#define buffer_migrate_page NULL
#endif
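For filesystems this means a ->migratepage implementation now sees how much
it is allowed to block. A hedged sketch of a callback honoring the new
argument (hypothetical function, not part of this patch):

	static int example_migratepage(struct address_space *mapping,
				       struct page *newpage, struct page *page,
				       enum migrate_mode mode)
	{
		if (PageWriteback(page)) {
			/* MIGRATE_ASYNC callers must never block */
			if (mode == MIGRATE_ASYNC)
				return -EAGAIN;
			wait_on_page_writeback(page);
		}
		return migrate_page(mapping, newpage, page, mode);
	}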
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a9ace9c3250..1b921299abc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -18,7 +18,7 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
struct vm_area_struct *vma,
- pmd_t *pmd);
+ pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d0a7a0c7166..e8343422240 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -185,16 +185,17 @@ static inline void might_fault(void)
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
-NORET_TYPE void panic(const char * fmt, ...)
- __attribute__ ((NORET_AND format (printf, 1, 2))) __cold;
+__printf(1, 2)
+void panic(const char *fmt, ...)
+ __noreturn __cold;
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
-NORET_TYPE void do_exit(long error_code)
- ATTRIB_NORET;
-NORET_TYPE void complete_and_exit(struct completion *, long)
- ATTRIB_NORET;
+void do_exit(long error_code)
+ __noreturn;
+void complete_and_exit(struct completion *, long)
+ __noreturn;
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
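The attribute change is behavior-preserving: __noreturn expands to
__attribute__((noreturn)) and __printf(1, 2) to
__attribute__((format(printf, 1, 2))), so gcc still type-checks format
strings at every call site. A declaration-only sketch of what the
annotations buy (the example function is hypothetical):

	__printf(1, 2) __noreturn __cold
	void example_panic(const char *fmt, ...);

	/* example_panic("bad pfn %lu\n", 42UL);    compiles cleanly   */
	/* example_panic("bad pfn %lu\n", "oops");  warns via -Wformat */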
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index ee0c952188d..fee66317e07 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -18,7 +18,6 @@
enum kmsg_dump_reason {
KMSG_DUMP_OOPS,
KMSG_DUMP_PANIC,
- KMSG_DUMP_KEXEC,
KMSG_DUMP_RESTART,
KMSG_DUMP_HALT,
KMSG_DUMP_POWEROFF,
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 3f46aedea42..807f1e53322 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -88,8 +88,4 @@
#endif
-#define NORET_TYPE /**/
-#define ATTRIB_NORET __attribute__((noreturn))
-#define NORET_AND noreturn,
-
#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f944591765e..4d34356fe64 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -32,13 +32,11 @@ enum mem_cgroup_page_stat_item {
MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};
-extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
- struct list_head *dst,
- unsigned long *scanned, int order,
- isolate_mode_t mode,
- struct zone *z,
- struct mem_cgroup *mem_cont,
- int active, int file);
+struct mem_cgroup_reclaim_cookie {
+ struct zone *zone;
+ int priority;
+ unsigned int generation;
+};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
@@ -56,20 +54,21 @@ extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
- struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+ struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
- struct mem_cgroup *ptr);
-extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+ struct mem_cgroup *memcg);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
-extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
-extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
-extern void mem_cgroup_del_lru(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page,
- enum lru_list from, enum lru_list to);
+
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
+struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
+ enum lru_list);
+void mem_cgroup_lru_del_list(struct page *, enum lru_list);
+void mem_cgroup_lru_del(struct page *);
+struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
+ enum lru_list, enum lru_list);
/* For coalescing uncharge for reducing memcg' overhead*/
extern void mem_cgroup_uncharge_start(void);
@@ -102,10 +101,15 @@ extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
extern int
mem_cgroup_prepare_migration(struct page *page,
- struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
+ struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
/*
* For memory reclaim.
*/
@@ -122,7 +126,10 @@ struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+ struct page *newpage);
+extern void mem_cgroup_reset_owner(struct page *page);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif
@@ -157,7 +164,7 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
+void mem_cgroup_split_huge_fixup(struct page *head);
#endif
#ifdef CONFIG_DEBUG_VM
@@ -180,17 +187,17 @@ static inline int mem_cgroup_cache_charge(struct page *page,
}
static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
- struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+ struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
return 0;
}
static inline void mem_cgroup_commit_charge_swapin(struct page *page,
- struct mem_cgroup *ptr)
+ struct mem_cgroup *memcg)
{
}
-static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}
@@ -210,33 +217,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}
-static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
-{
-}
-
-static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+ struct mem_cgroup *memcg)
{
- return ;
+ return &zone->lruvec;
}
-static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
+ struct page *page,
+ enum lru_list lru)
{
- return ;
+ return &zone->lruvec;
}
-static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
- return ;
}
-static inline void mem_cgroup_del_lru(struct page *page)
+static inline void mem_cgroup_lru_del(struct page *page)
{
- return ;
}
-static inline void
-mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
+static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
+ struct page *page,
+ enum lru_list from,
+ enum lru_list to)
{
+ return &zone->lruvec;
}
static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -269,7 +276,7 @@ static inline struct cgroup_subsys_state
static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
- struct mem_cgroup **ptr, gfp_t gfp_mask)
+ struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
return 0;
}
@@ -279,6 +286,19 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
{
}
+static inline struct mem_cgroup *
+mem_cgroup_iter(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
+ struct mem_cgroup *prev)
+{
+}
+
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
{
return 0;
@@ -360,8 +380,7 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
return 0;
}
-static inline void mem_cgroup_split_huge_fixup(struct page *head,
- struct page *tail)
+static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}
@@ -369,6 +388,14 @@ static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+ struct page *newpage)
+{
+}
+
+static inline void mem_cgroup_reset_owner(struct page *page)
+{
+}
#endif /* CONFIG_CGROUP_MEM_CONT */
#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
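mem_cgroup_iter()/mem_cgroup_iter_break() replace the old per-memcg isolate
entry point with a generic hierarchy walk. A sketch of the intended loop
shape, following the reclaim users this series converts (zone, priority,
root and the done condition are placeholders):

	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = priority,
	};
	struct mem_cgroup *memcg;

	for (memcg = mem_cgroup_iter(root, NULL, &reclaim);
	     memcg;
	     memcg = mem_cgroup_iter(root, memcg, &reclaim)) {
		/* ... reclaim from memcg's lru lists in this zone ... */
		if (done) {
			/* drop the reference the iterator is holding */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
	}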
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e39aeecfe9a..eaf867412f7 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,18 +6,31 @@
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ * on most operations but not ->writepage as the potential stall time
+ * is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+ MIGRATE_ASYNC,
+ MIGRATE_SYNC_LIGHT,
+ MIGRATE_SYNC,
+};
+
#ifdef CONFIG_MIGRATION
#define PAGE_MIGRATION 1
extern void putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync);
+ enum migrate_mode mode);
extern int migrate_huge_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync);
+ enum migrate_mode mode);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync) { return -ENOSYS; }
+ enum migrate_mode mode) { return -ENOSYS; }
static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync) { return -ENOSYS; }
+ enum migrate_mode mode) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
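Callers now pick a point on the latency/effectiveness spectrum instead of
passing a bool. A compaction-flavored sketch (compaction_alloc, cc and sync
stand in for a real new_page_t callback, its private cookie and the caller's
old bool):

	LIST_HEAD(pagelist);
	int err;

	/* ... isolate candidate pages onto &pagelist ... */
	err = migrate_pages(&pagelist, compaction_alloc,
			    (unsigned long)cc, false,
			    sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
	if (err)
		putback_lru_pages(&pagelist);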
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8f7d24712dc..227fd3e9a9c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,26 +22,21 @@ static inline int page_is_file_cache(struct page *page)
}
static inline void
-__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
- struct list_head *head)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
{
- list_add(&page->lru, head);
- __mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
- mem_cgroup_add_lru_list(page, l);
-}
+ struct lruvec *lruvec;
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
-{
- __add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
+ lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+ list_add(&page->lru, &lruvec->lists[lru]);
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
}
static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
{
+ mem_cgroup_lru_del_list(page, lru);
list_del(&page->lru);
- __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
- mem_cgroup_del_lru_list(page, l);
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
}
/**
@@ -59,24 +54,28 @@ static inline enum lru_list page_lru_base_type(struct page *page)
return LRU_INACTIVE_ANON;
}
-static inline void
-del_page_from_lru(struct zone *zone, struct page *page)
+/**
+ * page_off_lru - which LRU list was page on? clearing its lru flags.
+ * @page: the page to test
+ *
+ * Returns the LRU list a page was on, as an index into the array of LRU
+ * lists; and clears its Unevictable or Active flags, ready for freeing.
+ */
+static inline enum lru_list page_off_lru(struct page *page)
{
- enum lru_list l;
+ enum lru_list lru;
- list_del(&page->lru);
if (PageUnevictable(page)) {
__ClearPageUnevictable(page);
- l = LRU_UNEVICTABLE;
+ lru = LRU_UNEVICTABLE;
} else {
- l = page_lru_base_type(page);
+ lru = page_lru_base_type(page);
if (PageActive(page)) {
__ClearPageActive(page);
- l += LRU_ACTIVE;
+ lru += LRU_ACTIVE;
}
}
- __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
- mem_cgroup_del_lru_list(page, l);
+ return lru;
}
/**
@@ -97,7 +96,6 @@ static inline enum lru_list page_lru(struct page *page)
if (PageActive(page))
lru += LRU_ACTIVE;
}
-
return lru;
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5b42f1b34eb..3cc3062b376 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -151,12 +151,11 @@ struct page {
#endif
}
/*
- * If another subsystem starts using the double word pairing for atomic
- * operations on struct page then it must change the #if to ensure
- * proper alignment of the page struct.
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
*/
-#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
- __attribute__((__aligned__(2*sizeof(unsigned long))))
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+ __aligned(2 * sizeof(unsigned long))
#endif
;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca6ca92418a..650ba2fb330 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,25 +140,29 @@ enum lru_list {
NR_LRU_LISTS
};
-#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
-#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
-static inline int is_file_lru(enum lru_list l)
+static inline int is_file_lru(enum lru_list lru)
{
- return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+ return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
-static inline int is_active_lru(enum lru_list l)
+static inline int is_active_lru(enum lru_list lru)
{
- return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+ return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
-static inline int is_unevictable_lru(enum lru_list l)
+static inline int is_unevictable_lru(enum lru_list lru)
{
- return (l == LRU_UNEVICTABLE);
+ return (lru == LRU_UNEVICTABLE);
}
+struct lruvec {
+ struct list_head lists[NR_LRU_LISTS];
+};
+
/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -173,6 +177,8 @@ static inline int is_unevictable_lru(enum lru_list l)
#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;
@@ -364,10 +370,8 @@ struct zone {
ZONE_PADDING(_pad1_)
/* Fields commonly accessed by the page reclaim scanner */
- spinlock_t lru_lock;
- struct zone_lru {
- struct list_head list;
- } lru[NR_LRU_LISTS];
+ spinlock_t lru_lock;
+ struct lruvec lruvec;
struct zone_reclaim_stat reclaim_stat;
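With the lists consolidated into struct lruvec, a walk over all LRUs becomes
an index into one array. A minimal sketch using the renamed iterator (the
counting function is hypothetical; a real caller holds zone->lru_lock):

	static unsigned long example_count_lru_pages(struct zone *zone)
	{
		unsigned long nr = 0;
		enum lru_list lru;
		struct page *page;

		for_each_lru(lru)
			list_for_each_entry(page, &zone->lruvec.lists[lru], lru)
				nr++;
		return nr;
	}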
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6f9d04a8533..552fba9c7d5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,7 +43,7 @@ enum oom_constraint {
extern void compare_swap_oom_score_adj(int old_val, int new_val);
extern int test_set_oom_score_adj(int new_val);
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
const nodemask_t *nodemask, unsigned long totalpages);
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 961ecc7d30b..a2d11771c84 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -10,8 +10,6 @@ enum {
/* flags for mem_cgroup and file and I/O status */
PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
PCG_FILE_MAPPED, /* page is accounted as "mapped" */
- /* No lock in page_cgroup */
- PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
__NR_PCG_FLAGS,
};
@@ -31,7 +29,6 @@ enum {
struct page_cgroup {
unsigned long flags;
struct mem_cgroup *mem_cgroup;
- struct list_head lru; /* per cgroup LRU list */
};
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
@@ -76,12 +73,6 @@ TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)
-SETPCGFLAG(AcctLRU, ACCT_LRU)
-CLEARPCGFLAG(AcctLRU, ACCT_LRU)
-TESTPCGFLAG(AcctLRU, ACCT_LRU)
-TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)
-
-
SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)
@@ -122,39 +113,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
local_irq_restore(*flags);
}
-#ifdef CONFIG_SPARSEMEM
-#define PCG_ARRAYID_WIDTH SECTIONS_SHIFT
-#else
-#define PCG_ARRAYID_WIDTH NODES_SHIFT
-#endif
-
-#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
-#error Not enough space left in pc->flags to store page_cgroup array IDs
-#endif
-
-/* pc->flags: ARRAY-ID | FLAGS */
-
-#define PCG_ARRAYID_MASK ((1UL << PCG_ARRAYID_WIDTH) - 1)
-
-#define PCG_ARRAYID_OFFSET (BITS_PER_LONG - PCG_ARRAYID_WIDTH)
-/*
- * Zero the shift count for non-existent fields, to prevent compiler
- * warnings and ensure references are optimized away.
- */
-#define PCG_ARRAYID_SHIFT (PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
-
-static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
- unsigned long id)
-{
- pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
- pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
-}
-
-static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
-{
- return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
-}
-
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;
@@ -183,7 +141,7 @@ static inline void __init page_cgroup_init_flatmem(void)
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
-extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
+extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
#else
@@ -195,7 +153,7 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
}
static inline
-unsigned short lookup_swap_cgroup(swp_entry_t ent)
+unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
return 0;
}
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index ed17024d2eb..2aa12b8499c 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -21,8 +21,7 @@ struct pagevec {
};
void __pagevec_release(struct pagevec *pvec);
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
-void pagevec_strip(struct pagevec *pvec);
+void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -59,7 +58,6 @@ static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
return pagevec_space(pvec);
}
-
static inline void pagevec_release(struct pagevec *pvec)
{
if (pagevec_count(pvec))
@@ -68,22 +66,22 @@ static inline void pagevec_release(struct pagevec *pvec)
static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
{
- ____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
+ __pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
}
static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
{
- ____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
+ __pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
}
static inline void __pagevec_lru_add_file(struct pagevec *pvec)
{
- ____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
+ __pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
}
static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
{
- ____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
+ __pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
}
static inline void pagevec_lru_add_file(struct pagevec *pvec)
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index a3baeb2c216..7ddc7f1b480 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -102,4 +102,16 @@
#define PR_MCE_KILL_GET 34
+/*
+ * Tune up process memory map specifics.
+ */
+#define PR_SET_MM 35
+# define PR_SET_MM_START_CODE 1
+# define PR_SET_MM_END_CODE 2
+# define PR_SET_MM_START_DATA 3
+# define PR_SET_MM_END_DATA 4
+# define PR_SET_MM_START_STACK 5
+# define PR_SET_MM_START_BRK 6
+# define PR_SET_MM_BRK 7
+
#endif /* _LINUX_PRCTL_H */
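From userspace the new codes are plain prctl() calls; the c/r restore path
uses them to re-establish mm_struct fields after replaying a dump. A hedged
sketch (the brk addresses are placeholders, and the caller is assumed to
need CAP_SYS_RESOURCE):

	#include <sys/prctl.h>

	if (prctl(PR_SET_MM, PR_SET_MM_START_BRK, start_brk, 0, 0) ||
	    prctl(PR_SET_MM, PR_SET_MM_BRK, brk, 0, 0))
		perror("PR_SET_MM");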
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 9d4539c52e5..07e360b1b28 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -49,9 +49,6 @@
#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
-#define radix_tree_indirect_to_ptr(ptr) \
- radix_tree_indirect_to_ptr((void __force *)(ptr))
-
static inline int radix_tree_is_indirect_ptr(void *ptr)
{
return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1afb9954bbf..1cdd62a2788 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -158,7 +158,7 @@ static inline void page_dup_rmap(struct page *page)
* Called from mm/vmscan.c to handle paging out
*/
int page_referenced(struct page *, int is_locked,
- struct mem_cgroup *cnt, unsigned long *vm_flags);
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
@@ -236,7 +236,7 @@ int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
#define anon_vma_link(vma) do {} while (0)
static inline int page_referenced(struct page *page, int is_locked,
- struct mem_cgroup *cnt,
+ struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
*vm_flags = 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21cd0303af5..4032ec1cf83 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2275,7 +2275,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
-extern NORET_TYPE void do_group_exit(int);
+extern void do_group_exit(int);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index edc4b3d25a2..f64560e204b 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -266,9 +266,10 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- isolate_mode_t isolate_mode),
+ isolate_mode_t isolate_mode,
+ int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file),
TP_STRUCT__entry(
__field(int, order)
@@ -279,6 +280,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__field(unsigned long, nr_lumpy_dirty)
__field(unsigned long, nr_lumpy_failed)
__field(isolate_mode_t, isolate_mode)
+ __field(int, file)
),
TP_fast_assign(
@@ -290,9 +292,10 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__entry->nr_lumpy_dirty = nr_lumpy_dirty;
__entry->nr_lumpy_failed = nr_lumpy_failed;
__entry->isolate_mode = isolate_mode;
+ __entry->file = file;
),
- TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu",
+ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
__entry->isolate_mode,
__entry->order,
__entry->nr_requested,
@@ -300,7 +303,8 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__entry->nr_taken,
__entry->nr_lumpy_taken,
__entry->nr_lumpy_dirty,
- __entry->nr_lumpy_failed)
+ __entry->nr_lumpy_failed,
+ __entry->file)
);
DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
@@ -312,9 +316,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- isolate_mode_t isolate_mode),
+ isolate_mode_t isolate_mode,
+ int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
);
@@ -327,9 +332,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- isolate_mode_t isolate_mode),
+ isolate_mode_t isolate_mode,
+ int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
);
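Tracepoint callers gain one argument at the tail. A sketch of an updated
call site in the isolation path (the counter values are placeholders for
what a real caller accumulates):

	trace_mm_vmscan_lru_isolate(order, nr_to_scan, nr_scanned, nr_taken,
				    nr_lumpy_taken, nr_lumpy_dirty,
				    nr_lumpy_failed, isolate_mode, file);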