Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	144
1 file changed, 77 insertions(+), 67 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7438071b44a..17b27cd269c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
+#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
@@ -356,36 +357,50 @@ static inline struct page *compound_head(struct page *page)
return page;
}
+/*
+ * The atomic page->_mapcount starts from -1, so that transitions
+ * both from it and to it can be tracked, using atomic_inc_and_test
+ * and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+ atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+ return atomic_read(&(page)->_mapcount) + 1;
+}
+
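
A minimal sketch of why the -1 start value matters (not part of this
patch; the sketch_* names are hypothetical, and pr_debug() is assumed
available): atomic_inc_and_test() returns true exactly on the
first-mapping transition (-1 -> 0), and atomic_add_negative(-1, ...)
exactly on the last-unmapping transition (0 -> -1), so rmap-style
accounting can detect both edges without extra locking.

	static inline void sketch_account_mapping(struct page *page)
	{
		/* -1 -> 0: true only for the page's first mapping */
		if (atomic_inc_and_test(&page->_mapcount))
			pr_debug("page mapped for the first time\n");
	}

	static inline void sketch_account_unmapping(struct page *page)
	{
		/* 0 -> -1: true only when the last mapping goes away */
		if (atomic_add_negative(-1, &page->_mapcount))
			pr_debug("page fully unmapped\n");
	}
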
static inline int page_count(struct page *page)
{
return atomic_read(&compound_head(page)->_count);
}
+static inline void get_huge_page_tail(struct page *page)
+{
+ /*
+ * __split_huge_page_refcount() cannot run
+ * from under us.
+ */
+ VM_BUG_ON(page_mapcount(page) < 0);
+ VM_BUG_ON(atomic_read(&page->_count) != 0);
+ atomic_inc(&page->_mapcount);
+}
+
+extern bool __get_page_tail(struct page *page);
+
static inline void get_page(struct page *page)
{
+ if (unlikely(PageTail(page)))
+ if (likely(__get_page_tail(page)))
+ return;
/*
* Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_count. Only if
- * we're getting a tail page, the elevated page->_count is
- * required only in the head page, so for tail pages the
- * bugcheck only verifies that the page->_count isn't
- * negative.
+ * requires an already elevated page->_count.
*/
- VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
+ VM_BUG_ON(atomic_read(&page->_count) <= 0);
atomic_inc(&page->_count);
- /*
- * Getting a tail page will elevate both the head and tail
- * page->_count(s).
- */
- if (unlikely(PageTail(page))) {
- /*
- * This is safe only because
- * __split_huge_page_refcount can't run under
- * get_page().
- */
- VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
- atomic_inc(&page->first_page->_count);
- }
}
static inline struct page *virt_to_head_page(const void *x)
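
Callers do not change with the reworked get_page(): a minimal usage
sketch (sketch_pin_page() is a hypothetical name), assuming the usual
put_page() pairing from <linux/mm.h>:

	static void sketch_pin_page(struct page *page)
	{
		/*
		 * For an order-0 or head page this elevates page->_count;
		 * for a THP tail page, __get_page_tail() pins the head
		 * page and records the tail reference in _mapcount.
		 */
		get_page(page);

		/* ... safely access the page here ... */

		put_page(page);	/* drop whichever reference was taken */
	}
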
@@ -804,21 +819,6 @@ static inline pgoff_t page_index(struct page *page)
}
/*
- * The atomic page->_mapcount, like _count, starts from -1:
- * so that transitions both from it and to it can be tracked,
- * using atomic_inc_and_test and atomic_add_negative(-1).
- */
-static inline void reset_page_mapcount(struct page *page)
-{
- atomic_set(&(page)->_mapcount, -1);
-}
-
-static inline int page_mapcount(struct page *page)
-{
- return atomic_read(&(page)->_mapcount) + 1;
-}
-
-/*
* Return true if this page is mapped into pagetables.
*/
static inline int page_mapped(struct page *page)
@@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in a more
* architecture independent manner. This is a substitute for creating the
* zone_sizes[] and zholes_size[] arrays and passing them to
* free_area_init_node()
*
 * An architecture is expected to register the ranges of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at. In basic
 * usage, an architecture is expected to do something like:
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * add_active_range(node_id, start_pfn, end_pfn)
+ * memblock_add_node(base, size, nid)
* free_area_init_nodes(max_zone_pfns);
*
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range. Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
*
* See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
*/
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
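
A minimal sketch of the sequence the comment above describes; the
bank[] table and NR_BANKS are hypothetical stand-ins for an
architecture's real memory layout, and only ZONE_NORMAL is populated
for brevity:

	void __init sketch_arch_zones_init(void)
	{
		unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
		int i;

		/* register each RAM range with memblock, tagged by node */
		for (i = 0; i < NR_BANKS; i++)
			memblock_add_node(bank[i].base, bank[i].size,
					  bank[i].nid);

		/* then let the core compute zones, holes and mem_map */
		max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
		free_area_init_nodes(max_zone_pfns);
	}
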
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
@@ -1334,7 +1324,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
-extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+extern __printf(3, 4)
+void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
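
Assuming the usual definition in <linux/compiler.h>, __printf(3, 4)
expands to __attribute__((format(printf, 3, 4))): argument 3 is the
format string and variadic checking starts at argument 4, so gcc's
-Wformat now catches mismatched callers at compile time (gfp_mask and
order stand in for the caller's locals here):

	/* 'order' is an int, so %lu now draws a -Wformat warning */
	warn_alloc_failed(gfp_mask, order,
			  "page allocation failure: order:%lu\n", order);
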
@@ -1491,6 +1482,18 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
+/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
+static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+ unsigned long vm_start, unsigned long vm_end)
+{
+ struct vm_area_struct *vma = find_vma(mm, vm_start);
+
+ if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
+ vma = NULL;
+
+ return vma;
+}
+
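
A minimal usage sketch (sketch_has_exact_vma() is a hypothetical
name): like find_vma() itself, the lookup must run with mmap_sem held:

	static bool sketch_has_exact_vma(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
	{
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);	/* protects the VMA tree */
		vma = find_exact_vma(mm, start, end);
		up_read(&mm->mmap_sem);

		return vma != NULL;
	}
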
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
@@ -1537,23 +1540,13 @@ static inline void vm_stat_account(struct mm_struct *mm,
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_PAGEALLOC
-extern int debug_pagealloc_enabled;
-
extern void kernel_map_pages(struct page *page, int numpages, int enable);
-
-static inline void enable_debug_pagealloc(void)
-{
- debug_pagealloc_enabled = 1;
-}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
-static inline void enable_debug_pagealloc(void)
-{
-}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
@@ -1627,5 +1620,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+ return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
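
A minimal sketch of how allocator code can consume the helpers above
(the sketch_* names are hypothetical; the real consumers of this
interface live in the page allocator):

	static bool sketch_want_guard_pages(unsigned int order)
	{
		/* guard pages are only set up below the configured order */
		return order < debug_guardpage_minorder();
	}

	static void sketch_inspect_buddy(struct page *buddy)
	{
		/* a guard page is a placeholder, not a mergeable buddy */
		if (page_is_guard(buddy))
			pr_debug("buddy is a debug guard page\n");
	}
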
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */