Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/page.h                  |  2
-rw-r--r--  arch/arm64/Kconfig                           |  2
-rw-r--r--  arch/arm64/include/asm/hugetlb.h             |  4
-rw-r--r--  arch/arm64/include/asm/stackprotector.h      |  1
-rw-r--r--  arch/arm64/mm/hugetlbpage.c                  | 53
-rw-r--r--  arch/frv/include/asm/Kbuild                  |  2
-rw-r--r--  arch/frv/include/asm/device.h                |  7
-rw-r--r--  arch/frv/include/asm/fb.h                    | 12
-rw-r--r--  arch/frv/include/asm/timex.h                 |  6
-rw-r--r--  arch/ia64/kernel/machine_kexec.c             |  5
-rw-r--r--  arch/ia64/mm/hugetlbpage.c                   |  4
-rw-r--r--  arch/ia64/mm/init.c                          | 11
-rw-r--r--  arch/metag/mm/hugetlbpage.c                  |  3
-rw-r--r--  arch/mips/mm/hugetlbpage.c                   |  3
-rw-r--r--  arch/mn10300/include/asm/Kbuild              |  2
-rw-r--r--  arch/mn10300/include/asm/device.h            |  1
-rw-r--r--  arch/mn10300/include/asm/fb.h                | 23
-rw-r--r--  arch/parisc/mm/hugetlbpage.c                 |  3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hugetlb.h | 10
-rw-r--r--  arch/powerpc/kernel/fadump.c                 |  3
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                | 90
-rw-r--r--  arch/powerpc/mm/mem.c                        | 12
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype       |  6
-rw-r--r--  arch/s390/Kconfig                            |  2
-rw-r--r--  arch/s390/include/asm/hugetlb.h              |  5
-rw-r--r--  arch/s390/kernel/machine_kexec.c             |  1
-rw-r--r--  arch/s390/kernel/setup.c                     |  6
-rw-r--r--  arch/s390/mm/hugetlbpage.c                   |  3
-rw-r--r--  arch/s390/mm/init.c                          | 32
-rw-r--r--  arch/sh/include/asm/stackprotector.h         |  1
-rw-r--r--  arch/sh/mm/hugetlbpage.c                     |  3
-rw-r--r--  arch/sh/mm/init.c                            | 10
-rw-r--r--  arch/sparc/mm/hugetlbpage.c                  |  3
-rw-r--r--  arch/tile/mm/hugetlbpage.c                   |  3
-rw-r--r--  arch/tile/mm/pgtable.c                       | 11
-rw-r--r--  arch/x86/Kconfig                             |  3
-rw-r--r--  arch/x86/include/asm/hugetlb.h               |  4
-rw-r--r--  arch/x86/include/asm/stackprotector.h        |  1
-rw-r--r--  arch/x86/kernel/crash.c                      |  2
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c           |  1
-rw-r--r--  arch/x86/mm/hugetlbpage.c                    |  2
-rw-r--r--  arch/x86/mm/init_32.c                        |  7
-rw-r--r--  arch/x86/mm/init_64.c                        | 11
-rw-r--r--  arch/x86/xen/mmu_pv.c                        |  4

44 files changed, 152 insertions(+), 228 deletions(-)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 4355f0ec44d6..f98baaec0a15 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -17,6 +17,8 @@
#ifndef __ASSEMBLY__
+#include <linux/personality.h> /* For READ_IMPLIES_EXEC */
+
#ifndef CONFIG_MMU
#include <asm/page-nommu.h>
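
A note on the hunk above: this header defines VM_DATA_DEFAULT_FLAGS in terms of the READ_IMPLIES_EXEC personality bit, so it must include <linux/personality.h> itself instead of relying on indirect inclusion. A condensed sketch of the consumer (the header's exact definition may differ):

	#define VM_DATA_DEFAULT_FLAGS \
		(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
		 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
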
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3dcd7ec69bca..848a34116c67 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -12,7 +12,7 @@ config ARM64
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index bbc1e35aa601..793bd73b0d07 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -83,4 +83,8 @@ extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void) { return true; }
+#endif
+
#endif /* __ASM_HUGETLB_H */
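
The gigantic_page_supported() hook introduced here (and mirrored for powerpc, s390 and x86 below) lets generic hugetlb code refuse runtime allocation of gigantic pages on architectures that have not opted in. A minimal sketch of a core-side gate (the helper name is illustrative, not from this patch):

	/* allow pool growth only when the hstate is non-gigantic,
	 * or the architecture supports runtime gigantic pages
	 */
	static bool can_grow_pool_at_runtime(struct hstate *h)
	{
		return !hstate_is_gigantic(h) || gigantic_page_supported();
	}
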
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index fe5e287dc56b..b86a0865ddf1 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -30,6 +30,7 @@ static __always_inline void boot_init_stack_canary(void)
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
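
The added canary &= CANARY_MASK; (repeated for sh and x86 below) forces one byte of the canary to zero, so C string operations that run past a buffer stop at the embedded NUL instead of silently copying over the canary. Assuming the <linux/random.h> side of this series, the mask is:

	#ifdef CONFIG_64BIT
	#define CANARY_MASK 0xffffffffffffff00UL	/* low byte is a NUL */
	#else
	#define CANARY_MASK 0xffffffffUL		/* 32-bit has no byte to spare */
	#endif
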
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 7514a000e361..0ecc921b5719 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -42,15 +42,13 @@ int pud_huge(pud_t pud)
}
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, size_t *pgsize)
+ pte_t *ptep, size_t *pgsize)
{
pgd_t *pgd = pgd_offset(mm, addr);
pud_t *pud;
pmd_t *pmd;
*pgsize = PAGE_SIZE;
- if (!pte_cont(pte))
- return 1;
pud = pud_offset(pgd, addr);
pmd = pmd_offset(pud, addr);
if ((pte_t *)pmd == ptep) {
@@ -65,15 +63,16 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
{
size_t pgsize;
int i;
- int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
+ int ncontig;
unsigned long pfn;
pgprot_t hugeprot;
- if (ncontig == 1) {
+ if (!pte_cont(pte)) {
set_pte_at(mm, addr, ptep, pte);
return;
}
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
pfn = pte_pfn(pte);
hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
for (i = 0; i < ncontig; i++) {
@@ -132,7 +131,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
@@ -193,21 +193,19 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
if (pte_cont(*ptep)) {
int ncontig, i;
size_t pgsize;
- pte_t *cpte;
bool is_dirty = false;
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
/* save the 1st pte to return */
- pte = ptep_get_and_clear(mm, addr, cpte);
+ pte = ptep_get_and_clear(mm, addr, ptep);
for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
/*
* If HW_AFDBM is enabled, then the HW could
* turn on the dirty bit for any of the page
* in the set, so check them all.
*/
- ++cpte;
- if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
+ ++ptep;
+ if (pte_dirty(ptep_get_and_clear(mm, addr, ptep)))
is_dirty = true;
}
if (is_dirty)
@@ -223,8 +221,6 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- pte_t *cpte;
-
if (pte_cont(pte)) {
int ncontig, i, changed = 0;
size_t pgsize = 0;
@@ -234,12 +230,11 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
__pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
pte_val(pte));
- cpte = huge_pte_offset(vma->vm_mm, addr);
- pfn = pte_pfn(*cpte);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
- changed |= ptep_set_access_flags(vma, addr, cpte,
+ pfn = pte_pfn(pte);
+ ncontig = find_num_contig(vma->vm_mm, addr, ptep,
+ &pgsize);
+ for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize) {
+ changed |= ptep_set_access_flags(vma, addr, ptep,
pfn_pte(pfn,
hugeprot),
dirty);
@@ -256,13 +251,11 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
{
if (pte_cont(*ptep)) {
int ncontig, i;
- pte_t *cpte;
size_t pgsize = 0;
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
- ptep_set_wrprotect(mm, addr, cpte);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
+ ptep_set_wrprotect(mm, addr, ptep);
} else {
ptep_set_wrprotect(mm, addr, ptep);
}
@@ -273,14 +266,12 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
{
if (pte_cont(*ptep)) {
int ncontig, i;
- pte_t *cpte;
size_t pgsize = 0;
- cpte = huge_pte_offset(vma->vm_mm, addr);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
- ptep_clear_flush(vma, addr, cpte);
+ ncontig = find_num_contig(vma->vm_mm, addr, ptep,
+ &pgsize);
+ for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
+ ptep_clear_flush(vma, addr, ptep);
} else {
ptep_clear_flush(vma, addr, ptep);
}
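
The arm64 rework above removes the redundant huge_pte_offset() re-walk: every caller already holds the correct ptep, so find_num_contig() now only inspects the table position to report the contiguous-entry count and page size. Condensed from the hunks above, the resulting caller pattern is:

	if (pte_cont(*ptep)) {
		size_t pgsize;
		int i, ncontig = find_num_contig(mm, addr, ptep, &pgsize);

		for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
			ptep_set_wrprotect(mm, addr, ptep);
	} else {
		ptep_set_wrprotect(mm, addr, ptep);
	}
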
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index cce3bc3603ea..2cf7648787b2 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -1,7 +1,9 @@
generic-y += clkdev.h
+generic-y += device.h
generic-y += exec.h
generic-y += extable.h
+generic-y += fb.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/frv/include/asm/device.h b/arch/frv/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/frv/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/frv/include/asm/fb.h b/arch/frv/include/asm/fb.h
deleted file mode 100644
index c7df38030992..000000000000
--- a/arch/frv/include/asm/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bddefdacf..139093fab326 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
#define vxtime_lock() do {} while (0)
#define vxtime_unlock() do {} while (0)
+/* This attribute is used in include/linux/jiffies.h alongside
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data __attribute__((__section__(".data")))
+
#endif
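
For context on the comment above: include/linux/jiffies.h declares jiffies with a default-empty __jiffy_arch_data hook that an architecture such as frv can override. A sketch of the jiffies.h side, assuming this series' declaration:

	#ifndef __jiffy_arch_data
	#define __jiffy_arch_data
	#endif

	extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
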
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 599507bcec91..c14815dca747 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -163,8 +163,3 @@ void arch_crash_save_vmcoreinfo(void)
#endif
}
-phys_addr_t paddr_vmcoreinfo_note(void)
-{
- return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
-}
-
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 85de86d36fdf..ae35140332f7 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
}
pte_t *
-huge_pte_offset (struct mm_struct *mm, unsigned long addr)
+huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
@@ -92,7 +92,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
if (REGION_NUMBER(addr) != RGN_HPAGE)
return ERR_PTR(-EINVAL);
- ptep = huge_pte_offset(mm, addr);
+ ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
if (!ptep || pte_none(*ptep))
return NULL;
page = pte_page(*ptep);
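
As the ia64 hunk shows, huge_pte_offset() now takes the huge page size, letting implementations with several possible hugepage levels descend to the right one; callers that already know the size, as in follow_huge_addr() above, simply pass it. A sketch of a generic call site using the usual hstate helpers:

	struct hstate *h = hstate_vma(vma);
	pte_t *ptep = huge_pte_offset(mm, address & huge_page_mask(h),
				      huge_page_size(h));
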
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8f3efa682ee8..a4e8d6bd9cfa 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -646,20 +646,13 @@ mem_init (void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- pg_data_t *pgdat;
- struct zone *zone;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(nid);
-
- zone = pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
- ret = __add_pages(nid, zone, start_pfn, nr_pages);
-
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
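
This arch_add_memory() change repeats below for powerpc, s390, sh and x86: zone selection moves out of the architectures and into core mm, which now assigns pages to a zone when they are onlined rather than when they are hot-added. Each arch therefore shrinks to its mapping setup plus a call to the new core entry point, whose signature (as used throughout this diff) is:

	int __add_pages(int nid, unsigned long phys_start_pfn,
			unsigned long nr_pages, bool want_memblock);
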
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index db1b7da91e4f..67fd53e2935a 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -74,7 +74,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 74aa6f62468f..cef152234312 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -36,7 +36,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index ed810e7206e8..db5b57829a81 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -1,8 +1,10 @@
generic-y += barrier.h
generic-y += clkdev.h
+generic-y += device.h
generic-y += exec.h
generic-y += extable.h
+generic-y += fb.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/mn10300/include/asm/device.h b/arch/mn10300/include/asm/device.h
deleted file mode 100644
index f0a4c256403b..000000000000
--- a/arch/mn10300/include/asm/device.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/device.h>
diff --git a/arch/mn10300/include/asm/fb.h b/arch/mn10300/include/asm/fb.h
deleted file mode 100644
index 697b24a91e1a..000000000000
--- a/arch/mn10300/include/asm/fb.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* MN10300 Frame buffer stuff
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_FB_H
-#define _ASM_FB_H
-
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H */
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index aa50ac090e9b..5eb8f633b282 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -69,7 +69,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 6666cd366596..5c28bd6f2ae1 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -50,4 +50,14 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
else
return entry;
}
+
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void)
+{
+ if (radix_enabled())
+ return true;
+ return false;
+}
+#endif
+
#endif
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 466569e26278..7bd6cd02104c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -893,8 +893,7 @@ static int fadump_create_elfcore_headers(char *bufp)
phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
phdr->p_offset = phdr->p_paddr;
- phdr->p_memsz = vmcoreinfo_max_size;
- phdr->p_filesz = vmcoreinfo_max_size;
+ phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
/* Increment number of program headers. */
(elf->e_phnum)++;
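
The fadump fix addresses real under-sizing: vmcoreinfo_max_size described only the payload, while the PT_NOTE program header must cover the complete ELF note (header, padded name and padded descriptor). Conceptually (a sketch, not the literal macro from <linux/crash_core.h>):

	/* conceptual sizing of the vmcoreinfo ELF note */
	size_t note_size = ALIGN(sizeof(struct elf_note), 4)	/* header  */
			 + ALIGN(sizeof("VMCOREINFO"), 4)	/* name    */
			 + ALIGN(VMCOREINFO_BYTES, 4);		/* payload */
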
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a4f33de4008e..c41dc44472c5 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -17,6 +17,8 @@
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -55,7 +57,7 @@ static unsigned nr_gpages;
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
/* Only called for hugetlbfs pages, hence can ignore THP */
return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
@@ -617,62 +619,39 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
} while (addr = next, addr != end);
}
-/*
- * We are holding mmap_sem, so a parallel huge page collapse cannot run.
- * To prevent hugepage split, disable irq.
- */
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift)
{
- bool is_thp;
- pte_t *ptep, pte;
- unsigned shift;
- unsigned long mask, flags;
- struct page *page = ERR_PTR(-EINVAL);
-
- local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
- if (!ptep)
- goto no_page;
- pte = READ_ONCE(*ptep);
- /*
- * Verify it is a huge page else bail.
- * Transparent hugepages are handled by generic code. We can skip them
- * here.
- */
- if (!shift || is_thp)
- goto no_page;
-
- if (!pte_present(pte)) {
- page = NULL;
- goto no_page;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ struct page *page = NULL;
+ unsigned long mask;
+ int shift = hugepd_shift(hpd);
+ struct mm_struct *mm = vma->vm_mm;
+
+retry:
+ ptl = &mm->page_table_lock;
+ spin_lock(ptl);
+
+ ptep = hugepte_offset(hpd, address, pdshift);
+ if (pte_present(*ptep)) {
+ mask = (1UL << shift) - 1;
+ page = pte_page(*ptep);
+ page += ((address & mask) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+ if (is_hugetlb_entry_migration(*ptep)) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, ptep, ptl);
+ goto retry;
+ }
}
- mask = (1UL << shift) - 1;
- page = pte_page(pte);
- if (page)
- page += (address & mask) / PAGE_SIZE;
-
-no_page:
- local_irq_restore(flags);
+ spin_unlock(ptl);
return page;
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- BUG();
- return NULL;
-}
-
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- BUG();
- return NULL;
-}
-
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
{
@@ -763,8 +742,11 @@ static int __init add_huge_page_size(unsigned long long size)
* Hash: 16M and 16G
*/
if (radix_enabled()) {
- if (mmu_psize != MMU_PAGE_2M)
- return -EINVAL;
+ if (mmu_psize != MMU_PAGE_2M) {
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
+ (mmu_psize != MMU_PAGE_1G))
+ return -EINVAL;
+ }
} else {
if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
return -EINVAL;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 9ee536ec0739..de5a90e1ceaa 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -126,18 +126,14 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdata;
- struct zone *zone;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int rc;
resize_hpt_for_hotplug(memblock_phys_mem_size());
- pgdata = NODE_DATA(nid);
-
start = (unsigned long)__va(start);
rc = create_section_mapping(start, start + size);
if (rc) {
@@ -147,11 +143,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
return -EFAULT;
}
- /* this should work for most non-highmem platforms */
- zone = pgdata->node_zones +
- zone_for_memory(nid, start, size, 0, for_device);
-
- return __add_pages(nid, zone, start_pfn, nr_pages);
+ return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 684e886eaae4..2f629e0551e9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -344,12 +344,18 @@ config PPC_STD_MMU_64
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
default y
help
Enable support for the Power ISA 3.0 Radix style MMU. Currently this
is only implemented by IBM Power9 CPUs; if you don't have one of them
you can probably disable this.
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+ def_bool y
+ depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
+
+
config PPC_MMU_NOHASH
def_bool y
depends on !PPC_STD_MMU
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e7ff58150e8f..6940c68d8437 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -67,7 +67,7 @@ config S390
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index cd546a245c68..d95869ce3ca2 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -39,7 +39,7 @@ static inline int prepare_hugepage_range(struct file *file,
#define arch_clear_hugepage_flags(page) do { } while (0)
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
@@ -112,4 +112,7 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void) { return true; }
+#endif
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 49a6bd45957b..3d0b14afa232 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -246,6 +246,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
+ mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}
void machine_shutdown(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3ae756c0db3d..3d1d808ea8a9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -496,11 +496,6 @@ static void __init setup_memory_end(void)
pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}
-static void __init setup_vmcoreinfo(void)
-{
- mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
-}
-
#ifdef CONFIG_CRASH_DUMP
/*
@@ -939,7 +934,6 @@ void __init setup_arch(char **cmdline_p)
#endif
setup_resources();
- setup_vmcoreinfo();
setup_lowcore();
smp_fill_possible_mask();
cpu_detect_mhz_feature();
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d3a5e39756f6..44a8e6f0391e 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -180,7 +180,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return (pte_t *) pmdp;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
p4d_t *p4dp;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ee6a1d3d4983..f4fb5d191562 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -162,43 +162,17 @@ unsigned long memory_block_size_bytes(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
- pg_data_t *pgdat = NODE_DATA(nid);
- struct zone *zone;
- int rc, i;
+ int rc;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- zone = pgdat->node_zones + i;
- if (zone_idx(zone) != ZONE_MOVABLE) {
- /* Add range within existing zone limits, if possible */
- zone_start_pfn = zone->zone_start_pfn;
- zone_end_pfn = zone->zone_start_pfn +
- zone->spanned_pages;
- } else {
- /* Add remaining range to ZONE_MOVABLE */
- zone_start_pfn = start_pfn;
- zone_end_pfn = start_pfn + size_pages;
- }
- if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
- continue;
- nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
- zone_end_pfn - start_pfn : size_pages;
- rc = __add_pages(nid, zone, start_pfn, nr_pages);
- if (rc)
- break;
- start_pfn += nr_pages;
- size_pages -= nr_pages;
- if (!size_pages)
- break;
- }
+ rc = __add_pages(nid, start_pfn, size_pages, want_memblock);
if (rc)
vmem_remove_mapping(start, size);
return rc;
diff --git a/arch/sh/include/asm/stackprotector.h b/arch/sh/include/asm/stackprotector.h
index d9df3a76847c..141515a43b78 100644
--- a/arch/sh/include/asm/stackprotector.h
+++ b/arch/sh/include/asm/stackprotector.h
@@ -19,6 +19,7 @@ static __always_inline void boot_init_stack_canary(void)
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index cc948db74878..d2412d2d6462 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -42,7 +42,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 75491862d900..bf726af5f1a5 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -485,20 +485,14 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- pg_data_t *pgdat;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(nid);
-
/* We only have ZONE_NORMAL, so this is easy.. */
- ret = __add_pages(nid, pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL,
- for_device),
- start_pfn, nr_pages);
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
if (unlikely(ret))
printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38e6b99..8989c5e155b3 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -277,7 +277,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index cb10153b5c9f..1f0993945521 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -102,7 +102,8 @@ static pte_t *get_pte(pte_t *base, int index, int level)
return ptep;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 492a7361e58e..ec5576fd3a86 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -503,6 +503,17 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
}
EXPORT_SYMBOL(ioremap_prot);
+#if !defined(CONFIG_PCI) || !defined(CONFIG_TILEGX)
+/* ioremap is conditionally declared in pci_gx.c */
+
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(ioremap);
+
+#endif
+
/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
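
The tile stub above gives kernels built without CONFIG_PCI/CONFIG_TILEGX an ioremap() definition that always fails, so portable drivers still link and simply take their normal error path. A usage sketch (res is a hypothetical struct resource pointer):

	void __iomem *regs = ioremap(res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;	/* the stub always lands here on tile */
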
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4ccfacc7232a..f00fa5176f72 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,7 +22,7 @@ config X86_64
def_bool y
depends on 64BIT
# Options that are inherently 64-bit kernel only:
- select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_SUPPORTS_INT128
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
@@ -72,6 +72,7 @@ config X86
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_THP_SWAP if X86_64
select BUILDTIME_EXTABLE_SORT
select CLKEVT_I8253
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 3a106165e03a..535af0f2d8ac 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -85,4 +85,8 @@ static inline void arch_clear_hugepage_flags(struct page *page)
{
}
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void) { return true; }
+#endif
+
#endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index dcbd9bcce714..8abedf1d650e 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -74,6 +74,7 @@ static __always_inline void boot_init_stack_canary(void)
get_random_bytes(&canary, sizeof(canary));
tsc = rdtsc();
canary += tsc + (tsc << 32UL);
+ canary &= CANARY_MASK;
current->stack_canary = canary;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 22217ece26c8..44404e2307bb 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -457,7 +457,7 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
bufp += sizeof(Elf64_Phdr);
phdr->p_type = PT_NOTE;
phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
- phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
+ phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
(ehdr->e_phnum)++;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 6f5ca4ebe6e5..794e7a9a98a4 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -356,6 +356,7 @@ void arch_crash_save_vmcoreinfo(void)
vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
kaslr_offset());
VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
+ VMCOREINFO_PHYS_BASE(phys_base);
}
/* arch-dependent functionality related to kexec file-based syscall */
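
Exporting the value of phys_base (instead of the long-standing VMCOREINFO_SYMBOL(phys_base)) lets dump tools translate symbol addresses even when the crashed kernel ran relocated. VMCOREINFO entries are plain key=value strings appended to the note; for comparison, the existing VMCOREINFO_NUMBER() helper reads:

	#define VMCOREINFO_NUMBER(name) \
		vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)

VMCOREINFO_PHYS_BASE() follows the same append-a-key pattern; its exact key string is not visible in this diff.
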
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 302f43fd9c28..ccf509063dfd 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -33,7 +33,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
if (!vma || !is_vm_hugetlb_page(vma))
return ERR_PTR(-EINVAL);
- pte = huge_pte_offset(mm, address);
+ pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
/* hugetlb should be locked, and hence, prefaulted */
WARN_ON(!pte || pte_none(*pte));
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 99fb83819a5f..8a64a6f2848d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -823,15 +823,12 @@ void __init mem_init(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdata = NODE_DATA(nid);
- struct zone *zone = pgdata->node_zones +
- zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- return __add_pages(nid, zone, start_pfn, nr_pages);
+ return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 95651dc58e09..73329300192b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -682,22 +682,15 @@ static void update_end_of_memory_vars(u64 start, u64 size)
}
}
-/*
- * Memory is added always to NORMAL zone. This means you will never get
- * additional DMA/DMA32 memory.
- */
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdat = NODE_DATA(nid);
- struct zone *zone = pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
init_memory_mapping(start, start + size);
- ret = __add_pages(nid, zone, start_pfn, nr_pages);
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1f386d7fdf70..e2127edbe826 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2698,8 +2698,8 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())
- return virt_to_machine(&vmcoreinfo_note).maddr;
+ return virt_to_machine(vmcoreinfo_note).maddr;
else
- return __pa_symbol(&vmcoreinfo_note);
+ return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */
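
Finally, the Xen hunk follows from vmcoreinfo_note no longer being a static array in the kernel image but a runtime-allocated buffer, hence plain __pa() instead of __pa_symbol() and no address-of operator. A sketch of the core-side allocation this implies (the init-path details are an assumption):

	u32 *vmcoreinfo_note;	/* previously a fixed array in .bss */

	static int __init crash_save_vmcoreinfo_init(void)
	{
		vmcoreinfo_note = alloc_pages_exact(VMCOREINFO_NOTE_SIZE,
						    GFP_KERNEL | __GFP_ZERO);
		return vmcoreinfo_note ? 0 : -ENOMEM;
	}
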