From 8dcf10ec65597f9b5fd4066787a746cccf58eea4 Mon Sep 17 00:00:00 2001
From: Mark Salter
Date: Tue, 17 Jun 2014 18:14:26 +0100
Subject: arm64: export __cpu_{clear,copy}_user_page functions

The __cpu_clear_user_page() and __cpu_copy_user_page() functions are
not currently exported. This prevents modules from using
clear_user_page() and copy_user_page().

Signed-off-by: Mark Salter
Signed-off-by: Catalin Marinas
(cherry picked from commit bec7cedc8a92bfe96d32febe72634b30c63896bd)
Signed-off-by: Mark Brown
---
 arch/arm64/mm/copypage.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 9aecbace4128..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	copy_page(kto, kfrom);
 	__flush_dcache_area(kto, PAGE_SIZE);
 }
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
 
 void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
 {
 	clear_page(kaddr);
 }
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
--
cgit v1.2.3
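On arm64, clear_user_page() and copy_user_page() are macros in asm/page.h
that expand to the two functions exported above, so any module calling them
previously failed to load with an unresolved symbol. A minimal sketch of the
kind of module this patch enables (the module name and page handling are
illustrative, not taken from the patch):

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/page.h>

static int __init cup_demo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	/*
	 * clear_user_page() expands to __cpu_clear_user_page() on arm64;
	 * without the EXPORT_SYMBOL_GPL above, this reference could not be
	 * resolved at module load time.
	 */
	clear_user_page(page_address(page), 0, page);

	__free_page(page);
	return 0;
}

static void __exit cup_demo_exit(void)
{
}

module_init(cup_demo_init);
module_exit(cup_demo_exit);
MODULE_LICENSE("GPL");	/* required: the symbols are EXPORT_SYMBOL_GPL */

Note that the GPL license string is not optional here: EXPORT_SYMBOL_GPL
keeps the symbols invisible to modules without a GPL-compatible license.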
From 22a137413e7530afbf2305465fd4021c72618305 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 24 Jun 2014 16:51:34 +0100
Subject: arm64: head.S: remove unnecessary function alignment

Currently __turn_mmu_on is aligned to 64 bytes to ensure that it
doesn't span any page boundary, which simplifies the idmap and spares
us requiring an additional page table to map half of the function. In
keeping with other important requirements in architecture code, this
fact is undocumented.

Additionally, as the function consists of three instructions totalling
12 bytes with no literal pool data, a smaller alignment of 16 bytes
would be sufficient.

This patch reduces the alignment to 16 bytes and documents the
underlying reason for the alignment. This reduces the required
alignment of the entire .head.text section from 64 bytes to 16 bytes,
though it may still be aligned to a larger value depending on
TEXT_OFFSET.

Signed-off-by: Mark Rutland
Tested-by: Laura Abbott
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit 909a4069da65a5cfca8c968edf9f0d99f694d2f3)
Signed-off-by: Mark Brown
---
 arch/arm64/kernel/head.S | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0b281fffda51..5627d9e69b3c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -340,8 +340,13 @@ ENDPROC(__enable_mmu)
  * x27 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block we might require an additional
+ * table to map the entire function.
  */
-	.align	6
+	.align	4
 __turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
--
cgit v1.2.3

From 329e60cf177b0fff4929291cb9f67945fc14a458 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 15 Jul 2014 15:46:02 +0100
Subject: arm64: Remove duplicate (SWAPPER|IDMAP)_DIR_SIZE definitions

Just keep the asm/page.h definition as this is included in
vmlinux.lds.S as well.

Signed-off-by: Catalin Marinas
Acked-by: Mark Rutland
(cherry picked from commit b2f8c07bcb7d1a3575f41444d2d8048d0c922762)
Signed-off-by: Mark Brown
---
 arch/arm64/include/asm/pgtable.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa3917c8b623..cb63d933123c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -386,9 +386,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
 /*
  * Encode and decode a swap entry:
  *	bits 0-1:	present (must be zero)
--
cgit v1.2.3

From cd23262bbdab0b8090b2d46e1e4773085500d7e6 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 9 Jul 2014 19:22:11 +0100
Subject: arm64: vdso: put vdso datapage in a separate vma

The VDSO datapage doesn't need to be executable (no code there) or
CoW-able (the kernel writes the page, so a private copy is totally
useless).

This patch moves the datapage into its own VMA, identified as "[vvar]"
in /proc/<pid>/maps.

Cc: Andy Lutomirski
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit 8715493852783358ef8656a0054a14bf822509cf)
Signed-off-by: Mark Brown
---
 arch/arm64/kernel/vdso.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a7149cae1615..7931d6916683 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -156,11 +156,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
 	int ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
@@ -170,35 +171,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	}
 	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+	ret = install_special_mapping(mm, vdso_base, vdso_text_len,
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	if (ret)
+		goto up_fail;
+
+	vdso_base += vdso_text_len;
+	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				      VM_READ|VM_MAYREAD,
+				      vdso_pagelist + vdso_pages);
+	if (ret)
 		goto up_fail;
-	}
 
-up_fail:
 	up_write(&mm->mmap_sem);
+	return 0;
 
+up_fail:
+	mm->context.vdso = NULL;
+	up_write(&mm->mmap_sem);
 	return ret;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
+	unsigned long vdso_text;
+
+	if (!vma->vm_mm)
+		return NULL;
+
+	vdso_text = (unsigned long)vma->vm_mm->context.vdso;
+
 	/*
 	 * We can re-use the vdso pointer in mm_context_t for identifying
 	 * the vectors page for compat applications. The vDSO will always
 	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
 	 * it conflicting with the vectors base.
 	 */
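With the vdso patch above, each process carries two adjacent special
mappings: the vDSO text, still reported as "[vdso]", and the read-only data
page directly after it, now reported as "[vvar]". The split is easy to
observe from userspace; a small illustrative program (not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];

	if (!maps)
		return 1;

	/* Print the mappings that arch_vma_name() labels for this process. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
			fputs(line, stdout);
	}

	fclose(maps);
	return 0;
}

On a patched kernel the "[vdso]" line shows r-xp permissions while "[vvar]"
shows r--p, matching the VM_READ|VM_MAYREAD flags of the second
install_special_mapping() call.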
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+	if (vma->vm_start == vdso_text) {
 #ifdef CONFIG_COMPAT
 		if (vma->vm_start == AARCH32_VECTORS_BASE)
 			return "[vectors]";
 #endif
 		return "[vdso]";
+	} else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
+		return "[vvar]";
 	}
 
 	return NULL;
--
cgit v1.2.3

From 47dea321d906e94b8bda77e208962f11db6436aa Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Tue, 15 Jul 2014 08:38:08 +0100
Subject: arm64: Align the kbuild output for VDSOL and VDSOA

Signed-off-by: Ian Campbell
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Michal Marek
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kbuild@vger.kernel.org
Signed-off-by: Catalin Marinas
(cherry picked from commit ad789ba5f7086138461420d2156478d33fb61077)
Signed-off-by: Mark Brown
---
 arch/arm64/kernel/vdso/Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 6d20b7d162d8..84b942612051 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -47,9 +47,9 @@ $(obj-vdso): %.o: %.S
 	$(call if_changed_dep,vdsoas)
 
 # Actual build commands
-quiet_cmd_vdsold = VDSOL $@
+quiet_cmd_vdsold = VDSOL   $@
       cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsoas = VDSOA   $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
 # Install commands for the unstripped file
--
cgit v1.2.3
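The last patch is whitespace-only: kbuild echoes each rule's quiet_cmd_*
string during a normal build (the full cmd_* line only appears with V=1), so
padding the tool name is what lines these messages up with the CC/AS/LD
output around them. A sketch of the convention using a hypothetical "frob"
tool, not anything from the tree:

# Plain "make" prints the quiet string; "make V=1" prints the command.
quiet_cmd_frob = FROB    $@
      cmd_frob = frob -o $@ $<

%.out: %.in FORCE
	$(call if_changed,frob)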