diff options
Diffstat (limited to 'arch/arm/include/asm')
 -rw-r--r--  arch/arm/include/asm/mmu.h            |  1 +
 -rw-r--r--  arch/arm/include/asm/pgalloc.h        |  9 +++++++++
 -rw-r--r--  arch/arm/include/asm/pgtable-2level.h |  1 +
 -rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 21 +++++++++++++++++++++
 -rw-r--r--  arch/arm/include/asm/pgtable.h        | 18 ++++++++++++++++++
 -rw-r--r--  arch/arm/include/asm/tlb.h            |  8 ++++++++
6 files changed, 58 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6f18da09668b..8d22ddafb7bb 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -11,6 +11,7 @@ typedef struct {
 #endif
 	unsigned int vmalloc_seq;
 	unsigned long sigpage;
+	atomic_t gup_readers;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f53f57..49f054cfaea1 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -123,6 +123,15 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	pgtable_page_dtor(pte);
+
+	/*
+	 * Before freeing page, check to see whether or not
+	 * __get_user_pages_fast is still walking pages in the mm.
+	 * If this is the case, wait until gup has finished.
+	 */
+	while (atomic_read(&mm->context.gup_readers) != 0)
+		cpu_relax();
+
 	__free_page(pte);
 }
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..19a11a70bdd2 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -179,6 +179,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 /* we don't need complex calculations here as the pmd is folded into the pgd */
 #define pmd_addr_end(addr,end) (end)
+#define pmd_protnone(pmd) (0)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 54733e5ef7a1..4d96a22df51b 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -227,6 +227,7 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
 
 #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+#define pmd_protnone(pmd) (pmd_val(pmd) & PMD_SECT_NONE)
 /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
 #define pmd_mknotpresent(pmd) (__pmd(0))
@@ -256,6 +257,26 @@ static inline int has_transparent_hugepage(void)
 	return 1;
 }
 
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+static inline void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+					pmd_t *pmdp)
+{
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+	VM_BUG_ON(address & ~PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+
+	/*
+	 * Hold off until __get_user_pages_fast or arch_block_thp_splitting
+	 * have finished.
+	 *
+	 * The set_pmd_at above finishes with a dsb. This ensures that the
+	 * software splitting bit is observed by the critical section in
+	 * __get_user_pages_fast before we potentially start spinning below.
+	 */
+	while (atomic_read(&vma->vm_mm->context.gup_readers) != 0)
+		cpu_relax();
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_3LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index eaedce7b7e3a..4ee115f25511 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -220,6 +220,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
+#define pte_protnone(pte)	(pte_val(pte) & L_PTE_NONE)
 #define pte_special(pte)	(0)
 
 #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
@@ -330,6 +331,23 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pgtable_cache_init() do { } while (0)
 
+static inline void inc_gup_readers(struct mm_struct *mm)
+{
+	atomic_inc(&mm->context.gup_readers);
+	smp_mb__after_atomic_inc();
+}
+
+static inline void dec_gup_readers(struct mm_struct *mm)
+{
+	smp_mb__before_atomic_dec();
+	atomic_dec(&mm->context.gup_readers);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define arch_block_thp_split(mm) inc_gup_readers(mm)
+#define arch_unblock_thp_split(mm) dec_gup_readers(mm)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 0baf7f0d9394..470ef9e1e697 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -100,6 +100,14 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+	/*
+	 * Before freeing pages, check to see whether or not
+	 * __get_user_pages_fast is still walking pages in the mm.
+	 * If this is the case, wait until gup has finished.
+	 */
+	while (atomic_read(&tlb->mm->context.gup_readers) != 0)
+		cpu_relax();
+
 	tlb_flush(tlb);
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;