Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/cache_arc700.c  23
-rw-r--r--  arch/arc/mm/tlb.c            3
-rw-r--r--  arch/arc/mm/tlbex.S          6
3 files changed, 17 insertions, 15 deletions
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 2f12bca8aef..aedce190544 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -610,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
local_irq_save(flags);
__ic_line_inv_vaddr(paddr, vaddr, len);
- __dc_line_op(paddr, vaddr, len, OP_FLUSH);
+ __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
local_irq_restore(flags);
}
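
For readers less familiar with the ARC cache helpers, here is a compilable user-space sketch of the sequence this hunk changes. Every identifier below is a stand-in invented for illustration (it only mirrors __ic_line_inv_vaddr, __dc_line_op, OP_FLUSH and OP_FLUSH_N_INV from the patch); the point is the difference between a plain write-back and a write-back that also discards the D-cache lines. IRQ masking is omitted since it has no user-space equivalent.

#include <stdio.h>

/* Sketch only: stand-ins for the ARC port's cache-op selectors. */
enum dc_op_sketch {
	OP_FLUSH_SK,		/* write dirty lines back, keep them valid   */
	OP_FLUSH_N_INV_SK,	/* write dirty lines back, then discard them */
};

static void dc_line_op_sketch(unsigned long paddr, unsigned long vaddr,
			      int len, enum dc_op_sketch op)
{
	printf("D$ %-9s paddr=%#lx vaddr=%#lx len=%d\n",
	       op == OP_FLUSH_N_INV_SK ? "flush+inv" : "flush",
	       paddr, vaddr, len);
}

static void ic_line_inv_sketch(unsigned long paddr, unsigned long vaddr, int len)
{
	printf("I$ inv      paddr=%#lx vaddr=%#lx len=%d\n", paddr, vaddr, len);
}

/* Same shape as __sync_icache_dcache() after this hunk: the I-cache lines
 * are invalidated, and the matching D-cache lines are written back *and*
 * invalidated, so no stale copy of the old code survives in either cache. */
static void sync_icache_dcache_sketch(unsigned long paddr,
				      unsigned long vaddr, int len)
{
	ic_line_inv_sketch(paddr, vaddr, len);
	dc_line_op_sketch(paddr, vaddr, len, OP_FLUSH_N_INV_SK);
}

int main(void)
{
	sync_icache_dcache_sketch(0x80002000UL, 0x2000a000UL, 64);
	return 0;
}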
@@ -676,6 +676,17 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
flush_cache_all();
}
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long u_vaddr)
+{
+ /* TBD: do we really need to clear the kernel mapping */
+ __flush_dcache_page(page_address(page), u_vaddr);
+ __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
@@ -725,16 +736,6 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
set_bit(PG_arch_1, &page->flags);
}
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
- unsigned long u_vaddr)
-{
- /* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_page(page_address(page), u_vaddr);
- __flush_dcache_page(page_address(page), page_address(page));
-
-}
-
-#endif
/**********************************************************************
* Explicit Cache flush request from user space via syscall
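
The other change in this file moves flush_anon_page() and the closing #endif above copy_user_highpage(), next to the other flush helpers. Conceptually the function writes back both virtual aliases of an anonymous page; a small stand-alone sketch of that idea follows, with struct page, page_address() and __flush_dcache_page() replaced by invented stand-ins.

#include <stdio.h>

/* Stand-in for struct page / page_address(): just remember the kernel
 * linear-map address of the page. */
struct page_sketch {
	unsigned long kvaddr;
};

static void flush_dcache_alias_sketch(unsigned long paddr, unsigned long vaddr)
{
	printf("write back lines of paddr %#lx through alias %#lx\n",
	       paddr, vaddr);
}

/* Same shape as the relocated flush_anon_page(): the anonymous page may be
 * dirty under the user alias and under the kernel alias, so both get flushed
 * (the patch keeps the original TBD about whether the kernel alias is really
 * needed). */
static void flush_anon_page_sketch(struct page_sketch *page,
				   unsigned long u_vaddr)
{
	flush_dcache_alias_sketch(page->kvaddr, u_vaddr);
	flush_dcache_alias_sketch(page->kvaddr, page->kvaddr);
}

int main(void)
{
	struct page_sketch pg = { .kvaddr = 0x80140000UL };

	flush_anon_page_sketch(&pg, 0x2ee4c000UL);
	return 0;
}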
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 066145b5f34..fe1c5a073af 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -444,7 +444,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
* so userspace sees the right data.
* (Avoids the flush for Non-exec + congruent mapping case)
*/
- if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
+ if ((vma->vm_flags & VM_EXEC) ||
+ addr_not_cache_congruent(paddr, vaddr)) {
struct page *page = pfn_to_page(pte_pfn(*ptep));
int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
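
The edit above is only a line wrap: bitwise & already binds tighter than ||, so the added parentheses do not change the condition, they just make it readable. The interesting part is the congruence test being referenced: on a VIPT-aliasing D-cache the flush can be skipped when the kernel and user mappings land on the same cache colour. Below is a stand-alone sketch of one common way to express such a test, using a made-up colour mask rather than the ARC port's real constant.

#include <stdio.h>

#define PAGE_SHIFT_SK		12
/* Assumption for illustration only: a 4-colour VIPT-aliasing D-cache
 * (cache way size = 4 pages).  Not the ARC port's actual geometry. */
#define CACHE_COLOUR_MASK_SK	(3UL << PAGE_SHIFT_SK)

/* Two mappings are congruent when they index the same cache sets; flushing
 * through one alias then also covers the other, so update_mmu_cache() can
 * skip the extra work for non-executable, congruent pages. */
static int addr_not_cache_congruent_sketch(unsigned long paddr,
					   unsigned long vaddr)
{
	return !!((paddr ^ vaddr) & CACHE_COLOUR_MASK_SK);
}

int main(void)
{
	/* same colour -> 0, different colour -> 1 */
	printf("%d\n", addr_not_cache_congruent_sketch(0x80001000UL, 0x20001000UL));
	printf("%d\n", addr_not_cache_congruent_sketch(0x80001000UL, 0x20002000UL));
	return 0;
}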
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 9df765dc7c3..3357d26ffe5 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI
;----------------------------------------------------------------
; VERIFY_PTE: Check if PTE permissions approp for executing code
cmp_s r2, VMALLOC_START
- mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+ mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE)
mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
and r3, r0, r2 ; Mask out NON Flag bits from PTE
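
This hunk (and the D-side one below) swap the generic _PAGE_READ/_PAGE_EXECUTE names for the explicit user-mode variants. The cmp_s/mov.lo/mov.hs sequence picks which permission set to demand from the PTE based on the faulting address; here is a compilable C rendering of that selection, with VMALLOC_START and all bit values replaced by invented stand-ins.

#include <stdio.h>

/* Stand-in constants; the real values come from the ARC page-table and
 * memory-layout headers. */
#define VMALLOC_START_SK	0x70000000UL

#define PAGE_PRESENT_SK		(1U << 0)
#define PAGE_U_READ_SK		(1U << 1)
#define PAGE_U_EXECUTE_SK	(1U << 2)
#define PAGE_K_READ_SK		(1U << 3)
#define PAGE_K_EXECUTE_SK	(1U << 4)

/* Mirrors the cmp_s / mov.lo / mov.hs triplet in EV_TLBMissI: an I-fetch
 * below VMALLOC_START must be backed by a PTE with user read+execute,
 * anything at or above it needs the kernel-mode bits instead. */
static unsigned int itlb_required_bits_sketch(unsigned long fault_addr)
{
	if (fault_addr < VMALLOC_START_SK)
		return PAGE_PRESENT_SK | PAGE_U_READ_SK | PAGE_U_EXECUTE_SK;

	return PAGE_PRESENT_SK | PAGE_K_READ_SK | PAGE_K_EXECUTE_SK;
}

int main(void)
{
	printf("user text   -> %#x\n", itlb_required_bits_sketch(0x20001000UL));
	printf("kernel text -> %#x\n", itlb_required_bits_sketch(0x90001000UL));
	return 0;
}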
@@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD
mov_s r2, 0
lr r3, [ecr]
btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
- or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
- or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE
; Above laddering takes care of XCHG access
; which is both Read and Write
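
Likewise for the data-side miss handler: the btst/or.nz laddering accumulates the PTE bits the faulting access must carry, now the user-mode _PAGE_U_READ/_PAGE_U_WRITE. A small C equivalent of that laddering follows, with the ECR cause bits and page-permission values again replaced by invented stand-ins.

#include <stdio.h>

/* Stand-in values; the real ECR_C_BIT_* and _PAGE_U_* definitions live in
 * the ARC headers. */
#define ECR_DTLB_LD_MISS_SK	(1U << 0)	/* miss caused by a load  */
#define ECR_DTLB_ST_MISS_SK	(1U << 1)	/* miss caused by a store */

#define PAGE_U_READ_SK		(1U << 1)
#define PAGE_U_WRITE_SK		(1U << 2)

/* Mirrors the EV_TLBMissD laddering: start from 0, OR in user-read for a
 * load miss and user-write for a store miss.  An atomic exchange reports
 * both causes, so it ends up demanding both permission bits. */
static unsigned int dtlb_required_bits_sketch(unsigned int ecr)
{
	unsigned int need = 0;

	if (ecr & ECR_DTLB_LD_MISS_SK)
		need |= PAGE_U_READ_SK;
	if (ecr & ECR_DTLB_ST_MISS_SK)
		need |= PAGE_U_WRITE_SK;

	return need;
}

int main(void)
{
	printf("load  -> %#x\n", dtlb_required_bits_sketch(ECR_DTLB_LD_MISS_SK));
	printf("store -> %#x\n", dtlb_required_bits_sketch(ECR_DTLB_ST_MISS_SK));
	printf("xchg  -> %#x\n", dtlb_required_bits_sketch(ECR_DTLB_LD_MISS_SK |
							   ECR_DTLB_ST_MISS_SK));
	return 0;
}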