Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/kernel/head.S       |  5
-rw-r--r--  arch/arm/kernel/hyp-stub.S   | 18
-rw-r--r--  arch/arm/mm/dma-mapping.c    | 18
-rw-r--r--  arch/arm/mm/mmu.c            |  2
-rw-r--r--  arch/arm/vfp/entry.S         |  6
-rw-r--r--  arch/arm/vfp/vfphw.S         |  4
6 files changed, 26 insertions(+), 27 deletions(-)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f466..486a15ae901 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
/*
* Then map boot params address in r2 if specified.
+ * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
addne r3, r3, #PAGE_OFFSET
addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
orrne r6, r7, r0
+ strne r6, [r3], #1 << PMD_ORDER
+ addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
#ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
* as it has already been validated by the primary processor.
*/
#ifdef CONFIG_ARM_VIRT_EXT
- bl __hyp_stub_install
+ bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r9
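
Note on the head.S hunks above: the new strne/addne pair fills two consecutive PMD entries, so the ATAGs/DTB stays mapped even when it straddles a 1 MiB section boundary. A minimal, self-contained C sketch of the arithmetic; SECTION_SHIFT = 20 matches the classic short-descriptor case, and the DTB address below is made up:

/* Sketch of why a single section mapping is not always enough, assuming
 * SECTION_SHIFT = 20 (1 MiB sections).  The DTB address is hypothetical. */
#include <stdio.h>

#define SECTION_SHIFT 20UL

int main(void)
{
    unsigned long dtb_start = 0x400ff000UL;        /* r2: just below a 1 MiB boundary */
    unsigned long dtb_end   = dtb_start + 0x3000;  /* crosses into the next section   */

    unsigned long first = dtb_start >> SECTION_SHIFT;
    unsigned long last  = (dtb_end - 1) >> SECTION_SHIFT;

    /* first != last here, so __create_page_tables must write two consecutive
     * PMD entries: the post-indexed strne stores the first, addne bumps the
     * physical base by one section, and the second strne stores the next.   */
    printf("sections needed: %lu\n", last - first + 1);
    return 0;
}
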
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebc..1315c4ccfa5 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
* immediately.
*/
compare_cpu_mode_with_primary r4, r5, r6, r7
- bxne lr
+ movne pc, lr
/*
* Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
*/
cmp r4, #HYP_MODE
- bxne lr @ give up if the CPU is not in HYP mode
+ movne pc, lr @ give up if the CPU is not in HYP mode
/*
* Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
* Eventually, CPU-specific code might be needed -- assume not for now
*
* This code relies on the "eret" instruction to synchronize the
- * various coprocessor accesses.
+ * various coprocessor accesses. This is done when we switch to SVC
+ * (see safe_svcmode_maskall).
*/
@ Now install the hypervisor stub:
adr r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
1:
#endif
- bic r7, r4, #MODE_MASK
- orr r7, r7, #SVC_MODE
-THUMB( orr r7, r7, #PSR_T_BIT )
- msr spsr_cxsf, r7 @ This is SPSR_hyp.
-
- __MSR_ELR_HYP(14) @ msr elr_hyp, lr
- __ERET @ return, switching to SVC mode
- @ The boot CPU mode is left in r4.
+ bx lr @ The boot CPU mode is left in r4.
ENDPROC(__hyp_stub_install_secondary)
__hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
@ fall through
ENTRY(__hyp_set_vectors)
__HVC(0)
- bx lr
+ mov pc, lr
ENDPROC(__hyp_set_vectors)
#ifndef ZIMAGE
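
Note on the hyp-stub.S hunks above: __hyp_stub_install_secondary now simply returns (movne pc, lr) if the secondary CPU did not come up in the same mode as the primary, or is not in HYP mode at all; and its epilog no longer drops to SVC itself, since per the updated comment that switch happens later in safe_svcmode_maskall. A rough, self-contained C rendering of that flow; the constants, variable and helper below are illustrative stand-ins, not the kernel's definitions:

/* Rough rendering of __hyp_stub_install_secondary after this change. */
#include <stdio.h>

#define HYP_MODE 0x1a                  /* CPSR mode bits for HYP */
#define SVC_MODE 0x13

static unsigned int boot_cpu_mode = HYP_MODE;   /* recorded by the primary CPU */

static void install_hyp_stub_vectors(void)
{
    puts("HVBAR -> __hyp_stub_vectors, HSCTLR adjusted");
}

static void hyp_stub_install_secondary(unsigned int mode)
{
    if (mode != boot_cpu_mode)         /* compare_cpu_mode_with_primary */
        return;                        /* movne pc, lr: give up quietly */
    if (mode != HYP_MODE)              /* no virtualisation extensions  */
        return;                        /* movne pc, lr                  */

    install_hyp_stub_vectors();
    /* No SPSR_hyp/eret epilog any more: we return still in HYP mode and
     * safe_svcmode_maskall performs the switch to SVC later. */
}

int main(void)
{
    hyp_stub_install_secondary(HYP_MODE);   /* secondary booted in HYP         */
    hyp_stub_install_secondary(SVC_MODE);   /* mode mismatch: silently bail out */
    return 0;
}
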
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b2fb87c869..076c26d4386 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
{
+ unsigned long pfn;
+ size_t left = size;
+
+ pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
/*
* A single sg entry may refer to multiple physically contiguous
* pages. But we still need to process highmem pages individually.
* If highmem is not configured then the bulk of this loop gets
* optimized out.
*/
- size_t left = size;
do {
size_t len = left;
void *vaddr;
+ page = pfn_to_page(pfn);
+
if (PageHighMem(page)) {
- if (len + offset > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset / PAGE_SIZE;
- offset %= PAGE_SIZE;
- }
+ if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- }
vaddr = kmap_high_get(page);
if (vaddr) {
vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
op(vaddr, len, dir);
}
offset = 0;
- page++;
+ pfn++;
left -= len;
} while (left);
}
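
Note on the dma-mapping.c hunks above: the loop now walks the buffer by pfn and calls pfn_to_page() on every iteration instead of doing pointer arithmetic on the struct page, which can walk off the end of a mem_map chunk when the pages of a long sg entry are not contiguous there (e.g. sparsemem). A self-contained sketch of the resulting per-page walk; maint_op() and the start values are placeholders for illustration:

/* Sketch of the per-page walk dma_cache_maint_page() now performs. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void maint_op(unsigned long pfn, unsigned long off, unsigned long len)
{
    printf("pfn %lu: offset %lu, len %lu\n", pfn, off, len);
}

static void cache_maint_sketch(unsigned long pfn, unsigned long offset,
                               unsigned long size)
{
    unsigned long left = size;

    /* Fold a large initial offset into the pfn up front, as the patch does. */
    pfn += offset / PAGE_SIZE;
    offset %= PAGE_SIZE;

    do {
        unsigned long len = left;

        /* Mimic the highmem split: the real code only needs this clamp
         * when PageHighMem(page) is true. */
        if (len + offset > PAGE_SIZE)
            len = PAGE_SIZE - offset;

        maint_op(pfn, offset, len);     /* kmap + op() in the real code */

        offset = 0;
        pfn++;                          /* was page++ before the patch  */
        left -= len;
    } while (left);
}

int main(void)
{
    /* 3 pages plus a bit, starting 512 bytes into the first page. */
    cache_maint_sketch(1000, 512, 3 * PAGE_SIZE + 256);
    return 0;
}
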
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd..ce328c7f5c9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
},
[MT_MEMORY_SO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_MT_UNCACHED,
+ L_PTE_MT_UNCACHED | L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
PMD_SECT_UNCACHED | PMD_SECT_XN,
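
Note on the mmu.c hunk above: MT_MEMORY_SO already carried PMD_SECT_XN in its section attributes; adding L_PTE_XN makes page-level strongly-ordered mappings non-executable as well. A sketch of how a platform would request such a mapping; the addresses are made up and the snippet assumes the usual map_desc/iotable_init machinery:

/* Hypothetical static mapping that would pick up the MT_MEMORY_SO attributes. */
#include <linux/init.h>
#include <linux/sizes.h>
#include <asm/mach/map.h>

static struct map_desc example_so_map __initdata = {
    .virtual = 0xf8000000,                  /* hypothetical VA */
    .pfn     = __phys_to_pfn(0x40000000),   /* hypothetical PA */
    .length  = SZ_1M,
    .type    = MT_MEMORY_SO,                /* strongly-ordered RAM */
};

/* Passing &example_so_map to iotable_init() from the machine's .map_io hook
 * now yields execute-never at both the section level (PMD_SECT_XN) and, if
 * the region ends up page-mapped, the PTE level (L_PTE_XN). */
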
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index cc926c98598..323ce1a62bb 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
@ IRQs disabled.
@
ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
add r11, r4, #1 @ increment it
str r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
sub r11, r4, #1 @ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
__INIT
ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
sub r11, r4, #1 @ decrement it
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index ea0349f6358..dd5e56f95f3 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
sub r11, r4, #1 @ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
@ not recognised by VFP
DBGSTR "not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
sub r11, r4, #1 @ decrement it
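
Note on the VFP hunks above: this assembly open-codes increments and decrements of TI_PREEMPT, and the preempt counter exists whenever CONFIG_PREEMPT_COUNT is set (full CONFIG_PREEMPT merely selects it), so the guards are switched to the broader symbol to keep the hand-rolled accounting in step on preempt-count-only kernels. A rough C equivalent of the bracketing the assembly performs; the struct and names below are illustrative only:

/* Illustrative stand-ins for thread_info and the VFP entry/exit paths. */
#include <stdio.h>

struct thread_info_sketch {
    int preempt_count;                  /* TI_PREEMPT in the assembly */
};

static struct thread_info_sketch ti;    /* stand-in for get_thread_info r10 */

static void vfp_entry_sketch(void)
{
    ti.preempt_count++;                 /* do_vfp: ldr/add/str on TI_PREEMPT     */
    puts("touch the VFP hardware with preemption held off");
    ti.preempt_count--;                 /* exit paths: ldr/sub/str on TI_PREEMPT */
}

int main(void)
{
    vfp_entry_sketch();
    printf("preempt count balanced: %d\n", ti.preempt_count);
    return 0;
}
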