aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGreg Bellows <greg.bellows@linaro.org>2015-03-02 13:34:47 -0600
committerGreg Bellows <greg.bellows@linaro.org>2015-03-02 13:34:47 -0600
commite9bc256b7fafccd92ef48209285af41c22de73da (patch)
tree96ee57c8fbf9405dd022c6cdfdef59b9b3e1784f
parentb191bfbfc00a8d0ead1eb290a4cfcf46de6ae602 (diff)
Add page permission support
Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
-rw-r--r--aarch64/common/armv8_vmsa.h25
-rw-r--r--aarch64/common/init_util.S80
-rw-r--r--aarch64/el1_common/el1.c16
-rw-r--r--aarch64/el1_common/el1_init.S28
-rw-r--r--aarch64/el3/el3.c16
-rw-r--r--aarch64/el3/el3_init.S34
6 files changed, 129 insertions, 70 deletions
diff --git a/aarch64/common/armv8_vmsa.h b/aarch64/common/armv8_vmsa.h
index a173bd8..534747c 100644
--- a/aarch64/common/armv8_vmsa.h
+++ b/aarch64/common/armv8_vmsa.h
@@ -1,6 +1,7 @@
#ifndef _ARMV8_VMSA_H
#define _ARMV8_VMSA_H
+/*
typedef union {
struct {
uint64_t type : 2;
@@ -33,5 +34,29 @@ typedef union {
};
uint64_t raw;
} armv8_4k_tbl_pte_t;
+*/
+#define ARMV8_PAGE_ATTRINDX_SHIFT 2
+#define ARMV8_PAGE_NS_SHIFT 5
+#define ARMV8_PAGE_AP_SHIFT 6
+#define ARMV8_PAGE_SH_SHIFT 8
+#define ARMV8_PAGE_AF_SHIFT 10
+#define ARMV8_PAGE_NG_SHIFT 11
+#define ARMV8_PAGE_CONTIG_SHIFT 52
+#define ARMV8_PAGE_PXN_SHIFT 53
+#define ARMV8_PAGE_XN_SHIFT 54
+
+#define ARMV8_PAGE_EL1_RW (0<<ARMV8_PAGE_AP_SHIFT)
+#define ARMV8_PAGE_EL0_RW (1<<ARMV8_PAGE_AP_SHIFT)
+#define ARMV8_PAGE_EL1_R (2<<ARMV8_PAGE_AP_SHIFT)
+#define ARMV8_PAGE_EL0_R (3<<ARMV8_PAGE_AP_SHIFT)
+#define ARMV8_PAGE_ACCESS (1<<ARMV8_PAGE_AF_SHIFT)
+
+#define PTE_ACCESS ARMV8_PAGE_ACCESS
+#define PTE_PRIV_RW ARMV8_PAGE_EL1_RW
+#define PTE_PRIV_RO ARMV8_PAGE_EL1_R
+#define PTE_USER_RW ARMV8_PAGE_EL0_RW
+#define PTE_USER_RO ARMV8_PAGE_EL0_R
+#define PTE_PAGE 0x3
+#define PTE_TABLE 0x3
#endif
diff --git a/aarch64/common/init_util.S b/aarch64/common/init_util.S
index 8bc4e50..3942ccd 100644
--- a/aarch64/common/init_util.S
+++ b/aarch64/common/init_util.S
@@ -1,3 +1,4 @@
+#include "armv8_vmsa.h"
.section .init
/* allocate_pa() - Allocates and returns next pool PA */
@@ -11,7 +12,7 @@ allocate_pa:
ret
.globl map_va_to_pa
-/* map_va_to_pa(VA, PA) */
+/* map_va_to_pa(VA, pgprop, PA) */
map_va_to_pa:
stp x30, x10, [sp, #-16]!
stp x11, x12, [sp, #-16]!
@@ -30,17 +31,18 @@ map_loop:
cbz x13, map_done /* If we reached level 0 then finalize */
ldr x12, [x10] /* Otherwise, fetch the descriptor */
and x11, x12, #0x1 /* Filter valid bit */
- cbnz x11, map_loop /* If the descriptor is valid then next */
+ cbz x11, map_alloc_page /* If the descriptor is invalid, allocate a table */
+ b map_loop /* Next level */
+map_alloc_page:
mov x11, x0 /* Save VA across call */
bl allocate_pa /* Allocate a PT phys page */
mov x12, x0 /* Got a PA */
mov x0, x11 /* Restore VA */
- orr x12, x12, #0x3 /* This is a table entry */
+ orr x12, x12, #PTE_TABLE /* This is a table entry */
str x12, [x10] /* Fill in PT entry */
b map_loop /* Next level */
map_done:
- mov x12, #0x403 /* Last level entry is a page */
- orr x12, x12, x1 /* Create PTE for target PA */
+ orr x12, x1, x2 /* Create PTE: PA + pgprop */
str x12, [x10] /* Fill in PT entry */
ldp x13, x14, [sp], #16
ldp x11, x12, [sp], #16
@@ -48,52 +50,78 @@ map_done:
ret
.globl map_va
-/* map_va(VA) */
+/* map_va(VA, pgprop) */
map_va:
str x30, [sp, #-8]!
- stp x1, x10, [sp, #-16]!
+ stp x2, x10, [sp, #-16]!
mov x10, x0
bl allocate_pa
- mov x1, x0
+ mov x2, x0
mov x0, x10
bl map_va_to_pa
- ldp x1, x10, [sp], #16
+ ldp x2, x10, [sp], #16
ldr x30, [sp], #8
ret
+.globl map_pa
+/* map_pa(PA, pgprop) */
+map_pa:
+ stp x30, x2, [sp, #-16]!
+ mov x2, x0
+ bl map_va_to_pa
+ ldp x30, x2, [sp], #16
+ ret
+
.globl map_va_to_pa_range
-/* map_va_to_pa_range(VA, PA, len) */
+/* map_va_to_pa_range(VA, pgprop, PA, len) */
map_va_to_pa_range:
- stp x30, x2, [sp, #-16]!
- stp x0, x1, [sp, #-16]!
- add x2, x2, #0xFFF
- and x2, x2, #~0xFFF
+ stp x30, x3, [sp, #-16]!
+ stp x0, x2, [sp, #-16]!
+ add x3, x3, #0xFFF
+ and x3, x3, #~0xFFF
map_va_to_pa_loop:
- cbz x2, map_va_to_pa_done
+ cbz x3, map_va_to_pa_done
bl map_va_to_pa
add x0, x0, #0x1000
- add x1, x1, #0x1000
- sub x2, x2, #0x1000
+ add x2, x2, #0x1000
+ sub x3, x3, #0x1000
b map_va_to_pa_loop
map_va_to_pa_done:
- ldp x0, x1, [sp], #16
- ldp x30, x2, [sp], #16
+ ldp x0, x2, [sp], #16
+ ldp x30, x3, [sp], #16
ret
-/* map_va_range(VA, len) */
+/* map_pa_range(PA, pgprop, len) */
+map_pa_range:
+ str x30, [sp, #-8]!
+ stp x0, x2, [sp, #-16]!
+ add x2, x2, #0xFFF
+ and x2, x2, #~0xFFF
+map_pa_loop:
+ cbz x2, map_pa_done
+ bl map_pa
+ add x0, x0, #0x1000
+ sub x2, x2, #0x1000
+ b map_pa_loop
+map_pa_done:
+ ldp x0, x2, [sp], #16
+ ldr x30, [sp], #8
+ ret
+
+/* map_va_range(VA, pgprop, len) */
map_va_range:
str x30, [sp, #-8]!
- stp x0, x1, [sp, #-16]!
- add x1, x1, #0xFFF
- and x1, x1, #~0xFFF
+ stp x0, x2, [sp, #-16]!
+ add x2, x2, #0xFFF
+ and x2, x2, #~0xFFF
map_va_loop:
- cbz x1, map_va_done
+ cbz x2, map_va_done
bl map_va
add x0, x0, #0x1000
- sub x1, x1, #0x1000
+ sub x2, x2, #0x1000
b map_va_loop
map_va_done:
- ldp x0, x1, [sp], #16
+ ldp x0, x2, [sp], #16
ldr x30, [sp], #8
ret
diff --git a/aarch64/el1_common/el1.c b/aarch64/el1_common/el1.c
index d40c697..8f696ce 100644
--- a/aarch64/el1_common/el1.c
+++ b/aarch64/el1_common/el1.c
@@ -20,26 +20,24 @@ void el1_map_va(uintptr_t addr)
{
uint64_t pa = EL1_S_PGTBL_BASE;
uint32_t i;
- armv8_4k_tbl_pte_t *pte;
- armv8_4k_pg_pte_t *l3pte;
+ uint64_t *pte;
for (i = 0; i < 4; i++) {
/* Each successive level uses the next lower 9 VA bits in a 48-bit
* address, hence the i*9.
*/
uint64_t off = ((addr >> (39-(i*9))) & 0x1FF) << 3;
- pte = (armv8_4k_tbl_pte_t *)(pa | off);
- if (!pte->type) {
+ pte = (uint64_t *)(pa | off);
+ if (!(*pte & 0x1)) {
pa = el1_allocate_pa();
- pte->pa = pa >> 12;
- pte->type = 3;
+ *pte = pa;
+ *pte |= PTE_PAGE;
} else {
- pa = pte->pa << 12;
+ pa = *pte & 0x000FFFFFF000;
}
}
- l3pte = (armv8_4k_pg_pte_t *)pte;
- l3pte->af = 1;
+ *pte |= PTE_ACCESS;
}
void el1_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
diff --git a/aarch64/el1_common/el1_init.S b/aarch64/el1_common/el1_init.S
index 252dacd..b68550f 100644
--- a/aarch64/el1_common/el1_init.S
+++ b/aarch64/el1_common/el1_init.S
@@ -1,4 +1,5 @@
#include "el1.h"
+#include "armv8_vmsa.h"
.section .init
.align 12
@@ -41,52 +42,55 @@ el1_init_mmu:
el1_map_init:
/* Direct map the init code */
ldr x0, =_EL1_INIT_BASE
- ldr x1, =_EL1_INIT_BASE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RO)
ldr x2, =_EL1_INIT_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
el1_map_flash:
/* Direct map the EL1 flash sections so we can copy from them once
* the MMU has been enabled.
*/
ldr x0, =_EL1_FLASH_TEXT
- ldr x1, =_EL1_FLASH_TEXT
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RO)
ldr x2, =_EL1_TEXT_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
ldr x0, =_EL1_FLASH_DATA
- ldr x1, =_EL1_FLASH_DATA
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RO)
ldr x2, =_EL1_DATA_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
el1_map_text:
/* Map the EL1 text address range */
ldr x0, =_EL1_TEXT_BASE
- ldr x1, =_EL1_TEXT_SIZE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
+ ldr x2, =_EL1_TEXT_SIZE
bl map_va_range
el1_map_data:
/* Map the EL1 data address range */
ldr x0, =_EL1_DATA_BASE
- ldr x1, =_EL1_DATA_SIZE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
+ ldr x2, =_EL1_DATA_SIZE
bl map_va_range
el1_map_stack:
/* Map the first page of the stack so we can get off the ground */
ldr x0, =EL1_STACK_BASE-0x1000
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
bl map_va
el1_map_pt:
/* Direct map the page table pool */
ldr x0, =EL1_PGTBL_BASE
- ldr x1, =EL1_PGTBL_BASE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
ldr x2, =EL1_PGTBL_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
el1_map_uart:
ldr x0, =UART0_BASE
- ldr x1, =UART0_BASE
- bl map_va_to_pa
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
+ bl map_pa
save_last_pa:
ldr x17, =RAM_BASE+0x2000
diff --git a/aarch64/el3/el3.c b/aarch64/el3/el3.c
index 90c888c..cf3b38b 100644
--- a/aarch64/el3/el3.c
+++ b/aarch64/el3/el3.c
@@ -34,26 +34,24 @@ void el3_map_va(uintptr_t addr)
{
uint64_t pa = EL3_PGTBL_BASE;
uint32_t i;
- armv8_4k_tbl_pte_t *pte;
- armv8_4k_pg_pte_t *l3pte;
+ uint64_t *pte;
for (i = 0; i < 4; i++) {
/* Each successive level uses the next lower 9 VA bits in a 48-bit
* address, hence the i*9.
*/
uint64_t off = ((addr >> (39-(i*9))) & 0x1FF) << 3;
- pte = (armv8_4k_tbl_pte_t *)(pa | off);
- if (!pte->type) {
+ pte = (uint64_t *)(pa | off);
+ if (!(*pte & 0x1)) {
pa = el3_allocate_pa();
- pte->pa = pa >> 12;
- pte->type = 3;
+ *pte = pa;
+ *pte |= PTE_PAGE;
} else {
- pa = pte->pa << 12;
+ pa = *pte & 0x000FFFFFF000;
}
}
- l3pte = (armv8_4k_pg_pte_t *)pte;
- l3pte->af = 1;
+ *pte |= PTE_ACCESS;
}
void el3_handle_exception(uint64_t ec, uint64_t iss, uint64_t addr)
diff --git a/aarch64/el3/el3_init.S b/aarch64/el3/el3_init.S
index 13d592a..bb17582 100644
--- a/aarch64/el3/el3_init.S
+++ b/aarch64/el3/el3_init.S
@@ -1,4 +1,5 @@
#include "platform.h"
+#include "armv8_vmsa.h"
#define PT_BASE EL3_PGTBL_BASE
@@ -47,52 +48,57 @@ el3_init_mmu:
el3_map_init:
/* Direct map the init code */
ldr x0, =_EL3_INIT_BASE
- ldr x1, =_EL3_INIT_BASE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RO)
ldr x2, =_EL3_INIT_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
el3_map_flash:
/* Direct map the EL3 flash sections so we can copy from them once
* the MMU has been enabled.
*/
ldr x0, =_EL3_FLASH_TEXT
- ldr x1, =_EL3_FLASH_TEXT
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RO)
ldr x2, =_EL3_TEXT_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
ldr x0, =_EL3_FLASH_DATA
- ldr x1, =_EL3_FLASH_DATA
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RO)
ldr x2, =_EL3_DATA_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
el3_map_text:
- /* Map the EL3 text address range */
+ /* Map the EL3 text address range. Initially this needs to be RW so we can
+ * copy in the text from flash.
+ */
ldr x0, =_EL3_TEXT_BASE
- ldr x1, =_EL3_TEXT_SIZE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
+ ldr x2, =_EL3_TEXT_SIZE
bl map_va_range
el3_map_data:
- /* Map the EL3 data address range */
+ /* Map the EL3 data address range. */
ldr x0, =_EL3_DATA_BASE
- ldr x1, =_EL3_DATA_SIZE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
+ ldr x2, =_EL3_DATA_SIZE
bl map_va_range
el3_map_stack:
/* Map the first page of the stack so we can get off the ground */
ldr x0, =EL3_STACK_BASE-0x1000
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
bl map_va
el3_map_pt:
/* Direct map the page table pool */
ldr x0, =EL3_PGTBL_BASE
- ldr x1, =EL3_PGTBL_BASE
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
ldr x2, =EL3_PGTBL_SIZE
- bl map_va_to_pa_range
+ bl map_pa_range
el3_map_uart:
ldr x0, =UART0_BASE
- ldr x1, =UART0_BASE
- bl map_va_to_pa
+ mov x1, #(PTE_PAGE|PTE_ACCESS|PTE_PRIV_RW)
+ bl map_pa
save_last_pa:
ldr x17, =RAM_BASE+0x2000