diff options
36 files changed, 706 insertions, 589 deletions
@@ -148,9 +148,7 @@ endif .PHONY: all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool fip .SUFFIXES: -INCLUDES += -Iinclude/bl1 \ - -Iinclude/bl2 \ - -Iinclude/bl31 \ +INCLUDES += -Iinclude/bl31 \ -Iinclude/bl31/services \ -Iinclude/bl32 \ -Iinclude/bl32/payloads \ @@ -184,10 +182,12 @@ $(eval $(call assert_boolean,RESET_TO_BL31)) $(eval $(call add_define,RESET_TO_BL31)) ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \ + -Werror -Wmissing-include-dirs \ -mgeneral-regs-only -D__ASSEMBLY__ \ ${DEFINES} ${INCLUDES} CFLAGS += -nostdinc -pedantic -ffreestanding -Wall \ - -Werror -mgeneral-regs-only -std=c99 -c -Os \ + -Werror -Wmissing-include-dirs \ + -mgeneral-regs-only -std=c99 -c -Os \ ${DEFINES} ${INCLUDES} CFLAGS += -ffunction-sections -fdata-sections diff --git a/bl1/aarch64/bl1_arch_setup.c b/bl1/aarch64/bl1_arch_setup.c index 5725bac..8ed45d9 100644 --- a/bl1/aarch64/bl1_arch_setup.c +++ b/bl1/aarch64/bl1_arch_setup.c @@ -46,11 +46,10 @@ void bl1_arch_setup(void) isb(); /* - * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route - * external abort and SError interrupts to EL3 + * Set the next EL to be AArch64, route external abort and SError + * interrupts to EL3 */ - tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT | - SCR_FIQ_BIT; + tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT; write_scr(tmp_reg); /* diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c index ad73de0..e0382b3 100644 --- a/bl31/aarch64/bl31_arch_setup.c +++ b/bl31/aarch64/bl31_arch_setup.c @@ -51,11 +51,11 @@ void bl31_arch_setup(void) write_sctlr_el3(tmp_reg); /* - * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route - * external abort and SError interrupts to EL3 + * Route external abort and SError interrupts to EL3 + * other SCR bits will be configured before exiting to a lower exception + * level */ - tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | 
SCR_EA_BIT | - SCR_FIQ_BIT; + tmp_reg = SCR_RES1_BITS | SCR_EA_BIT; write_scr(tmp_reg); /* @@ -68,39 +68,3 @@ void bl31_arch_setup(void) counter_freq = plat_get_syscnt_freq(); write_cntfrq_el0(counter_freq); } - -/******************************************************************************* - * Detect what the security state of the next EL is and setup the minimum - * required architectural state: program SCTRL to reflect the RES1 bits, and to - * have MMU and caches disabled - ******************************************************************************/ -void bl31_next_el_arch_setup(uint32_t security_state) -{ - unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(); - unsigned long next_sctlr; - unsigned long el_status; - unsigned long scr = read_scr(); - - /* Use the same endianness than the current BL */ - next_sctlr = (read_sctlr_el3() & SCTLR_EE_BIT); - - /* Find out which EL we are going to */ - el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK; - - if (security_state == NON_SECURE) { - /* Check if EL2 is supported */ - if (el_status && (scr & SCR_HCE_BIT)) { - /* Set SCTLR EL2 */ - next_sctlr |= SCTLR_EL2_RES1; - write_sctlr_el2(next_sctlr); - return; - } - } - - /* - * SCTLR_EL1 needs the same programming irrespective of the - * security state of EL1. - */ - next_sctlr |= SCTLR_EL1_RES1; - write_sctlr_el1(next_sctlr); -} diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index 3c9042b..e4dfea4 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -72,11 +72,13 @@ func bl31_entrypoint isb /* --------------------------------------------- - * Set the exception vector to something sane. 
+ * Set the exception vector and zero tpidr_el3 + * until the crash reporting is set up * --------------------------------------------- */ - adr x1, early_exceptions + adr x1, runtime_exceptions msr vbar_el3, x1 + msr tpidr_el3, xzr /* --------------------------------------------------------------------- * The initial state of the Architectural feature trap register @@ -131,6 +133,15 @@ func bl31_entrypoint bl zeromem16 /* --------------------------------------------- + * Initialise cpu_data and crash reporting + * --------------------------------------------- + */ +#if CRASH_REPORTING + bl init_crash_reporting +#endif + bl init_cpu_data_ptr + + /* --------------------------------------------- * Use SP_EL0 for the C runtime stack. * --------------------------------------------- */ diff --git a/bl31/aarch64/context.S b/bl31/aarch64/context.S index d0bca64..6667419 100644 --- a/bl31/aarch64/context.S +++ b/bl31/aarch64/context.S @@ -43,9 +43,8 @@ .global el3_sysregs_context_save func el3_sysregs_context_save - mrs x9, scr_el3 mrs x10, sctlr_el3 - stp x9, x10, [x0, #CTX_SCR_EL3] + str x10, [x0, #CTX_SCTLR_EL3] mrs x11, cptr_el3 stp x11, xzr, [x0, #CTX_CPTR_EL3] @@ -98,8 +97,7 @@ func el3_sysregs_context_restore /* Make sure all the above changes are observed */ isb - ldp x9, x10, [x0, #CTX_SCR_EL3] - msr scr_el3, x9 + ldr x10, [x0, #CTX_SCTLR_EL3] msr sctlr_el3, x10 isb diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S new file mode 100644 index 0000000..feb51d6 --- /dev/null +++ b/bl31/aarch64/cpu_data.S @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm_macros.S> +#include <cpu_data.h> + +.globl init_cpu_data_ptr +.globl _cpu_data_by_mpidr +.globl _cpu_data_by_index + +/* ----------------------------------------------------------------- + * void init_cpu_data_ptr(void) + * + * Initialise the TPIDR_EL3 register to refer to the cpu_data_t + * for the calling CPU. This must be called before cm_get_cpu_data() + * + * This can be called without a valid stack. 
+ * clobbers: x0, x1, x9, x10 + * ----------------------------------------------------------------- + */ +func init_cpu_data_ptr + mov x10, x30 + mrs x0, mpidr_el1 + bl _cpu_data_by_mpidr + msr tpidr_el3, x0 + ret x10 + + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr) + * + * Return the cpu_data structure for the CPU with given MPIDR + * + * This can be called without a valid stack. It assumes that + * platform_get_core_pos() does not clobber register x9. + * clobbers: x0, x1, x9 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_mpidr + mov x9, x30 + bl platform_get_core_pos + mov x30, x9 + b _cpu_data_by_index + + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) + * + * Return the cpu_data structure for the CPU with given linear index + * + * This can be called without a valid stack. + * clobbers: x0, x1 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_index + adr x1, percpu_data + add x0, x1, x0, LSL #CPU_DATA_LOG2SIZE + ret diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S index 21f74a4..1118e96 100644 --- a/bl31/aarch64/crash_reporting.S +++ b/bl31/aarch64/crash_reporting.S @@ -30,12 +30,13 @@ #include <arch.h> #include <asm_macros.S> #include <context.h> +#include <cpu_data.h> #include <plat_macros.S> #include <platform_def.h> - .globl get_crash_stack .globl dump_state_and_die .globl dump_intr_state_and_die + .globl init_crash_reporting #if CRASH_REPORTING /* ------------------------------------------------------ @@ -232,7 +233,7 @@ non_el3_sys_1_regs: .asciz "tpidr_el0", "tpidrro_el0", "dacr32_el2",\ /* Check if tpidr is initialized */ cbz x0, infinite_loop - ldr x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET] + ldr x0, [x0, #CPU_DATA_CRASH_STACK_OFFSET] /* store the x30 and sp to stack */ str x30, [x0, 
#-(REG_SIZE)]! mov x30, sp @@ -281,19 +282,31 @@ infinite_loop: #define PCPU_CRASH_STACK_SIZE 0x140 /* ----------------------------------------------------- - * void get_crash_stack (uint64_t mpidr) : This - * function is used to allocate a small stack for - * reporting unhandled exceptions + * Per-cpu crash stacks in normal memory. * ----------------------------------------------------- */ -func get_crash_stack - mov x10, x30 // lr - get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE - ret x10 +declare_stack pcpu_crash_stack, tzfw_normal_stacks, \ + PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT /* ----------------------------------------------------- - * Per-cpu crash stacks in normal memory. + * Provides each CPU with a small stack for reporting + * unhandled exceptions, and stores the stack address + * in cpu_data + * + * This can be called without a runtime stack + * clobbers: x0 - x4 * ----------------------------------------------------- */ -declare_stack pcpu_crash_stack, tzfw_normal_stacks, \ - PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT +func init_crash_reporting + mov x4, x30 + mov x2, #0 + adr x3, pcpu_crash_stack +init_crash_loop: + mov x0, x2 + bl _cpu_data_by_index + add x3, x3, #PCPU_CRASH_STACK_SIZE + str x3, [x0, #CPU_DATA_CRASH_STACK_OFFSET] + add x2, x2, #1 + cmp x2, #PLATFORM_CORE_COUNT + b.lo init_crash_loop + ret x4 diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 99fc357..5555c31 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -30,14 +30,15 @@ BL31_SOURCES += bl31/bl31_main.c \ bl31/context_mgmt.c \ + bl31/cpu_data_array.c \ bl31/runtime_svc.c \ bl31/interrupt_mgmt.c \ bl31/aarch64/bl31_arch_setup.c \ bl31/aarch64/bl31_entrypoint.S \ bl31/aarch64/context.S \ + bl31/aarch64/cpu_data.S \ bl31/aarch64/runtime_exceptions.S \ - bl31/aarch64/crash_reporting.S \ - common/aarch64/early_exceptions.S \ + bl31/aarch64/crash_reporting.S \ lib/aarch64/cpu_helpers.S \ lib/locks/bakery/bakery_lock.c \ lib/locks/exclusive/spinlock.S \ diff --git 
a/bl31/bl31_main.c b/bl31/bl31_main.c index 6765e60..8cc7e0d 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -71,7 +71,6 @@ void bl31_lib_init() ******************************************************************************/ void bl31_main(void) { - /* Perform remaining generic architectural setup from EL3 */ bl31_arch_setup(); @@ -89,17 +88,7 @@ void bl31_main(void) /* Clean caches before re-entering normal world */ dcsw_op_all(DCCSW); - /* - * Use the more complex exception vectors now that context - * management is setup. SP_EL3 should point to a 'cpu_context' - * structure which has an exception stack allocated. The PSCI - * service should have set the context. - */ - assert(cm_get_context(NON_SECURE)); - cm_set_next_eret_context(NON_SECURE); - cm_init_pcpu_ptr_cache(); - write_vbar_el3((uint64_t) runtime_exceptions); - isb(); + /* By default run the non-secure BL3-3 image next */ next_image_type = NON_SECURE; /* @@ -151,53 +140,18 @@ uint32_t bl31_get_next_image_type(void) void bl31_prepare_next_image_entry() { entry_point_info_t *next_image_info; - uint32_t scr, image_type; - cpu_context_t *ctx; - gp_regs_t *gp_regs; + uint32_t image_type; /* Determine which image to execute next */ image_type = bl31_get_next_image_type(); - /* - * Setup minimal architectural state of the next highest EL to - * allow execution in it immediately upon entering it. - */ - bl31_next_el_arch_setup(image_type); - /* Program EL3 registers to enable entry into the next EL */ next_image_info = bl31_plat_get_next_image_ep_info(image_type); assert(next_image_info); assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr)); - scr = read_scr(); - scr &= ~SCR_NS_BIT; - if (image_type == NON_SECURE) - scr |= SCR_NS_BIT; - - scr &= ~SCR_RW_BIT; - if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) == - (MODE_RW_64 << MODE_RW_SHIFT)) - scr |= SCR_RW_BIT; - - /* - * Tell the context mgmt. library to ensure that SP_EL3 points to - * the right context to exit from EL3 correctly. 
- */ - cm_set_el3_eret_context(image_type, - next_image_info->pc, - next_image_info->spsr, - scr); - - /* - * Save the args generated in BL2 for the image in the right context - * used on its entry - */ - ctx = cm_get_context(image_type); - gp_regs = get_gpregs_ctx(ctx); - memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t)); - - /* Finally set the next context */ - cm_set_next_eret_context(image_type); + cm_init_context(read_mpidr_el1(), next_image_info); + cm_prepare_el3_exit(image_type); } /******************************************************************************* diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c index 122a0d4..81c7c56 100644 --- a/bl31/context_mgmt.c +++ b/bl31/context_mgmt.c @@ -35,24 +35,13 @@ #include <bl31.h> #include <context.h> #include <context_mgmt.h> +#include <cpu_data.h> #include <interrupt_mgmt.h> #include <platform.h> #include <platform_def.h> #include <runtime_svc.h> +#include <string.h> -/******************************************************************************* - * Data structure which holds the pointers to non-secure and secure security - * state contexts for each cpu. It is aligned to the cache line boundary to - * allow efficient concurrent manipulation of these pointers on different cpus - ******************************************************************************/ -typedef struct { - void *ptr[2]; -} __aligned (CACHE_WRITEBACK_GRANULE) context_info_t; - -static context_info_t cm_context_info[PLATFORM_CORE_COUNT]; - -/* The per_cpu_ptr_cache_t space allocation */ -static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT]; /******************************************************************************* * Context management library initialisation routine. 
This library is used by @@ -82,47 +71,191 @@ void cm_init() ******************************************************************************/ void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state) { - uint32_t linear_id = platform_get_core_pos(mpidr); + assert(security_state <= NON_SECURE); + + return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]); +} +/******************************************************************************* + * This function sets the pointer to the current 'cpu_context' structure for the + * specified security state for the CPU identified by MPIDR + ******************************************************************************/ +void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state) +{ assert(security_state <= NON_SECURE); - return cm_context_info[linear_id].ptr[security_state]; + set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context); } /******************************************************************************* - * This function returns a pointer to the most recent 'cpu_context' structure - * for the calling CPU that was set as the context for the specified security - * state. NULL is returned if no such structure has been specified. + * This function is used to program the context that's used for exception + * return. 
 This initializes the SP_EL3 to a pointer to a 'cpu_context' set for + * the required security state ******************************************************************************/ -void *cm_get_context(uint32_t security_state) +static inline void cm_set_next_context(void *context) { - uint32_t linear_id = platform_get_core_pos(read_mpidr()); +#if DEBUG + uint64_t sp_mode; - assert(security_state <= NON_SECURE); + /* + * Check that this function is called with SP_EL0 as the stack + * pointer + */ + __asm__ volatile("mrs %0, SPSel\n" + : "=r" (sp_mode)); - return cm_context_info[linear_id].ptr[security_state] + assert(sp_mode == MODE_SP_EL0); +#endif + + __asm__ volatile("msr spsel, #1\n" + "mov sp, %0\n" + "msr spsel, #0\n" + : : "r" (context)); } /******************************************************************************* - * This function sets the pointer to the current 'cpu_context' structure for the - * specified security state for the CPU identified by MPIDR + * The following function initializes a cpu_context for the current CPU for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. The function returns a pointer to the initialized + * context and sets this as the next context to return to. + * + * The EE and ST attributes are used to configure the endianness and secure + * timer availability for the new execution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_e1_sysreg_context_restore(). 
 ******************************************************************************/ -void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state) +void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep) { - uint32_t linear_id = platform_get_core_pos(mpidr); + uint32_t security_state; + cpu_context_t *ctx; + uint32_t scr_el3; + el3_state_t *state; + gp_regs_t *gp_regs; + unsigned long sctlr_elx; - assert(security_state <= NON_SECURE); + security_state = GET_SECURITY_STATE(ep->h.attr); + ctx = cm_get_context_by_mpidr(mpidr, security_state); + assert(ctx); + + /* Clear any residual register values from the context */ + memset(ctx, 0, sizeof(*ctx)); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements and set trap bits from the IMF + * TODO: provide the base/global SCR bits using another mechanism? + */ + scr_el3 = read_scr(); + scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | + SCR_ST_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr_el3 |= SCR_NS_BIT; + + if (GET_RW(ep->spsr) == MODE_RW_64) + scr_el3 |= SCR_RW_BIT; + + if (EP_GET_ST(ep->h.attr)) + scr_el3 |= SCR_ST_BIT; + + scr_el3 |= get_scr_el3_from_routing_model(security_state); + + /* + * Set up SCTLR_ELx for the target exception level: + * EE bit is taken from the entrypoint attributes + * M, C and I bits must be zero (as required by PSCI specification) + * + * The target exception level is based on the spsr mode requested. + * If execution is requested to EL2 or hyp mode, HVC is enabled + * via SCR_EL3.HCE. + * + * Always compute the SCTLR_EL1 value and save in the cpu_context + * - the EL2 registers are set up by cm_prepare_ns_entry() as they + * are not part of the stored cpu_context + * + * TODO: In debug builds the spsr should be validated and checked + * against the CPU support, security state, endianness and pc + */ + sctlr_elx = EP_GET_EE(ep->h.attr) ? 
SCTLR_EE_BIT : 0; + sctlr_elx |= SCTLR_EL1_RES1; + write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); + + if ((GET_RW(ep->spsr) == MODE_RW_64 + && GET_EL(ep->spsr) == MODE_EL2) + || (GET_RW(ep->spsr) != MODE_RW_64 + && GET_M32(ep->spsr) == MODE32_hyp)) { + scr_el3 |= SCR_HCE_BIT; + } - cm_context_info[linear_id].ptr[security_state] = context; + /* Populate EL3 state so that we've the right context before doing ERET */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); + write_ctx_reg(state, CTX_ELR_EL3, ep->pc); + write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); + + /* + * Store the X0-X7 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + gp_regs = get_gpregs_ctx(ctx); + memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); } /******************************************************************************* - * This function sets the pointer to the current 'cpu_context' structure for the - * specified security state for the calling CPU + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized + * If execution is requested to non-secure EL1 or svc mode, and the CPU supports + * EL2 then EL2 is disabled by configuring all necessary EL2 registers. 
+ * For all entries, the EL1 registers are initialized from the cpu_context ******************************************************************************/ -void cm_set_context(void *context, uint32_t security_state) +void cm_prepare_el3_exit(uint32_t security_state) { - cm_set_context_by_mpidr(read_mpidr(), context, security_state); + uint32_t sctlr_elx, scr_el3, cptr_el2; + cpu_context_t *ctx = cm_get_context(security_state); + + assert(ctx); + + if (security_state == NON_SECURE) { + scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); + if (scr_el3 & SCR_HCE_BIT) { + /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */ + sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx), + CTX_SCTLR_EL1); + sctlr_elx &= ~SCTLR_EE_BIT; + sctlr_elx |= SCTLR_EL2_RES1; + write_sctlr_el2(sctlr_elx); + } else if (read_id_aa64pfr0_el1() & + (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { + /* EL2 present but unused, need to disable safely */ + + /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */ + write_hcr_el2((scr_el3 & SCR_RW_BIT) ? 
HCR_RW_BIT : 0); + + /* SCTLR_EL2 : can be ignored when bypassing */ + + /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */ + cptr_el2 = read_cptr_el2(); + cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT); + write_cptr_el2(cptr_el2); + + /* Enable EL1 access to timer */ + write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT); + + /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ + write_vpidr_el2(read_midr_el1()); + write_vmpidr_el2(read_mpidr_el1()); + } + } + + el1_sysregs_context_restore(get_sysregs_ctx(ctx)); + + cm_set_next_context(ctx); } /******************************************************************************* @@ -171,13 +304,10 @@ void cm_el1_sysregs_context_restore(uint32_t security_state) } /******************************************************************************* - * This function populates 'cpu_context' pertaining to the given security state - * with the entrypoint, SPSR and SCR values so that an ERET from this security - * state correctly restores corresponding values to drop the CPU to the next - * exception level + * This function populates ELR_EL3 member of 'cpu_context' pertaining to the + * given security state with the given entrypoint ******************************************************************************/ -void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, - uint32_t spsr, uint32_t scr) +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) { cpu_context_t *ctx; el3_state_t *state; @@ -185,23 +315,17 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, ctx = cm_get_context(security_state); assert(ctx); - /* Program the interrupt routing model for this security state */ - scr &= ~SCR_FIQ_BIT; - scr &= ~SCR_IRQ_BIT; - scr |= get_scr_el3_from_routing_model(security_state); - - /* Populate EL3 state so that we've the right context before doing ERET */ + /* Populate EL3 state so that ERET jumps to the correct entry */ state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_SPSR_EL3, 
spsr); write_ctx_reg(state, CTX_ELR_EL3, entrypoint); - write_ctx_reg(state, CTX_SCR_EL3, scr); } /******************************************************************************* - * This function populates ELR_EL3 member of 'cpu_context' pertaining to the - * given security state with the given entrypoint + * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context' + * pertaining to the given security state ******************************************************************************/ -void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) +void cm_set_elr_spsr_el3(uint32_t security_state, + uint64_t entrypoint, uint32_t spsr) { cpu_context_t *ctx; el3_state_t *state; @@ -212,6 +336,7 @@ void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) /* Populate EL3 state so that ERET jumps to the correct entry */ state = get_el3state_ctx(ctx); write_ctx_reg(state, CTX_ELR_EL3, entrypoint); + write_ctx_reg(state, CTX_SPSR_EL3, spsr); } /******************************************************************************* @@ -272,57 +397,9 @@ uint32_t cm_get_scr_el3(uint32_t security_state) void cm_set_next_eret_context(uint32_t security_state) { cpu_context_t *ctx; -#if DEBUG - uint64_t sp_mode; -#endif ctx = cm_get_context(security_state); assert(ctx); -#if DEBUG - /* - * Check that this function is called with SP_EL0 as the stack - * pointer - */ - __asm__ volatile("mrs %0, SPSel\n" - : "=r" (sp_mode)); - - assert(sp_mode == MODE_SP_EL0); -#endif - - __asm__ volatile("msr spsel, #1\n" - "mov sp, %0\n" - "msr spsel, #0\n" - : : "r" (ctx)); + cm_set_next_context(ctx); } - -/************************************************************************ - * The following function is used to populate the per cpu pointer cache. - * The pointer will be stored in the tpidr_el3 register. 
- *************************************************************************/ -void cm_init_pcpu_ptr_cache() -{ - unsigned long mpidr = read_mpidr(); - uint32_t linear_id = platform_get_core_pos(mpidr); - per_cpu_ptr_cache_t *pcpu_ptr_cache; - - pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id]; - assert(pcpu_ptr_cache); -#if CRASH_REPORTING - pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr); -#endif - - cm_set_pcpu_ptr_cache(pcpu_ptr_cache); -} - - -void cm_set_pcpu_ptr_cache(const void *pcpu_ptr) -{ - write_tpidr_el3((unsigned long)pcpu_ptr); -} - -void *cm_get_pcpu_ptr_cache(void) -{ - return (void *)read_tpidr_el3(); -} - diff --git a/bl31/cpu_data_array.c b/bl31/cpu_data_array.c new file mode 100644 index 0000000..b0042a1 --- /dev/null +++ b/bl31/cpu_data_array.c @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <cassert.h> +#include <cpu_data.h> +#include <platform_def.h> + +/* verify assembler offsets match data structures */ +CASSERT(CPU_DATA_CRASH_STACK_OFFSET == __builtin_offsetof + (cpu_data_t, crash_stack), + assert_cpu_data_crash_stack_offset_mismatch); + +CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t), + assert_cpu_data_log2size_mismatch); + +/* The cpu_data_t space allocation */ +cpu_data_t percpu_data[PLATFORM_CORE_COUNT]; diff --git a/docs/porting-guide.md b/docs/porting-guide.md index d970190..f854af9 100644 --- a/docs/porting-guide.md +++ b/docs/porting-guide.md @@ -150,6 +150,11 @@ file is found in [plat/fvp/include/platform_def.h]. Defines the maximum number of CPUs that can be implemented within a cluster on the platform. +* **#define : PLATFORM_NUM_AFFS** + + Defines the total number of nodes in the affinity hierarchy at all affinity + levels used by the platform. + * **#define : PRIMARY_CPU** Defines the `MPIDR` of the primary CPU on the platform. This value is used diff --git a/drivers/arm/pl011/pl011_console.c b/drivers/arm/pl011/pl011_console.c index a26c00e..81897ca 100644 --- a/drivers/arm/pl011/pl011_console.c +++ b/drivers/arm/pl011/pl011_console.c @@ -71,7 +71,12 @@ void console_init(unsigned long base_addr) int console_putc(int c) { - assert(uart_base); + /* If the console has not been initialized then return an error + * code. 
Asserting here would result in recursion and stack + * exhaustion + */ + if (!uart_base) + return -1; if (c == '\n') { WAIT_UNTIL_UART_FREE(uart_base); diff --git a/include/bl31/context.h b/include/bl31/context.h index 16cc744..c0230b8 100644 --- a/include/bl31/context.h +++ b/include/bl31/context.h @@ -185,14 +185,10 @@ #define CTX_FP_FPCR 0x208 #define CTX_FPREGS_END 0x210 -/****************************************************************************** - * Offsets for the per cpu cache implementation - ******************************************************************************/ -#define PTR_CACHE_CRASH_STACK_OFFSET 0x0 - #ifndef __ASSEMBLY__ #include <cassert.h> +#include <platform_def.h> /* for CACHE_WRITEBACK_GRANULE */ #include <stdint.h> /* @@ -331,17 +327,6 @@ void fpregs_context_save(fp_regs_t *regs); void fpregs_context_restore(fp_regs_t *regs); -/* Per-CPU pointer cache of recently used pointers and also the crash stack - * TODO: Add other commonly used variables to this (tf_issues#90) - */ -typedef struct per_cpu_ptr_cache { - uint64_t crash_stack; -} per_cpu_ptr_cache_t; - -CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\ - (per_cpu_ptr_cache_t, crash_stack), \ - assert_per_cpu_ptr_cache_crash_stack_offset_mismatch); - #undef CTX_SYSREG_ALL #undef CTX_FP_ALL #undef CTX_GPREG_ALL diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h index 86bbc58..6127b74 100644 --- a/include/bl31/context_mgmt.h +++ b/include/bl31/context_mgmt.h @@ -31,31 +31,63 @@ #ifndef __CM_H__ #define __CM_H__ +#include <cpu_data.h> #include <stdint.h> /******************************************************************************* + * Forward declarations + ******************************************************************************/ +struct entry_point_info; + +/******************************************************************************* * Function & variable prototypes 
******************************************************************************/ void cm_init(void); void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state); -void *cm_get_context(uint32_t security_state); +static inline void *cm_get_context(uint32_t security_state); void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state); -void cm_set_context(void *context, uint32_t security_state); +static inline void cm_set_context(void *context, uint32_t security_state); void cm_el3_sysregs_context_save(uint32_t security_state); +void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep); +void cm_prepare_el3_exit(uint32_t security_state); void cm_el3_sysregs_context_restore(uint32_t security_state); void cm_el1_sysregs_context_save(uint32_t security_state); void cm_el1_sysregs_context_restore(uint32_t security_state); -void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, - uint32_t spsr, uint32_t scr); void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); +void cm_set_elr_spsr_el3(uint32_t security_state, + uint64_t entrypoint, uint32_t spsr); void cm_write_scr_el3_bit(uint32_t security_state, uint32_t bit_pos, uint32_t value); void cm_set_next_eret_context(uint32_t security_state); -void cm_init_pcpu_ptr_cache(); -void cm_set_pcpu_ptr_cache(const void *pcpu_ptr); -void *cm_get_pcpu_ptr_cache(void); uint32_t cm_get_scr_el3(uint32_t security_state); + +/* Inline definitions */ + +/******************************************************************************* + * This function returns a pointer to the most recent 'cpu_context' structure + * for the calling CPU that was set as the context for the specified security + * state. NULL is returned if no such structure has been specified. 
+ ******************************************************************************/ +void *cm_get_context(uint32_t security_state) +{ + assert(security_state <= NON_SECURE); + + return get_cpu_data(cpu_context[security_state]); +} + +/******************************************************************************* + * This function sets the pointer to the current 'cpu_context' structure for the + * specified security state for the calling CPU + ******************************************************************************/ +void cm_set_context(void *context, uint32_t security_state) +{ + assert(security_state <= NON_SECURE); + + set_cpu_data(cpu_context[security_state], context); +} + + #endif /* __CM_H__ */ diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h new file mode 100644 index 0000000..5f45f14 --- /dev/null +++ b/include/bl31/cpu_data.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_DATA_H__ +#define __CPU_DATA_H__ + +/* Offsets for the cpu_data structure */ +#define CPU_DATA_CRASH_STACK_OFFSET 0x10 +#define CPU_DATA_LOG2SIZE 6 + +#ifndef __ASSEMBLY__ + +#include <arch_helpers.h> +#include <platform_def.h> +#include <stdint.h> + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ + +/******************************************************************************* + * Cache of frequently used per-cpu data: + * Pointers to non-secure and secure security state contexts + * Address of the crash stack + * It is aligned to the cache line boundary to allow efficient concurrent + * manipulation of these pointers on different cpus + * + * TODO: Add other commonly used variables to this (tf_issues#90) + * + * The data structure and the _cpu_data accessors should not be used directly + * by components that have per-cpu members. The member access macros should be + * used for this. + ******************************************************************************/ + +typedef struct cpu_data { + void *cpu_context[2]; + uint64_t crash_stack; +} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t; + +struct cpu_data *_cpu_data_by_index(uint32_t cpu_index); +struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr); + +/* Return the cpu_data structure for the current CPU. 
*/ +static inline struct cpu_data *_cpu_data(void) +{ + return (cpu_data_t *)read_tpidr_el3(); +} + + +/************************************************************************** + * APIs for initialising and accessing per-cpu data + *************************************************************************/ + +void init_cpu_data_ptr(void); + +#define get_cpu_data(_m) _cpu_data()->_m +#define set_cpu_data(_m, _v) _cpu_data()->_m = _v +#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m +#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v +#define get_cpu_data_by_mpidr(_id, _m) _cpu_data_by_mpidr(_id)->_m +#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v + + +#endif /* __ASSEMBLY__ */ +#endif /* __CPU_DATA_H__ */ diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h index d7d88d4..f3543d4 100644 --- a/include/bl31/runtime_svc.h +++ b/include/bl31/runtime_svc.h @@ -267,7 +267,7 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \ void runtime_svc_init(); extern uint64_t __RT_SVC_DESCS_START__; extern uint64_t __RT_SVC_DESCS_END__; -uint64_t get_crash_stack(uint64_t mpidr); -void runtime_exceptions(void); +void init_crash_reporting(void); + #endif /*__ASSEMBLY__*/ #endif /* __RUNTIME_SVC_H__ */ diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h index 887c4ce..77f406d 100644 --- a/include/bl31/services/psci.h +++ b/include/bl31/services/psci.h @@ -128,9 +128,6 @@ #define psci_validate_power_state(pstate) (pstate & PSTATE_VALID_MASK) -/* Number of affinity instances whose state this psci imp. 
can track */ -#define PSCI_NUM_AFFS 32ull - #ifndef __ASSEMBLY__ #include <stdint.h> diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 2f3bade..f5e2a9a 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -33,7 +33,6 @@ #define SECURE 0x0 #define NON_SECURE 0x1 -#define PARAM_EP_SECURITY_MASK 0x1 #define UP 1 #define DOWN 0 @@ -64,10 +63,23 @@ #define ENTRY_POINT_INFO_PC_OFFSET 0x08 #define ENTRY_POINT_INFO_ARGS_OFFSET 0x18 +#define PARAM_EP_SECURITY_MASK 0x1 #define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK) #define SET_SECURITY_STATE(x, security) \ ((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security)) +#define EP_EE_MASK 0x2 +#define EP_EE_LITTLE 0x0 +#define EP_EE_BIG 0x2 +#define EP_GET_EE(x) (x & EP_EE_MASK) +#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee)) + +#define EP_ST_MASK 0x4 +#define EP_ST_DISABLE 0x0 +#define EP_ST_ENABLE 0x4 +#define EP_GET_ST(x) (x & EP_ST_MASK) +#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee)) + #define PARAM_EP 0x01 #define PARAM_IMAGE_BINARY 0x02 #define PARAM_BL31 0x03 diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 0bfbd66..5dc488b 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -167,6 +167,7 @@ #define HCR_FMO_BIT (1 << 3) /* CNTHCTL_EL2 definitions */ +#define EVNTEN_BIT (1 << 2) #define EL1PCEN_BIT (1 << 1) #define EL1PCTEN_BIT (1 << 0) diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h index 1ca3350..673e897 100644 --- a/include/lib/aarch64/arch_helpers.h +++ b/include/lib/aarch64/arch_helpers.h @@ -262,6 +262,9 @@ DEFINE_SYSREG_RW_FUNCS(cnthctl_el2) DEFINE_SYSREG_RW_FUNCS(tpidr_el3) +DEFINE_SYSREG_RW_FUNCS(vpidr_el2) +DEFINE_SYSREG_RW_FUNCS(vmpidr_el2) + /* Implementation specific registers */ DEFINE_RENAME_SYSREG_RW_FUNCS(cpuectlr_el1, CPUECTLR_EL1) diff --git a/include/lib/bakery_lock.h b/include/lib/bakery_lock.h index 037fa7d..95634cf 100644 --- 
a/include/lib/bakery_lock.h +++ b/include/lib/bakery_lock.h @@ -44,8 +44,8 @@ typedef struct bakery_lock { #define NO_OWNER (-1) void bakery_lock_init(bakery_lock_t *bakery); -void bakery_lock_get(unsigned long mpidr, bakery_lock_t *bakery); -void bakery_lock_release(unsigned long mpidr, bakery_lock_t *bakery); -int bakery_lock_try(unsigned long mpidr, bakery_lock_t *bakery); +void bakery_lock_get(bakery_lock_t *bakery); +void bakery_lock_release(bakery_lock_t *bakery); +int bakery_lock_try(bakery_lock_t *bakery); #endif /* __BAKERY_LOCK_H__ */ diff --git a/lib/locks/bakery/bakery_lock.c b/lib/locks/bakery/bakery_lock.c index 4e148b5..877f526 100644 --- a/lib/locks/bakery/bakery_lock.c +++ b/lib/locks/bakery/bakery_lock.c @@ -124,12 +124,12 @@ static unsigned int bakery_get_ticket(bakery_lock_t *bakery, unsigned int me) * of others'. The CPU with the highest priority (lowest numerical value) * acquires the lock */ -void bakery_lock_get(unsigned long mpidr, bakery_lock_t *bakery) +void bakery_lock_get(bakery_lock_t *bakery) { unsigned int they, me; unsigned int my_ticket, my_prio, their_ticket; - me = platform_get_core_pos(mpidr); + me = platform_get_core_pos(read_mpidr_el1()); assert_bakery_entry_valid(me, bakery); @@ -176,9 +176,9 @@ void bakery_lock_get(unsigned long mpidr, bakery_lock_t *bakery) /* Release the lock and signal contenders */ -void bakery_lock_release(unsigned long mpidr, bakery_lock_t *bakery) +void bakery_lock_release(bakery_lock_t *bakery) { - unsigned int me = platform_get_core_pos(mpidr); + unsigned int me = platform_get_core_pos(read_mpidr_el1()); assert_bakery_entry_valid(me, bakery); assert(bakery->owner == me); diff --git a/plat/fvp/aarch64/fvp_common.c b/plat/fvp/aarch64/fvp_common.c index 41234cb..580c793 100644 --- a/plat/fvp/aarch64/fvp_common.c +++ b/plat/fvp/aarch64/fvp_common.c @@ -66,8 +66,6 @@ const mmap_region_t fvp_mmap[] = { MT_MEMORY | MT_RW | MT_SECURE }, { DEVICE0_BASE, DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW | 
MT_SECURE }, - { NSRAM_BASE, NSRAM_BASE, NSRAM_SIZE, - MT_MEMORY | MT_RW | MT_NS }, { DEVICE1_BASE, DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_SECURE }, /* 2nd GB as device for now...*/ diff --git a/plat/fvp/drivers/pwrc/fvp_pwrc.c b/plat/fvp/drivers/pwrc/fvp_pwrc.c index d1feece..c32c322 100644 --- a/plat/fvp/drivers/pwrc/fvp_pwrc.c +++ b/plat/fvp/drivers/pwrc/fvp_pwrc.c @@ -41,59 +41,54 @@ static bakery_lock_t pwrc_lock __attribute__ ((section("tzfw_coherent_mem"))); unsigned int fvp_pwrc_get_cpu_wkr(unsigned long mpidr) { - unsigned int rc = 0; - bakery_lock_get(mpidr, &pwrc_lock); - mmio_write_32(PWRC_BASE + PSYSR_OFF, (unsigned int) mpidr); - rc = PSYSR_WK(mmio_read_32(PWRC_BASE + PSYSR_OFF)); - bakery_lock_release(mpidr, &pwrc_lock); - return rc; + return PSYSR_WK(fvp_pwrc_read_psysr(mpidr)); } unsigned int fvp_pwrc_read_psysr(unsigned long mpidr) { - unsigned int rc = 0; - bakery_lock_get(mpidr, &pwrc_lock); + unsigned int rc; + bakery_lock_get(&pwrc_lock); mmio_write_32(PWRC_BASE + PSYSR_OFF, (unsigned int) mpidr); rc = mmio_read_32(PWRC_BASE + PSYSR_OFF); - bakery_lock_release(mpidr, &pwrc_lock); + bakery_lock_release(&pwrc_lock); return rc; } void fvp_pwrc_write_pponr(unsigned long mpidr) { - bakery_lock_get(mpidr, &pwrc_lock); + bakery_lock_get(&pwrc_lock); mmio_write_32(PWRC_BASE + PPONR_OFF, (unsigned int) mpidr); - bakery_lock_release(mpidr, &pwrc_lock); + bakery_lock_release(&pwrc_lock); } void fvp_pwrc_write_ppoffr(unsigned long mpidr) { - bakery_lock_get(mpidr, &pwrc_lock); + bakery_lock_get(&pwrc_lock); mmio_write_32(PWRC_BASE + PPOFFR_OFF, (unsigned int) mpidr); - bakery_lock_release(mpidr, &pwrc_lock); + bakery_lock_release(&pwrc_lock); } void fvp_pwrc_set_wen(unsigned long mpidr) { - bakery_lock_get(mpidr, &pwrc_lock); + bakery_lock_get(&pwrc_lock); mmio_write_32(PWRC_BASE + PWKUPR_OFF, (unsigned int) (PWKUPR_WEN | mpidr)); - bakery_lock_release(mpidr, &pwrc_lock); + bakery_lock_release(&pwrc_lock); } void fvp_pwrc_clr_wen(unsigned long 
mpidr) { - bakery_lock_get(mpidr, &pwrc_lock); + bakery_lock_get(&pwrc_lock); mmio_write_32(PWRC_BASE + PWKUPR_OFF, (unsigned int) mpidr); - bakery_lock_release(mpidr, &pwrc_lock); + bakery_lock_release(&pwrc_lock); } void fvp_pwrc_write_pcoffr(unsigned long mpidr) { - bakery_lock_get(mpidr, &pwrc_lock); + bakery_lock_get(&pwrc_lock); mmio_write_32(PWRC_BASE + PCOFFR_OFF, (unsigned int) mpidr); - bakery_lock_release(mpidr, &pwrc_lock); + bakery_lock_release(&pwrc_lock); } /* Nothing else to do here apart from initializing the lock */ diff --git a/plat/fvp/include/platform_def.h b/plat/fvp/include/platform_def.h index 46a9f24..fe4d73b 100644 --- a/plat/fvp/include/platform_def.h +++ b/plat/fvp/include/platform_def.h @@ -75,6 +75,8 @@ #define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \ PLATFORM_CLUSTER0_CORE_COUNT) #define PLATFORM_MAX_CPUS_PER_CLUSTER 4 +#define PLATFORM_NUM_AFFS (PLATFORM_CLUSTER_COUNT + \ + PLATFORM_CORE_COUNT) #define PRIMARY_CPU 0x0 #define MAX_IO_DEVICES 3 #define MAX_IO_HANDLES 4 @@ -145,7 +147,7 @@ * Platform specific page table and MMU setup constants ******************************************************************************/ #define ADDR_SPACE_SIZE (1ull << 32) -#define MAX_XLAT_TABLES 3 +#define MAX_XLAT_TABLES 2 #define MAX_MMAP_REGIONS 16 /******************************************************************************* diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c index 9242702..c497670 100644 --- a/services/spd/tspd/tspd_common.c +++ b/services/spd/tspd/tspd_common.c @@ -45,9 +45,8 @@ int32_t tspd_init_secure_context(uint64_t entrypoint, uint64_t mpidr, tsp_context_t *tsp_ctx) { - uint32_t scr, sctlr; - el1_sys_regs_t *el1_state; - uint32_t spsr; + entry_point_info_t ep; + uint32_t ep_attr; /* Passing a NULL context is a critical programming error */ assert(tsp_ctx); @@ -58,51 +57,24 @@ int32_t tspd_init_secure_context(uint64_t entrypoint, */ assert(rw == TSP_AARCH64); - /* - * This might 
look redundant if the context was statically - * allocated but this function cannot make that assumption. - */ - memset(tsp_ctx, 0, sizeof(*tsp_ctx)); - - /* - * Set the right security state, register width and enable access to - * the secure physical timer for the SP. - */ - scr = read_scr(); - scr &= ~SCR_NS_BIT; - scr &= ~SCR_RW_BIT; - scr |= SCR_ST_BIT; - if (rw == TSP_AARCH64) - scr |= SCR_RW_BIT; - - /* Get a pointer to the S-EL1 context memory */ - el1_state = get_sysregs_ctx(&tsp_ctx->cpu_ctx); - - /* - * Program the SCTLR_EL1 such that upon entry in S-EL1, caches and MMU are - * disabled and exception endianess is set to be the same as EL3 - */ - sctlr = read_sctlr_el3(); - sctlr &= SCTLR_EE_BIT; - sctlr |= SCTLR_EL1_RES1; - write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr); - - /* Set this context as ready to be initialised i.e OFF */ + /* Associate this context with the cpu specified */ + tsp_ctx->mpidr = mpidr; + tsp_ctx->state = 0; set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); - - /* - * This context has not been used yet. It will become valid - * when the TSP is interrupted and wants the TSPD to preserve - * the context. 
- */ clr_std_smc_active_flag(tsp_ctx->state); - /* Associate this context with the cpu specified */ - tsp_ctx->mpidr = mpidr; + cm_set_context_by_mpidr(mpidr, &tsp_ctx->cpu_ctx, SECURE); + + /* initialise an entrypoint to set up the CPU context */ + ep_attr = SECURE | EP_ST_ENABLE; + if (read_sctlr_el3() & SCTLR_EE_BIT) + ep_attr |= EP_EE_BIG; + SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr); + ep.pc = entrypoint; + ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + memset(&ep.args, 0, sizeof(ep.args)); - cm_set_context(&tsp_ctx->cpu_ctx, SECURE); - spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); - cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr); + cm_init_context(mpidr, &ep); return 0; } diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 35bc6e2..f1dbe68 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -122,13 +122,9 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id, CTX_ELR_EL3); } - SMC_SET_EL3(&tsp_ctx->cpu_ctx, - CTX_SPSR_EL3, - SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)); - SMC_SET_EL3(&tsp_ctx->cpu_ctx, - CTX_ELR_EL3, - (uint64_t) &tsp_vectors->fiq_entry); cm_el1_sysregs_context_restore(SECURE); + cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->fiq_entry, + SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)); cm_set_next_eret_context(SECURE); /* diff --git a/services/std_svc/psci/psci_afflvl_off.c b/services/std_svc/psci/psci_afflvl_off.c index 21a4d1a..30f2bd1 100644 --- a/services/std_svc/psci/psci_afflvl_off.c +++ b/services/std_svc/psci/psci_afflvl_off.c @@ -42,8 +42,8 @@ typedef int (*afflvl_off_handler_t)(unsigned long, aff_map_node_t *); ******************************************************************************/ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node) { - unsigned int index, plat_state; - int rc = PSCI_E_SUCCESS; + unsigned int plat_state; + int rc; unsigned long sctlr; 
assert(cpu_node->level == MPIDR_AFFLVL0); @@ -67,9 +67,6 @@ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node) return rc; } - index = cpu_node->data; - memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index])); - /* * Arch. management. Perform the necessary steps to flush all * cpu caches. @@ -96,6 +93,7 @@ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node) * Plat. management: Perform platform specific actions to turn this * cpu off e.g. exit cpu coherency, program the power controller etc. */ + rc = PSCI_E_SUCCESS; if (psci_plat_pm_ops->affinst_off) { /* Get the current physical state of this cpu */ diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c index 443e6af..d91db96 100644 --- a/services/std_svc/psci/psci_afflvl_on.c +++ b/services/std_svc/psci/psci_afflvl_on.c @@ -75,8 +75,10 @@ static int psci_afflvl0_on(unsigned long target_cpu, unsigned long ns_entrypoint, unsigned long context_id) { - unsigned int index, plat_state; + unsigned int plat_state; unsigned long psci_entrypoint; + uint32_t ns_scr_el3 = read_scr_el3(); + uint32_t ns_sctlr_el1 = read_sctlr_el1(); int rc; /* Sanity check to safeguard against data corruption */ @@ -103,8 +105,8 @@ static int psci_afflvl0_on(unsigned long target_cpu, * the non-secure world from the non-secure state from * where this call originated. 
*/ - index = cpu_node->data; - rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id); + rc = psci_save_ns_entry(target_cpu, ns_entrypoint, context_id, + ns_scr_el3, ns_sctlr_el1); if (rc != PSCI_E_SUCCESS) return rc; @@ -336,7 +338,7 @@ int psci_afflvl_on(unsigned long target_cpu, static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, aff_map_node_t *cpu_node) { - unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS; + unsigned int plat_state, state, rc; assert(cpu_node->level == MPIDR_AFFLVL0); @@ -373,17 +375,6 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, bl31_arch_setup(); /* - * Use the more complex exception vectors to enable SPD - * initialisation. SP_EL3 should point to a 'cpu_context' - * structure. The calling cpu should have set the - * context already - */ - assert(cm_get_context(NON_SECURE)); - cm_set_next_eret_context(NON_SECURE); - cm_init_pcpu_ptr_cache(); - write_vbar_el3((uint64_t) runtime_exceptions); - - /* * Call the cpu on finish handler registered by the Secure Payload * Dispatcher to let it do any bookeeping. If the handler encounters an * error, it's expected to assert within @@ -394,11 +385,9 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, /* * Generic management: Now we just need to retrieve the * information that we had stashed away during the cpu_on - * call to set this cpu on its way. First get the index - * for restoring the re-entry info + * call to set this cpu on its way. 
*/ - index = cpu_node->data; - psci_get_ns_entry_info(index); + cm_prepare_el3_exit(NON_SECURE); /* State management: mark this cpu as on */ psci_set_state(cpu_node, PSCI_STATE_ON); @@ -406,6 +395,7 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, /* Clean caches before re-entering normal world */ dcsw_op_louis(DCCSW); + rc = PSCI_E_SUCCESS; return rc; } diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c index a986e5c..ea90389 100644 --- a/services/std_svc/psci/psci_afflvl_suspend.c +++ b/services/std_svc/psci/psci_afflvl_suspend.c @@ -57,16 +57,11 @@ void psci_set_suspend_power_state(aff_map_node_t *node, unsigned int power_state assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK)); assert(node->level == MPIDR_AFFLVL0); - /* Save PSCI power state parameter for the core in suspend context */ - psci_suspend_context[node->data].power_state = power_state; - /* - * Flush the suspend data to PoC since it will be accessed while - * returning back from suspend with the caches turned off + * Save PSCI power state parameter for the core in suspend context. + * The node is in always-coherent RAM so it does not need to be flushed */ - flush_dcache_range( - (unsigned long)&psci_suspend_context[node->data], - sizeof(suspend_context_t)); + node->power_state = power_state; } /******************************************************************************* @@ -97,7 +92,7 @@ int psci_get_aff_map_node_suspend_afflvl(aff_map_node_t *node) assert(node->level == MPIDR_AFFLVL0); - power_state = psci_suspend_context[node->data].power_state; + power_state = node->power_state; return ((power_state == PSCI_INVALID_DATA) ? 
power_state : psci_get_pstate_afflvl(power_state)); } @@ -117,7 +112,7 @@ int psci_get_suspend_stateid(unsigned long mpidr) assert(node); assert(node->level == MPIDR_AFFLVL0); - power_state = psci_suspend_context[node->data].power_state; + power_state = node->power_state; return ((power_state == PSCI_INVALID_DATA) ? power_state : psci_get_pstate_id(power_state)); } @@ -132,10 +127,12 @@ static int psci_afflvl0_suspend(unsigned long mpidr, unsigned long context_id, unsigned int power_state) { - unsigned int index, plat_state; + unsigned int plat_state; unsigned long psci_entrypoint, sctlr; el3_state_t *saved_el3_state; - int rc = PSCI_E_SUCCESS; + uint32_t ns_scr_el3 = read_scr_el3(); + uint32_t ns_sctlr_el1 = read_sctlr_el1(); + int rc; /* Sanity check to safeguard against data corruption */ assert(cpu_node->level == MPIDR_AFFLVL0); @@ -163,8 +160,8 @@ static int psci_afflvl0_suspend(unsigned long mpidr, * Generic management: Store the re-entry information for the * non-secure world */ - index = cpu_node->data; - rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id); + rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id, + ns_scr_el3, ns_sctlr_el1); if (rc != PSCI_E_SUCCESS) return rc; @@ -174,7 +171,6 @@ static int psci_afflvl0_suspend(unsigned long mpidr, * L1 caches and exit intra-cluster coherency et al */ cm_el3_sysregs_context_save(NON_SECURE); - rc = PSCI_E_SUCCESS; /* * The EL3 state to PoC since it will be accessed after a @@ -214,6 +210,8 @@ static int psci_afflvl0_suspend(unsigned long mpidr, * platform defined mailbox with the psci entrypoint, * program the power controller etc. 
*/ + rc = PSCI_E_SUCCESS; + if (psci_plat_pm_ops->affinst_suspend) { plat_state = psci_get_phys_state(cpu_node); rc = psci_plat_pm_ops->affinst_suspend(mpidr, @@ -454,7 +452,7 @@ int psci_afflvl_suspend(unsigned long mpidr, static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, aff_map_node_t *cpu_node) { - unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS; + unsigned int plat_state, state, rc; int32_t suspend_level; assert(cpu_node->level == MPIDR_AFFLVL0); @@ -481,24 +479,11 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, } /* Get the index for restoring the re-entry information */ - index = cpu_node->data; - /* * Arch. management: Restore the stashed EL3 architectural * context from the 'cpu_context' structure for this cpu. */ cm_el3_sysregs_context_restore(NON_SECURE); - rc = PSCI_E_SUCCESS; - - /* - * Use the more complex exception vectors to enable SPD - * initialisation. SP_EL3 should point to a 'cpu_context' - * structure. The non-secure context should have been - * set on this cpu prior to suspension. - */ - cm_set_next_eret_context(NON_SECURE); - cm_init_pcpu_ptr_cache(); - write_vbar_el3((uint64_t) runtime_exceptions); /* * Call the cpu suspend finish handler registered by the Secure Payload @@ -519,7 +504,7 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, * information that we had stashed away during the suspend * call to set this cpu on its way. 
*/ - psci_get_ns_entry_info(index); + cm_prepare_el3_exit(NON_SECURE); /* State management: mark this cpu as on */ psci_set_state(cpu_node, PSCI_STATE_ON); @@ -527,6 +512,7 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, /* Clean caches before re-entering normal world */ dcsw_op_louis(DCCSW); + rc = PSCI_E_SUCCESS; return rc; } diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c index 3cbacd7..a8719a5 100644 --- a/services/std_svc/psci/psci_common.c +++ b/services/std_svc/psci/psci_common.c @@ -36,6 +36,7 @@ #include <context_mgmt.h> #include <debug.h> #include <platform.h> +#include <string.h> #include "psci_private.h" /* @@ -45,14 +46,6 @@ const spd_pm_ops_t *psci_spd_pm; /******************************************************************************* - * Arrays that contains information needs to resume a cpu's execution when woken - * out of suspend or off states. Each cpu is allocated a single entry in each - * array during startup. - ******************************************************************************/ -suspend_context_t psci_suspend_context[PSCI_NUM_AFFS]; -ns_entry_info_t psci_ns_entry_info[PSCI_NUM_AFFS]; - -/******************************************************************************* * Grand array that holds the platform's topology information for state * management of affinity instances. Each node (aff_map_node) in the array * corresponds to an affinity instance e.g. 
cluster, cpu within an mpidr @@ -173,7 +166,7 @@ void psci_acquire_afflvl_locks(unsigned long mpidr, for (level = start_afflvl; level <= end_afflvl; level++) { if (mpidr_nodes[level] == NULL) continue; - bakery_lock_get(mpidr, &mpidr_nodes[level]->lock); + bakery_lock_get(&mpidr_nodes[level]->lock); } } @@ -192,7 +185,7 @@ void psci_release_afflvl_locks(unsigned long mpidr, for (level = end_afflvl; level >= start_afflvl; level--) { if (mpidr_nodes[level] == NULL) continue; - bakery_lock_release(mpidr, &mpidr_nodes[level]->lock); + bakery_lock_release(&mpidr_nodes[level]->lock); } } @@ -212,97 +205,36 @@ int psci_validate_mpidr(unsigned long mpidr, int level) } /******************************************************************************* - * This function retrieves all the stashed information needed to correctly - * resume a cpu's execution in the non-secure state after it has been physically - * powered on i.e. turned ON or resumed from SUSPEND + * This function determines the full entrypoint information for the requested + * PSCI entrypoint on power on/resume and saves this in the non-secure CPU + * cpu_context, ready for when the core boots. 
******************************************************************************/ -void psci_get_ns_entry_info(unsigned int index) +int psci_save_ns_entry(uint64_t mpidr, + uint64_t entrypoint, uint64_t context_id, + uint32_t ns_scr_el3, uint32_t ns_sctlr_el1) { - unsigned long sctlr = 0, scr, el_status, id_aa64pfr0; - cpu_context_t *ns_entry_context; - gp_regs_t *ns_entry_gpregs; - - scr = read_scr(); - - /* Find out which EL we are going to */ - id_aa64pfr0 = read_id_aa64pfr0_el1(); - el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & - ID_AA64PFR0_ELX_MASK; - - /* Restore endianess */ - if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT) - sctlr |= SCTLR_EE_BIT; - else - sctlr &= ~SCTLR_EE_BIT; - - /* Turn off MMU and Caching */ - sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_M_BIT); - - /* Set the register width */ - if (psci_ns_entry_info[index].scr & SCR_RW_BIT) - scr |= SCR_RW_BIT; - else - scr &= ~SCR_RW_BIT; - - scr |= SCR_NS_BIT; - - if (el_status) - write_sctlr_el2(sctlr); - else - write_sctlr_el1(sctlr); - - /* Fulfill the cpu_on entry reqs. as per the psci spec */ - ns_entry_context = (cpu_context_t *) cm_get_context(NON_SECURE); - assert(ns_entry_context); - - /* - * Setup general purpose registers to return the context id and - * prevent leakage of secure information into the normal world. - */ - ns_entry_gpregs = get_gpregs_ctx(ns_entry_context); - write_ctx_reg(ns_entry_gpregs, - CTX_GPREG_X0, - psci_ns_entry_info[index].context_id); - - /* - * Tell the context management library to setup EL3 system registers to - * be able to ERET into the ns state, and SP_EL3 points to the right - * context to exit from EL3 correctly. - */ - cm_set_el3_eret_context(NON_SECURE, - psci_ns_entry_info[index].eret_info.entrypoint, - psci_ns_entry_info[index].eret_info.spsr, - scr); + uint32_t ep_attr, mode, sctlr, daif, ee; + entry_point_info_t ep; - cm_set_next_eret_context(NON_SECURE); -} + sctlr = ns_scr_el3 & SCR_HCE_BIT ? 
read_sctlr_el2() : ns_sctlr_el1; + ee = 0; -/******************************************************************************* - * This function retrieves and stashes all the information needed to correctly - * resume a cpu's execution in the non-secure state after it has been physically - * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to - * turning it on or before suspending it. - ******************************************************************************/ -int psci_set_ns_entry_info(unsigned int index, - unsigned long entrypoint, - unsigned long context_id) -{ - int rc = PSCI_E_SUCCESS; - unsigned int rw, mode, ee, spsr = 0; - unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr(); - unsigned long el_status; - unsigned long daif; + ep_attr = NON_SECURE | EP_ST_DISABLE; + if (sctlr & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr); - /* Figure out what mode do we enter the non-secure world in */ - el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & - ID_AA64PFR0_ELX_MASK; + ep.pc = entrypoint; + memset(&ep.args, 0, sizeof(ep.args)); + ep.args.arg0 = context_id; /* * Figure out whether the cpu enters the non-secure address space * in aarch32 or aarch64 */ - rw = scr & SCR_RW_BIT; - if (rw) { + if (ns_scr_el3 & SCR_RW_BIT) { /* * Check whether a Thumb entry point has been provided for an @@ -311,28 +243,12 @@ int psci_set_ns_entry_info(unsigned int index, if (entrypoint & 0x1) return PSCI_E_INVALID_PARAMS; - if (el_status && (scr & SCR_HCE_BIT)) { - mode = MODE_EL2; - ee = read_sctlr_el2() & SCTLR_EE_BIT; - } else { - mode = MODE_EL1; - ee = read_sctlr_el1() & SCTLR_EE_BIT; - } - - spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + mode = ns_scr_el3 & SCR_HCE_BIT ? 
MODE_EL2 : MODE_EL1; - psci_ns_entry_info[index].sctlr |= ee; - psci_ns_entry_info[index].scr |= SCR_RW_BIT; + ep.spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); } else { - - if (el_status && (scr & SCR_HCE_BIT)) { - mode = MODE32_hyp; - ee = read_sctlr_el2() & SCTLR_EE_BIT; - } else { - mode = MODE32_svc; - ee = read_sctlr_el1() & SCTLR_EE_BIT; - } + mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; /* * TODO: Choose async. exception bits if HYP mode is not @@ -340,18 +256,13 @@ int psci_set_ns_entry_info(unsigned int index, */ daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; - spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); - - /* Ensure that the CSPR.E and SCTLR.EE bits match */ - psci_ns_entry_info[index].sctlr |= ee; - psci_ns_entry_info[index].scr &= ~SCR_RW_BIT; + ep.spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); } - psci_ns_entry_info[index].eret_info.entrypoint = entrypoint; - psci_ns_entry_info[index].eret_info.spsr = spsr; - psci_ns_entry_info[index].context_id = context_id; + /* initialise an entrypoint to set up the CPU context */ + cm_init_context(mpidr, &ep); - return rc; + return PSCI_E_SUCCESS; } /******************************************************************************* diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S index bc8d900..5628d79 100644 --- a/services/std_svc/psci/psci_entry.S +++ b/services/std_svc/psci/psci_entry.S @@ -61,12 +61,16 @@ psci_aff_common_finish_entry: adr x22, psci_afflvl_power_on_finish /* --------------------------------------------- - * Exceptions should not occur at this point. 
- * Set VBAR in order to handle and report any - * that do occur + * Initialise the pcpu cache pointer for the CPU * --------------------------------------------- */ - adr x0, early_exceptions + bl init_cpu_data_ptr + + /* --------------------------------------------- + * Set the exception vectors + * --------------------------------------------- + */ + adr x0, runtime_exceptions msr vbar_el3, x0 isb diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c index c0866fb..2d7b018 100644 --- a/services/std_svc/psci/psci_main.c +++ b/services/std_svc/psci/psci_main.c @@ -221,50 +221,68 @@ uint64_t psci_smc_handler(uint32_t smc_fid, void *handle, uint64_t flags) { - uint64_t rc; - - switch (smc_fid) { - case PSCI_VERSION: - rc = psci_version(); - break; - - case PSCI_CPU_OFF: - rc = __psci_cpu_off(); - break; - - case PSCI_CPU_SUSPEND_AARCH64: - case PSCI_CPU_SUSPEND_AARCH32: - rc = __psci_cpu_suspend(x1, x2, x3); - break; - - case PSCI_CPU_ON_AARCH64: - case PSCI_CPU_ON_AARCH32: - rc = psci_cpu_on(x1, x2, x3); - break; - - case PSCI_AFFINITY_INFO_AARCH32: - case PSCI_AFFINITY_INFO_AARCH64: - rc = psci_affinity_info(x1, x2); - break; - - case PSCI_MIG_AARCH32: - case PSCI_MIG_AARCH64: - rc = psci_migrate(x1); - break; - - case PSCI_MIG_INFO_TYPE: - rc = psci_migrate_info_type(); - break; - - case PSCI_MIG_INFO_UP_CPU_AARCH32: - case PSCI_MIG_INFO_UP_CPU_AARCH64: - rc = psci_migrate_info_up_cpu(); - break; - - default: - rc = SMC_UNK; - WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid); + if (is_caller_secure(flags)) + SMC_RET1(handle, SMC_UNK); + + if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { + /* 32-bit PSCI function, clear top parameter bits */ + + x1 = (uint32_t)x1; + x2 = (uint32_t)x2; + x3 = (uint32_t)x3; + + switch (smc_fid) { + case PSCI_VERSION: + SMC_RET1(handle, psci_version()); + + case PSCI_CPU_OFF: + SMC_RET1(handle, __psci_cpu_off()); + + case PSCI_CPU_SUSPEND_AARCH32: + SMC_RET1(handle, __psci_cpu_suspend(x1, 
x2, x3)); + + case PSCI_CPU_ON_AARCH32: + SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); + + case PSCI_AFFINITY_INFO_AARCH32: + SMC_RET1(handle, psci_affinity_info(x1, x2)); + + case PSCI_MIG_AARCH32: + SMC_RET1(handle, psci_migrate(x1)); + + case PSCI_MIG_INFO_TYPE: + SMC_RET1(handle, psci_migrate_info_type()); + + case PSCI_MIG_INFO_UP_CPU_AARCH32: + SMC_RET1(handle, psci_migrate_info_up_cpu()); + + default: + break; + } + } else { + /* 64-bit PSCI function */ + + switch (smc_fid) { + case PSCI_CPU_SUSPEND_AARCH64: + SMC_RET1(handle, __psci_cpu_suspend(x1, x2, x3)); + + case PSCI_CPU_ON_AARCH64: + SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); + + case PSCI_AFFINITY_INFO_AARCH64: + SMC_RET1(handle, psci_affinity_info(x1, x2)); + + case PSCI_MIG_AARCH64: + SMC_RET1(handle, psci_migrate(x1)); + + case PSCI_MIG_INFO_UP_CPU_AARCH64: + SMC_RET1(handle, psci_migrate_info_up_cpu()); + + default: + break; + } } - SMC_RET1(handle, rc); + WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid); + SMC_RET1(handle, SMC_UNK); } diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h index 747a2d4..f534087 100644 --- a/services/std_svc/psci/psci_private.h +++ b/services/std_svc/psci/psci_private.h @@ -33,23 +33,15 @@ #include <arch.h> #include <bakery_lock.h> +#include <platform_def.h> /* for PLATFORM_NUM_AFFS */ #include <psci.h> -/******************************************************************************* - * The following two data structures hold the generic information to bringup - * a suspended/hotplugged out cpu - ******************************************************************************/ -typedef struct eret_params { - unsigned long entrypoint; - unsigned long spsr; -} eret_params_t; - -typedef struct ns_entry_info { - eret_params_t eret_info; - unsigned long context_id; - unsigned int scr; - unsigned int sctlr; -} ns_entry_info_t; +/* Number of affinity instances whose state this psci imp. 
can track */ +#ifdef PLATFORM_NUM_AFFS +#define PSCI_NUM_AFFS PLATFORM_NUM_AFFS +#else +#define PSCI_NUM_AFFS (2 * PLATFORM_CORE_COUNT) +#endif /******************************************************************************* * The following two data structures hold the topology tree which in turn tracks @@ -60,7 +52,7 @@ typedef struct aff_map_node { unsigned short ref_count; unsigned char state; unsigned char level; - unsigned int data; + unsigned int power_state; bakery_lock_t lock; } aff_map_node_t; @@ -69,14 +61,6 @@ typedef struct aff_limits_node { int max; } aff_limits_node_t; -/******************************************************************************* - * This data structure holds secure world context that needs to be preserved - * across cpu_suspend calls which enter the power down state. - ******************************************************************************/ -typedef struct suspend_context { - unsigned int power_state; -} __aligned(CACHE_WRITEBACK_GRANULE) suspend_context_t; - typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL]); typedef unsigned int (*afflvl_power_on_finisher_t)(unsigned long, aff_map_node_t *); @@ -84,8 +68,6 @@ typedef unsigned int (*afflvl_power_on_finisher_t)(unsigned long, /******************************************************************************* * Data prototypes ******************************************************************************/ -extern suspend_context_t psci_suspend_context[PSCI_NUM_AFFS]; -extern ns_entry_info_t psci_ns_entry_info[PSCI_NUM_AFFS]; extern const plat_pm_ops_t *psci_plat_pm_ops; extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]; @@ -102,7 +84,6 @@ int get_max_afflvl(void); unsigned short psci_get_state(aff_map_node_t *node); unsigned short psci_get_phys_state(aff_map_node_t *node); void psci_set_state(aff_map_node_t *node, unsigned short state); -void psci_get_ns_entry_info(unsigned int index); unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int); int 
psci_validate_mpidr(unsigned long, int); int get_power_on_target_afflvl(unsigned long mpidr); @@ -110,9 +91,9 @@ void psci_afflvl_power_on_finish(unsigned long, int, int, afflvl_power_on_finisher_t *); -int psci_set_ns_entry_info(unsigned int index, - unsigned long entrypoint, - unsigned long context_id); +int psci_save_ns_entry(uint64_t mpidr, + uint64_t entrypoint, uint64_t context_id, + uint32_t caller_scr_el3, uint32_t caller_sctlr_el1); int psci_check_afflvl_range(int start_afflvl, int end_afflvl); void psci_acquire_afflvl_locks(unsigned long mpidr, int start_afflvl, diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c index 015beab..68f19a0 100644 --- a/services/std_svc/psci/psci_setup.c +++ b/services/std_svc/psci/psci_setup.c @@ -58,12 +58,6 @@ static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT]; static aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; /******************************************************************************* - * 'psci_ns_einfo_idx' keeps track of the next free index in the - * 'psci_ns_entry_info' & 'psci_suspend_context' arrays. - ******************************************************************************/ -static unsigned int psci_ns_einfo_idx; - -/******************************************************************************* * Routines for retrieving the node corresponding to an affinity level instance * in the mpidr. The first one uses binary search to find the node corresponding * to the mpidr (key) at a particular affinity level. 
The second routine decides @@ -195,13 +189,8 @@ static void psci_init_aff_map_node(unsigned long mpidr, if (state & PSCI_AFF_PRESENT) psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF); - /* Ensure that we have not overflowed the psci_ns_einfo array */ - assert(psci_ns_einfo_idx < PSCI_NUM_AFFS); - - psci_aff_map[idx].data = psci_ns_einfo_idx; /* Invalidate the suspend context for the node */ - psci_suspend_context[psci_ns_einfo_idx].power_state = PSCI_INVALID_DATA; - psci_ns_einfo_idx++; + psci_aff_map[idx].power_state = PSCI_INVALID_DATA; /* * Associate a non-secure context with this affinity @@ -301,7 +290,6 @@ int32_t psci_setup(void) int afflvl, affmap_idx, max_afflvl; aff_map_node_t *node; - psci_ns_einfo_idx = 0; psci_plat_pm_ops = NULL; /* Find out the maximum affinity level that the platform implements */ |