summaryrefslogtreecommitdiff
path: root/tests/runtime_services
diff options
context:
space:
mode:
authordp-arm <dimitris.papastamos@arm.com>2016-10-18 14:14:59 +0100
committerSandrine Bailleux <sandrine.bailleux@arm.com>2016-12-06 11:35:29 +0000
commit4a28da27e380faf7f1cbf39b1e68ecadcdad5597 (patch)
tree843c35adf91943f92b5fac3e195f3043cfdc725d /tests/runtime_services
parent2494732c0477e929827e4507982e937caebccaad (diff)
Add PMF test cases for BL31 runtime instrumentation
Change-Id: I4e6fd9d07ae591e15271d3edff3bc2717321e02a Signed-off-by: dp-arm <dimitris.papastamos@arm.com>
Diffstat (limited to 'tests/runtime_services')
-rw-r--r--tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c668
1 files changed, 668 insertions, 0 deletions
diff --git a/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c b/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c
new file mode 100644
index 0000000..caa37a5
--- /dev/null
+++ b/tests/runtime_services/standard_service/pmf/api_tests/runtime_instr/test_pmf_rt_instr.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <pmf.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smc.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#define TOTAL_IDS 4
+#define ENTER_PSCI 0
+#define EXIT_PSCI 1
+#define ENTER_HW_LOW_PWR 2
+#define EXIT_HW_LOW_PWR 3
+
+static spinlock_t cpu_count_lock;
+static volatile int cpu_count;
+static volatile int participating_cpu_count;
+static u_register_t timestamps[PLATFORM_CORE_COUNT][TOTAL_IDS];
+static unsigned int target_pwrlvl;
+
+/* Helper function to wait for CPUs participating in the test. */
/*
 * Rendezvous barrier for all CPUs taking part in the current test.
 *
 * Each caller atomically increments the shared `cpu_count` under
 * `cpu_count_lock`, then spins until every participating CPU has
 * checked in (`cpu_count == participating_cpu_count`).  The test
 * driver is responsible for resetting `cpu_count`, initialising the
 * lock and setting `participating_cpu_count` before any CPU gets here.
 * `cpu_count` is volatile, so the spin loop re-reads it each iteration.
 */
static void wait_for_participating_cpus(void)
{
	assert(participating_cpu_count <= PLATFORM_CORE_COUNT);

	/* Announce this CPU's arrival under the lock. */
	spin_lock(&cpu_count_lock);
	cpu_count++;
	spin_unlock(&cpu_count_lock);

	assert(cpu_count <= PLATFORM_CORE_COUNT);

	/* Busy-wait until all participants have arrived. */
	while (cpu_count != participating_cpu_count)
		continue;
}
+
+/*
+ * Perform an SMC call into TF to collect timestamp specified by `tid`
+ * and pass it as a parameter back to the caller.
+ */
+static u_register_t pmf_get_ts(u_register_t tid, u_register_t *v)
+{
+ smc_args args = { 0 };
+ smc_ret_values ret;
+
+ args.arg0 = PMF_SMC_GET_TIMESTAMP;
+ args.arg1 = tid;
+ args.arg2 = read_mpidr_el1();
+ ret = tftf_smc(&args);
+ *v = ret.ret1;
+ return ret.ret0;
+}
+
+/*
+ * Helper functions to convert cycles to us/ns.
+ * XXX: Consider moving these to a common place.
+ */
/*
 * Convert a cycle count at counter frequency `freq` (Hz) into
 * microseconds, storing the result in `*us`.
 *
 * Returns 0 on success, -ERANGE when `freq` is zero or when the
 * intermediate multiplication `cycles * 1000000` would overflow.
 */
static int cycles_to_us(uint64_t cycles, uint64_t freq, uint64_t *us)
{
	/* Reject inputs whose scaled value would wrap a uint64_t. */
	if (freq == 0ULL || cycles > UINT64_MAX / 1000000ULL)
		return -ERANGE;

	*us = (cycles * 1000000ULL) / freq;
	return 0;
}
+
/*
 * Convert a cycle count at counter frequency `freq` (Hz) into
 * nanoseconds, storing the result in `*ns`.
 *
 * Returns 0 on success, -ERANGE when `freq` is zero or when the
 * intermediate multiplication `cycles * 1000000000` would overflow.
 */
static int cycles_to_ns(uint64_t cycles, uint64_t freq, uint64_t *ns)
{
	/* Reject inputs whose scaled value would wrap a uint64_t. */
	if (freq == 0ULL || cycles > UINT64_MAX / 1000000000ULL)
		return -ERANGE;

	*ns = (cycles * 1000000000ULL) / freq;
	return 0;
}
+
+static u_register_t *get_core_timestamps(void)
+{
+ unsigned int pos = platform_get_core_pos(read_mpidr_el1());
+
+ assert(pos < PLATFORM_CORE_COUNT);
+ return timestamps[pos];
+}
+
+/* Check timestamps for the suspend/cpu off tests. */
+static test_result_t check_pwr_down_ts(void)
+{
+ u_register_t *ts;
+
+ ts = get_core_timestamps();
+ if (!(ts[ENTER_PSCI] <= ts[ENTER_HW_LOW_PWR] &&
+ ts[ENTER_HW_LOW_PWR] <= ts[EXIT_HW_LOW_PWR] &&
+ ts[EXIT_HW_LOW_PWR] <= ts[EXIT_PSCI])) {
+ tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Capture all runtime instrumentation timestamps for the current
+ * CPU and store them into the timestamps array.
+ */
+static test_result_t get_ts(void)
+{
+ u_register_t tid, *ts;
+ int i;
+
+ ts = get_core_timestamps();
+ for (i = 0; i < TOTAL_IDS; i++) {
+ tid = PMF_ARM_TIF_IMPL_ID;
+ tid |= PMF_RT_INSTR_SVC_ID << PMF_SVC_ID_SHIFT | i;
+ if (pmf_get_ts(tid, &ts[i]) != 0) {
+ ERROR("Failed to capture PMF timestamp\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Dump suspend statistics for the suspend/cpu off test. */
+static int dump_suspend_stats(void)
+{
+ u_register_t *ts;
+ u_register_t target_mpid;
+ uint64_t freq, cycles[2], period[2];
+ int cpu_node, ret;
+ unsigned int pos;
+
+ freq = read_cntfrq_el0();
+
+ tftf_testcase_printf("CPU\tENTRY\tEXIT\n");
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ pos = platform_get_core_pos(target_mpid);
+ assert(pos < PLATFORM_CORE_COUNT);
+ ts = timestamps[pos];
+
+ cycles[0] = ts[ENTER_HW_LOW_PWR] - ts[ENTER_PSCI];
+ ret = cycles_to_us(cycles[0], freq, &period[0]);
+ if (ret < 0) {
+ ERROR("cycles_to_us: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ cycles[1] = ts[EXIT_PSCI] - ts[EXIT_HW_LOW_PWR];
+ ret = cycles_to_us(cycles[1], freq, &period[1]);
+ if (ret < 0) {
+ ERROR("cycles_to_us: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_testcase_printf("%d\t%02llu us\t%02llu us\n", pos,
+ (unsigned long long)period[0],
+ (unsigned long long)period[1]);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Dump statistics for a PSCI version call. */
+static int dump_psci_version_stats(void)
+{
+ u_register_t *ts;
+ u_register_t target_mpid;
+ uint64_t freq, cycles, period;
+ int cpu_node, ret;
+ unsigned int pos;
+
+ freq = read_cntfrq_el0();
+
+ tftf_testcase_printf("CPU\tTOTAL TIME\n");
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ pos = platform_get_core_pos(target_mpid);
+ assert(pos < PLATFORM_CORE_COUNT);
+ ts = timestamps[pos];
+
+ cycles = ts[EXIT_PSCI] - ts[ENTER_PSCI];
+ ret = cycles_to_ns(cycles, freq, &period);
+ if (ret < 0) {
+ ERROR("cycles_to_ns: out of range\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_testcase_printf("%d\t%02llu ns\n", pos,
+ (unsigned long long)period);
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Dummy entry point to turn core off for the CPU off test. */
/*
 * Minimal entry point for secondaries in the CPU-off test: join the
 * rendezvous with the other participants, then return immediately so
 * the framework powers the core back off.
 */
static test_result_t dummy_entrypoint(void)
{
	wait_for_participating_cpus();
	return TEST_RESULT_SUCCESS;
}
+
+/* Entrypoint to collect timestamps for CPU off test. */
+static test_result_t collect_ts_entrypoint(void)
+{
+ wait_for_participating_cpus();
+
+ if (get_ts() != TEST_RESULT_SUCCESS ||
+ check_pwr_down_ts() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Suspend current core to power level specified by `target_pwrlvl`. */
+static test_result_t suspend_current_core(void)
+{
+ unsigned int pstateid_idx[PLAT_MAX_PWR_LEVEL + 1];
+ unsigned int pwrlvl, susp_type, state_id, power_state;
+ int ret;
+
+ INIT_PWR_LEVEL_INDEX(pstateid_idx);
+
+ tftf_set_deepest_pstate_idx(target_pwrlvl, pstateid_idx);
+ tftf_get_pstate_vars(&pwrlvl, &susp_type, &state_id, pstateid_idx);
+
+ power_state = tftf_make_psci_pstate(pwrlvl, susp_type, state_id);
+
+ ret = tftf_program_timer_and_suspend(PLAT_SUSPEND_ENTRY_TIME,
+ power_state);
+ if (ret != 0) {
+ ERROR("Failed to program timer or suspend CPU: 0x%x\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ tftf_cancel_timer();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* This entrypoint is used for all suspend tests. */
+static test_result_t suspend_core_entrypoint(void)
+{
+ wait_for_participating_cpus();
+
+ if (suspend_current_core() != TEST_RESULT_SUCCESS ||
+ get_ts() != TEST_RESULT_SUCCESS ||
+ check_pwr_down_ts() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Entrypoint used for the PSCI version test. */
+static test_result_t psci_version_entrypoint(void)
+{
+ smc_args args = { SMC_PSCI_VERSION };
+ smc_ret_values ret;
+ u_register_t *ts;
+
+ wait_for_participating_cpus();
+
+ ret = tftf_smc(&args);
+ if (ret.ret0 != PSCI_VERSION) {
+ tftf_testcase_printf(
+ "Wrong PSCI version, expected 0x%08x got 0x%08x\n",
+ PSCI_VERSION, (uint32_t)ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (get_ts() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ /* Check timestamp order. */
+ ts = get_core_timestamps();
+ if (ts[ENTER_PSCI] > ts[EXIT_PSCI]) {
+ tftf_testcase_printf("PMF timestamps are not correctly ordered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Check if runtime instrumentation is enabled in TF. */
+static int is_rt_instr_supported(void)
+{
+ u_register_t tid, dummy;
+
+ tid = PMF_ARM_TIF_IMPL_ID;
+ tid |= PMF_RT_INSTR_SVC_ID << PMF_SVC_ID_SHIFT;
+ return !pmf_get_ts(tid, &dummy);
+}
+
+/*
+ * This test powers on all of the non-lead cores and brings
+ * them and the lead core to a common synchronization point.
+ * Then a suspend to the deepest power level supported on the
+ * platform is initiated on all cores in parallel.
+ */
/*
 * Power on every non-lead core, rendezvous all cores (including the
 * lead) and have each suspend itself to `target_pwrlvl` in parallel.
 * Once the secondaries have powered off again, dump the collected
 * runtime instrumentation statistics.
 */
static test_result_t test_rt_instr_susp_parallel(void)
{
	u_register_t lead_mpid, target_mpid;
	int cpu_node, ret;

	/* Skip if TF was not built with runtime instrumentation. */
	if (is_rt_instr_supported() == 0)
		return TEST_RESULT_SKIPPED;

	lead_mpid = read_mpidr_el1() & MPID_MASK;
	/* All cores take part; each increments cpu_count on entry. */
	participating_cpu_count = tftf_get_total_cpus_count();
	init_spinlock(&cpu_count_lock);
	cpu_count = 0;

	/* Power on all the non-lead cores. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
				(uintptr_t)suspend_core_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
				(unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
	}

	/* The lead core joins the rendezvous and suspends too. */
	if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
		return TEST_RESULT_FAIL;

	/* Wait for the non-lead cores to power down. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
		if (lead_mpid == target_mpid)
			continue;
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
				PSCI_STATE_OFF)
			continue;
		/* Balance the increment that core made on entry. */
		cpu_count--;
	}

	/* Balance the lead core's own increment. */
	cpu_count--;
	assert(cpu_count == 0);

	return dump_suspend_stats();
}
+
+/*
+ * This test powers on each non-lead core in sequence and
+ * suspends it to the deepest power level supported on the platform.
+ * It then waits for the core to power off. Each core in
+ * the non-lead cluster will bring the entire cluster down when it
+ * powers off because it will be the only core active in the cluster.
+ * The lead core will also be suspended in a similar fashion.
+ */
/*
 * Power on each non-lead core in turn, let it suspend itself to
 * `target_pwrlvl`, and wait for it to power off before moving to the
 * next one.  Finally the lead core suspends itself, after which the
 * collected statistics are dumped.
 */
static test_result_t test_rt_instr_susp_serial(void)
{
	u_register_t lead_mpid, target_mpid;
	int cpu_node, ret;

	/* Skip if TF was not built with runtime instrumentation. */
	if (is_rt_instr_supported() == 0)
		return TEST_RESULT_SKIPPED;

	lead_mpid = read_mpidr_el1() & MPID_MASK;
	/*
	 * Only one core is active at a time, so the rendezvous in
	 * wait_for_participating_cpus() completes immediately.
	 */
	participating_cpu_count = 1;
	init_spinlock(&cpu_count_lock);
	cpu_count = 0;

	/* Suspend one core at a time. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
				(uintptr_t)suspend_core_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
				(unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
		/* Wait for that core to finish its run and power off. */
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
				PSCI_STATE_OFF)
			continue;
		/* Balance the increment that core made on entry. */
		cpu_count--;
	}

	assert(cpu_count == 0);

	/* Suspend lead core as well. */
	if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
		return TEST_RESULT_FAIL;

	/* Balance the lead core's own increment. */
	cpu_count--;
	assert(cpu_count == 0);

	return dump_suspend_stats();
}
+
+/*
+ * @Test_Aim@ CPU suspend to deepest power level on all cores in parallel.
+ *
+ * This test should exercise contention in TF as all the cores initiate
+ * a CPU suspend call in parallel.
+ *
+ * Expected figures on Juno r1 with big cluster as lead cluster
+ * with TFTF in release mode are as follows:
+ *
+ * CPU ENTRY EXIT CFLUSH OVERHEAD
+ * 0 27 us 20 us 05 us
+ * 1 114 us 86 us 05 us
+ * 2 202 us 58 us 05 us
+ * 3 375 us 29 us 94 us
+ * 4 20 us 22 us 06 us
+ * 5 290 us 18 us 206 us
+ */
test_result_t test_rt_instr_susp_deep_parallel(void)
{
	/* Suspend all cores in parallel to the deepest power level. */
	target_pwrlvl = PLAT_MAX_PWR_LEVEL;
	return test_rt_instr_susp_parallel();
}
+
+/*
+ * @Test_Aim@ CPU suspend on all cores in parallel.
+ *
+ * Suspend all cores in parallel to target power level 0.
+ * Cache associated with power domain level 0 is flushed. For
+ * Juno, the L1 cache is flushed.
+ *
+ * Expected figures on Juno r1 with big cluster as lead cluster
+ * with TFTF in release mode are as follows:
+ *
+ * CPU ENTRY EXIT CFLUSH OVERHEAD
+ * 0 116 us 14 us 08 us
+ * 1 204 us 14 us 08 us
+ * 2 287 us 13 us 08 us
+ * 3 376 us 13 us 09 us
+ * 4 29 us 15 us 07 us
+ * 5 21 us 15 us 08 us
+ */
test_result_t test_rt_instr_cpu_susp_parallel(void)
{
	/* Suspend all cores in parallel to power level 0 only. */
	target_pwrlvl = 0;
	return test_rt_instr_susp_parallel();
}
+
+/*
+ * @Test_Aim@ CPU suspend to deepest power level on all cores in sequence.
+ *
+ * Each core in the non-lead cluster brings down the entire cluster when
+ * it goes down.
+ *
+ * Expected figures on Juno r1 with big cluster as lead cluster
+ * with TFTF in release mode are as follows:
+ *
+ * CPU ENTRY EXIT CFLUSH OVERHEAD
+ * 0 114 us 20 us 94 us
+ * 1 114 us 20 us 94 us
+ * 2 114 us 20 us 94 us
+ * 3 114 us 20 us 94 us
+ * 4 195 us 22 us 180 us
+ * 5 21 us 17 us 06 us
+ */
test_result_t test_rt_instr_susp_deep_serial(void)
{
	/* Suspend each core in sequence to the deepest power level. */
	target_pwrlvl = PLAT_MAX_PWR_LEVEL;
	return test_rt_instr_susp_serial();
}
+
+/*
+ * @Test_Aim@ CPU suspend on all cores in sequence.
+ *
+ * Caches associated with the level 0 power domain are flushed. For
+ * Juno, the L1 cache is flushed.
+ *
+ * Expected figures on Juno r1 with big cluster as lead cluster
+ * with TFTF in release mode are as follows:
+ *
+ * CPU ENTRY EXIT CFLUSH OVERHEAD
+ * 0 22 us 14 us 05 us
+ * 1 22 us 14 us 05 us
+ * 2 21 us 14 us 05 us
+ * 3 22 us 14 us 05 us
+ * 4 17 us 14 us 06 us
+ * 5 18 us 15 us 06 us
+ */
test_result_t test_rt_instr_cpu_susp_serial(void)
{
	/* Suspend each core in sequence to power level 0 only. */
	target_pwrlvl = 0;
	return test_rt_instr_susp_serial();
}
+
+/*
+ * @Test_Aim@ CPU off on all non-lead cores in sequence and
+ * suspend lead to deepest power level.
+ *
+ * The test sequence is as follows:
+ *
+ * 1) Turn on and turn off each non-lead core in sequence.
+ * 2) Program wake up timer and suspend the lead core to deepest power level.
+ * 3) Turn on each secondary core and get the timestamps from each core.
+ *
+ * All cores in the non-lead cluster bring the cluster
+ * down when they go down. Core 4 brings the big cluster down
+ * when it goes down.
+ *
+ * Expected figures on Juno r1 with big cluster as lead cluster
+ * with TFTF in release mode are as follows:
+ *
+ * CPU ENTRY EXIT CFLUSH OVERHEAD
+ * 0 110 us 28 us 93 us
+ * 1 110 us 28 us 93 us
+ * 2 110 us 28 us 93 us
+ * 3 111 us 28 us 93 us
+ * 4 195 us 22 us 181 us
+ * 5 20 us 23 us 06 us
+ */
test_result_t test_rt_instr_cpu_off_serial(void)
{
	u_register_t lead_mpid, target_mpid;
	int cpu_node, ret;

	/* Skip if TF was not built with runtime instrumentation. */
	if (is_rt_instr_supported() == 0)
		return TEST_RESULT_SKIPPED;

	target_pwrlvl = PLAT_MAX_PWR_LEVEL;
	lead_mpid = read_mpidr_el1() & MPID_MASK;
	/* Only one core is active at a time in each phase. */
	participating_cpu_count = 1;
	init_spinlock(&cpu_count_lock);
	cpu_count = 0;

	/*
	 * Phase 1: turn each non-lead core on and immediately off again.
	 * NOTE(review): unlike the suspend tests above, `target_mpid` is
	 * not masked with MPID_MASK here before comparing against the
	 * masked `lead_mpid` — confirm tftf_get_mpidr_from_node() already
	 * returns a masked MPIDR.
	 */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node);
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
				(uintptr_t)dummy_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
				(unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
		/* Wait for the core to power back off. */
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
				PSCI_STATE_OFF)
			continue;
		/* Balance the increment that core made on entry. */
		cpu_count--;
	}

	assert(cpu_count == 0);

	/* Phase 2: suspend lead core as well. */
	if (suspend_core_entrypoint() != TEST_RESULT_SUCCESS)
		return TEST_RESULT_FAIL;

	/* Balance the lead core's own increment. */
	cpu_count--;
	assert(cpu_count == 0);

	/* Phase 3: turn each core on again to collect its timestamps. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node);
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
				(uintptr_t)collect_ts_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
				(unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
				PSCI_STATE_OFF)
			continue;
		cpu_count--;
	}

	assert(cpu_count == 0);

	return dump_suspend_stats();
}
+
+/*
+ * @Test_Aim@ PSCI version call on all cores in parallel.
+ *
+ * Expected figures on Juno r1 with big cluster as lead cluster
+ * with TFTF in release mode are as follows:
+ *
+ * CPU TOTAL TIME
+ * 0 3020 ns
+ * 1 2940 ns
+ * 2 2980 ns
+ * 3 3060 ns
+ * 4 520 ns
+ * 5 720 ns
+ */
test_result_t test_rt_instr_psci_version_parallel(void)
{
	u_register_t lead_mpid, target_mpid;
	int cpu_node, ret;

	/* Skip if TF was not built with runtime instrumentation. */
	if (is_rt_instr_supported() == 0)
		return TEST_RESULT_SKIPPED;

	lead_mpid = read_mpidr_el1() & MPID_MASK;
	/* All cores take part; each increments cpu_count on entry. */
	participating_cpu_count = tftf_get_total_cpus_count();
	init_spinlock(&cpu_count_lock);
	cpu_count = 0;

	/*
	 * Power on all the non-lead cores.
	 * NOTE(review): unlike the suspend tests, `target_mpid` is not
	 * masked with MPID_MASK before comparing against the masked
	 * `lead_mpid` — confirm tftf_get_mpidr_from_node() already
	 * returns a masked MPIDR.
	 */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node);
		if (lead_mpid == target_mpid)
			continue;
		ret = tftf_cpu_on(target_mpid,
				(uintptr_t)psci_version_entrypoint, 0);
		if (ret != PSCI_E_SUCCESS) {
			ERROR("CPU ON failed for 0x%llx\n",
				(unsigned long long)target_mpid);
			return TEST_RESULT_FAIL;
		}
	}

	/* The lead core runs the same entry point. */
	if (psci_version_entrypoint() != TEST_RESULT_SUCCESS)
		return TEST_RESULT_FAIL;

	/* Wait for the non-lead cores to power down. */
	for_each_cpu(cpu_node) {
		target_mpid = tftf_get_mpidr_from_node(cpu_node);
		if (lead_mpid == target_mpid)
			continue;
		while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
				PSCI_STATE_OFF)
			continue;
		/* Balance the increment that core made on entry. */
		cpu_count--;
	}

	/* Balance the lead core's own increment. */
	cpu_count--;
	assert(cpu_count == 0);

	return dump_psci_version_stats();
}