path: root/lib/power_management/hotplug
author     Vikram Kanigiri <vikram.kanigiri@arm.com>        2014-12-16 01:16:47 +0000
committer  Sandrine Bailleux <sandrine.bailleux@arm.com>    2015-01-13 11:45:43 +0000
commit     d3665b2bc52ac8c433773b7472fb9bdc5af7ad6f (patch)
tree       baf70194317aebd0b973c11e3151059cf89243da /lib/power_management/hotplug
parent     3f0d55bd300dfc83073a715111bdac00e5d4c149 (diff)
Provide finer details of CPU states
The framework currently knows only two states for a CPU: ON or OFF. As soon as a PSCI CPU_OFF request is sent, the CPU is considered OFF. Because the CPU is reported as OFF, other CPUs can issue a CPU_ON call for it before the EL3 PSCI software has handled the OFF request, which leads to an error code being returned. To track the correct state of a CPU, this patch differentiates each CPU affinity state into ON, ON_PENDING and OFF. It also fixes the race condition in CPU_ON where a CPU_ON call is sent to a CPU before the EL3 PSCI software has handled the CPU_OFF call for it.

Change-Id: I0dd2b96043d47e8451f950499a8d6cfc94aac38d
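For context, here is a minimal sketch of the three-state bookkeeping the commit message describes. The enum values and the .state/.lock fields mirror the identifiers visible in the diff below (TFTF_AFFINITY_STATE_ON, TFTF_AFFINITY_STATE_ON_PENDING, TFTF_AFFINITY_STATE_OFF, cpus_status_map[]); the exact type definitions, the stand-in spinlock_t and the header layout are assumptions for illustration, not the framework's actual declarations.

    /* Stand-in for the framework's spinlock type (assumed, normally provided by its own header). */
    typedef struct {
    	volatile unsigned int lock;
    } spinlock_t;

    /* Per-CPU affinity states, mirroring the TFTF_AFFINITY_STATE_* constants used in the diff. */
    typedef enum {
    	TFTF_AFFINITY_STATE_OFF = 0,	/* CPU_OFF has been handled by EL3 PSCI */
    	TFTF_AFFINITY_STATE_ON_PENDING,	/* CPU_ON issued, CPU has not yet entered the framework */
    	TFTF_AFFINITY_STATE_ON		/* CPU is online and running framework code */
    } tftf_affinity_info_t;

    /* One entry per core in cpus_status_map[]; layout assumed from how the diff accesses it. */
    typedef struct {
    	tftf_affinity_info_t state;	/* Current affinity state of this CPU */
    	spinlock_t lock;		/* Serialises updates to .state */
    } tftf_cpu_state_t;

Keeping a lock in each entry, rather than one lock over a global bitmap, lets tftf_cpu_on() hold the target CPU's lock across the state check, the PSCI call and the transition to ON_PENDING, which is what closes the race described above.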
Diffstat (limited to 'lib/power_management/hotplug')
-rw-r--r--   lib/power_management/hotplug/hotplug.c   73
1 file changed, 51 insertions, 22 deletions
diff --git a/lib/power_management/hotplug/hotplug.c b/lib/power_management/hotplug/hotplug.c
index 1a97bd5..430764b 100644
--- a/lib/power_management/hotplug/hotplug.c
+++ b/lib/power_management/hotplug/hotplug.c
@@ -33,6 +33,7 @@
#include <assert.h>
#include <cdefs.h> /* For __dead2 */
#include <debug.h>
+#include <events.h>
#include <gic_v2.h>
#include <irq.h>
#include <platform.h>
@@ -45,14 +46,15 @@
#include <tftf.h>
/*
- * Bitmap of online CPUs.
- * - Set bit i to mark CPU i as online.
- * - Clear bit i to mark CPU i as offline.
+ * Affinity info map of CPUs as seen by TFTF
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON to mark CPU i
+ * as ON.
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_ON_PENDING to mark
+ * CPU i as ON_PENDING.
+ * - Set cpus_status_map[i].state to TFTF_AFFINITY_STATE_OFF to mark CPU i
+ * as OFF.
*/
-static unsigned int cpus_online_map;
-
-/* Lock to prevent concurrent accesses to the bitmap of online CPUs */
-static spinlock_t cpus_online_map_lock;
+static tftf_cpu_state_t cpus_status_map[PLATFORM_CORE_COUNT];
/*
* Reference count keeping track of the number of CPUs participating in
@@ -66,6 +68,7 @@ static spinlock_t ref_cnt_lock;
/* Per-cpu test entrypoint */
uintptr_t test_entrypoint[PLATFORM_CORE_COUNT];
+static event_t cpu_is_online[PLATFORM_CORE_COUNT];
unsigned int tftf_inc_ref_cnt(void)
{
@@ -105,10 +108,13 @@ void tftf_set_cpu_online(void)
unsigned int mpid = read_mpidr_el1();
unsigned int core_pos = platform_get_core_pos(mpid);
- spin_lock(&cpus_online_map_lock);
- assert(!tftf_is_cpu_online(core_pos));
- cpus_online_map |= 1 << core_pos;
- spin_unlock(&cpus_online_map_lock);
+ spin_lock(&cpus_status_map[core_pos].lock);
+ assert(!tftf_is_cpu_online(mpid));
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON;
+
+ /* Send an event to confirm the CPU is now seen as online */
+ tftf_send_event(&cpu_is_online[core_pos]);
+ spin_unlock(&cpus_status_map[core_pos].lock);
}
void tftf_set_cpu_offline(void)
@@ -116,15 +122,16 @@ void tftf_set_cpu_offline(void)
unsigned int mpid = read_mpidr_el1();
unsigned int core_pos = platform_get_core_pos(mpid);
- spin_lock(&cpus_online_map_lock);
- assert(tftf_is_cpu_online(core_pos));
- cpus_online_map &= ~(1 << core_pos);
- spin_unlock(&cpus_online_map_lock);
+ spin_lock(&cpus_status_map[core_pos].lock);
+ assert(tftf_is_cpu_online(mpid));
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_OFF;
+ spin_unlock(&cpus_status_map[core_pos].lock);
}
-unsigned int tftf_is_cpu_online(unsigned int core_pos)
+unsigned int tftf_is_cpu_online(unsigned int mpid)
{
- return (ACCESS(cpus_online_map) & (1 << core_pos)) != 0;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ return ACCESS(cpus_status_map[core_pos].state) == TFTF_AFFINITY_STATE_ON;
}
int32_t tftf_cpu_on(uint64_t target_cpu,
@@ -132,25 +139,47 @@ int32_t tftf_cpu_on(uint64_t target_cpu,
uint64_t context_id)
{
int32_t ret;
+ tftf_affinity_info_t cpu_state;
unsigned int core_pos = platform_get_core_pos(target_cpu);
+ spin_lock(&cpus_status_map[core_pos].lock);
+ cpu_state = cpus_status_map[core_pos].state;
+ if (cpu_state == TFTF_AFFINITY_STATE_ON) {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ return PSCI_E_ALREADY_ON;
+ }
+
+ if (cpu_state == TFTF_AFFINITY_STATE_ON_PENDING) {
+ spin_unlock(&cpus_status_map[core_pos].lock);
+ /* CPU was successfully started earlier, wait for it to come online */
+ tftf_wait_for_event(&cpu_is_online[core_pos]);
+ return PSCI_E_SUCCESS;
+ }
+
/*
* Populate the test entry point for this core.
* This is the address where the core will jump to once the framework
* has finished initialising it.
*/
test_entrypoint[core_pos] = entrypoint;
- ret = tftf_psci_cpu_on(target_cpu,
+
+ assert(cpu_state == TFTF_AFFINITY_STATE_OFF);
+ do {
+ ret = tftf_psci_cpu_on(target_cpu,
(uint64_t) tftf_hotplug_entry,
context_id);
+ /* Check if multiple CPU_ON calls are done for same CPU */
+ assert(ret != PSCI_E_ON_PENDING);
+ } while (ret == PSCI_E_ALREADY_ON);
+
if (ret == PSCI_E_SUCCESS) {
+ cpus_status_map[core_pos].state = TFTF_AFFINITY_STATE_ON_PENDING;
+ spin_unlock(&cpus_status_map[core_pos].lock);
/* CPU was successfully started, wait for it to come online */
- /* TODO: Implement event-based loop rather than busy loop */
- /* TODO: Implement time out */
- while (!tftf_is_cpu_online(core_pos))
- continue;
+ tftf_wait_for_event(&cpu_is_online[core_pos]);
} else {
+ spin_unlock(&cpus_status_map[core_pos].lock);
mp_printf("Failed to boot CPU 0x%lx (%d)\n", target_cpu, ret);
}
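
To illustrate the new behaviour from a caller's point of view, a hedged usage sketch follows. Only tftf_cpu_on(), mp_printf() and the PSCI_E_* codes appear in this patch; the includes, the MPIDR value, the entrypoint cast and the secondary entrypoint itself are hypothetical placeholders.

    #include <stdint.h>
    #include <power_management.h>	/* assumed header declaring tftf_cpu_on() */

    extern void secondary_entrypoint(void);	/* hypothetical test entrypoint */

    static void bring_up_secondary(void)
    {
    	uint64_t target_mpid = 0x1;	/* assumed MPIDR of a secondary core */
    	int32_t ret;

    	ret = tftf_cpu_on(target_mpid,
    			(uintptr_t) secondary_entrypoint,	/* cast type assumed */
    			0 /* context_id */);

    	/*
    	 * With this patch, PSCI_E_SUCCESS is only returned once the target
    	 * CPU has signalled cpu_is_online[], and PSCI_E_ALREADY_ON is
    	 * reported without re-issuing the PSCI call.
    	 */
    	if (ret != PSCI_E_SUCCESS && ret != PSCI_E_ALREADY_ON)
    		mp_printf("Failed to boot CPU 0x%lx (%d)\n", target_mpid, ret);
    }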