/*
 * $Copyright:
 * ----------------------------------------------------------------
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 *  (C) COPYRIGHT 2008-2011 ARM Limited
 *       ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 * ----------------------------------------------------------------
 * File:     secure_resets.c
 * ----------------------------------------------------------------
 * $
 */

#include "secure_world.h"
#include "events.h"
#include "bakery.h"

/* Bakery lock to serialize access to the tube. */
bakery_t lock_tube0 __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

/*
 * Compile-time switch to decide whether the outbound
 * L2 is always kept on for inbound cache warming, or
 * is flushed and reset after the BL context has been
 * picked up.
 */
static unsigned flush_ob_l2 = FLUSH_OB_L2;

/*
 * Variable in the secure world to indicate the
 * reset type, i.e. cold (0) or warm (!0).
 */
unsigned ve_reset_type[NUM_CPUS];

/*
 * Allocate secure events in our device page
 */
unsigned event[MAX_CORES][MAX_SEC_EVENTS]
__attribute__ ((section("BL_SEC_DV_PAGE")));

/*
 * Normal spinlock to guard inbound cluster registers
 * in the KFSCB. It is only ever used while the MMU is
 * on. Each cluster uses it sequentially in any case.
 */
static unsigned lock_ib_kfscb;

/*
 * Bakery lock to guard outbound cluster registers in the
 * KFSCB. It is only ever used while the MMU is off, where
 * exclusive accesses (and hence normal spinlocks) cannot
 * be relied upon, so a Lamport bakery lock is used instead.
 * Each cluster uses it sequentially in any case.
 */
static bakery_t lock_ob_kfscb __attribute__ ((section("BL_SEC_DV_PAGE"))) = { 0 };

/*
 * Small per-cpu stacks for use after we have turned our
 * caches off.
 */
static unsigned long long powerdown_stacks[NUM_CPUS][32]
__attribute__ ((section("BL_SEC_DV_PAGE")));

#if SYSBENCH
void setup_reset_handler(unsigned handler)
{
        unsigned *warm_reset_vector = (unsigned *) 0x0; /* Warm reset vector lives at address 0x0 */
        unsigned first_cpu = find_first_cpu();
        unsigned cpu_id = read_cpuid();

        /*
         * First cpu waits for all others to finish execution
         * in SMC memory before editing the reset vector.
         */
        if (first_cpu == cpu_id) {
                wait_for_events(SETUP_RST);
                *warm_reset_vector = handler;
                dsb();
                cln_dcache_mva_poc(warm_reset_vector);
        } else {
                set_event(SETUP_RST, cpu_id);
        }

        return;
}
#endif

unsigned long long *get_powerdown_stack(unsigned cpu_id)
{
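        /*
         * The stacks are full-descending: return the address one past
         * the end of this cpu's stack area (i.e. the base of the next
         * entry) for use as the initial stack pointer.
         */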
        return &powerdown_stacks[cpu_id + 1][0];
}

unsigned get_inbound(void)
{
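        /*
         * In a two-cluster system the inbound cluster id is simply
         * the complement of the current cluster id.
         */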
        return !read_clusterid();
}

/*
 * Simple function which brings the corresponding inbound core out of reset.
 */
void powerup_ib_core(unsigned cluster_id, unsigned cpu_id)
{
        unsigned rst_hold_reg = 0x0;
        unsigned cpu_mask = 0x0;

        if (ve_reset_type[cpu_id]) {
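                /*
                 * A non-zero reset type means the inbound core is
                 * returning from a warm reset, i.e. it has already
                 * been through at least one switchover.
                 */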

                if (flush_ob_l2) {
#if FLUSH_L2_FIX
                        set_event(FLUSH_L2, cpu_id);
#endif
                }

                /*
                 * The outbound cluster's last cpu sends an event
                 * indicating that it has finished the last switchover.
                 * Wait for it before bringing its cores out of
                 * reset.
                 */
                wait_for_event(OB_SHUTDOWN, cpu_id);
                reset_event(OB_SHUTDOWN, cpu_id);
        } else {
                /* Bump the warm reset count */
                ve_reset_type[cpu_id]++;
                cln_dcache_mva_poc(&ve_reset_type[cpu_id]);
        }

        write_trace(&lock_tube0, SEC_TUBE0, "Powerup Inbound", read_cntpct(), 0x0, 0x0);

        spin_lock(&lock_ib_kfscb);
        /*
         * RST_HOLDx layout: bit 8 holds the cluster in reset, bits
         * [7:4] hold individual cores in powerup reset and bits [3:0]
         * hold individual cores in reset. Read-modify-write the hold
         * register to release all three for this cpu.
         */
        rst_hold_reg = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
        cpu_mask = (1 << 8) | ((1 << 4) << cpu_id) | (1 << cpu_id);
        rst_hold_reg &= ~cpu_mask;
        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), rst_hold_reg);
        spin_unlock(&lock_ib_kfscb);

        return;
}

/*
 * Simple function to place a core in the outbound cluster
 * in reset.
 */
void powerdown_ob_core(unsigned cluster_id, unsigned cpu_id)
{
        unsigned val = 0x0;
        unsigned mask = 0x0;

        get_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
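        /* Hold this core in powerup reset: bits [7:4] of RST_HOLDx. */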
        mask = (1 << cpu_id) << 4;
        val |= mask;
        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);

        release_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        return;
}

/*
 * Simple function to place the outbound cluster in reset.
 */
void powerdown_ob_cluster(unsigned cluster_id, unsigned cpu_id)
{
        unsigned val = 0x0;
        unsigned mask = 0x0;

        get_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        val = read32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2));
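        /* Hold the entire cluster in reset: bit 8 of RST_HOLDx. */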
        mask = 1 << 8;
        val |= mask;
        write32(KFSCB_BASE + RST_HOLD0 + (cluster_id << 2), val);

        release_bakery_spinlock(cpu_id, &lock_ob_kfscb);

        return;
}

/*
 * Do not use this function for Read-Modify-Write of KFSCB registers
 * as it does not hold a lock.
 */
unsigned reset_status(unsigned cluster_id, unsigned rst_level,
                      unsigned cpu_mask)
{
        unsigned rst_stat_reg = 0x0;

        rst_stat_reg = read32(KFSCB_BASE + RST_STAT0 + (cluster_id << 2));

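        /*
         * RST_STATx layout: bit 8 indicates cluster reset, bits [7:4]
         * individual core powerup reset and bits [3:0] individual core
         * reset status.
         */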
        switch (rst_level) {
        case CLUSTER_RESET:
                return rst_stat_reg >> 8;
        case CORE_PORESET:
                return ((rst_stat_reg >> 4) & 0xf) & cpu_mask;
        case CORE_RESET:
                return (rst_stat_reg & 0xf) & cpu_mask;
        default:
                return 0;
        }
}

void powerdown_cluster(void)
{
        unsigned cpu_id = read_cpuid();
        unsigned cluster_id = read_clusterid();
        unsigned secondary_mask = 0x0;
        unsigned first_cpu = find_first_cpu();

        /*
         * Brute-force way of cleaning the L1 and L2 caches of the outbound
         * cluster. All cpus flush their L1 caches; the 'first_cpu' waits for
         * the others to finish this operation before flushing the L2.
         */
        write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush Begin", read_cntpct(), 0x0, 0x0);
        write_sctlr(read_sctlr() & ~CR_C & ~CR_M);
        inv_icache_all();
        cache_maint_op(L1, CLN_INV);
        disable_coherency();
        write_trace(&lock_tube0, SEC_TUBE0, "L1 Flush End", read_cntpct(), 0x0, 0x0);
        set_event(SEC_L1_DONE, cpu_id);

        if (cpu_id == first_cpu) {

                wait_for_events(SEC_L1_DONE);

                if (flush_ob_l2) {
#if FLUSH_L2_FIX
                        wait_for_event(FLUSH_L2, cpu_id);
                        reset_event(FLUSH_L2, cpu_id);
#endif
                        write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush Begin", read_cntpct(), 0x0, 0x0);
                        cache_maint_op(L2, CLN_INV);
                        write_trace(&lock_tube0, SEC_TUBE0, "L2 Flush End", read_cntpct(), 0x0, 0x0);

                        /* Turn off CCI snoops & DVM messages */
                        if (cluster_id)
                                write32(A7_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
                        else
                                write32(A15_SL_IFACE_BASE + SNOOP_CTLR_REG, 0x0);
                        
                        dsb();
                        
                        /* Wait for the dust to settle down */
                        while (read32(CCI_BASE + STATUS_REG) & 0x1) ;
                }

                /********************* RESET HANDLING **************************************
                 * Secondaries place themselves in reset while the 'first_cpu' waits for 
                 * them to do so.
                 ***************************************************************************/

                /*
                 * Read the L2 control register to get the number of
                 * secondary cores present on this cluster, then shift
                 * the mask up by one so that it covers the secondaries
                 * (cpus 1..N) and excludes the primary.
                 */
                secondary_mask = (1 << num_secondaries()) - 1;
                secondary_mask <<= 1;

                /* Wait for other cpus to enter reset */
                while (secondary_mask !=
                       reset_status(cluster_id, CORE_PORESET, secondary_mask)) ;

                if (flush_ob_l2)
                        powerdown_ob_cluster(cluster_id, cpu_id);
                else
                        powerdown_ob_core(cluster_id, cpu_id);

                set_events(OB_SHUTDOWN);

        } else {
                powerdown_ob_core(cluster_id, cpu_id);
        }

        write_trace(&lock_tube0, SEC_TUBE0, "Reset Initiated", read_cntpct(), 0x0, 0x0);
        return;
}