path: root/arch/microblaze/kernel/entry-nommu.S
blob: 70da83a49670539829365a7012395de497a348be
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>

#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	disable_irq
	msrclr r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset r0, MSR_IE
	.endm

	.macro	clear_bip
	msrclr r0, MSR_BIP
	.endm
#else
	.macro	disable_irq
	mfs r11, rmsr
	andi r11, r11, ~MSR_IE
	mts rmsr, r11
	.endm

	.macro	enable_irq
	mfs r11, rmsr
	ori r11, r11, MSR_IE
	mts rmsr, r11
	.endm

	.macro	clear_bip
	mfs r11, rmsr
	andi r11, r11, ~MSR_BIP
	mts rmsr, r11
	.endm
#endif

ENTRY(_interrupt)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)	/* room for pt_regs (delay slot) */
1:						/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
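	/* r31 is kept pointing at the current task while in kernel mode */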
	/* prepare the link register, the argument and jump */
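	/* C callees return with "rtsd r15, 8", so setting r15 eight bytes
	 * before the target makes do_IRQ return straight to ret_from_intr */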
	addik	r15, r0, ret_from_intr - 8
	addk	r6, r0, r15
	braid	do_IRQ
	add	r5, r0, r1

ret_from_intr:
	lwi	r11, r1, PT_MODE
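	/* nonzero PT_MODE means the interrupt hit kernel code, so skip
	 * the reschedule and signal checks below */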
	bneid	r11, no_intr_resched

	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
				/* do an extra work if any bits are set */

	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beqid	r11, no_intr_resched
	addk	r5, r1, r0
	bralid	r15, do_notify_resume
	addk	r6, r0, r0
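	/* r6 = 0: not a syscall return (second do_notify_resume argument) */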

no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq

	/* save mode indicator */
	lwi	r11, r1, PT_MODE
	swi	r11, r0, PER_CPU(KM)

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
	/* special purpose registers */
	lwi	r11, r1, PT_FSR
	mts	rfsr, r11
	lwi	r11, r1, PT_ESR
	mts	resr, r11
	lwi	r11, r1, PT_EAR
	mts	rear, r11
	lwi	r11, r1, PT_MSR
	mts	rmsr, r11

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4
	lwi	r3, r1, PT_R3
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1
	rtid	r14, 0
	nop

ENTRY(_reset)
	brai	0;

ENTRY(_user_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* Already in kernel mode? */
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)	/* Room for pt_regs (delay slot) */
1:						/* Switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	/* but we want to return to the next inst. */
	addik	r14, r14, 0x4
	swi	r14, r1, PT_PC		/* increment by 4 and store in pc */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	/* See if the system call number is valid. */
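	/* the syscall number arrives in r12 (MicroBlaze syscall convention) */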
	addi	r11, r12, -__NR_syscalls
	bgei	r11, 1f			/* return to user if not valid */
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12			/* convert num -> ptr */
	add	r12, r12, r12
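	/* the two additions multiply r12 by 4, the size of one
	 * sys_call_table entry */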
	lwi	r12, r12, sys_call_table	/* Get function pointer */
	addik	r15, r0, ret_to_user-8		/* set return address */
	bra	r12				/* Make the system call. */
	bri	0				/* won't reach here */
1:
	brid	ret_to_user			/* jump to syscall epilogue */
	addi	r3, r0, -ENOSYS			/* set errno in delay slot */

/*
 * Debug traps are like a system call, but entered via brki r14, 0x60
 * All we need to do is send the SIGTRAP signal to current; ptrace and
 * do_notify_resume will handle the rest.
 */
ENTRY(_debug_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
//save_context:
	swi	r11, r1, PT_MODE	/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC /* Will return to interrupted instruction */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	addi	r5, r0, SIGTRAP			/* sending the trap signal */
	add	r6, r0, r31			/* to current */
	bralid	r15, send_sig
	add	r7, r0, r0			/* 3rd param zero */

	/* Restore r3/r4 to work around how ret_to_user works */
	lwi	r3, r1, PT_R3
	lwi	r4, r1, PT_R4
	bri	ret_to_user

ENTRY(_break)
	bri	0

/* struct task_struct *_switch_to(struct thread_info *prev,
					struct thread_info *next); */
ENTRY(_switch_to)
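	/* r5 = prev thread_info, r6 = next thread_info; the first two
	 * arguments arrive in r5 and r6 per the MicroBlaze ABI */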
	/* prepare return value */
	addk	r3, r0, r31

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current */
	lwi	r31, r6, TI_TASK
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	addik	r11, r6, TI_CPU_CONTEXT

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_ESR
	mts	resr, r12
	lwi	r12, r11, CC_EAR
	mts	rear, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	rtsd	r15, 8
	nop

ENTRY(ret_from_fork)
	addk	r5, r0, r3
	brlid	r15, schedule_tail
	nop
	swi	r31, r1, PT_R31		/* save r31 in user context. */
			/* will soon be restored to r31 in ret_to_user */
	addk	r3, r0, r0
	brid	ret_to_user
	nop

ENTRY(ret_from_kernel_thread)
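	/* r20 holds the thread function and r19 its argument, stashed
	 * in the new thread's cpu_context by copy_thread */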
	brlid	r15, schedule_tail
	addk	r5, r0, r3
	brald	r15, r20
	addk	r5, r0, r19
	brid	ret_to_user
	addk	r3, r0, r0

work_pending:
	enable_irq

	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beqi	r11, no_work_pending
	addk	r5, r1, r0
	bralid	r15, do_notify_resume
	addik	r6, r0, 1
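	/* r6 = 1: this is a syscall return (second do_notify_resume argument) */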
	bri	no_work_pending

ENTRY(ret_to_user)
	disable_irq

	swi	r4, r1, PT_R4		/* return val */
	swi	r3, r1, PT_R3		/* return val */

	lwi	r6, r31, TS_THREAD_INFO /* get thread info */
	lwi	r19, r6, TI_FLAGS /* get flags in thread info */
	bnei	r19, work_pending /* do an extra work if any bits are set */
no_work_pending:
	disable_irq

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* save mode indicator */
	lwi	r18, r1, PT_MODE
	swi	r18, r0, PER_CPU(KM)
//restore_context:
	/* special purpose registers */
	lwi	r18, r1, PT_FSR
	mts	rfsr, r18
	lwi	r18, r1, PT_ESR
	mts	resr, r18
	lwi	r18, r1, PT_EAR
	mts	rear, r18
	lwi	r18, r1, PT_MSR
	mts	rmsr, r18

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4		/* return val */
	lwi	r3, r1, PT_R3		/* return val */
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1

	rtid	r14, 0
	nop

sys_rt_sigreturn_wrapper:
	brid	sys_rt_sigreturn
	addk	r5, r1, r0

	/* Interrupt vector table */
	.section	.init.ivt, "ax"
	.org 0x0
	brai	_reset
	brai	_user_exception
	brai	_interrupt
	brai	_break
	brai	_hw_exception_handler
	.org 0x60
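	/* 0x60 matches the "brki r14, 0x60" used to enter _debug_exception */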
	brai	_debug_exception

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If the return address lies in [start addr, end addr],
	 * unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_to_user  ; .word ret_to_user    ; .word type_SYSCALL
	.word ret_from_intr; .word ret_from_intr  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
	.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0             ; .word 0               ; .word 0