path: root/arch/metag/include/asm/irqflags.h
/*
 * IRQ flags handling
 *
 * This file is also included from low-level asm headers, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the low-level headers.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <asm/core_reg.h>
#include <asm/metag_regs.h>

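/*
 * Trigger bits that may remain set in TXMASKI even while interrupts
 * are considered disabled; everything except the background halt
 * trigger gets masked.
 */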
#define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT

#ifdef CONFIG_SMP
extern unsigned int get_trigger_mask(void);
#else

extern unsigned int global_trigger_mask;

static inline unsigned int get_trigger_mask(void)
{
	return global_trigger_mask;
}
#endif

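/* Read the current interrupt trigger mask from the TXMASKI core register. */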
static inline unsigned long arch_local_save_flags(void)
{
	return __core_reg_get(TXMASKI);
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & ~INTS_OFF_MASK) == 0;
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

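/*
 * Value to load into TXMASKI when disabling interrupts: all triggers
 * masked except any INTS_OFF_MASK bits that are already enabled.
 */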
static inline unsigned long __irqs_disabled(void)
{
	/*
	 * We shouldn't enable exceptions if they are not already
	 * enabled. This is required for chancalls to work correctly.
	 */
	return arch_local_save_flags() & INTS_OFF_MASK;
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = __irqs_disabled();

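	/* Atomically swap in the "off" mask; the old TXMASKI value lands in flags. */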
	asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags)
		     : "memory");

	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
}

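/* Mask all interrupt triggers, preserving only INTS_OFF_MASK bits already set. */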
static inline void arch_local_irq_disable(void)
{
	unsigned long flags = __irqs_disabled();

	asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
}

#ifdef CONFIG_SMP
/* Avoid circular include dependencies through <linux/preempt.h> */
void arch_local_irq_enable(void);
#else
static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(get_trigger_mask());
}
#endif

#endif /* !(__ASSEMBLY__) */

#endif /* !(_ASM_IRQFLAGS_H) */
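
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): the typical save/restore pairing these hooks exist for.
 * Generic code normally reaches them through the local_irq_save()/
 * local_irq_restore() wrappers in <linux/irqflags.h>; critical_update()
 * below is a hypothetical placeholder for the protected work.
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();     masks interrupts, returns old TXMASKI
 *	critical_update();                 code that must not be interrupted
 *	arch_local_irq_restore(flags);     puts the saved trigger mask back
 */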