path: root/arch/mips/include
author    Jayachandran C <jchandra@broadcom.com>    2013-01-14 15:11:54 +0000
committer John Crispin <blogic@openwrt.org>         2013-02-17 00:15:19 +0100
commit    220d9122e8c5a467fdeefc1857e077f29a623bfd (patch)
tree      e5efff5e5df4f3b11f77451412665ad1b4031a59 /arch/mips/include
parent    f0cb40e5c384cf2cc4b2b932b61474544ac1fc9a (diff)
MIPS: Netlogic: Optimize EIMR/EIRR accesses in 32-bit
The 64-bit interrupt request and mask registers (EIRR and EIMR) are accessed
with interrupts off, and the common operations on them are to set or clear a
single bit. Using the generic 64-bit c0 access functions for this is not
optimal on 32-bit, because they disable/restore interrupts and split/join the
64-bit value on every register access.

Provide functions ack_c0_eirr(), set_c0_eimr(), clear_c0_eimr() and
read_c0_eirr_and_eimr() that perform the EIMR and EIRR operations, and update
the interrupt handling code to use them. Also use the EIMR functions to mask
interrupts in the irq code.

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Patchwork: http://patchwork.linux-mips.org/patch/4790/
Signed-off-by: John Crispin <blogic@openwrt.org>
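For reference, on a 32-bit kernel the generic read_c0_eimr()/write_c0_eimr()
accessors go through the 64-bit c0 split helpers, so even a single-bit mask
update pays for an interrupt save/restore plus splitting and joining the
64-bit value. A rough sketch of the pattern the new helpers replace follows;
the function name slow_clear_c0_eimr() is hypothetical and used only for
illustration, not code from this patch.

#include <linux/types.h>
#include <asm/netlogic/mips-extns.h>

/* Illustrative only: roughly what clearing one EIMR bit costs when it goes
 * through the generic 64-bit c0 accessors on a 32-bit kernel. */
static inline void slow_clear_c0_eimr(int irq)
{
	uint64_t mask;

	/* On 32-bit, read_c0_eimr()/write_c0_eimr() each save and restore
	 * the interrupt state and move the value as two 32-bit halves,
	 * although callers in the irq path already run with IRQs off. */
	mask = read_c0_eimr();
	mask &= ~(1ULL << irq);
	write_c0_eimr(mask);
}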
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/netlogic/mips-extns.h  79
1 file changed, 79 insertions, 0 deletions
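The sketch below shows how the new helpers are intended to be called from the
Netlogic irq code: mask and unmask by clearing or setting the EIMR bit, ack by
writing the EIRR bit, and dispatch by reading the pending-and-enabled bits.
The callback and dispatch function names are hypothetical; the actual wiring
lives in the irq code updated by this series.

#include <linux/types.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/netlogic/mips-extns.h>

/* Hypothetical irq_chip callbacks; real names are in the Netlogic irq code. */
static void nlm_cpu_irq_mask(struct irq_data *d)
{
	/* the helpers must be called with interrupts disabled */
	clear_c0_eimr(d->irq);
}

static void nlm_cpu_irq_unmask(struct irq_data *d)
{
	set_c0_eimr(d->irq);
}

static void nlm_cpu_irq_ack(struct irq_data *d)
{
	ack_c0_eirr(d->irq);		/* clear the pending bit in EIRR */
}

/* Hypothetical dispatch sketch: service the lowest pending, unmasked source. */
static void nlm_irq_dispatch_sketch(void)
{
	uint64_t eirr;
	int irq;

	eirr = read_c0_eirr_and_eimr();		/* pending AND enabled */
	if (!eirr)
		return;
	irq = __builtin_ffsll(eirr) - 1;	/* lowest set bit */
	ack_c0_eirr(irq);
	do_IRQ(irq);	/* real code maps the hw bit to a Linux irq number */
}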
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 32ba6d95d47..cc4296595df 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -68,6 +68,85 @@ do { \
 	__write_64bit_c0_register($9, 7, (val));	\
 } while (0)
 
+/*
+ * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with
+ * standard functions will be very inefficient. This provides
+ * optimized functions for the normal operations on the registers.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set mips64\n\t"
+		".set noat\n\t"
+		"li $1, 1\n\t"
+		"dsllv $1, $1, %0\n\t"
+		"dmtc0 $1, $9, 6\n\t"
+		".set pop"
+		: : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set mips64\n\t"
+		".set noat\n\t"
+		"li $1, 1\n\t"
+		"dsllv %0, $1, %0\n\t"
+		"dmfc0 $1, $9, 7\n\t"
+		"or $1, %0\n\t"
+		"dmtc0 $1, $9, 7\n\t"
+		".set pop"
+		: "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set mips64\n\t"
+		".set noat\n\t"
+		"li $1, 1\n\t"
+		"dsllv %0, $1, %0\n\t"
+		"dmfc0 $1, $9, 7\n\t"
+		"or $1, %0\n\t"
+		"xor $1, %0\n\t"
+		"dmtc0 $1, $9, 7\n\t"
+		".set pop"
+		: "+r" (irq));
+}
+
+/*
+ * Read c0 eimr and c0 eirr, do AND of the two values, the result is
+ * the interrupts which are raised and are not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+	uint64_t val;
+
+#ifdef CONFIG_64BIT
+	val = read_c0_eimr() & read_c0_eirr();
+#else
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set mips64\n\t"
+		".set noat\n\t"
+		"dmfc0 %M0, $9, 6\n\t"
+		"dmfc0 %L0, $9, 7\n\t"
+		"and %M0, %L0\n\t"
+		"dsll %L0, %M0, 32\n\t"
+		"dsra %M0, %M0, 32\n\t"
+		"dsra %L0, %L0, 32\n\t"
+		".set pop"
+		: "=r" (val));
+#endif
+
+	return val;
+}
+
 static inline int hard_smp_processor_id(void)
 {
 	return __read_32bit_c0_register($15, 1) & 0x3ff;