Diffstat (limited to 'arch/metag/include')
-rw-r--r--  arch/metag/include/asm/Kbuild  54
-rw-r--r--  arch/metag/include/asm/atomic.h  53
-rw-r--r--  arch/metag/include/asm/atomic_lnkget.h  234
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h  160
-rw-r--r--  arch/metag/include/asm/barrier.h  85
-rw-r--r--  arch/metag/include/asm/bitops.h  132
-rw-r--r--  arch/metag/include/asm/bug.h  12
-rw-r--r--  arch/metag/include/asm/cache.h  23
-rw-r--r--  arch/metag/include/asm/cacheflush.h  250
-rw-r--r--  arch/metag/include/asm/cachepart.h  42
-rw-r--r--  arch/metag/include/asm/checksum.h  92
-rw-r--r--  arch/metag/include/asm/clock.h  51
-rw-r--r--  arch/metag/include/asm/cmpxchg.h  65
-rw-r--r--  arch/metag/include/asm/cmpxchg_irq.h  42
-rw-r--r--  arch/metag/include/asm/cmpxchg_lnkget.h  86
-rw-r--r--  arch/metag/include/asm/cmpxchg_lock1.h  48
-rw-r--r--  arch/metag/include/asm/core_reg.h  35
-rw-r--r--  arch/metag/include/asm/cpu.h  14
-rw-r--r--  arch/metag/include/asm/da.h  43
-rw-r--r--  arch/metag/include/asm/delay.h  29
-rw-r--r--  arch/metag/include/asm/div64.h  12
-rw-r--r--  arch/metag/include/asm/dma-mapping.h  190
-rw-r--r--  arch/metag/include/asm/elf.h  128
-rw-r--r--  arch/metag/include/asm/fixmap.h  99
-rw-r--r--  arch/metag/include/asm/ftrace.h  23
-rw-r--r--  arch/metag/include/asm/global_lock.h  100
-rw-r--r--  arch/metag/include/asm/gpio.h  4
-rw-r--r--  arch/metag/include/asm/highmem.h  62
-rw-r--r--  arch/metag/include/asm/hugetlb.h  86
-rw-r--r--  arch/metag/include/asm/hwthread.h  40
-rw-r--r--  arch/metag/include/asm/io.h  165
-rw-r--r--  arch/metag/include/asm/irq.h  32
-rw-r--r--  arch/metag/include/asm/irqflags.h  93
-rw-r--r--  arch/metag/include/asm/l2cache.h  258
-rw-r--r--  arch/metag/include/asm/linkage.h  7
-rw-r--r--  arch/metag/include/asm/mach/arch.h  86
-rw-r--r--  arch/metag/include/asm/metag_isa.h  81
-rw-r--r--  arch/metag/include/asm/metag_mem.h  1106
-rw-r--r--  arch/metag/include/asm/metag_regs.h  1184
-rw-r--r--  arch/metag/include/asm/mman.h  11
-rw-r--r--  arch/metag/include/asm/mmu.h  77
-rw-r--r--  arch/metag/include/asm/mmu_context.h  113
-rw-r--r--  arch/metag/include/asm/mmzone.h  42
-rw-r--r--  arch/metag/include/asm/module.h  37
-rw-r--r--  arch/metag/include/asm/page.h  128
-rw-r--r--  arch/metag/include/asm/perf_event.h  4
-rw-r--r--  arch/metag/include/asm/pgalloc.h  79
-rw-r--r--  arch/metag/include/asm/pgtable.h  370
-rw-r--r--  arch/metag/include/asm/processor.h  202
-rw-r--r--  arch/metag/include/asm/prom.h  23
-rw-r--r--  arch/metag/include/asm/ptrace.h  60
-rw-r--r--  arch/metag/include/asm/setup.h  8
-rw-r--r--  arch/metag/include/asm/smp.h  29
-rw-r--r--  arch/metag/include/asm/sparsemem.h  13
-rw-r--r--  arch/metag/include/asm/spinlock.h  22
-rw-r--r--  arch/metag/include/asm/spinlock_lnkget.h  249
-rw-r--r--  arch/metag/include/asm/spinlock_lock1.h  184
-rw-r--r--  arch/metag/include/asm/spinlock_types.h  20
-rw-r--r--  arch/metag/include/asm/stacktrace.h  20
-rw-r--r--  arch/metag/include/asm/string.h  13
-rw-r--r--  arch/metag/include/asm/switch.h  21
-rw-r--r--  arch/metag/include/asm/syscall.h  104
-rw-r--r--  arch/metag/include/asm/syscalls.h  39
-rw-r--r--  arch/metag/include/asm/tbx.h  1425
-rw-r--r--  arch/metag/include/asm/tcm.h  30
-rw-r--r--  arch/metag/include/asm/thread_info.h  155
-rw-r--r--  arch/metag/include/asm/tlb.h  36
-rw-r--r--  arch/metag/include/asm/tlbflush.h  77
-rw-r--r--  arch/metag/include/asm/topology.h  53
-rw-r--r--  arch/metag/include/asm/traps.h  48
-rw-r--r--  arch/metag/include/asm/uaccess.h  241
-rw-r--r--  arch/metag/include/asm/unistd.h  12
-rw-r--r--  arch/metag/include/asm/user_gateway.h  44
-rw-r--r--  arch/metag/include/uapi/asm/Kbuild  13
-rw-r--r--  arch/metag/include/uapi/asm/byteorder.h  1
-rw-r--r--  arch/metag/include/uapi/asm/ptrace.h  113
-rw-r--r--  arch/metag/include/uapi/asm/resource.h  7
-rw-r--r--  arch/metag/include/uapi/asm/sigcontext.h  31
-rw-r--r--  arch/metag/include/uapi/asm/siginfo.h  8
-rw-r--r--  arch/metag/include/uapi/asm/swab.h  26
-rw-r--r--  arch/metag/include/uapi/asm/unistd.h  21
81 files changed, 9515 insertions, 0 deletions
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
new file mode 100644
index 00000000000..6ae0ccb632c
--- /dev/null
+++ b/arch/metag/include/asm/Kbuild
@@ -0,0 +1,54 @@
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bugs.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += msgbuf.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += switch_to.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += timex.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
new file mode 100644
index 00000000000..307ecd2bd9a
--- /dev/null
+++ b/arch/metag/include/asm/atomic.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_METAG_ATOMIC_H
+#define __ASM_METAG_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+
+#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
+/* The simple UP case. */
+#include <asm-generic/atomic.h>
+#else
+
+#if defined(CONFIG_METAG_ATOMICITY_LOCK1)
+#include <asm/atomic_lock1.h>
+#else
+#include <asm/atomic_lnkget.h>
+#endif
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#endif
+
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __ASM_METAG_ATOMIC_H */
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
new file mode 100644
index 00000000000..d2e60a18986
--- /dev/null
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -0,0 +1,234 @@
+#ifndef __ASM_METAG_ATOMIC_LNKGET_H
+#define __ASM_METAG_ATOMIC_LNKGET_H
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_set(v, i) ((v)->counter = (i))
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+
+/*
+ * None of these asm statements clobber memory as LNKSET writes around
+ * the cache so the memory it modifies cannot safely be read by any means
+ * other than these accessors.
+ */
+
+static inline int atomic_read(const atomic_t *v)
+{
+ int temp;
+
+ asm volatile (
+ "LNKGETD %0, [%1]\n"
+ : "=da" (temp)
+ : "da" (&v->counter));
+
+ return temp;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ int temp;
+
+ asm volatile (
+ "1: LNKGETD %0, [%1]\n"
+ " ADD %0, %0, %2\n"
+ " LNKSETD [%1], %0\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp)
+ : "da" (&v->counter), "bd" (i)
+ : "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ int temp;
+
+ asm volatile (
+ "1: LNKGETD %0, [%1]\n"
+ " SUB %0, %0, %2\n"
+ " LNKSETD [%1], %0\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp)
+ : "da" (&v->counter), "bd" (i)
+ : "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ int result, temp;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " ADD %1, %1, %3\n"
+ " LNKSETD [%2], %1\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp), "=&da" (result)
+ : "da" (&v->counter), "bd" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ int result, temp;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " SUB %1, %1, %3\n"
+ " LNKSETD [%2], %1\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp), "=&da" (result)
+ : "da" (&v->counter), "bd" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ int temp;
+
+ asm volatile (
+ "1: LNKGETD %0, [%1]\n"
+ " AND %0, %0, %2\n"
+ " LNKSETD [%1] %0\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp)
+ : "da" (&v->counter), "bd" (~mask)
+ : "cc");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ int temp;
+
+ asm volatile (
+ "1: LNKGETD %0, [%1]\n"
+ " OR %0, %0, %2\n"
+ " LNKSETD [%1], %0\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp)
+ : "da" (&v->counter), "bd" (mask)
+ : "cc");
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int result, temp;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " CMP %1, %3\n"
+ " LNKSETDEQ [%2], %4\n"
+ " BNE 2f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ "2:\n"
+ : "=&d" (temp), "=&d" (result)
+ : "da" (&v->counter), "bd" (old), "da" (new)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+ int temp, old;
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " LNKSETD [%2], %3\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (temp), "=&d" (old)
+ : "da" (&v->counter), "da" (new)
+ : "cc");
+
+ return old;
+}
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int result, temp;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " CMP %1, %3\n"
+ " ADD %0, %1, %4\n"
+ " LNKSETDNE [%2], %0\n"
+ " BEQ 2f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ "2:\n"
+ : "=&d" (temp), "=&d" (result)
+ : "da" (&v->counter), "bd" (u), "bd" (a)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+ int result, temp;
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " SUBS %1, %1, %3\n"
+ " LNKSETDGE [%2], %1\n"
+ " BLT 2f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ "2:\n"
+ : "=&d" (temp), "=&da" (result)
+ : "da" (&v->counter), "bd" (i)
+ : "cc");
+
+ return result;
+}
+
+#endif /* __ASM_METAG_ATOMIC_LNKGET_H */
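Editorial note: the DEFR/ANDT/CMPT triplet after each LNKSET above reads back TXSTAT and retries the whole load-modify-store if the linked store failed, i.e. these are classic load-linked/store-conditional loops. As a rough portable analogue (an illustrative sketch in C11 atomics, not part of this patch), the retry shape of atomic_add_return() looks like this:

#include <stdatomic.h>

static int atomic_add_return_c11(int i, _Atomic int *v)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        /*
         * A failed compare-exchange plays the role of a failed LNKSET:
         * 'old' is refreshed with the current value and the loop
         * retries, just like the BNZ 1b above.
         */
        while (!atomic_compare_exchange_weak_explicit(
                        v, &old, old + i,
                        memory_order_seq_cst, memory_order_relaxed))
                ;

        return old + i;
}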
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
new file mode 100644
index 00000000000..e578955e674
--- /dev/null
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -0,0 +1,160 @@
+#ifndef __ASM_METAG_ATOMIC_LOCK1_H
+#define __ASM_METAG_ATOMIC_LOCK1_H
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+#include <asm/global_lock.h>
+
+static inline int atomic_read(const atomic_t *v)
+{
+ return (v)->counter;
+}
+
+/*
+ * atomic_set needs to take the lock to protect atomic_add_unless from a
+ * possible race, as it reads the counter twice:
+ *
+ * CPU0 CPU1
+ * atomic_add_unless(1, 0)
+ * ret = v->counter (non-zero)
+ * if (ret != u) v->counter = 0
+ * v->counter += 1 (counter set to 1)
+ *
+ * Making atomic_set take the lock ensures that ordering and logical
+ * consistency is preserved.
+ */
+static inline int atomic_set(atomic_t *v, int i)
+{
+ unsigned long flags;
+
+ __global_lock1(flags);
+ fence();
+ v->counter = i;
+ __global_unlock1(flags);
+ return i;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ __global_lock1(flags);
+ fence();
+ v->counter += i;
+ __global_unlock1(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ __global_lock1(flags);
+ fence();
+ v->counter -= i;
+ __global_unlock1(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long result;
+ unsigned long flags;
+
+ __global_lock1(flags);
+ result = v->counter;
+ result += i;
+ fence();
+ v->counter = result;
+ __global_unlock1(flags);
+
+ return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long result;
+ unsigned long flags;
+
+ __global_lock1(flags);
+ result = v->counter;
+ result -= i;
+ fence();
+ v->counter = result;
+ __global_unlock1(flags);
+
+ return result;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ __global_lock1(flags);
+ fence();
+ v->counter &= ~mask;
+ __global_unlock1(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ __global_lock1(flags);
+ fence();
+ v->counter |= mask;
+ __global_unlock1(flags);
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ __global_lock1(flags);
+ ret = v->counter;
+ if (ret == old) {
+ fence();
+ v->counter = new;
+ }
+ __global_unlock1(flags);
+
+ return ret;
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ __global_lock1(flags);
+ ret = v->counter;
+ if (ret != u) {
+ fence();
+ v->counter += a;
+ }
+ __global_unlock1(flags);
+
+ return ret;
+}
+
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+ int ret;
+ unsigned long flags;
+
+ __global_lock1(flags);
+ ret = v->counter - 1;
+ if (ret >= 0) {
+ fence();
+ v->counter = ret;
+ }
+ __global_unlock1(flags);
+
+ return ret;
+}
+
+#endif /* __ASM_METAG_ATOMIC_LOCK1_H */
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
new file mode 100644
index 00000000000..c90bfc6bf64
--- /dev/null
+++ b/arch/metag/include/asm/barrier.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_METAG_BARRIER_H
+#define _ASM_METAG_BARRIER_H
+
+#include <asm/metag_mem.h>
+
+#define nop() asm volatile ("NOP")
+#define mb() wmb()
+#define rmb() barrier()
+
+#ifdef CONFIG_METAG_META21
+
+/* HTP and above have a system event to fence writes */
+static inline void wr_fence(void)
+{
+ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
+ barrier();
+ *flushptr = 0;
+}
+
+#else /* CONFIG_METAG_META21 */
+
+/*
+ * ATP doesn't have system event to fence writes, so it is necessary to flush
+ * the processor write queues as well as possibly the write combiner (depending
+ * on the page being written).
+ * To ensure the write queues are flushed we do 4 writes to a system event
+ * register (in this case write combiner flush) which will also flush the write
+ * combiner.
+ */
+static inline void wr_fence(void)
+{
+ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
+ barrier();
+ *flushptr = 0;
+ *flushptr = 0;
+ *flushptr = 0;
+ *flushptr = 0;
+}
+
+#endif /* !CONFIG_METAG_META21 */
+
+static inline void wmb(void)
+{
+ /* flush writes through the write combiner */
+ wr_fence();
+}
+
+#define read_barrier_depends() do { } while (0)
+
+#ifndef CONFIG_SMP
+#define fence() do { } while (0)
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#else
+
+#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
+/*
+ * Write to the atomic memory unlock system event register (command 0). This is
+ * needed before a write to shared memory in a critical section, to prevent
+ * external reordering of writes before the fence on other threads with writes
+ * after the fence on this thread (and to prevent the ensuing cache-memory
+ * incoherence). It is therefore ineffective if used after and on the same
+ * thread as a write.
+ */
+static inline void fence(void)
+{
+ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
+ barrier();
+ *flushptr = 0;
+}
+#define smp_mb() fence()
+#define smp_rmb() fence()
+#define smp_wmb() barrier()
+#else
+#define fence() do { } while (0)
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+#endif
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+
+#endif /* _ASM_METAG_BARRIER_H */
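Editorial note: the pairing these macros exist to support is the usual publish/consume pattern. An illustrative sketch (the payload/ready variables are hypothetical, not code from this patch): the writer orders its data before the ready flag with smp_wmb(), and the reader orders the flag check before the data read with smp_rmb().

static int payload;
static int ready;

static void producer(void)
{
        payload = 42;
        smp_wmb();              /* make payload visible before ready */
        ready = 1;
}

static int consumer(void)
{
        if (!ready)
                return -1;      /* not published yet */
        smp_rmb();              /* read ready before reading payload */
        return payload;
}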
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
new file mode 100644
index 00000000000..c0d0df0d137
--- /dev/null
+++ b/arch/metag/include/asm/bitops.h
@@ -0,0 +1,132 @@
+#ifndef __ASM_METAG_BITOPS_H
+#define __ASM_METAG_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/global_lock.h>
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#ifdef CONFIG_SMP
+/*
+ * These functions are the basis of our bit ops.
+ */
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __global_lock1(flags);
+ fence();
+ *p |= mask;
+ __global_unlock1(flags);
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __global_lock1(flags);
+ fence();
+ *p &= ~mask;
+ __global_unlock1(flags);
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __global_lock1(flags);
+ fence();
+ *p ^= mask;
+ __global_unlock1(flags);
+}
+
+static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long old;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __global_lock1(flags);
+ old = *p;
+ if (!(old & mask)) {
+ fence();
+ *p = old | mask;
+ }
+ __global_unlock1(flags);
+
+ return (old & mask) != 0;
+}
+
+static inline int test_and_clear_bit(unsigned int bit,
+ volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long old;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __global_lock1(flags);
+ old = *p;
+ if (old & mask) {
+ fence();
+ *p = old & ~mask;
+ }
+ __global_unlock1(flags);
+
+ return (old & mask) != 0;
+}
+
+static inline int test_and_change_bit(unsigned int bit,
+ volatile unsigned long *p)
+{
+ unsigned long flags;
+ unsigned long old;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __global_lock1(flags);
+ fence();
+ old = *p;
+ *p = old ^ mask;
+ __global_unlock1(flags);
+
+ return (old & mask) != 0;
+}
+
+#else
+#include <asm-generic/bitops/atomic.h>
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_METAG_BITOPS_H */
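Editorial note: the lock1-protected operations above provide the usual atomic flag idioms. A hypothetical caller (BUSY_BIT, dev_flags and the -EBUSY return are illustrative, assuming <linux/errno.h> is available):

#define BUSY_BIT        0

static unsigned long dev_flags;

static int try_claim(void)
{
        /* returns the old bit value: non-zero means already claimed */
        if (test_and_set_bit(BUSY_BIT, &dev_flags))
                return -EBUSY;
        /* ... exclusive section ... */
        smp_mb__before_clear_bit();
        clear_bit(BUSY_BIT, &dev_flags);
        return 0;
}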
diff --git a/arch/metag/include/asm/bug.h b/arch/metag/include/asm/bug.h
new file mode 100644
index 00000000000..d04b48cefec
--- /dev/null
+++ b/arch/metag/include/asm/bug.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_METAG_BUG_H
+#define _ASM_METAG_BUG_H
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+
+extern const char *trap_name(int trapno);
+extern void die(const char *str, struct pt_regs *regs, long err,
+ unsigned long addr) __attribute__ ((noreturn));
+
+#endif
diff --git a/arch/metag/include/asm/cache.h b/arch/metag/include/asm/cache.h
new file mode 100644
index 00000000000..a43b650cfdc
--- /dev/null
+++ b/arch/metag/include/asm/cache.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_METAG_CACHE_H
+#define __ASM_METAG_CACHE_H
+
+/* L1 cache line size (64 bytes) */
+#define L1_CACHE_SHIFT 6
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/* Meta requires large data items to be 8 byte aligned. */
+#define ARCH_SLAB_MINALIGN 8
+
+/*
+ * With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA
+ * buffers have cache line alignment.
+ */
+#ifdef CONFIG_METAG_L2C
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#else
+#define ARCH_DMA_MINALIGN 8
+#endif
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#endif
diff --git a/arch/metag/include/asm/cacheflush.h b/arch/metag/include/asm/cacheflush.h
new file mode 100644
index 00000000000..7787ec5e3ed
--- /dev/null
+++ b/arch/metag/include/asm/cacheflush.h
@@ -0,0 +1,250 @@
+#ifndef _METAG_CACHEFLUSH_H
+#define _METAG_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/l2cache.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+
+void metag_cache_probe(void);
+
+void metag_data_cache_flush_all(const void *start);
+void metag_code_cache_flush_all(const void *start);
+
+/*
+ * Routines to flush physical cache lines that may be used to cache data or code
+ * normally accessed via the linear address range supplied. The region flushed
+ * must either lie in local or global address space determined by the top bit of
+ * the pStart address. If Bytes is >= 4K then the whole of the related cache
+ * state will be flushed rather than a limited range.
+ */
+void metag_data_cache_flush(const void *start, int bytes);
+void metag_code_cache_flush(const void *start, int bytes);
+
+#ifdef CONFIG_METAG_META12
+
+/* Write through, virtually tagged, split I/D cache. */
+
+static inline void __flush_cache_all(void)
+{
+ metag_code_cache_flush_all((void *) PAGE_OFFSET);
+ metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+#define flush_cache_all() __flush_cache_all()
+
+/* flush the entire user address space referenced in this mm structure */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+ if (mm == current->mm)
+ __flush_cache_all();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/* flush a range of addresses from this mm */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_cache_mm(vma->vm_mm);
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long vmaddr, unsigned long pfn)
+{
+ flush_cache_mm(vma->vm_mm);
+}
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+#else
+
+/* Write through, physically tagged, split I/D cache. */
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ /* FIXME: We can do better than this. All we are trying to do is
+ * make the i-cache coherent, we should use the PG_arch_1 bit like
+ * e.g. powerpc.
+ */
+#ifdef CONFIG_SMP
+ metag_out32(1, SYSC_ICACHE_FLUSH);
+#else
+ metag_code_cache_flush_all((void *) PAGE_OFFSET);
+#endif
+}
+
+#endif
+
+/* Push n pages at kernel virtual address and clear the icache */
+static inline void flush_icache_range(unsigned long address,
+ unsigned long endaddr)
+{
+#ifdef CONFIG_SMP
+ metag_out32(1, SYSC_ICACHE_FLUSH);
+#else
+ metag_code_cache_flush((void *) address, endaddr - address);
+#endif
+}
+
+static inline void flush_cache_sigtramp(unsigned long addr, int size)
+{
+ /*
+ * Flush the icache in case there was previously some code
+ * fetched from this address, perhaps a previous sigtramp.
+ *
+ * We don't need to flush the dcache, it's write through and
+ * we just wrote the sigtramp code through it.
+ */
+#ifdef CONFIG_SMP
+ metag_out32(1, SYSC_ICACHE_FLUSH);
+#else
+ metag_code_cache_flush((void *) addr, size);
+#endif
+}
+
+#ifdef CONFIG_METAG_L2C
+
+/*
+ * Perform a single specific CACHEWD operation on an address, masking lower bits
+ * of address first.
+ */
+static inline void cachewd_line(void *addr, unsigned int data)
+{
+ unsigned long masked = (unsigned long)addr & -0x40;
+ __builtin_meta2_cachewd((void *)masked, data);
+}
+
+/* Perform a certain CACHEW op on each cache line in a range */
+static inline void cachew_region_op(void *start, unsigned long size,
+ unsigned int op)
+{
+ unsigned long offset = (unsigned long)start & 0x3f;
+ int i;
+ if (offset) {
+ size += offset;
+ start -= offset;
+ }
+ i = (size - 1) >> 6;
+ do {
+ __builtin_meta2_cachewd(start, op);
+ start += 0x40;
+ } while (i--);
+}
+
+/* prevent write fence and flushbacks being reordered in L2 */
+static inline void l2c_fence_flush(void *addr)
+{
+ /*
+ * Synchronise by reading back and re-flushing.
+ * It is assumed this access will miss, as the caller should have just
+ * flushed the cache line.
+ */
+ (void)*(volatile u8 *)addr;
+ cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
+}
+
+/* prevent write fence and writebacks being reordered in L2 */
+static inline void l2c_fence(void *addr)
+{
+ /*
+ * A write back has occurred, but not necessarily an invalidate, so the
+ * readback in l2c_fence_flush() would hit in the cache and have no
+ * effect. Therefore fully flush the line first.
+ */
+ cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
+ l2c_fence_flush(addr);
+}
+
+/* Used to keep memory consistent when doing DMA. */
+static inline void flush_dcache_region(void *start, unsigned long size)
+{
+ /* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
+ if (meta_l2c_is_enabled()) {
+ cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
+ if (meta_l2c_is_writeback())
+ l2c_fence_flush(start + size - 1);
+ } else {
+ metag_data_cache_flush(start, size);
+ }
+}
+
+/* Write back dirty lines to memory (or do nothing if no writeback caches) */
+static inline void writeback_dcache_region(void *start, unsigned long size)
+{
+ if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
+ cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
+ l2c_fence(start + size - 1);
+ }
+}
+
+/* Invalidate (may also write back if necessary) */
+static inline void invalidate_dcache_region(void *start, unsigned long size)
+{
+ if (meta_l2c_is_enabled())
+ cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
+ else
+ metag_data_cache_flush(start, size);
+}
+#else
+#define flush_dcache_region(s, l) metag_data_cache_flush((s), (l))
+#define writeback_dcache_region(s, l) do {} while (0)
+#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
+#endif
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr,
+ void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr,
+ void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+}
+
+#endif /* _METAG_CACHEFLUSH_H */
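Editorial note: a sketch of how the region helpers above are meant to be used around device DMA (the buffer, length and device steps are hypothetical):

static void cpu_to_device(void *buf, unsigned long len)
{
        /*
         * Push dirty lines to memory so the device reads current data;
         * a no-op when no writeback cache is present.
         */
        writeback_dcache_region(buf, len);
        /* ... start memory-to-device DMA ... */
}

static void device_to_cpu(void *buf, unsigned long len)
{
        /* ... device-to-memory DMA has completed ... */
        /* discard stale cached copies before the CPU reads the buffer */
        invalidate_dcache_region(buf, len);
}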
diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h
new file mode 100644
index 00000000000..cf6b44e916b
--- /dev/null
+++ b/arch/metag/include/asm/cachepart.h
@@ -0,0 +1,42 @@
+/*
+ * Meta cache partition manipulation.
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ */
+
+#ifndef _METAG_CACHEPART_H_
+#define _METAG_CACHEPART_H_
+
+/**
+ * get_dcache_size() - Get size of data cache.
+ */
+unsigned int get_dcache_size(void);
+
+/**
+ * get_icache_size() - Get size of code cache.
+ */
+unsigned int get_icache_size(void);
+
+/**
+ * get_global_dcache_size() - Get the thread's global dcache.
+ *
+ * Returns the size of the current thread's global dcache partition.
+ */
+unsigned int get_global_dcache_size(void);
+
+/**
+ * get_global_icache_size() - Get the thread's global icache.
+ *
+ * Returns the size of the current thread's global icache partition.
+ */
+unsigned int get_global_icache_size(void);
+
+/**
+ * check_for_cache_aliasing() - Ensure that the bootloader has configured the
+ * dcache and icache properly to avoid aliasing
+ * @thread_id: Hardware thread ID
+ */
+void check_for_cache_aliasing(int thread_id);
+
+#endif
diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h
new file mode 100644
index 00000000000..999bf761a73
--- /dev/null
+++ b/arch/metag/include/asm/checksum.h
@@ -0,0 +1,92 @@
+#ifndef _METAG_CHECKSUM_H
+#define _METAG_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len,
+ __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ unsigned long len_proto = (proto + len) << 8;
+ asm ("ADD %0, %0, %1\n"
+ "ADDS %0, %0, %2\n"
+ "ADDCS %0, %0, #1\n"
+ "ADDS %0, %0, %3\n"
+ "ADDCS %0, %0, #1\n"
+ : "=d" (sum)
+ : "d" (daddr), "d" (saddr), "d" (len_proto),
+ "0" (sum)
+ : "cc");
+ return sum;
+}
+
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif /* _METAG_CHECKSUM_H */
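Editorial note: a hypothetical caller tying the interfaces above together: checksum a payload with csum_partial(), then fold it into the complemented TCP checksum (IPPROTO_TCP assumed available from the uapi in.h header):

static __sum16 tcp_checksum(__be32 saddr, __be32 daddr,
                            const void *payload, unsigned short len)
{
        __wsum sum = csum_partial(payload, len, 0);

        /* adds the pseudo-header and returns the folded complement */
        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, sum);
}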
diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h
new file mode 100644
index 00000000000..3e2915a280c
--- /dev/null
+++ b/arch/metag/include/asm/clock.h
@@ -0,0 +1,51 @@
+/*
+ * arch/metag/include/asm/clock.h
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _METAG_CLOCK_H_
+#define _METAG_CLOCK_H_
+
+#include <asm/mach/arch.h>
+
+/**
+ * struct meta_clock_desc - Meta Core clock callbacks.
+ * @get_core_freq: Get the frequency of the Meta core. If this is NULL, the
+ * core frequency will be determined like this:
+ * Meta 1: based on loops_per_jiffy.
+ * Meta 2: (EXPAND_TIMER_DIV + 1) MHz.
+ */
+struct meta_clock_desc {
+ unsigned long (*get_core_freq)(void);
+};
+
+extern struct meta_clock_desc _meta_clock;
+
+/*
+ * Set up the default clock, ensuring all callbacks are valid - only accessible
+ * during boot.
+ */
+void setup_meta_clocks(struct meta_clock_desc *desc);
+
+/**
+ * get_coreclock() - Get the frequency of the Meta core clock.
+ *
+ * Returns: The Meta core clock frequency in Hz.
+ */
+static inline unsigned long get_coreclock(void)
+{
+ /*
+ * Use the current clock callback. If set correctly this will provide
+ * the most accurate frequency as it can be calculated directly from the
+ * PLL configuration. Otherwise a default callback will have been set
+ * instead.
+ */
+ return _meta_clock.get_core_freq();
+}
+
+#endif /* _METAG_CLOCK_H_ */
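Editorial note: an illustrative board registration (the my_board_* names are hypothetical): a machine that can read its PLL supplies an exact get_core_freq callback, and everything else then flows through get_coreclock().

static unsigned long my_board_core_freq(void)
{
        return 400000000UL;     /* e.g. computed from PLL registers: 400 MHz */
}

static struct meta_clock_desc my_board_clock = {
        .get_core_freq = my_board_core_freq,
};

static void __init my_board_init_early(void)
{
        /* must run during boot, before any get_coreclock() callers */
        setup_meta_clocks(&my_board_clock);
}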
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h
new file mode 100644
index 00000000000..b1bc1be8540
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg.h
@@ -0,0 +1,65 @@
+#ifndef __ASM_METAG_CMPXCHG_H
+#define __ASM_METAG_CMPXCHG_H
+
+#include <asm/barrier.h>
+
+#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
+#include <asm/cmpxchg_irq.h>
+#elif defined(CONFIG_METAG_ATOMICITY_LOCK1)
+#include <asm/cmpxchg_lock1.h>
+#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
+#include <asm/cmpxchg_lnkget.h>
+#endif
+
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size) \
+({ \
+ unsigned long __xchg__res; \
+ volatile void *__xchg_ptr = (ptr); \
+ switch (size) { \
+ case 4: \
+ __xchg__res = xchg_u32(__xchg_ptr, x); \
+ break; \
+ case 1: \
+ __xchg__res = xchg_u8(__xchg_ptr, x); \
+ break; \
+ default: \
+ __xchg_called_with_bad_pointer(); \
+ __xchg__res = x; \
+ break; \
+ } \
+ \
+ __xchg__res; \
+})
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#define cmpxchg(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+ })
+
+#endif /* __ASM_METAG_CMPXCHG_H */
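Editorial note: a hypothetical lock-free caller of the cmpxchg() defined above: record a running maximum, retrying while concurrent writers race.

static unsigned long observed_max;

static void record_max(unsigned long val)
{
        unsigned long old = observed_max;

        while (val > old) {
                unsigned long prev = cmpxchg(&observed_max, old, val);

                if (prev == old)
                        break;  /* our value landed */
                old = prev;     /* lost the race, re-check against winner */
        }
}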
diff --git a/arch/metag/include/asm/cmpxchg_irq.h b/arch/metag/include/asm/cmpxchg_irq.h
new file mode 100644
index 00000000000..649573168b0
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_irq.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_METAG_CMPXCHG_IRQ_H
+#define __ASM_METAG_CMPXCHG_IRQ_H
+
+#include <linux/irqflags.h>
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ local_irq_restore(flags);
+ return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val & 0xff;
+ local_irq_restore(flags);
+ return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ unsigned long new)
+{
+ __u32 retval;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_IRQ_H */
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
new file mode 100644
index 00000000000..0154e2807eb
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -0,0 +1,86 @@
+#ifndef __ASM_METAG_CMPXCHG_LNKGET_H
+#define __ASM_METAG_CMPXCHG_LNKGET_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+ int temp, old;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " LNKSETD [%2], %3\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+ " DCACHE [%2], %0\n"
+#endif
+ : "=&d" (temp), "=&d" (old)
+ : "da" (m), "da" (val)
+ : "cc"
+ );
+
+ smp_mb();
+
+ return old;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+ int temp, old;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " LNKSETD [%2], %3\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+ " DCACHE [%2], %0\n"
+#endif
+ : "=&d" (temp), "=&d" (old)
+ : "da" (m), "da" (val & 0xff)
+ : "cc"
+ );
+
+ smp_mb();
+
+ return old;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ unsigned long new)
+{
+ __u32 retval, temp;
+
+ smp_mb();
+
+ asm volatile (
+ "1: LNKGETD %1, [%2]\n"
+ " CMP %1, %3\n"
+ " LNKSETDEQ [%2], %4\n"
+ " BNE 2f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+ " DCACHE [%2], %0\n"
+#endif
+ "2:\n"
+ : "=&d" (temp), "=&da" (retval)
+ : "da" (m), "bd" (old), "da" (new)
+ : "cc"
+ );
+
+ smp_mb();
+
+ return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_LNKGET_H */
diff --git a/arch/metag/include/asm/cmpxchg_lock1.h b/arch/metag/include/asm/cmpxchg_lock1.h
new file mode 100644
index 00000000000..fd685047496
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_lock1.h
@@ -0,0 +1,48 @@
+#ifndef __ASM_METAG_CMPXCHG_LOCK1_H
+#define __ASM_METAG_CMPXCHG_LOCK1_H
+
+#include <asm/global_lock.h>
+
+/* Use LOCK2 as these have to be atomic w.r.t. ordinary accesses. */
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ __global_lock2(flags);
+ fence();
+ retval = *m;
+ *m = val;
+ __global_unlock2(flags);
+ return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ __global_lock2(flags);
+ fence();
+ retval = *m;
+ *m = val & 0xff;
+ __global_unlock2(flags);
+ return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ unsigned long new)
+{
+ __u32 retval;
+ unsigned long flags;
+
+ __global_lock2(flags);
+ retval = *m;
+ if (retval == old) {
+ fence();
+ *m = new;
+ }
+ __global_unlock2(flags);
+ return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_LOCK1_H */
diff --git a/arch/metag/include/asm/core_reg.h b/arch/metag/include/asm/core_reg.h
new file mode 100644
index 00000000000..bdbc3a51f31
--- /dev/null
+++ b/arch/metag/include/asm/core_reg.h
@@ -0,0 +1,35 @@
+#ifndef __ASM_METAG_CORE_REG_H_
+#define __ASM_METAG_CORE_REG_H_
+
+#include <asm/metag_regs.h>
+
+extern void core_reg_write(int unit, int reg, int thread, unsigned int val);
+extern unsigned int core_reg_read(int unit, int reg, int thread);
+
+/*
+ * These macros allow direct access from C to any register known to the
+ * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT.
+ */
+
+#define __core_reg_get(reg) ({ \
+ unsigned int __grvalue; \
+ asm volatile("MOV %0," #reg \
+ : "=r" (__grvalue)); \
+ __grvalue; \
+})
+
+#define __core_reg_set(reg, value) do { \
+ unsigned int __srvalue = (value); \
+ asm volatile("MOV " #reg ",%0" \
+ : \
+ : "r" (__srvalue)); \
+} while (0)
+
+#define __core_reg_swap(reg, value) do { \
+ unsigned int __srvalue = (value); \
+ asm volatile("SWAP " #reg ",%0" \
+ : "+r" (__srvalue)); \
+ (value) = __srvalue; \
+} while (0)
+
+#endif
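Editorial note: an illustrative use of the accessors above, reading TXTACTCYC (one of the candidates named in the comment) around a region of interest; the helper itself is hypothetical:

static unsigned int active_cycles(void (*fn)(void))
{
        unsigned int start = __core_reg_get(TXTACTCYC);

        fn();
        /* unsigned subtraction handles counter wrap naturally */
        return __core_reg_get(TXTACTCYC) - start;
}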
diff --git a/arch/metag/include/asm/cpu.h b/arch/metag/include/asm/cpu.h
new file mode 100644
index 00000000000..decf1296926
--- /dev/null
+++ b/arch/metag/include/asm/cpu.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_METAG_CPU_H
+#define _ASM_METAG_CPU_H
+
+#include <linux/percpu.h>
+
+struct cpuinfo_metag {
+ struct cpu cpu;
+#ifdef CONFIG_SMP
+ unsigned long loops_per_jiffy;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_metag, cpu_data);
+#endif /* _ASM_METAG_CPU_H */
diff --git a/arch/metag/include/asm/da.h b/arch/metag/include/asm/da.h
new file mode 100644
index 00000000000..81bd5212fb0
--- /dev/null
+++ b/arch/metag/include/asm/da.h
@@ -0,0 +1,43 @@
+/*
+ * Meta DA JTAG debugger control.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ */
+
+#ifndef _METAG_DA_H_
+#define _METAG_DA_H_
+
+#ifdef CONFIG_METAG_DA
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+extern bool _metag_da_present;
+
+/**
+ * metag_da_enabled() - Find whether a DA is currently enabled.
+ *
+ * Returns: true if a DA was detected, false if not.
+ */
+static inline bool metag_da_enabled(void)
+{
+ return _metag_da_present;
+}
+
+/**
+ * metag_da_probe() - Try and detect a connected DA.
+ *
+ * This is used at start up to detect whether a DA is active.
+ *
+ * Returns: 0 on detection, -err otherwise.
+ */
+int __init metag_da_probe(void);
+
+#else /* !CONFIG_METAG_DA */
+
+#define metag_da_enabled() false
+#define metag_da_probe() do {} while (0)
+
+#endif
+
+#endif /* _METAG_DA_H_ */
diff --git a/arch/metag/include/asm/delay.h b/arch/metag/include/asm/delay.h
new file mode 100644
index 00000000000..9c92f996957
--- /dev/null
+++ b/arch/metag/include/asm/delay.h
@@ -0,0 +1,29 @@
+#ifndef _METAG_DELAY_H
+#define _METAG_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/metag/lib/delay.c
+ */
+
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
+extern void __delay(unsigned long loops);
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
+ __udelay(n))
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+ __ndelay(n))
+
+#endif /* _METAG_DELAY_H */
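Editorial note: the worked arithmetic behind the two magic constants, for reference:

/*
 * The multipliers convert a count into a 32.32 fixed-point fraction of
 * a second, so __const_udelay() can scale it by loops-per-second (the
 * usual __const_udelay scheme) using the top 32 bits of the product:
 *
 *   2^32 / 10^6 = 4294.967296  -> rounded up: 4295 = 0x10c7
 *   2^32 / 10^9 =    4.294...  -> rounded up:    5 = 0x5
 *
 * e.g. udelay(10) expands to __const_udelay(10 * 0x10c7).
 */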
diff --git a/arch/metag/include/asm/div64.h b/arch/metag/include/asm/div64.h
new file mode 100644
index 00000000000..0fdd1167621
--- /dev/null
+++ b/arch/metag/include/asm/div64.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_DIV64_H__
+#define __ASM_DIV64_H__
+
+#include <asm-generic/div64.h>
+
+extern u64 div_u64(u64 dividend, u64 divisor);
+extern s64 div_s64(s64 dividend, s64 divisor);
+
+#define div_u64 div_u64
+#define div_s64 div_s64
+
+#endif
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..14b23efd9b7
--- /dev/null
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -0,0 +1,190 @@
+#ifndef _ASM_METAG_DMA_MAPPING_H
+#define _ASM_METAG_DMA_MAPPING_H
+
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <linux/scatterlist.h>
+#include <asm/bug.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
+void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(size == 0);
+ dma_sync_for_device(ptr, size, direction);
+ return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(nents == 0 || sglist[0].length == 0);
+
+ for_each_sg(sglist, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ dma_sync_for_device(sg_virt(sg), sg->length, direction);
+ }
+
+ return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+ direction);
+ return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(nhwentries == 0 || sglist[0].length == 0);
+
+ for_each_sg(sglist, sg, nhwentries, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+ }
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
+ direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
+ direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+ for (i = 0; i < nelems; i++, sg++)
+ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+ for (i = 0; i < nelems; i++, sg++)
+ dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+#define dma_supported(dev, mask) (1)
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+/*
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+ */
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+#endif
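Editorial note: an illustrative streaming mapping for a transmit buffer using the helpers above (dev, buf and the device kick are hypothetical):

static void send_buffer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* syncs the CPU cache for the region and returns the bus address */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* ... program 'handle' into the device and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}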
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
new file mode 100644
index 00000000000..d63b9d0e57d
--- /dev/null
+++ b/arch/metag/include/asm/elf.h
@@ -0,0 +1,128 @@
+#ifndef __ASM_METAG_ELF_H
+#define __ASM_METAG_ELF_H
+
+#define EM_METAG 174
+
+/* Meta relocations */
+#define R_METAG_HIADDR16 0
+#define R_METAG_LOADDR16 1
+#define R_METAG_ADDR32 2
+#define R_METAG_NONE 3
+#define R_METAG_RELBRANCH 4
+#define R_METAG_GETSETOFF 5
+
+/* Backward compatibility */
+#define R_METAG_REG32OP1 6
+#define R_METAG_REG32OP2 7
+#define R_METAG_REG32OP3 8
+#define R_METAG_REG16OP1 9
+#define R_METAG_REG16OP2 10
+#define R_METAG_REG16OP3 11
+#define R_METAG_REG32OP4 12
+
+#define R_METAG_HIOG 13
+#define R_METAG_LOOG 14
+
+/* GNU */
+#define R_METAG_GNU_VTINHERIT 30
+#define R_METAG_GNU_VTENTRY 31
+
+/* PIC relocations */
+#define R_METAG_HI16_GOTOFF 32
+#define R_METAG_LO16_GOTOFF 33
+#define R_METAG_GETSET_GOTOFF 34
+#define R_METAG_GETSET_GOT 35
+#define R_METAG_HI16_GOTPC 36
+#define R_METAG_LO16_GOTPC 37
+#define R_METAG_HI16_PLT 38
+#define R_METAG_LO16_PLT 39
+#define R_METAG_RELBRANCH_PLT 40
+#define R_METAG_GOTOFF 41
+#define R_METAG_PLT 42
+#define R_METAG_COPY 43
+#define R_METAG_JMP_SLOT 44
+#define R_METAG_RELATIVE 45
+#define R_METAG_GLOB_DAT 46
+
+/*
+ * ELF register definitions.
+ */
+
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_gp_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef unsigned long elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_METAG)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_METAG
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { _r->ctx.AX[0].U0 = 0; } while (0)
+
+#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE 0x08000000UL
+
+#define ELF_CORE_COPY_REGS(_dest, _regs) \
+ memcpy((char *)&_dest, (char *)_regs, sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo. */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#define STACK_RND_MASK (0)
+
+#ifdef CONFIG_METAG_USER_TCM
+
+struct elf32_phdr;
+struct file;
+
+unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
+ struct elf32_phdr *eppnt, int prot, int type,
+ unsigned long total_size);
+
+static inline unsigned long metag_elf_map(struct file *filep,
+ unsigned long addr,
+ struct elf32_phdr *eppnt, int prot,
+ int type, unsigned long total_size)
+{
+ return __metag_elf_map(filep, addr, eppnt, prot, type, total_size);
+}
+#define elf_map metag_elf_map
+
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/fixmap.h b/arch/metag/include/asm/fixmap.h
new file mode 100644
index 00000000000..33312751c92
--- /dev/null
+++ b/arch/metag/include/asm/fixmap.h
@@ -0,0 +1,99 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of the consistent memory region backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+#define FIX_N_COLOURS 8
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP (CONSISTENT_START - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+ * illegal way. (such as mixing up address types or using
+ * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message.
+ */
+ if (idx >= __end_of_fixed_addresses)
+ __this_fixmap_does_not_exist();
+
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel( \
+ pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+ (vaddr) \
+ )
+
+/*
+ * Called from pgtable_init()
+ */
+extern void fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base);
+
+
+#endif
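Editorial note: since the translation above is a pure affine map, index and address round-trip exactly. An illustrative sanity check (hypothetical; assumes CONFIG_HIGHMEM so an index exists, and PAGE_SIZE/BUG_ON from the headers already included here):

#ifdef CONFIG_HIGHMEM
static void fixmap_roundtrip_example(void)
{
        unsigned long va = fix_to_virt(FIX_KMAP_BEGIN);

        BUG_ON(virt_to_fix(va) != FIX_KMAP_BEGIN);
        /* each successive index sits one page further down from FIXADDR_TOP */
        BUG_ON(fix_to_virt(FIX_KMAP_BEGIN + 1) != va - PAGE_SIZE);
}
#endif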
diff --git a/arch/metag/include/asm/ftrace.h b/arch/metag/include/asm/ftrace.h
new file mode 100644
index 00000000000..2901f0f7d94
--- /dev/null
+++ b/arch/metag/include/asm/ftrace.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_METAG_FTRACE
+#define _ASM_METAG_FTRACE
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void mcount_wrapper(void);
+#define MCOUNT_ADDR ((long)(mcount_wrapper))
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+ /* No extra data needed on metag */
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _ASM_METAG_FTRACE */
diff --git a/arch/metag/include/asm/global_lock.h b/arch/metag/include/asm/global_lock.h
new file mode 100644
index 00000000000..fc831c88c22
--- /dev/null
+++ b/arch/metag/include/asm/global_lock.h
@@ -0,0 +1,100 @@
+#ifndef __ASM_METAG_GLOBAL_LOCK_H
+#define __ASM_METAG_GLOBAL_LOCK_H
+
+#include <asm/metag_mem.h>
+
+/**
+ * __global_lock1() - Acquire global voluntary lock (LOCK1).
+ * @flags: Variable to store flags into.
+ *
+ * Acquires the Meta global voluntary lock (LOCK1), also taking care to disable
+ * all triggers so we cannot be interrupted, and to enforce a compiler barrier
+ * so that the compiler cannot reorder memory accesses across the lock.
+ *
+ * No other hardware thread will be able to acquire the voluntary or exclusive
+ * locks until the voluntary lock is released with @__global_unlock1, but they
+ * may continue to execute as long as they aren't trying to acquire either of
+ * the locks.
+ */
+#define __global_lock1(flags) do { \
+ unsigned int __trval; \
+ asm volatile("MOV %0,#0\n\t" \
+ "SWAP %0,TXMASKI\n\t" \
+ "LOCK1" \
+ : "=r" (__trval) \
+ : \
+ : "memory"); \
+ (flags) = __trval; \
+} while (0)
+
+/**
+ * __global_unlock1() - Release global voluntary lock (LOCK1).
+ * @flags: Variable to restore flags from.
+ *
+ * Releases the Meta global voluntary lock (LOCK1) acquired with
+ * @__global_lock1, also taking care to re-enable triggers, and to enforce a
+ * compiler barrier so that the compiler cannot reorder memory accesses across
+ * the unlock.
+ *
+ * This immediately allows another hardware thread to acquire the voluntary or
+ * exclusive locks.
+ */
+#define __global_unlock1(flags) do { \
+ unsigned int __trval = (flags); \
+ asm volatile("LOCK0\n\t" \
+ "MOV TXMASKI,%0" \
+ : \
+ : "r" (__trval) \
+ : "memory"); \
+} while (0)
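+
+/*
+ * Illustrative use (sketch, not part of this patch), mirroring how
+ * meta_l2c_writeback() in asm/l2cache.h brackets its purge:
+ *
+ *     unsigned long flags;
+ *
+ *     __global_lock1(flags);
+ *     ...update state shared between hardware threads...
+ *     __global_unlock1(flags);
+ */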
+
+/**
+ * __global_lock2() - Acquire global exclusive lock (LOCK2).
+ * @flags: Variable to store flags into.
+ *
+ * Acquires the Meta global voluntary lock and global exclusive lock (LOCK2),
+ * also taking care to disable all triggers so we cannot be interrupted, to take
+ * the atomic lock (system event) and to enforce a compiler barrier so that the
+ * compiler cannot reorder memory accesses across the lock.
+ *
+ * No other hardware thread will be able to execute code until the locks are
+ * released with @__global_unlock2.
+ */
+#define __global_lock2(flags) do { \
+ unsigned int __trval; \
+ unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+ asm volatile("MOV %0,#0\n\t" \
+ "SWAP %0,TXMASKI\n\t" \
+ "LOCK2\n\t" \
+ "SETD [%1+#0x40],D1RtP" \
+ : "=&r" (__trval) \
+ : "u" (__alock_hi) \
+ : "memory"); \
+ (flags) = __trval; \
+} while (0)
+
+/**
+ * __global_unlock2() - Release global exclusive lock (LOCK2).
+ * @flags: Variable to restore flags from.
+ *
+ * Releases the Meta global exclusive lock (LOCK2) and global voluntary lock
+ * acquired with @__global_lock2, also taking care to release the atomic lock
+ * (system event), re-enable triggers, and to enforce a compiler barrier so that
+ * the compiler cannot reorder memory accesses across the unlock.
+ *
+ * This immediately allows other hardware threads to continue executing and one
+ * of them to acquire locks.
+ */
+#define __global_unlock2(flags) do { \
+ unsigned int __trval = (flags); \
+ unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+ asm volatile("SETD [%1+#0x00],D1RtP\n\t" \
+ "LOCK0\n\t" \
+ "MOV TXMASKI,%0" \
+ : \
+ : "r" (__trval), \
+ "u" (__alock_hi) \
+ : "memory"); \
+} while (0)
+
+#endif /* __ASM_METAG_GLOBAL_LOCK_H */
diff --git a/arch/metag/include/asm/gpio.h b/arch/metag/include/asm/gpio.h
new file mode 100644
index 00000000000..b3799d88ffc
--- /dev/null
+++ b/arch/metag/include/asm/gpio.h
@@ -0,0 +1,4 @@
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/metag/include/asm/highmem.h b/arch/metag/include/asm/highmem.h
new file mode 100644
index 00000000000..6646a15c73d
--- /dev/null
+++ b/arch/metag/include/asm/highmem.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * Ordering is (from lower to higher memory addresses):
+ *
+ * high_memory
+ * Persistent kmap area
+ * PKMAP_BASE
+ * fixed_addresses
+ * FIXADDR_START
+ * FIXADDR_TOP
+ * Vmalloc area
+ * VMALLOC_START
+ * VMALLOC_END
+ */
+#define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
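+
+/*
+ * Worked identity (illustrative, not part of this patch): for any
+ * page-aligned address in the pkmap area,
+ * PKMAP_ADDR(PKMAP_NR(virt)) == virt.
+ */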
+
+#define kmap_prot PAGE_KERNEL
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
new file mode 100644
index 00000000000..f545477e61f
--- /dev/null
+++ b/arch/metag/include/asm/hugetlb.h
@@ -0,0 +1,86 @@
+#ifndef _ASM_METAG_HUGETLB_H
+#define _ASM_METAG_HUGETLB_H
+
+#include <asm/page.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ return 0;
+}
+
+int prepare_hugepage_range(struct file *file, unsigned long addr,
+ unsigned long len);
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_METAG_HUGETLB_H */
diff --git a/arch/metag/include/asm/hwthread.h b/arch/metag/include/asm/hwthread.h
new file mode 100644
index 00000000000..8f9786619b1
--- /dev/null
+++ b/arch/metag/include/asm/hwthread.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2008 Imagination Technologies
+ */
+#ifndef __METAG_HWTHREAD_H
+#define __METAG_HWTHREAD_H
+
+#include <linux/bug.h>
+#include <linux/io.h>
+
+#include <asm/metag_mem.h>
+
+#define BAD_HWTHREAD_ID (0xFFU)
+#define BAD_CPU_ID (0xFFU)
+
+extern u8 cpu_2_hwthread_id[];
+extern u8 hwthread_id_2_cpu[];
+
+/*
+ * Each hardware thread's Control Unit registers are memory-mapped
+ * and can therefore be accessed by any other hardware thread.
+ *
+ * This helper function returns the memory address where "thread"'s
+ * register "regnum" is mapped.
+ */
+static inline
+void __iomem *__CU_addr(unsigned int thread, unsigned int regnum)
+{
+ unsigned int base, thread_offset, thread_regnum;
+
+ WARN_ON(thread == BAD_HWTHREAD_ID);
+
+ base = T0UCTREG0; /* Control unit base */
+
+ thread_offset = TnUCTRX_STRIDE * thread;
+ thread_regnum = TXUCTREGn_STRIDE * regnum;
+
+ return (void __iomem *)(base + thread_offset + thread_regnum);
+}
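+
+/*
+ * Illustrative use (sketch, not part of this patch): read hardware
+ * thread 1's first control unit register through the memory map:
+ *
+ *     u32 val = readl(__CU_addr(1, 0));
+ */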
+
+#endif /* __METAG_HWTHREAD_H */
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
new file mode 100644
index 00000000000..9359e504844
--- /dev/null
+++ b/arch/metag/include/asm/io.h
@@ -0,0 +1,165 @@
+#ifndef _ASM_METAG_IO_H
+#define _ASM_METAG_IO_H
+
+#include <linux/types.h>
+
+#define IO_SPACE_LIMIT 0
+
+#define page_to_bus page_to_phys
+#define bus_to_page phys_to_page
+
+/*
+ * Generic I/O
+ */
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 ret;
+ asm volatile("GETB %0,[%1]"
+ : "=da" (ret)
+ : "da" (addr)
+ : "memory");
+ return ret;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 ret;
+ asm volatile("GETW %0,[%1]"
+ : "=da" (ret)
+ : "da" (addr)
+ : "memory");
+ return ret;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 ret;
+ asm volatile("GETD %0,[%1]"
+ : "=da" (ret)
+ : "da" (addr)
+ : "memory");
+ return ret;
+}
+
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 ret;
+ asm volatile("GETL %0,%t0,[%1]"
+ : "=da" (ret)
+ : "da" (addr)
+ : "memory");
+ return ret;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ asm volatile("SETB [%0],%1"
+ :
+ : "da" (addr),
+ "da" (b)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 b, volatile void __iomem *addr)
+{
+ asm volatile("SETW [%0],%1"
+ :
+ : "da" (addr),
+ "da" (b)
+ : "memory");
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 b, volatile void __iomem *addr)
+{
+ asm volatile("SETD [%0],%1"
+ :
+ : "da" (addr),
+ "da" (b)
+ : "memory");
+}
+
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
+{
+ asm volatile("SETL [%0],%1,%t1"
+ :
+ : "da" (addr),
+ "da" (b)
+ : "memory");
+}
+
+/*
+ * The generic io.h can define all the other generic accessors
+ */
+
+#include <asm-generic/io.h>
+
+/*
+ * Despite being a 32-bit architecture, Meta can do 64-bit memory accesses
+ * (assuming the bus supports it).
+ */
+
+#define readq __raw_readq
+#define writeq __raw_writeq
+
+/*
+ * Meta specific I/O for accessing non-MMU areas.
+ *
+ * These can be provided with a physical address rather than an __iomem pointer
+ * and should only be used by core architecture code for accessing fixed core
+ * registers. Generic drivers should use ioremap and the generic I/O accessors.
+ */
+
+#define metag_in8(addr) __raw_readb((volatile void __iomem *)(addr))
+#define metag_in16(addr) __raw_readw((volatile void __iomem *)(addr))
+#define metag_in32(addr) __raw_readl((volatile void __iomem *)(addr))
+#define metag_in64(addr) __raw_readq((volatile void __iomem *)(addr))
+
+#define metag_out8(b, addr) __raw_writeb(b, (volatile void __iomem *)(addr))
+#define metag_out16(b, addr) __raw_writew(b, (volatile void __iomem *)(addr))
+#define metag_out32(b, addr) __raw_writel(b, (volatile void __iomem *)(addr))
+#define metag_out64(b, addr) __raw_writeq(b, (volatile void __iomem *)(addr))
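+
+/*
+ * Illustrative use (sketch, not part of this patch): poke a fixed core
+ * register by physical address, e.g. triggering an L2 line init as
+ * asm/l2cache.h does:
+ *
+ *     metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT);
+ *     while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS)
+ *             ;
+ */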
+
+/*
+ * I/O remapping functions
+ */
+
+extern void __iomem *__ioremap(unsigned long offset,
+ size_t size, unsigned long flags);
+extern void __iounmap(void __iomem *addr);
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+#define ioremap(offset, size) \
+ __ioremap((offset), (size), 0)
+
+#define ioremap_nocache(offset, size) \
+ __ioremap((offset), (size), 0)
+
+#define ioremap_cached(offset, size) \
+ __ioremap((offset), (size), _PAGE_CACHEABLE)
+
+#define ioremap_wc(offset, size) \
+ __ioremap((offset), (size), _PAGE_WR_COMBINE)
+
+#define iounmap(addr) \
+ __iounmap(addr)
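+
+/*
+ * Illustrative use (sketch with hypothetical 'phys' and 'REG_OFF', not
+ * part of this patch):
+ *
+ *     void __iomem *regs = ioremap(phys, PAGE_SIZE);
+ *     if (regs) {
+ *             writel(val, regs + REG_OFF);
+ *             iounmap(regs);
+ *     }
+ */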
+
+#endif /* _ASM_METAG_IO_H */
diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h
new file mode 100644
index 00000000000..be0c8f3c5a5
--- /dev/null
+++ b/arch/metag/include/asm/irq.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_METAG_IRQ_H
+#define __ASM_METAG_IRQ_H
+
+#ifdef CONFIG_4KSTACKS
+extern void irq_ctx_init(int cpu);
+extern void irq_ctx_exit(int cpu);
+# define __ARCH_HAS_DO_SOFTIRQ
+#else
+# define irq_ctx_init(cpu) do { } while (0)
+# define irq_ctx_exit(cpu) do { } while (0)
+#endif
+
+void tbi_startup_interrupt(int);
+void tbi_shutdown_interrupt(int);
+
+struct pt_regs;
+
+int tbisig_map(unsigned int hw);
+extern void do_IRQ(int irq, struct pt_regs *regs);
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+int traps_save_context(void);
+int traps_restore_context(void);
+#endif
+
+#include <asm-generic/irq.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void migrate_irqs(void);
+#endif
+
+#endif /* __ASM_METAG_IRQ_H */
diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h
new file mode 100644
index 00000000000..339b16f062e
--- /dev/null
+++ b/arch/metag/include/asm/irqflags.h
@@ -0,0 +1,93 @@
+/*
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/core_reg.h>
+#include <asm/metag_regs.h>
+
+#define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT
+
+#ifdef CONFIG_SMP
+extern unsigned int get_trigger_mask(void);
+#else
+
+extern unsigned int global_trigger_mask;
+
+static inline unsigned int get_trigger_mask(void)
+{
+ return global_trigger_mask;
+}
+#endif
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return __core_reg_get(TXMASKI);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & ~INTS_OFF_MASK) == 0;
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ unsigned long flags = arch_local_save_flags();
+
+ return arch_irqs_disabled_flags(flags);
+}
+
+static inline unsigned long __irqs_disabled(void)
+{
+ /*
+ * We shouldn't enable exceptions if they are not already
+ * enabled. This is required for chancalls to work correctly.
+ */
+ return arch_local_save_flags() & INTS_OFF_MASK;
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags = __irqs_disabled();
+
+ asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags)
+ : "memory");
+
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
+}
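+
+/*
+ * Illustrative pattern (sketch, not part of this patch): the usual
+ * bracket around a short critical section:
+ *
+ *     unsigned long flags = arch_local_irq_save();
+ *     ...code that must not be interrupted...
+ *     arch_local_irq_restore(flags);
+ */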
+
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long flags = __irqs_disabled();
+
+ asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
+}
+
+#ifdef CONFIG_SMP
+/* Avoid circular include dependencies through <linux/preempt.h> */
+void arch_local_irq_enable(void);
+#else
+static inline void arch_local_irq_enable(void)
+{
+ arch_local_irq_restore(get_trigger_mask());
+}
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/arch/metag/include/asm/l2cache.h b/arch/metag/include/asm/l2cache.h
new file mode 100644
index 00000000000..bffbeaa4d93
--- /dev/null
+++ b/arch/metag/include/asm/l2cache.h
@@ -0,0 +1,258 @@
+#ifndef _METAG_L2CACHE_H
+#define _METAG_L2CACHE_H
+
+#ifdef CONFIG_METAG_L2C
+
+#include <asm/global_lock.h>
+#include <asm/io.h>
+
+/*
+ * Store the last known value of pfenable (we don't want prefetch enabled while
+ * L2 is off).
+ */
+extern int l2c_pfenable;
+
+/* defined in arch/metag/drivers/core-sysfs.c */
+extern struct sysdev_class cache_sysclass;
+
+static inline void wr_fence(void);
+
+/*
+ * Functions for reading of L2 cache configuration.
+ */
+
+/* Get raw L2 config register (CORE_CONFIG3) */
+static inline unsigned int meta_l2c_config(void)
+{
+ const unsigned int *corecfg3 = (const unsigned int *)METAC_CORE_CONFIG3;
+ return *corecfg3;
+}
+
+/* Get whether the L2 is present */
+static inline int meta_l2c_is_present(void)
+{
+ return meta_l2c_config() & METAC_CORECFG3_L2C_HAVE_L2C_BIT;
+}
+
+/* Get whether the L2 is configured for write-back instead of write-through */
+static inline int meta_l2c_is_writeback(void)
+{
+ return meta_l2c_config() & METAC_CORECFG3_L2C_MODE_BIT;
+}
+
+/* Get whether the L2 is unified instead of separated code/data */
+static inline int meta_l2c_is_unified(void)
+{
+ return meta_l2c_config() & METAC_CORECFG3_L2C_UNIFIED_BIT;
+}
+
+/* Get the L2 cache size in bytes */
+static inline unsigned int meta_l2c_size(void)
+{
+ unsigned int size_s;
+ if (!meta_l2c_is_present())
+ return 0;
+ size_s = (meta_l2c_config() & METAC_CORECFG3_L2C_SIZE_BITS)
+ >> METAC_CORECFG3_L2C_SIZE_S;
+ /* L2CSIZE is in KiB */
+ return 1024 << size_s;
+}
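+
+/*
+ * Worked example (illustrative, not part of this patch): an encoded
+ * size field of 6 gives 1024 << 6 = 64 KiB of L2.
+ */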
+
+/* Get the number of ways in the L2 cache */
+static inline unsigned int meta_l2c_ways(void)
+{
+ unsigned int ways_s;
+ if (!meta_l2c_is_present())
+ return 0;
+ ways_s = (meta_l2c_config() & METAC_CORECFG3_L2C_NUM_WAYS_BITS)
+ >> METAC_CORECFG3_L2C_NUM_WAYS_S;
+ return 0x1 << ways_s;
+}
+
+/* Get the line size of the L2 cache */
+static inline unsigned int meta_l2c_linesize(void)
+{
+ unsigned int line_size;
+ if (!meta_l2c_is_present())
+ return 0;
+ line_size = (meta_l2c_config() & METAC_CORECFG3_L2C_LINE_SIZE_BITS)
+ >> METAC_CORECFG3_L2C_LINE_SIZE_S;
+ switch (line_size) {
+ case METAC_CORECFG3_L2C_LINE_SIZE_64B:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
+/* Get the revision ID of the L2 cache */
+static inline unsigned int meta_l2c_revision(void)
+{
+ return (meta_l2c_config() & METAC_CORECFG3_L2C_REV_ID_BITS)
+ >> METAC_CORECFG3_L2C_REV_ID_S;
+}
+
+
+/*
+ * Start an initialisation of the L2 cachelines and wait for completion.
+ * This should only be done in a LOCK1 or LOCK2 critical section while the L2
+ * is disabled.
+ */
+static inline void _meta_l2c_init(void)
+{
+ metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT);
+ while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS)
+ /* do nothing */;
+}
+
+/*
+ * Start a writeback of dirty L2 cachelines and wait for completion.
+ * This should only be done in a LOCK1 or LOCK2 critical section.
+ */
+static inline void _meta_l2c_purge(void)
+{
+ metag_out32(SYSC_L2C_PURGE_PURGE, SYSC_L2C_PURGE);
+ while (metag_in32(SYSC_L2C_PURGE) == SYSC_L2C_PURGE_IN_PROGRESS)
+ /* do nothing */;
+}
+
+/* Set whether the L2 cache is enabled. */
+static inline void _meta_l2c_enable(int enabled)
+{
+ unsigned int enable;
+
+ enable = metag_in32(SYSC_L2C_ENABLE);
+ if (enabled)
+ enable |= SYSC_L2C_ENABLE_ENABLE_BIT;
+ else
+ enable &= ~SYSC_L2C_ENABLE_ENABLE_BIT;
+ metag_out32(enable, SYSC_L2C_ENABLE);
+}
+
+/* Set whether the L2 cache prefetch is enabled. */
+static inline void _meta_l2c_pf_enable(int pfenabled)
+{
+ unsigned int enable;
+
+ enable = metag_in32(SYSC_L2C_ENABLE);
+ if (pfenabled)
+ enable |= SYSC_L2C_ENABLE_PFENABLE_BIT;
+ else
+ enable &= ~SYSC_L2C_ENABLE_PFENABLE_BIT;
+ metag_out32(enable, SYSC_L2C_ENABLE);
+}
+
+/* Return whether the L2 cache is enabled */
+static inline int _meta_l2c_is_enabled(void)
+{
+ return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_ENABLE_BIT;
+}
+
+/* Return whether the L2 cache prefetch is enabled */
+static inline int _meta_l2c_pf_is_enabled(void)
+{
+ return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_PFENABLE_BIT;
+}
+
+
+/* Return whether the L2 cache is enabled */
+static inline int meta_l2c_is_enabled(void)
+{
+ int en;
+
+ /*
+ * There is no need to lock at the moment, as the enable bit is never
+ * intermediately changed, so we will never see an intermediate result.
+ */
+ en = _meta_l2c_is_enabled();
+
+ return en;
+}
+
+/*
+ * Ensure the L2 cache is disabled.
+ * Return whether the L2 was previously disabled.
+ */
+int meta_l2c_disable(void);
+
+/*
+ * Ensure the L2 cache is enabled.
+ * Return whether the L2 was previously enabled.
+ */
+int meta_l2c_enable(void);
+
+/* Return whether the L2 cache prefetch is enabled */
+static inline int meta_l2c_pf_is_enabled(void)
+{
+ return l2c_pfenable;
+}
+
+/*
+ * Set whether the L2 cache prefetch is enabled.
+ * Return whether the L2 prefetch was previously enabled.
+ */
+int meta_l2c_pf_enable(int pfenable);
+
+/*
+ * Flush the L2 cache.
+ * Return 1 if the L2 is disabled.
+ */
+int meta_l2c_flush(void);
+
+/*
+ * Write back all dirty cache lines in the L2 cache.
+ * Return 1 if the L2 is disabled or there isn't any writeback.
+ */
+static inline int meta_l2c_writeback(void)
+{
+ unsigned long flags;
+ int en;
+
+ /* no need to purge if it's not a writeback cache */
+ if (!meta_l2c_is_writeback())
+ return 1;
+
+ /*
+ * Purge only works if the L2 is enabled, and involves reading back to
+ * detect completion, so keep this operation atomic with other threads.
+ */
+ __global_lock1(flags);
+ en = meta_l2c_is_enabled();
+ if (likely(en)) {
+ wr_fence();
+ _meta_l2c_purge();
+ }
+ __global_unlock1(flags);
+
+ return !en;
+}
+
+#else /* CONFIG_METAG_L2C */
+
+#define meta_l2c_config() 0
+#define meta_l2c_is_present() 0
+#define meta_l2c_is_writeback() 0
+#define meta_l2c_is_unified() 0
+#define meta_l2c_size() 0
+#define meta_l2c_ways() 0
+#define meta_l2c_linesize() 0
+#define meta_l2c_revision() 0
+
+#define meta_l2c_is_enabled() 0
+#define _meta_l2c_pf_is_enabled() 0
+#define meta_l2c_pf_is_enabled() 0
+#define meta_l2c_disable() 1
+#define meta_l2c_enable() 0
+#define meta_l2c_pf_enable(X) 0
+static inline int meta_l2c_flush(void)
+{
+ return 1;
+}
+static inline int meta_l2c_writeback(void)
+{
+ return 1;
+}
+
+#endif /* CONFIG_METAG_L2C */
+
+#endif /* _METAG_L2CACHE_H */
diff --git a/arch/metag/include/asm/linkage.h b/arch/metag/include/asm/linkage.h
new file mode 100644
index 00000000000..73bf25ba4e1
--- /dev/null
+++ b/arch/metag/include/asm/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .p2align 2
+#define __ALIGN_STR ".p2align 2"
+
+#endif
diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h
new file mode 100644
index 00000000000..12c5664fea6
--- /dev/null
+++ b/arch/metag/include/asm/mach/arch.h
@@ -0,0 +1,86 @@
+/*
+ * arch/metag/include/asm/mach/arch.h
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * based on the ARM version:
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _METAG_MACH_ARCH_H_
+#define _METAG_MACH_ARCH_H_
+
+#include <linux/stddef.h>
+
+#include <asm/clock.h>
+
+/**
+ * struct machine_desc - Describes a board controlled by a Meta.
+ * @name: Board/SoC name.
+ * @dt_compat: Array of device tree 'compatible' strings.
+ * @clocks: Clock callbacks.
+ *
+ * @nr_irqs: Maximum number of IRQs.
+ * If 0, defaults to NR_IRQS in asm-generic/irq.h.
+ *
+ * @init_early: Early init callback.
+ * @init_irq: IRQ init callback for setting up IRQ controllers.
+ * @init_machine: Arch init callback for setting up devices.
+ * @init_late: Late init callback.
+ *
+ * This structure is provided by each board which can be controlled by a Meta.
+ * It is chosen by matching the compatible strings in the device tree provided
+ * by the bootloader with the strings in @dt_compat, and sets up any aspects of
+ * the machine that aren't configured with device tree (yet).
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+ struct meta_clock_desc *clocks;
+
+ unsigned int nr_irqs;
+
+ void (*init_early)(void);
+ void (*init_irq)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+ /* the default machine is the last one linked in */
+ if (__arch_info_end - 1 < __arch_info_begin)
+ return NULL;
+ return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features. This is built into
+ * a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used \
+__attribute__((__section__(".arch.info.init"))) = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
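+
+/*
+ * Illustrative board description (hypothetical names, not part of this
+ * patch):
+ *
+ *     static const char *myboard_compat[] = { "vendor,myboard", NULL };
+ *
+ *     MACHINE_START(MYBOARD, "MyBoard")
+ *             .dt_compat = myboard_compat,
+ *     MACHINE_END
+ */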
+
+#endif /* _METAG_MACH_ARCH_H_ */
diff --git a/arch/metag/include/asm/metag_isa.h b/arch/metag/include/asm/metag_isa.h
new file mode 100644
index 00000000000..c8aa2ae3899
--- /dev/null
+++ b/arch/metag/include/asm/metag_isa.h
@@ -0,0 +1,81 @@
+/*
+ * asm/metag_isa.h
+ *
+ * Copyright (C) 2000-2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Various defines for Meta instruction set.
+ */
+
+#ifndef _ASM_METAG_ISA_H_
+#define _ASM_METAG_ISA_H_
+
+
+/* L1 cache layout */
+
+/* Data cache line size as bytes and shift */
+#define DCACHE_LINE_BYTES 64
+#define DCACHE_LINE_S 6
+
+/* Number of ways in the data cache */
+#define DCACHE_WAYS 4
+
+/* Instruction cache line size as bytes and shift */
+#define ICACHE_LINE_BYTES 64
+#define ICACHE_LINE_S 6
+
+/* Number of ways in the instruction cache */
+#define ICACHE_WAYS 4
+
+
+/*
+ * CACHEWD/CACHEWL instructions use the bottom 8 bits of the data presented to
+ * control the operation actually achieved.
+ */
+/* Use of these two bits should be discouraged since the bits don't have
+ * consistent meanings.
+ */
+#define CACHEW_ICACHE_BIT 0x01
+#define CACHEW_TLBFLUSH_BIT 0x02
+
+#define CACHEW_FLUSH_L1D_L2 0x0
+#define CACHEW_INVALIDATE_L1I 0x1
+#define CACHEW_INVALIDATE_L1DTLB 0x2
+#define CACHEW_INVALIDATE_L1ITLB 0x3
+#define CACHEW_WRITEBACK_L1D_L2 0x4
+#define CACHEW_INVALIDATE_L1D 0x8
+#define CACHEW_INVALIDATE_L1D_L2 0xC
+
+/*
+ * CACHERD/CACHERL instructions use bits 3:5 of the address presented to
+ * control the operation achieved and hence the specific result.
+ */
+#define CACHER_ADDR_BITS 0xFFFFFFC0
+#define CACHER_OPER_BITS 0x00000030
+#define CACHER_OPER_S 4
+#define CACHER_OPER_LINPHY 0
+#define CACHER_ICACHE_BIT 0x00000008
+#define CACHER_ICACHE_S 3
+
+/*
+ * CACHERD/CACHERL LINPHY Oper result is one/two 32-bit words
+ *
+ * If CRLINPHY0_VAL_BIT (Bit 0) set then,
+ * Lower 32-bits corresponds to MMCU_ENTRY_* above.
+ * Upper 32-bits corresponds to CRLINPHY1_* values below (if requested).
+ * else
+ * Lower 32-bits corresponds to CRLINPHY0_* values below.
+ * Upper 32-bits undefined.
+ */
+#define CRLINPHY0_VAL_BIT 0x00000001
+#define CRLINPHY0_FIRST_BIT 0x00000004 /* Set if VAL=0 due to first level */
+
+#define CRLINPHY1_READ_BIT 0x00000001 /* Set if reads permitted */
+#define CRLINPHY1_SINGLE_BIT 0x00000004 /* Set if TLB does not cache entry */
+#define CRLINPHY1_PAGEMSK_BITS 0x0000FFF0 /* Set to ((2^n-1)>>12) value */
+#define CRLINPHY1_PAGEMSK_S 4
+
+#endif /* _ASM_METAG_ISA_H_ */
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h
new file mode 100644
index 00000000000..3f7b54d8cca
--- /dev/null
+++ b/arch/metag/include/asm/metag_mem.h
@@ -0,0 +1,1106 @@
+/*
+ * asm/metag_mem.h
+ *
+ * Copyright (C) 2000-2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Various defines for Meta (memory-mapped) registers.
+ */
+
+#ifndef _ASM_METAG_MEM_H_
+#define _ASM_METAG_MEM_H_
+
+/*****************************************************************************
+ * META MEMORY MAP LINEAR ADDRESS VALUES
+ ****************************************************************************/
+/*
+ * COMMON MEMORY MAP
+ * -----------------
+ */
+
+#define LINSYSTEM_BASE 0x00200000
+#define LINSYSTEM_LIMIT 0x07FFFFFF
+
+/* Linear cache flush is now implemented via the DCACHE instruction. These
+ defines relate to a special region that used to exist for achieving cache
+ flushes.
+ */
+#define LINSYSLFLUSH_S 0
+
+#define LINSYSRES0_BASE 0x00200000
+#define LINSYSRES0_LIMIT 0x01FFFFFF
+
+#define LINSYSCUSTOM_BASE 0x02000000
+#define LINSYSCUSTOM_LIMIT 0x02FFFFFF
+
+#define LINSYSEXPAND_BASE 0x03000000
+#define LINSYSEXPAND_LIMIT 0x03FFFFFF
+
+#define LINSYSEVENT_BASE 0x04000000
+#define LINSYSEVENT_WR_ATOMIC_UNLOCK 0x04000000
+#define LINSYSEVENT_WR_ATOMIC_LOCK 0x04000040
+#define LINSYSEVENT_WR_CACHE_DISABLE 0x04000080
+#define LINSYSEVENT_WR_CACHE_ENABLE 0x040000C0
+#define LINSYSEVENT_WR_COMBINE_FLUSH 0x04000100
+#define LINSYSEVENT_WR_FENCE 0x04000140
+#define LINSYSEVENT_LIMIT 0x04000FFF
+
+#define LINSYSCFLUSH_BASE 0x04400000
+#define LINSYSCFLUSH_DCACHE_LINE 0x04400000
+#define LINSYSCFLUSH_ICACHE_LINE 0x04500000
+#define LINSYSCFLUSH_MMCU 0x04700000
+#ifndef METAC_1_2
+#define LINSYSCFLUSH_TxMMCU_BASE 0x04700020
+#define LINSYSCFLUSH_TxMMCU_STRIDE 0x00000008
+#endif
+#define LINSYSCFLUSH_ADDR_BITS 0x000FFFFF
+#define LINSYSCFLUSH_ADDR_S 0
+#define LINSYSCFLUSH_LIMIT 0x047FFFFF
+
+#define LINSYSCTRL_BASE 0x04800000
+#define LINSYSCTRL_LIMIT 0x04FFFFFF
+
+#define LINSYSMTABLE_BASE 0x05000000
+#define LINSYSMTABLE_LIMIT 0x05FFFFFF
+
+#define LINSYSDIRECT_BASE 0x06000000
+#define LINSYSDIRECT_LIMIT 0x07FFFFFF
+
+#define LINLOCAL_BASE 0x08000000
+#define LINLOCAL_LIMIT 0x7FFFFFFF
+
+#define LINCORE_BASE 0x80000000
+#define LINCORE_LIMIT 0x87FFFFFF
+
+#define LINCORE_CODE_BASE 0x80000000
+#define LINCORE_CODE_LIMIT 0x81FFFFFF
+
+#define LINCORE_DATA_BASE 0x82000000
+#define LINCORE_DATA_LIMIT 0x83FFFFFF
+
+
+/* The core can support locked icache lines in this region */
+#define LINCORE_ICACHE_BASE 0x84000000
+#define LINCORE_ICACHE_LIMIT 0x85FFFFFF
+
+/* The core can support locked dcache lines in this region */
+#define LINCORE_DCACHE_BASE 0x86000000
+#define LINCORE_DCACHE_LIMIT 0x87FFFFFF
+
+#define LINGLOBAL_BASE 0x88000000
+#define LINGLOBAL_LIMIT 0xFFFDFFFF
+
+/*
+ * CHIP Core Register Map
+ * ----------------------
+ */
+#define CORE_HWBASE 0x04800000
+#define PRIV_HWBASE 0x04810000
+#define TRIG_HWBASE 0x04820000
+#define SYSC_HWBASE 0x04830000
+
+/*****************************************************************************
+ * INTER-THREAD KICK REGISTERS FOR SOFTWARE EVENT GENERATION
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that can be used to supply
+ * kicks to threads that service arbitrary software events.
+ */
+
+#define T0KICK 0x04800800 /* Background kick 0 */
+#define TXXKICK_MAX 0xFFFF /* Maximum kicks */
+#define TnXKICK_STRIDE 0x00001000 /* Thread scale value */
+#define TnXKICK_STRIDE_S 12
+#define T0KICKI 0x04800808 /* Interrupt kick 0 */
+#define TXIKICK_OFFSET 0x00000008 /* Int level offset value */
+#define T1KICK 0x04801800 /* Background kick 1 */
+#define T1KICKI 0x04801808 /* Interrupt kick 1 */
+#define T2KICK 0x04802800 /* Background kick 2 */
+#define T2KICKI 0x04802808 /* Interrupt kick 2 */
+#define T3KICK 0x04803800 /* Background kick 3 */
+#define T3KICKI 0x04803808 /* Interrupt kick 3 */
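+
+/*
+ * Illustrative address computation (sketch, not part of this patch):
+ * thread n's kick registers sit at a fixed stride from thread 0's, e.g.
+ * T0KICK + 2 * TnXKICK_STRIDE == T2KICK == 0x04802800, so writing 1
+ * there delivers a background kick to thread 2.
+ */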
+
+/*****************************************************************************
+ * GLOBAL REGISTER ACCESS RESOURCES
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that allow access to the
+ * internal state of all threads in order to allow global set-up of thread
+ * state and external handling of thread events, errors, or debugging.
+ *
+ * The actual unit and register index values needed to access individual
+ * registers are chip specific - see METAC_TXUXX_VALUES in metac_x_y.h.
+ * However two C array initialisers TXUXX_MASKS and TGUXX_MASKS will always be
+ * defined to allow arbitrary loading, display, and saving of all valid
+ * register states without detailed knowledge of their purpose - TXUXX sets
+ * bits for all valid registers and TGUXX sets bits for the sub-set which are
+ * global.
+ */
+
+#define T0UCTREG0 0x04800000 /* Access to all CT regs */
+#define TnUCTRX_STRIDE 0x00001000 /* Thread scale value */
+#define TXUCTREGn_STRIDE 0x00000008 /* Register scale value */
+
+#define TXUXXRXDT 0x0480FFF0 /* Data to/from any threads reg */
+#define TXUXXRXRQ 0x0480FFF8
+#define TXUXXRXRQ_DREADY_BIT 0x80000000 /* Poll for done */
+#define TXUXXRXRQ_DSPEXT_BIT 0x00020000 /* Addr DSP Regs */
+#define TXUXXRXRQ_RDnWR_BIT 0x00010000 /* Set for read */
+#define TXUXXRXRQ_TX_BITS 0x00003000 /* Thread number */
+#define TXUXXRXRQ_TX_S 12
+#define TXUXXRXRQ_RX_BITS 0x000001F0 /* Register num */
+#define TXUXXRXRQ_RX_S 4
+#define TXUXXRXRQ_DSPRARD0 0 /* DSP RAM A Read Pointer 0 */
+#define TXUXXRXRQ_DSPRARD1 1 /* DSP RAM A Read Pointer 1 */
+#define TXUXXRXRQ_DSPRAWR0 2 /* DSP RAM A Write Pointer 0 */
+#define TXUXXRXRQ_DSPRAWR2 3 /* DSP RAM A Write Pointer 1 */
+#define TXUXXRXRQ_DSPRBRD0 4 /* DSP RAM B Read Pointer 0 */
+#define TXUXXRXRQ_DSPRBRD1 5 /* DSP RAM B Read Pointer 1 */
+#define TXUXXRXRQ_DSPRBWR0 6 /* DSP RAM B Write Pointer 0 */
+#define TXUXXRXRQ_DSPRBWR1 7 /* DSP RAM B Write Pointer 1 */
+#define TXUXXRXRQ_DSPRARINC0 8 /* DSP RAM A Read Increment 0 */
+#define TXUXXRXRQ_DSPRARINC1 9 /* DSP RAM A Read Increment 1 */
+#define TXUXXRXRQ_DSPRAWINC0 10 /* DSP RAM A Write Increment 0 */
+#define TXUXXRXRQ_DSPRAWINC1 11 /* DSP RAM A Write Increment 1 */
+#define TXUXXRXRQ_DSPRBRINC0 12 /* DSP RAM B Read Increment 0 */
+#define TXUXXRXRQ_DSPRBRINC1 13 /* DSP RAM B Read Increment 1 */
+#define TXUXXRXRQ_DSPRBWINC0 14 /* DSP RAM B Write Increment 0 */
+#define TXUXXRXRQ_DSPRBWINC1 15 /* DSP RAM B Write Increment 1 */
+
+#define TXUXXRXRQ_ACC0L0 16 /* Accumulator 0 bottom 32-bits */
+#define TXUXXRXRQ_ACC1L0 17 /* Accumulator 1 bottom 32-bits */
+#define TXUXXRXRQ_ACC2L0 18 /* Accumulator 2 bottom 32-bits */
+#define TXUXXRXRQ_ACC3L0 19 /* Accumulator 3 bottom 32-bits */
+#define TXUXXRXRQ_ACC0HI 20 /* Accumulator 0 top 8-bits */
+#define TXUXXRXRQ_ACC1HI 21 /* Accumulator 1 top 8-bits */
+#define TXUXXRXRQ_ACC2HI 22 /* Accumulator 2 top 8-bits */
+#define TXUXXRXRQ_ACC3HI 23 /* Accumulator 3 top 8-bits */
+#define TXUXXRXRQ_UXX_BITS 0x0000000F /* Unit number */
+#define TXUXXRXRQ_UXX_S 0
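+
+/*
+ * Illustrative read sequence (sketch, not part of this patch): fetch
+ * register 4 of unit 3 on thread 1 using the asm/io.h accessors, then
+ * poll for completion:
+ *
+ *     metag_out32(TXUXXRXRQ_RDnWR_BIT | (1 << TXUXXRXRQ_TX_S) |
+ *                 (4 << TXUXXRXRQ_RX_S) | (3 << TXUXXRXRQ_UXX_S),
+ *                 TXUXXRXRQ);
+ *     while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+ *             ;
+ *     val = metag_in32(TXUXXRXDT);
+ */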
+
+/*****************************************************************************
+ * PRIVILEGE CONTROL VALUES FOR MEMORY MAPPED RESOURCES
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that give control over and
+ * the privilege required to access other memory mapped resources. These
+ * registers themselves always require privilege to update them.
+ */
+
+#define TXPRIVREG_STRIDE 0x8 /* Delta between per-thread regs */
+#define TXPRIVREG_STRIDE_S 3
+
+/*
+ * Each bit 0 to 15 defines privilege required to access internal register
+ * regions 0x04800000 to 0x048FFFFF in 64k chunks
+ */
+#define T0PIOREG 0x04810100
+#define T1PIOREG 0x04810108
+#define T2PIOREG 0x04810110
+#define T3PIOREG 0x04810118
+
+/*
+ * Each bit 0 to 31 defines privilege required to use the pair of
+ * system events implemented as writes in the regions 0x04000000 to
+ * 0x04000FFF in 2*64 byte chunks.
+ */
+#define T0PSYREG 0x04810180
+#define T1PSYREG 0x04810188
+#define T2PSYREG 0x04810190
+#define T3PSYREG 0x04810198
+
+/*
+ * CHIP PRIV CONTROLS
+ * ------------------
+ */
+
+/* The TXPIOREG register holds a bit mask directly mappable to
+ corresponding addresses in the range 0x04800000 to 0x049FFFFF */
+#define TXPIOREG_ADDR_BITS 0x1F0000 /* Up to 32x64K bytes */
+#define TXPIOREG_ADDR_S 16
+
+/* Hence based on the _HWBASE values ... */
+#define TXPIOREG_CORE_BIT (1<<((0x04800000>>16)&0x1F))
+#define TXPIOREG_PRIV_BIT (1<<((0x04810000>>16)&0x1F))
+#define TXPIOREG_TRIG_BIT (1<<((0x04820000>>16)&0x1F))
+#define TXPIOREG_SYSC_BIT (1<<((0x04830000>>16)&0x1F))
+
+#define TXPIOREG_WRC_BIT 0x00080000 /* Wr combiner reg priv */
+#define TXPIOREG_LOCALBUS_RW_BIT 0x00040000 /* Local bus rd/wr priv */
+#define TXPIOREG_SYSREGBUS_RD_BIT 0x00020000 /* Sys reg bus read priv */
+#define TXPIOREG_SYSREGBUS_WR_BIT 0x00010000 /* Sys reg bus write priv */
+
+/* CORE region privilege controls */
+#define T0PRIVCORE 0x04800828
+#define TXPRIVCORE_TXBKICK_BIT 0x001 /* Background kick priv */
+#define TXPRIVCORE_TXIKICK_BIT 0x002 /* Interrupt kick priv */
+#define TXPRIVCORE_TXAMAREGX_BIT 0x004 /* TXAMAREG4|5|6 priv */
+#define TnPRIVCORE_STRIDE 0x00001000
+
+#define T0PRIVSYSR 0x04810000
+#define TnPRIVSYSR_STRIDE 0x00000008
+#define TnPRIVSYSR_STRIDE_S 3
+#define TXPRIVSYSR_CFLUSH_BIT 0x01
+#define TXPRIVSYSR_MTABLE_BIT 0x02
+#define TXPRIVSYSR_DIRECT_BIT 0x04
+#ifdef METAC_1_2
+#define TXPRIVSYSR_ALL_BITS 0x07
+#else
+#define TXPRIVSYSR_CORE_BIT 0x08
+#define TXPRIVSYSR_CORECODE_BIT 0x10
+#define TXPRIVSYSR_ALL_BITS 0x1F
+#endif
+#define T1PRIVSYSR 0x04810008
+#define T2PRIVSYSR 0x04810010
+#define T3PRIVSYSR 0x04810018
+
+/*****************************************************************************
+ * H/W TRIGGER STATE/LEVEL REGISTERS AND H/W TRIGGER VECTORS
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that give control over and
+ * the state of hardware trigger sources both external to the META processor
+ * and internal to it.
+ */
+
+#define HWSTATMETA 0x04820000 /* Hardware status/clear META trig */
+#define HWSTATMETA_T0HALT_BITS 0xF
+#define HWSTATMETA_T0HALT_S 0
+#define HWSTATMETA_T0BHALT_BIT 0x1 /* Background HALT */
+#define HWSTATMETA_T0IHALT_BIT 0x2 /* Interrupt HALT */
+#define HWSTATMETA_T0PHALT_BIT 0x4 /* PF/RO Memory HALT */
+#define HWSTATMETA_T0AMATR_BIT 0x8 /* AMA trigger */
+#define HWSTATMETA_TnINT_S 4 /* Shift by (thread*4) */
+#define HWSTATEXT 0x04820010 /* H/W status/clear external trigs 0-31 */
+#define HWSTATEXT2 0x04820018 /* H/W status/clear external trigs 32-63 */
+#define HWSTATEXT4 0x04820020 /* H/W status/clear external trigs 64-95 */
+#define HWSTATEXT6 0x04820028 /* H/W status/clear external trigs 96-127 */
+#define HWLEVELEXT 0x04820030 /* Edge/Level type of external trigs 0-31 */
+#define HWLEVELEXT2 0x04820038 /* Edge/Level type of external trigs 32-63 */
+#define HWLEVELEXT4 0x04820040 /* Edge/Level type of external trigs 64-95 */
+#define HWLEVELEXT6 0x04820048 /* Edge/Level type of external trigs 96-127 */
+#define HWLEVELEXT_XXX_LEVEL 1 /* Level sense logic in HWSTATEXTn */
+#define HWLEVELEXT_XXX_EDGE 0
+#define HWMASKEXT 0x04820050 /* Enable/disable of external trigs 0-31 */
+#define HWMASKEXT2 0x04820058 /* Enable/disable of external trigs 32-63 */
+#define HWMASKEXT4 0x04820060 /* Enable/disable of external trigs 64-95 */
+#define HWMASKEXT6 0x04820068 /* Enable/disable of external trigs 96-127 */
+#define T0VECINT_BHALT 0x04820500 /* Background HALT trigger vector */
+#define TXVECXXX_BITS 0xF /* Per-trigger vector vals 0,1,4-15 */
+#define TXVECXXX_S 0
+#define T0VECINT_IHALT 0x04820508 /* Interrupt HALT */
+#define T0VECINT_PHALT 0x04820510 /* PF/RO memory fault */
+#define T0VECINT_AMATR 0x04820518 /* AMA trigger */
+#define TnVECINT_STRIDE 0x00000020 /* Per thread stride */
+#define HWVEC0EXT 0x04820700 /* Vectors for external triggers 0-31 */
+#define HWVEC20EXT 0x04821700 /* Vectors for external triggers 32-63 */
+#define HWVEC40EXT 0x04822700 /* Vectors for external triggers 64-95 */
+#define HWVEC60EXT 0x04823700 /* Vectors for external triggers 96-127 */
+#define HWVECnEXT_STRIDE 0x00000008 /* Per trigger stride */
+#define HWVECnEXT_DEBUG 0x1 /* Redirect trigger to debug i/f */
+
+/*
+ * CORE HWCODE-BREAKPOINT REGISTERS/VALUES
+ * ---------------------------------------
+ */
+#define CODEB0ADDR 0x0480FF00 /* Address specifier */
+#define CODEBXADDR_MATCHX_BITS 0xFFFFFFFC
+#define CODEBXADDR_MATCHX_S 2
+#define CODEB0CTRL 0x0480FF08 /* Control */
+#define CODEBXCTRL_MATEN_BIT 0x80000000 /* Match 'Enable' */
+#define CODEBXCTRL_MATTXEN_BIT 0x10000000 /* Match threadn enable */
+#define CODEBXCTRL_HITC_BITS 0x00FF0000 /* Hit counter */
+#define CODEBXCTRL_HITC_S 16
+#define CODEBXHITC_NEXT 0xFF /* Next 'hit' will trigger */
+#define CODEBXHITC_HIT1 0x00 /* No 'hits' after trigger */
+#define CODEBXCTRL_MMASK_BITS 0x0000FFFC /* Mask ADDR_MATCH bits */
+#define CODEBXCTRL_MMASK_S 2
+#define CODEBXCTRL_MATLTX_BITS 0x00000003 /* Match threadn LOCAL addr */
+#define CODEBXCTRL_MATLTX_S 0 /* Match threadn LOCAL addr */
+#define CODEBnXXXX_STRIDE 0x00000010 /* Stride between CODEB reg sets */
+#define CODEBnXXXX_STRIDE_S 4
+#define CODEBnXXXX_LIMIT 3 /* Sets 0-3 */
+
+/*
+ * CORE DATA-WATCHPOINT REGISTERS/VALUES
+ * -------------------------------------
+ */
+#define DATAW0ADDR 0x0480FF40 /* Address specifier */
+#define DATAWXADDR_MATCHR_BITS 0xFFFFFFF8
+#define DATAWXADDR_MATCHR_S 3
+#define DATAWXADDR_MATCHW_BITS 0xFFFFFFFF
+#define DATAWXADDR_MATCHW_S 0
+#define DATAW0CTRL 0x0480FF48 /* Control */
+#define DATAWXCTRL_MATRD_BIT 0x80000000 /* Match 'Read' */
+#ifndef METAC_1_2
+#define DATAWXCTRL_MATNOTTX_BIT 0x20000000 /* Invert threadn enable */
+#endif
+#define DATAWXCTRL_MATWR_BIT 0x40000000 /* Match 'Write' */
+#define DATAWXCTRL_MATTXEN_BIT 0x10000000 /* Match threadn enable */
+#define DATAWXCTRL_WRSIZE_BITS 0x0F000000 /* Write Match Size */
+#define DATAWXCTRL_WRSIZE_S 24
+#define DATAWWRSIZE_ANY 0 /* Any size transaction matches */
+#define DATAWWRSIZE_8BIT 1 /* Specific sizes ... */
+#define DATAWWRSIZE_16BIT 2
+#define DATAWWRSIZE_32BIT 3
+#define DATAWWRSIZE_64BIT 4
+#define DATAWXCTRL_HITC_BITS 0x00FF0000 /* Hit counter */
+#define DATAWXCTRL_HITC_S 16
+#define DATAWXHITC_NEXT 0xFF /* Next 'hit' will trigger */
+#define DATAWXHITC_HIT1 0x00 /* No 'hits' after trigger */
+#define DATAWXCTRL_MMASK_BITS 0x0000FFF8 /* Mask ADDR_MATCH bits */
+#define DATAWXCTRL_MMASK_S 3
+#define DATAWXCTRL_MATLTX_BITS 0x00000003 /* Match threadn LOCAL addr */
+#define DATAWXCTRL_MATLTX_S 0 /* Match threadn LOCAL addr */
+#define DATAW0DMATCH0 0x0480FF50 /* Write match data */
+#define DATAW0DMATCH1 0x0480FF58
+#define DATAW0DMASK0 0x0480FF60 /* Write match data mask */
+#define DATAW0DMASK1 0x0480FF68
+#define DATAWnXXXX_STRIDE 0x00000040 /* Stride between DATAW reg sets */
+#define DATAWnXXXX_STRIDE_S 6
+#define DATAWnXXXX_LIMIT 1 /* Sets 0,1 */
+
+/*
+ * CHIP Automatic Mips Allocation control registers
+ * ------------------------------------------------
+ */
+
+/* CORE memory mapped AMA registers */
+#define T0AMAREG4 0x04800810
+#define TXAMAREG4_POOLSIZE_BITS 0x3FFFFF00
+#define TXAMAREG4_POOLSIZE_S 8
+#define TXAMAREG4_AVALUE_BITS 0x000000FF
+#define TXAMAREG4_AVALUE_S 0
+#define T0AMAREG5 0x04800818
+#define TXAMAREG5_POOLC_BITS 0x07FFFFFF
+#define TXAMAREG5_POOLC_S 0
+#define T0AMAREG6 0x04800820
+#define TXAMAREG6_DLINEDEF_BITS 0x00FFFFF0
+#define TXAMAREG6_DLINEDEF_S 0
+#define TnAMAREGX_STRIDE 0x00001000
+
+/*
+ * Memory Management Control Unit Table Entries
+ * --------------------------------------------
+ */
+#define MMCU_ENTRY_S 4 /* -> Entry size */
+#define MMCU_ENTRY_ADDR_BITS 0xFFFFF000 /* Physical address */
+#define MMCU_ENTRY_ADDR_S 12 /* -> Page size */
+#define MMCU_ENTRY_CWIN_BITS 0x000000C0 /* Caching 'window' selection */
+#define MMCU_ENTRY_CWIN_S 6
+#define MMCU_CWIN_UNCACHED 0 /* May not be memory etc. */
+#define MMCU_CWIN_BURST 1 /* Cached but LRU unset */
+#define MMCU_CWIN_C1SET 2 /* Cached in 1 set only */
+#define MMCU_CWIN_CACHED 3 /* Fully cached */
+#define MMCU_ENTRY_CACHE_BIT 0x00000080 /* Set for cached region */
+#define MMCU_ECACHE1_FULL_BIT 0x00000040 /* Use all the sets */
+#define MMCU_ECACHE0_BURST_BIT 0x00000040 /* Match bursts */
+#define MMCU_ENTRY_SYS_BIT 0x00000010 /* Sys-coherent access required */
+#define MMCU_ENTRY_WRC_BIT 0x00000008 /* Write combining allowed */
+#define MMCU_ENTRY_PRIV_BIT 0x00000004 /* Privilege required */
+#define MMCU_ENTRY_WR_BIT 0x00000002 /* Writes allowed */
+#define MMCU_ENTRY_VAL_BIT 0x00000001 /* Entry is valid */
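+
+/*
+ * Illustrative entry composition (sketch, not part of this patch): a
+ * valid, writable, fully cached mapping of physical page 'phys' is
+ *
+ *     (phys & MMCU_ENTRY_ADDR_BITS) | MMCU_ENTRY_CACHE_BIT |
+ *     (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) |
+ *     MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT
+ */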
+
+#ifdef METAC_2_1
+/*
+ * Extended first-level/top table entries have extra/larger fields in later
+ * cores as bits 11:0 previously had no effect in such table entries.
+ */
+#define MMCU_E1ENT_ADDR_BITS 0xFFFFFFC0 /* Physical address */
+#define MMCU_E1ENT_ADDR_S 6 /* -> resolution < page size */
+#define MMCU_E1ENT_PGSZ_BITS 0x0000001E /* Page size for 2nd level */
+#define MMCU_E1ENT_PGSZ_S 1
+#define MMCU_E1ENT_PGSZ0_POWER 12 /* PgSz 0 -> 4K */
+#define MMCU_E1ENT_PGSZ_MAX 10 /* PgSz 10 -> 4M maximum */
+#define MMCU_E1ENT_MINIM_BIT 0x00000020
+#endif /* METAC_2_1 */
+
+/* MMCU control register in SYSC region */
+#define MMCU_TABLE_PHYS_ADDR 0x04830010
+#define MMCU_TABLE_PHYS_ADDR_BITS 0xFFFFFFFC
+#ifdef METAC_2_1
+#define MMCU_TABLE_PHYS_EXTEND 0x00000001 /* See below */
+#endif
+#define MMCU_DCACHE_CTRL_ADDR 0x04830018
+#define MMCU_xCACHE_CTRL_ENABLE_BIT 0x00000001
+#define MMCU_xCACHE_CTRL_PARTITION_BIT 0x00000000 /* See xCPART below */
+#define MMCU_ICACHE_CTRL_ADDR 0x04830020
+
+#ifdef METAC_2_1
+
+/*
+ * Allow direct access to physical memory used to implement MMU table.
+ *
+ * Each is based on a corresponding MMCU_TnLOCAL_TABLE_PHYSn or similar
+ * MMCU_TnGLOBAL_TABLE_PHYSn register pair (see next).
+ */
+#define LINSYSMEMT0L_BASE 0x05000000
+#define LINSYSMEMT0L_LIMIT 0x051FFFFF
+#define LINSYSMEMTnX_STRIDE 0x00200000 /* 2MB Local per thread */
+#define LINSYSMEMTnX_STRIDE_S 21
+#define LINSYSMEMTXG_OFFSET 0x00800000 /* +2MB Global per thread */
+#define LINSYSMEMTXG_OFFSET_S 23
+#define LINSYSMEMT1L_BASE 0x05200000
+#define LINSYSMEMT1L_LIMIT 0x053FFFFF
+#define LINSYSMEMT2L_BASE 0x05400000
+#define LINSYSMEMT2L_LIMIT 0x055FFFFF
+#define LINSYSMEMT3L_BASE 0x05600000
+#define LINSYSMEMT3L_LIMIT 0x057FFFFF
+#define LINSYSMEMT0G_BASE 0x05800000
+#define LINSYSMEMT0G_LIMIT 0x059FFFFF
+#define LINSYSMEMT1G_BASE 0x05A00000
+#define LINSYSMEMT1G_LIMIT 0x05BFFFFF
+#define LINSYSMEMT2G_BASE 0x05C00000
+#define LINSYSMEMT2G_LIMIT 0x05DFFFFF
+#define LINSYSMEMT3G_BASE 0x05E00000
+#define LINSYSMEMT3G_LIMIT 0x05FFFFFF
+
+/*
+ * Extended MMU table functionality allows a sparse or flat table to be
+ * described much more efficiently than before.
+ */
+#define MMCU_T0LOCAL_TABLE_PHYS0 0x04830700
+#define MMCU_TnX_TABLE_PHYSX_STRIDE 0x20 /* Offset per thread */
+#define MMCU_TnX_TABLE_PHYSX_STRIDE_S 5
+#define MMCU_TXG_TABLE_PHYSX_OFFSET 0x10 /* Global versus local */
+#define MMCU_TXG_TABLE_PHYSX_OFFSET_S 4
+#define MMCU_TBLPHYS0_DCCTRL_BITS 0x000000DF /* DC controls */
+#define MMCU_TBLPHYS0_ENTLB_BIT 0x00000020 /* Cache in TLB */
+#define MMCU_TBLPHYS0_TBLSZ_BITS 0x00000F00 /* Area supported */
+#define MMCU_TBLPHYS0_TBLSZ_S 8
+#define MMCU_TBLPHYS0_TBLSZ0_POWER 22 /* 0 -> 4M */
+#define MMCU_TBLPHYS0_TBLSZ_MAX 9 /* 9 -> 2G */
+#define MMCU_TBLPHYS0_LINBASE_BITS 0xFFC00000 /* Linear base */
+#define MMCU_TBLPHYS0_LINBASE_S 22
+
+#define MMCU_T0LOCAL_TABLE_PHYS1 0x04830708
+#define MMCU_TBLPHYS1_ADDR_BITS 0xFFFFFFFC /* Physical base */
+#define MMCU_TBLPHYS1_ADDR_S 2
+
+#define MMCU_T0GLOBAL_TABLE_PHYS0 0x04830710
+#define MMCU_T0GLOBAL_TABLE_PHYS1 0x04830718
+#define MMCU_T1LOCAL_TABLE_PHYS0 0x04830720
+#define MMCU_T1LOCAL_TABLE_PHYS1 0x04830728
+#define MMCU_T1GLOBAL_TABLE_PHYS0 0x04830730
+#define MMCU_T1GLOBAL_TABLE_PHYS1 0x04830738
+#define MMCU_T2LOCAL_TABLE_PHYS0 0x04830740
+#define MMCU_T2LOCAL_TABLE_PHYS1 0x04830748
+#define MMCU_T2GLOBAL_TABLE_PHYS0 0x04830750
+#define MMCU_T2GLOBAL_TABLE_PHYS1 0x04830758
+#define MMCU_T3LOCAL_TABLE_PHYS0 0x04830760
+#define MMCU_T3LOCAL_TABLE_PHYS1 0x04830768
+#define MMCU_T3GLOBAL_TABLE_PHYS0 0x04830770
+#define MMCU_T3GLOBAL_TABLE_PHYS1 0x04830778
+
+#define MMCU_T0EBWCCTRL 0x04830640
+#define MMCU_TnEBWCCTRL_BITS 0x00000007
+#define MMCU_TnEBWCCTRL_S 0
+#define MMCU_TnEBWCCCTRL_DISABLE_ALL 0
+#define MMCU_TnEBWCCCTRL_ABIT25 1
+#define MMCU_TnEBWCCCTRL_ABIT26 2
+#define MMCU_TnEBWCCCTRL_ABIT27 3
+#define MMCU_TnEBWCCCTRL_ABIT28 4
+#define MMCU_TnEBWCCCTRL_ABIT29 5
+#define MMCU_TnEBWCCCTRL_ABIT30 6
+#define MMCU_TnEBWCCCTRL_ENABLE_ALL 7
+#define MMCU_TnEBWCCTRL_STRIDE 8
+
+#endif /* METAC_2_1 */
+
+
+/* Registers within the SYSC register region */
+#define METAC_ID 0x04830000
+#define METAC_ID_MAJOR_BITS 0xFF000000
+#define METAC_ID_MAJOR_S 24
+#define METAC_ID_MINOR_BITS 0x00FF0000
+#define METAC_ID_MINOR_S 16
+#define METAC_ID_REV_BITS 0x0000FF00
+#define METAC_ID_REV_S 8
+#define METAC_ID_MAINT_BITS 0x000000FF
+#define METAC_ID_MAINT_S 0
+
+#ifdef METAC_2_1
+/* Use of this section is strongly deprecated */
+#define METAC_ID2 0x04830008
+#define METAC_ID2_DESIGNER_BITS 0xFFFF0000 /* Modified by customer */
+#define METAC_ID2_DESIGNER_S 16
+#define METAC_ID2_MINOR2_BITS 0x00000F00 /* 3rd digit of prod rev */
+#define METAC_ID2_MINOR2_S 8
+#define METAC_ID2_CONFIG_BITS 0x000000FF /* Wrapper configuration */
+#define METAC_ID2_CONFIG_S 0
+
+/* Primary core identification and configuration information */
+#define METAC_CORE_ID 0x04831000
+#define METAC_COREID_GROUP_BITS 0xFF000000
+#define METAC_COREID_GROUP_S 24
+#define METAC_COREID_GROUP_METAG 0x14
+#define METAC_COREID_ID_BITS 0x00FF0000
+#define METAC_COREID_ID_S 16
+#define METAC_COREID_ID_W32 0x10 /* >= for 32-bit pipeline */
+#define METAC_COREID_CONFIG_BITS 0x0000FFFF
+#define METAC_COREID_CONFIG_S 0
+#define METAC_COREID_CFGCACHE_BITS 0x0007
+#define METAC_COREID_CFGCACHE_S 0
+#define METAC_COREID_CFGCACHE_NOM 0
+#define METAC_COREID_CFGCACHE_TYPE0 1
+#define METAC_COREID_CFGCACHE_NOMMU 1 /* Alias for TYPE0 */
+#define METAC_COREID_CFGCACHE_NOCACHE 2
+#define METAC_COREID_CFGCACHE_PRIVNOMMU 3
+#define METAC_COREID_CFGDSP_BITS 0x0038
+#define METAC_COREID_CFGDSP_S 3
+#define METAC_COREID_CFGDSP_NOM 0
+#define METAC_COREID_CFGDSP_MIN 1
+#define METAC_COREID_NOFPACC_BIT 0x0040 /* Set if no FPU accum */
+#define METAC_COREID_CFGFPU_BITS 0x0180
+#define METAC_COREID_CFGFPU_S 7
+#define METAC_COREID_CFGFPU_NOM 0
+#define METAC_COREID_CFGFPU_SNGL 1
+#define METAC_COREID_CFGFPU_DBL 2
+#define METAC_COREID_NOAMA_BIT 0x0200 /* Set if no AMA present */
+#define METAC_COREID_NOCOH_BIT 0x0400 /* Set if no Gbl coherency */
+
+/* Core revision information */
+#define METAC_CORE_REV 0x04831008
+#define METAC_COREREV_DESIGN_BITS 0xFF000000
+#define METAC_COREREV_DESIGN_S 24
+#define METAC_COREREV_MAJOR_BITS 0x00FF0000
+#define METAC_COREREV_MAJOR_S 16
+#define METAC_COREREV_MINOR_BITS 0x0000FF00
+#define METAC_COREREV_MINOR_S 8
+#define METAC_COREREV_MAINT_BITS 0x000000FF
+#define METAC_COREREV_MAINT_S 0
+
+/* Configuration information control outside the core */
+#define METAC_CORE_DESIGNER1 0x04831010 /* Arbitrary value */
+#define METAC_CORE_DESIGNER2 0x04831018 /* Arbitrary value */
+
+/* Configuration information covering presence/number of various features */
+#define METAC_CORE_CONFIG2 0x04831020
+#define METAC_CORECFG2_COREDBGTYPE_BITS 0x60000000 /* Core debug type */
+#define METAC_CORECFG2_COREDBGTYPE_S 29
+#define METAC_CORECFG2_DCSMALL_BIT 0x04000000 /* Data cache small */
+#define METAC_CORECFG2_ICSMALL_BIT 0x02000000 /* Inst cache small */
+#define METAC_CORECFG2_DCSZNP_BITS 0x01C00000 /* Data cache size np */
+#define METAC_CORECFG2_DCSZNP_S 22
+#define METAC_CORECFG2_ICSZNP_BITS 0x00380000 /* Inst cache size np */
+#define METAC_CORECFG2_ICSZNP_S 19
+#define METAC_CORECFG2_DCSZ_BITS 0x00070000 /* Data cache size */
+#define METAC_CORECFG2_DCSZ_S 16
+#define METAC_CORECFG2_xCSZ_4K 0 /* Allocated values */
+#define METAC_CORECFG2_xCSZ_8K 1
+#define METAC_CORECFG2_xCSZ_16K 2
+#define METAC_CORECFG2_xCSZ_32K 3
+#define METAC_CORECFG2_xCSZ_64K 4
+#define METAC_CORE_C2ICSZ_BITS 0x0000E000 /* Inst cache size */
+#define METAC_CORE_C2ICSZ_S 13
+#define METAC_CORE_GBLACC_BITS 0x00001800 /* Number of Global Acc */
+#define METAC_CORE_GBLACC_S 11
+#define METAC_CORE_GBLDXR_BITS 0x00000700 /* 0 -> 0, R -> 2^(R-1) */
+#define METAC_CORE_GBLDXR_S 8
+#define METAC_CORE_GBLAXR_BITS 0x000000E0 /* 0 -> 0, R -> 2^(R-1) */
+#define METAC_CORE_GBLAXR_S 5
+#define METAC_CORE_RTTRACE_BIT 0x00000010
+#define METAC_CORE_WATCHN_BITS 0x0000000C /* 0 -> 0, N -> 2^N */
+#define METAC_CORE_WATCHN_S 2
+#define METAC_CORE_BREAKN_BITS 0x00000003 /* 0 -> 0, N -> 2^N */
+#define METAC_CORE_BREAKN_S 0
+
+/* Configuration information covering presence/number of various features */
+#define METAC_CORE_CONFIG3 0x04831028
+#define METAC_CORECFG3_L2C_REV_ID_BITS 0x000F0000 /* Revision of L2 cache */
+#define METAC_CORECFG3_L2C_REV_ID_S 16
+#define METAC_CORECFG3_L2C_LINE_SIZE_BITS 0x00003000 /* L2 line size */
+#define METAC_CORECFG3_L2C_LINE_SIZE_S 12
+#define METAC_CORECFG3_L2C_LINE_SIZE_64B 0x0 /* 64 bytes */
+#define METAC_CORECFG3_L2C_NUM_WAYS_BITS 0x00000F00 /* L2 number of ways (2^n) */
+#define METAC_CORECFG3_L2C_NUM_WAYS_S 8
+#define METAC_CORECFG3_L2C_SIZE_BITS 0x000000F0 /* L2 size (2^n) */
+#define METAC_CORECFG3_L2C_SIZE_S 4
+#define METAC_CORECFG3_L2C_UNIFIED_BIT 0x00000004 /* Unified cache: */
+#define METAC_CORECFG3_L2C_UNIFIED_S 2
+#define METAC_CORECFG3_L2C_UNIFIED_UNIFIED 1 /* - Unified D/I cache */
+#define METAC_CORECFG3_L2C_UNIFIED_SEPARATE 0 /* - Separate D/I cache */
+#define METAC_CORECFG3_L2C_MODE_BIT 0x00000002 /* Cache Mode: */
+#define METAC_CORECFG3_L2C_MODE_S 1
+#define METAC_CORECFG3_L2C_MODE_WRITE_BACK 1 /* - Write back */
+#define METAC_CORECFG3_L2C_MODE_WRITE_THROUGH 0 /* - Write through */
+#define METAC_CORECFG3_L2C_HAVE_L2C_BIT 0x00000001 /* Have L2C */
+#define METAC_CORECFG3_L2C_HAVE_L2C_S 0
+
+#endif /* METAC_2_1 */
+
+#define SYSC_CACHE_MMU_CONFIG 0x04830028
+#ifdef METAC_2_1
+#define SYSC_CMMUCFG_DCSKEWABLE_BIT 0x00000040
+#define SYSC_CMMUCFG_ICSKEWABLE_BIT 0x00000020
+#define SYSC_CMMUCFG_DCSKEWOFF_BIT 0x00000010 /* Skew association override */
+#define SYSC_CMMUCFG_ICSKEWOFF_BIT 0x00000008 /* -> default 0 on if present */
+#define SYSC_CMMUCFG_MODE_BITS 0x00000007 /* Access to old state */
+#define SYSC_CMMUCFG_MODE_S 0
+#define SYSC_CMMUCFG_ON 0x7
+#define SYSC_CMMUCFG_EBYPASS 0x6 /* Enhanced by-pass mode */
+#define SYSC_CMMUCFG_EBYPASSIC 0x4 /* EB just inst cache */
+#define SYSC_CMMUCFG_EBYPASSDC 0x2 /* EB just data cache */
+#endif /* METAC_2_1 */
+/* Old definitions, Keep them for now */
+#define SYSC_CMMUCFG_MMU_ON_BIT 0x1
+#define SYSC_CMMUCFG_DC_ON_BIT 0x2
+#define SYSC_CMMUCFG_IC_ON_BIT 0x4
+
+#define SYSC_JTAG_THREAD 0x04830030
+#define SYSC_JTAG_TX_BITS 0x00000003 /* Read only bits! */
+#define SYSC_JTAG_TX_S 0
+#define SYSC_JTAG_PRIV_BIT 0x00000004
+#ifdef METAC_2_1
+#define SYSC_JTAG_SLAVETX_BITS 0x00000018
+#define SYSC_JTAG_SLAVETX_S 3
+#endif /* METAC_2_1 */
+
+#define SYSC_DCACHE_FLUSH 0x04830038
+#define SYSC_ICACHE_FLUSH 0x04830040
+#define SYSC_xCACHE_FLUSH_INIT 0x1
+#define MMCU_DIRECTMAP0_ADDR 0x04830080 /* LINSYSDIRECT_BASE -> */
+#define MMCU_DIRECTMAPn_STRIDE 0x00000010 /* 4 Region settings */
+#define MMCU_DIRECTMAPn_S 4
+#define MMCU_DIRECTMAPn_ADDR_BITS 0xFF800000
+#define MMCU_DIRECTMAPn_ADDR_S 23
+#define MMCU_DIRECTMAPn_ADDR_SCALE 0x00800000 /* 8M Regions */
+#ifdef METAC_2_1
+/*
+ * These fields in the above registers provide MMCU_ENTRY_* values
+ * for each direct mapped region to enable optimisation of these areas.
+ * (LSB similar to VALID must be set for enhancements to be active)
+ */
+#define MMCU_DIRECTMAPn_ENHANCE_BIT 0x00000001 /* 0 = no optim */
+#define MMCU_DIRECTMAPn_DCCTRL_BITS 0x000000DF /* Get DC Ctrl */
+#define MMCU_DIRECTMAPn_DCCTRL_S 0
+#define MMCU_DIRECTMAPn_ICCTRL_BITS 0x0000C000 /* Get IC Ctrl */
+#define MMCU_DIRECTMAPn_ICCTRL_S 8
+#define MMCU_DIRECTMAPn_ENTLB_BIT 0x00000020 /* Cache in TLB */
+#define MMCU_DIRECTMAPn_ICCWIN_BITS 0x0000C000 /* Get IC Win Bits */
+#define MMCU_DIRECTMAPn_ICCWIN_S 14
+#endif /* METAC_2_1 */
+
+#define MMCU_DIRECTMAP1_ADDR 0x04830090
+#define MMCU_DIRECTMAP2_ADDR 0x048300a0
+#define MMCU_DIRECTMAP3_ADDR 0x048300b0
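[Editor's note: the four direct-map region registers sit at a fixed stride from MMCU_DIRECTMAP0_ADDR, so region n's register address can be computed rather than listed case by case. A minimal sketch; the helper name is illustrative and not part of this header:

#include <stdint.h>

/* Register address for direct-mapped region n (0..3); yields the
 * MMCU_DIRECTMAP0..3_ADDR values listed above. */
static inline uint32_t mmcu_directmap_reg(unsigned int n)
{
	return MMCU_DIRECTMAP0_ADDR + n * MMCU_DIRECTMAPn_STRIDE;
}
]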
+
+/*
+ * These bits partition each thread's use of data cache or instruction cache
+ * resource by modifying the top 4 bits of the address within the cache
+ * storage area.
+ */
+#define SYSC_DCPART0 0x04830200
+#define SYSC_xCPARTn_STRIDE 0x00000008
+#define SYSC_xCPARTL_AND_BITS 0x0000000F /* Masks top 4 bits */
+#define SYSC_xCPARTL_AND_S 0
+#define SYSC_xCPARTG_AND_BITS 0x00000F00 /* Masks top 4 bits */
+#define SYSC_xCPARTG_AND_S 8
+#define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */
+#define SYSC_xCPARTL_OR_S 16
+#define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */
+#define SYSC_xCPARTG_OR_S 24
+#define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */
+
+#define SYSC_DCPART1 0x04830208
+#define SYSC_DCPART2 0x04830210
+#define SYSC_DCPART3 0x04830218
+#define SYSC_ICPART0 0x04830220
+#define SYSC_ICPART1 0x04830228
+#define SYSC_ICPART2 0x04830230
+#define SYSC_ICPART3 0x04830238
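[Editor's note: as a worked example of the AND/OR scheme, the sketch below composes the local-mode part of a partition value; the global (G) fields follow the same pattern at their own shifts. The helper name is illustrative and assumes the SYSC_xCPART* definitions above are in scope:

#include <stdint.h>

/* Compose the local fields of a SYSC_xCPARTn value: 'and_top4' is ANDed
 * with, and 'or_top4' ORed into, the top 4 address bits within the cache
 * storage area. For instance and_top4 = 0x7, or_top4 = 0x8 confines a
 * thread to the upper half of the cache. */
static inline uint32_t sysc_cpart_local(uint32_t and_top4, uint32_t or_top4)
{
	return ((and_top4 << SYSC_xCPARTL_AND_S) & SYSC_xCPARTL_AND_BITS) |
	       ((or_top4 << SYSC_xCPARTL_OR_S) & SYSC_xCPARTL_OR_BITS);
}
]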
+
+/*
+ * META Core Memory and Cache Update registers
+ */
+#define SYSC_MCMDATAX 0x04830300 /* 32-bit read/write data register */
+#define SYSC_MCMDATAT 0x04830308 /* Read or write data triggers oper */
+#define SYSC_MCMGCTRL 0x04830310 /* Control register */
+#define SYSC_MCMGCTRL_READ_BIT 0x00000001 /* Set to issue 1st read */
+#define SYSC_MCMGCTRL_AINC_BIT 0x00000002 /* Set for auto-increment */
+#define SYSC_MCMGCTRL_ADDR_BITS 0x000FFFFC /* Address or index */
+#define SYSC_MCMGCTRL_ADDR_S 2
+#define SYSC_MCMGCTRL_ID_BITS 0x0FF00000 /* Internal memory block Id */
+#define SYSC_MCMGCTRL_ID_S 20
+#define SYSC_MCMGID_NODEV 0xFF /* No Device Selected */
+#define SYSC_MCMGID_DSPRAM0A 0x04 /* DSP RAM D0 block A access */
+#define SYSC_MCMGID_DSPRAM0B 0x05 /* DSP RAM D0 block B access */
+#define SYSC_MCMGID_DSPRAM1A 0x06 /* DSP RAM D1 block A access */
+#define SYSC_MCMGID_DSPRAM1B 0x07 /* DSP RAM D1 block B access */
+#define SYSC_MCMGID_DCACHEL 0x08 /* DCACHE lines (64-bytes/line) */
+#ifdef METAC_2_1
+#define SYSC_MCMGID_DCACHETLB 0x09 /* DCACHE TLB ( Read Only ) */
+#endif /* METAC_2_1 */
+#define SYSC_MCMGID_DCACHET 0x0A /* DCACHE tags (32-bits/line) */
+#define SYSC_MCMGID_DCACHELRU 0x0B /* DCACHE LRU (8-bits/line) */
+#define SYSC_MCMGID_ICACHEL 0x0C /* ICACHE lines (64-bytes/line) */
+#ifdef METAC_2_1
+#define SYSC_MCMGID_ICACHETLB 0x0D /* ICACHE TLB (Read Only ) */
+#endif /* METAC_2_1 */
+#define SYSC_MCMGID_ICACHET 0x0E /* ICACHE Tags (32-bits/line) */
+#define SYSC_MCMGID_ICACHELRU 0x0F /* ICACHE LRU (8-bits/line ) */
+#define SYSC_MCMGID_COREIRAM0 0x10 /* Core code mem id 0 */
+#define SYSC_MCMGID_COREIRAMn 0x17
+#define SYSC_MCMGID_COREDRAM0 0x18 /* Core data mem id 0 */
+#define SYSC_MCMGID_COREDRAMn 0x1F
+#ifdef METAC_2_1
+#define SYSC_MCMGID_DCACHEST 0x20 /* DCACHE ST ( Read Only ) */
+#define SYSC_MCMGID_ICACHEST 0x21 /* ICACHE ST ( Read Only ) */
+#define SYSC_MCMGID_DCACHETLBLRU 0x22 /* DCACHE TLB LRU ( Read Only )*/
+#define SYSC_MCMGID_ICACHETLBLRU 0x23 /* ICACHE TLB LRU( Read Only ) */
+#define SYSC_MCMGID_DCACHESTLRU 0x24 /* DCACHE ST LRU ( Read Only ) */
+#define SYSC_MCMGID_ICACHESTLRU 0x25 /* ICACHE ST LRU ( Read Only ) */
+#define SYSC_MCMGID_DEBUGTLB 0x26 /* DEBUG TLB ( Read Only ) */
+#define SYSC_MCMGID_DEBUGST 0x27 /* DEBUG ST ( Read Only ) */
+#define SYSC_MCMGID_L2CACHEL 0x30 /* L2 Cache Lines (64-bytes/line) */
+#define SYSC_MCMGID_L2CACHET 0x31 /* L2 Cache Tags (32-bits/line) */
+#define SYSC_MCMGID_COPROX0 0x70 /* Coprocessor port id 0 */
+#define SYSC_MCMGID_COPROXn 0x77
+#endif /* METAC_2_1 */
+#define SYSC_MCMGCTRL_TR31_BIT 0x80000000 /* Trigger 31 on completion */
+#define SYSC_MCMSTATUS 0x04830318 /* Status read only */
+#define SYSC_MCMSTATUS_IDLE_BIT 0x00000001
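[Editor's note: putting the fields together, a single word of an internal memory block can be pulled through this interface by programming SYSC_MCMGCTRL and polling for idle. A hedged sketch: MMIO32() is a hypothetical raw accessor standing in for whatever I/O primitives the surrounding code uses, and the trigger/poll ordering should be checked against the TRM:

#include <stdint.h>

#define MMIO32(addr) (*(volatile uint32_t *)(uintptr_t)(addr))

/* Read one 32-bit word at 'index' from internal memory block 'id'
 * (one of the SYSC_MCMGID_* values above). */
static uint32_t mcm_read_word(uint32_t id, uint32_t index)
{
	MMIO32(SYSC_MCMGCTRL) =
		((id << SYSC_MCMGCTRL_ID_S) & SYSC_MCMGCTRL_ID_BITS) |
		((index << SYSC_MCMGCTRL_ADDR_S) & SYSC_MCMGCTRL_ADDR_BITS) |
		SYSC_MCMGCTRL_READ_BIT;		/* issue first read */
	while (!(MMIO32(SYSC_MCMSTATUS) & SYSC_MCMSTATUS_IDLE_BIT))
		;				/* wait for completion */
	return MMIO32(SYSC_MCMDATAX);
}
]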
+
+/* META System Events */
+#define SYSC_SYS_EVENT 0x04830400
+#define SYSC_SYSEVT_ATOMIC_BIT 0x00000001
+#define SYSC_SYSEVT_CACHEX_BIT 0x00000002
+#define SYSC_ATOMIC_LOCK 0x04830408
+#define SYSC_ATOMIC_STATE_TX_BITS 0x0000000F
+#define SYSC_ATOMIC_STATE_TX_S 0
+#ifdef METAC_1_2
+#define SYSC_ATOMIC_STATE_DX_BITS 0x000000F0
+#define SYSC_ATOMIC_STATE_DX_S 4
+#else /* METAC_1_2 */
+#define SYSC_ATOMIC_SOURCE_BIT 0x00000010
+#endif /* !METAC_1_2 */
+
+
+#ifdef METAC_2_1
+
+/* These definitions replace the EXPAND_TIMER_DIV register defines which are to
+ * be deprecated.
+ */
+#define SYSC_TIMER_DIV 0x04830140
+#define SYSC_TIMDIV_BITS 0x000000FF
+#define SYSC_TIMDIV_S 0
+
+/* META Enhanced by-pass control for local and global region */
+#define MMCU_LOCAL_EBCTRL 0x04830600
+#define MMCU_GLOBAL_EBCTRL 0x04830608
+#define MMCU_EBCTRL_SINGLE_BIT 0x00000020 /* TLB Uncached */
+/*
+ * These fields in the above registers provide MMCU_ENTRY_* values
+ * for each direct mapped region to enable optimisation of these areas.
+ */
+#define MMCU_EBCTRL_DCCTRL_BITS 0x000000C0 /* Get DC Ctrl */
+#define MMCU_EBCTRL_DCCTRL_S 0
+#define MMCU_EBCTRL_ICCTRL_BITS 0x0000C000 /* Get IC Ctrl */
+#define MMCU_EBCTRL_ICCTRL_S 8
+
+/* META Cached Core Mode Registers */
+#define MMCU_T0CCM_ICCTRL 0x04830680 /* Core cached code control */
+#define MMCU_TnCCM_xxCTRL_STRIDE 8
+#define MMCU_TnCCM_xxCTRL_STRIDE_S 3
+#define MMCU_T1CCM_ICCTRL 0x04830688
+#define MMCU_T2CCM_ICCTRL 0x04830690
+#define MMCU_T3CCM_ICCTRL 0x04830698
+#define MMCU_T0CCM_DCCTRL 0x048306C0 /* Core cached data control */
+#define MMCU_T1CCM_DCCTRL 0x048306C8
+#define MMCU_T2CCM_DCCTRL 0x048306D0
+#define MMCU_T3CCM_DCCTRL 0x048306D8
+#define MMCU_TnCCM_ENABLE_BIT 0x00000001
+#define MMCU_TnCCM_WIN3_BIT 0x00000002
+#define MMCU_TnCCM_DCWRITE_BIT 0x00000004 /* In DCCTRL only */
+#define MMCU_TnCCM_REGSZ_BITS 0x00000F00
+#define MMCU_TnCCM_REGSZ_S 8
+#define MMCU_TnCCM_REGSZ0_POWER 12 /* RegSz 0 -> 4K */
+#define MMCU_TnCCM_REGSZ_MAXBYTES 0x00080000 /* 512K max */
+#define MMCU_TnCCM_ADDR_BITS 0xFFFFF000
+#define MMCU_TnCCM_ADDR_S 12
+
+#endif /* METAC_2_1 */
+
+/*
+ * Hardware performance counter registers
+ * --------------------------------------
+ */
+#ifdef METAC_2_1
+/* Two Performance Counter Internal Core Events Control registers */
+#define PERF_ICORE0 0x0480FFD0
+#define PERF_ICORE1 0x0480FFD8
+#define PERFI_CTRL_BITS 0x0000000F
+#define PERFI_CTRL_S 0
+#define PERFI_CAH_DMISS 0x0 /* Dcache Misses in cache (TLB Hit) */
+#define PERFI_CAH_IMISS 0x1 /* Icache Misses in cache (TLB Hit) */
+#define PERFI_TLB_DMISS 0x2 /* Dcache Misses in per-thread TLB */
+#define PERFI_TLB_IMISS 0x3 /* Icache Misses in per-thread TLB */
+#define PERFI_TLB_DWRHITS 0x4 /* DC Write-Hits in per-thread TLB */
+#define PERFI_TLB_DWRMISS 0x5 /* DC Write-Miss in per-thread TLB */
+#define PERFI_CAH_DLFETCH 0x8 /* DC Read cache line fetch */
+#define PERFI_CAH_ILFETCH 0x9 /* IC Read cache line fetch */
+#define PERFI_CAH_DWFETCH 0xA /* DC Read cache word fetch */
+#define PERFI_CAH_IWFETCH 0xB /* IC Read cache word fetch */
+#endif /* METAC_2_1 */
+
+/* Two memory-mapped hardware performance counter registers */
+#define PERF_COUNT0 0x0480FFE0
+#define PERF_COUNT1 0x0480FFE8
+
+/* Fields in PERF_COUNTn registers */
+#define PERF_COUNT_BITS 0x00ffffff /* Event count value */
+
+#define PERF_THREAD_BITS 0x0f000000 /* Thread mask selects threads */
+#define PERF_THREAD_S 24
+
+#define PERF_CTRL_BITS 0xf0000000 /* Event filter control */
+#define PERF_CTRL_S 28
+
+#define PERFCTRL_SUPER 0 /* Superthread cycles */
+#define PERFCTRL_REWIND 1 /* Rewinds due to Dcache Misses */
+#ifdef METAC_2_1
+#define PERFCTRL_SUPREW 2 /* Rewinds of superthreaded cycles (no mask) */
+
+#define PERFCTRL_CYCLES 3 /* Counts all cycles (no mask) */
+
+#define PERFCTRL_PREDBC 4 /* Conditional branch predictions */
+#define PERFCTRL_MISPBC 5 /* Conditional branch mispredictions */
+#define PERFCTRL_PREDRT 6 /* Return predictions */
+#define PERFCTRL_MISPRT 7 /* Return mispredictions */
+#endif /* METAC_2_1 */
+
+#define PERFCTRL_DHITS 8 /* Dcache Hits */
+#define PERFCTRL_IHITS 9 /* Icache Hits */
+#define PERFCTRL_IMISS 10 /* Icache Misses in cache or TLB */
+#ifdef METAC_2_1
+#define PERFCTRL_DCSTALL 11 /* Dcache+TLB o/p delayed (per-thread) */
+#define PERFCTRL_ICSTALL 12 /* Icache+TLB o/p delayed (per-thread) */
+
+#define PERFCTRL_INT 13 /* Internal core detailed events (see next) */
+#define PERFCTRL_EXT 15 /* External source in core periphery */
+#endif /* METAC_2_1 */
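[Editor's note: a counter is therefore armed by writing the event select and thread mask with the count field zeroed, and sampled by masking the count back out. A minimal sketch reusing the hypothetical MMIO32() accessor style from earlier; names are illustrative:

#include <stdint.h>

#define MMIO32(addr) (*(volatile uint32_t *)(uintptr_t)(addr))

/* Arm PERF_COUNT0: count event 'ctrl' (a PERFCTRL_* value) for the
 * threads selected in 'thread_mask', starting from zero. */
static void perf_count0_start(uint32_t ctrl, uint32_t thread_mask)
{
	MMIO32(PERF_COUNT0) =
		((ctrl << PERF_CTRL_S) & PERF_CTRL_BITS) |
		((thread_mask << PERF_THREAD_S) & PERF_THREAD_BITS);
}

/* Sample the 24-bit event count, e.g. after
 * perf_count0_start(PERFCTRL_DHITS, 0x1) to count thread 0 Dcache hits. */
static uint32_t perf_count0_read(void)
{
	return MMIO32(PERF_COUNT0) & PERF_COUNT_BITS;
}
]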
+
+#ifdef METAC_2_1
+/* These definitions replace the EXPAND_PERFCHANx register defines which are to
+ * be deprecated.
+ */
+#define PERF_CHAN0 0x04830150
+#define PERF_CHAN1 0x04830158
+#define PERF_CHAN_BITS 0x0000000F
+#define PERF_CHAN_S 0
+#define PERFCHAN_WRC_WRBURST 0x0 /* Write combiner write burst */
+#define PERFCHAN_WRC_WRITE 0x1 /* Write combiner write */
+#define PERFCHAN_WRC_RDBURST 0x2 /* Write combiner read burst */
+#define PERFCHAN_WRC_READ 0x3 /* Write combiner read */
+#define PERFCHAN_PREARB_DELAY 0x4 /* Pre-arbiter delay cycle */
+ /* Cross-bar hold-off cycle: */
+#define PERFCHAN_XBAR_HOLDWRAP 0x5 /* wrapper register */
+#define PERFCHAN_XBAR_HOLDSBUS 0x6 /* system bus (ATP only) */
+#define PERFCHAN_XBAR_HOLDCREG 0x9 /* core registers */
+#define PERFCHAN_L2C_MISS 0x6 /* L2 Cache miss */
+#define PERFCHAN_L2C_HIT 0x7 /* L2 Cache hit */
+#define PERFCHAN_L2C_WRITEBACK 0x8 /* L2 Cache writeback */
+ /* Admission delay cycle: */
+#define PERFCHAN_INPUT_CREG 0xB /* core registers */
+#define PERFCHAN_INPUT_INTR 0xC /* internal ram */
+#define PERFCHAN_INPUT_WRC 0xD /* write combiners(memory) */
+
+/* Should the following be removed, as they are not in the TRM anywhere? */
+#define PERFCHAN_XBAR_HOLDINTR 0x8 /* internal ram */
+#define PERFCHAN_INPUT_SBUS 0xA /* register port */
+/* End of remove section. */
+
+#define PERFCHAN_MAINARB_DELAY 0xF /* Main arbiter delay cycle */
+
+#endif /* METAC_2_1 */
+
+#ifdef METAC_2_1
+/*
+ * Write combiner registers
+ * ------------------------
+ *
+ * These replace the EXPAND_T0WRCOMBINE register defines, which will be
+ * deprecated.
+ */
+#define WRCOMB_CONFIG0 0x04830100
+#define WRCOMB_LFFEn_BIT 0x00004000 /* Enable auto line full flush */
+#define WRCOMB_ENABLE_BIT 0x00002000 /* Enable write combiner */
+#define WRCOMB_TIMEOUT_ENABLE_BIT 0x00001000 /* Timeout flush enable */
+#define WRCOMB_TIMEOUT_COUNT_BITS 0x000003FF
+#define WRCOMB_TIMEOUT_COUNT_S 0
+#define WRCOMB_CONFIG4 0x04830180
+#define WRCOMB_PARTALLOC_BITS 0x000000C0
+#define WRCOMB_PARTALLOC_S 6
+#define WRCOMB_PARTSIZE_BITS 0x00000030
+#define WRCOMB_PARTSIZE_S 4
+#define WRCOMB_PARTOFFSET_BITS 0x0000000F
+#define WRCOMB_PARTOFFSET_S 0
+#define WRCOMB_CONFIG_STRIDE 8
+#endif /* METAC_2_1 */
+
+#ifdef METAC_2_1
+/*
+ * Thread arbiter registers
+ * ------------------------
+ *
+ * These replace the EXPAND_T0ARBITER register defines, which will be
+ * deprecated.
+ */
+#define ARBITER_ARBCONFIG0 0x04830120
+#define ARBCFG_BPRIORITY_BIT 0x02000000
+#define ARBCFG_IPRIORITY_BIT 0x01000000
+#define ARBCFG_PAGE_BITS 0x00FF0000
+#define ARBCFG_PAGE_S 16
+#define ARBCFG_BBASE_BITS 0x0000FF00
+#define ARBCFG_BBASE_S 8
+#define ARBCFG_IBASE_BITS 0x000000FF
+#define ARBCFG_IBASE_S 0
+#define ARBITER_TTECONFIG0 0x04820160
+#define ARBTTE_IUPPER_BITS 0xFF000000
+#define ARBTTE_IUPPER_S 24
+#define ARBTTE_ILOWER_BITS 0x00FF0000
+#define ARBTTE_ILOWER_S 16
+#define ARBTTE_BUPPER_BITS 0x0000FF00
+#define ARBTTE_BUPPER_S 8
+#define ARBTTE_BLOWER_BITS 0x000000FF
+#define ARBTTE_BLOWER_S 0
+#define ARBITER_STRIDE 8
+#endif /* METAC_2_1 */
+
+/*
+ * Expansion area registers
+ * --------------------------------------
+ */
+
+/* These defines are to be deprecated. See above instead. */
+#define EXPAND_T0WRCOMBINE 0x03000000
+#ifdef METAC_2_1
+#define EXPWRC_LFFEn_BIT 0x00004000 /* Enable auto line full flush */
+#endif /* METAC_2_1 */
+#define EXPWRC_ENABLE_BIT 0x00002000 /* Enable write combiner */
+#define EXPWRC_TIMEOUT_ENABLE_BIT 0x00001000 /* Timeout flush enable */
+#define EXPWRC_TIMEOUT_COUNT_BITS 0x000003FF
+#define EXPWRC_TIMEOUT_COUNT_S 0
+#define EXPAND_TnWRCOMBINE_STRIDE 0x00000008
+
+/* These defines are to be deprecated. See above instead. */
+#define EXPAND_T0ARBITER 0x03000020
+#define EXPARB_BPRIORITY_BIT 0x02000000
+#define EXPARB_IPRIORITY_BIT 0x01000000
+#define EXPARB_PAGE_BITS 0x00FF0000
+#define EXPARB_PAGE_S 16
+#define EXPARB_BBASE_BITS 0x0000FF00
+#define EXPARB_BBASE_S 8
+#define EXPARB_IBASE_BITS 0x000000FF
+#define EXPARB_IBASE_S 0
+#define EXPAND_TnARBITER_STRIDE 0x00000008
+
+/* These definitions are to be deprecated. See above instead. */
+#define EXPAND_TIMER_DIV 0x03000040
+#define EXPTIM_DIV_BITS 0x000000FF
+#define EXPTIM_DIV_S 0
+
+/* These definitions are to be deprecated. See above instead. */
+#define EXPAND_PERFCHAN0 0x03000050
+#define EXPAND_PERFCHAN1 0x03000058
+#define EXPPERF_CTRL_BITS 0x0000000F
+#define EXPPERF_CTRL_S 0
+#define EXPPERF_WRC_WRBURST 0x0 /* Write combiner write burst */
+#define EXPPERF_WRC_WRITE 0x1 /* Write combiner write */
+#define EXPPERF_WRC_RDBURST 0x2 /* Write combiner read burst */
+#define EXPPERF_WRC_READ 0x3 /* Write combiner read */
+#define EXPPERF_PREARB_DELAY 0x4 /* Pre-arbiter delay cycle */
+ /* Cross-bar hold-off cycle: */
+#define EXPPERF_XBAR_HOLDWRAP 0x5 /* wrapper register */
+#define EXPPERF_XBAR_HOLDSBUS 0x6 /* system bus */
+#ifdef METAC_1_2
+#define EXPPERF_XBAR_HOLDLBUS 0x7 /* local bus */
+#else /* METAC_1_2 */
+#define EXPPERF_XBAR_HOLDINTR 0x8 /* internal ram */
+#define EXPPERF_XBAR_HOLDCREG 0x9 /* core registers */
+ /* Admission delay cycle: */
+#define EXPPERF_INPUT_SBUS 0xA /* register port */
+#define EXPPERF_INPUT_CREG 0xB /* core registers */
+#define EXPPERF_INPUT_INTR 0xC /* internal ram */
+#define EXPPERF_INPUT_WRC 0xD /* write combiners(memory) */
+#endif /* !METAC_1_2 */
+#define EXPPERF_MAINARB_DELAY 0xF /* Main arbiter delay cycle */
+
+/*
+ * Debug port registers
+ * --------------------------------------
+ */
+
+/* Data Exchange Register */
+#define DBGPORT_MDBGDATAX 0x0
+
+/* Data Transfer register */
+#define DBGPORT_MDBGDATAT 0x4
+
+/* Control Register 0 */
+#define DBGPORT_MDBGCTRL0 0x8
+#define DBGPORT_MDBGCTRL0_ADDR_BITS 0xFFFFFFFC
+#define DBGPORT_MDBGCTRL0_ADDR_S 2
+#define DBGPORT_MDBGCTRL0_AUTOINCR_BIT 0x00000002
+#define DBGPORT_MDBGCTRL0_RD_BIT 0x00000001
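[Editor's note: for illustration, a control word that starts an auto-incrementing read burst can be composed as below. A sketch only; the debug port registers are offsets within the debug-port space rather than core addresses, so no accessor is shown:

#include <stdint.h>

/* Compose a DBGPORT_MDBGCTRL0 word: word-aligned start address plus
 * auto-increment and read strobes. */
static inline uint32_t mdbgctrl0_read_burst(uint32_t addr)
{
	return (addr & DBGPORT_MDBGCTRL0_ADDR_BITS) |
	       DBGPORT_MDBGCTRL0_AUTOINCR_BIT |
	       DBGPORT_MDBGCTRL0_RD_BIT;
}
]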
+
+/* Control Register 1 */
+#define DBGPORT_MDBGCTRL1 0xC
+#ifdef METAC_2_1
+#define DBGPORT_MDBGCTRL1_DEFERRTHREAD_BITS 0xC0000000
+#define DBGPORT_MDBGCTRL1_DEFERRTHREAD_S 30
+#endif /* METAC_2_1 */
+#define DBGPORT_MDBGCTRL1_LOCK2_INTERLOCK_BIT 0x20000000
+#define DBGPORT_MDBGCTRL1_ATOMIC_INTERLOCK_BIT 0x10000000
+#define DBGPORT_MDBGCTRL1_TRIGSTATUS_BIT 0x08000000
+#define DBGPORT_MDBGCTRL1_GBLPORT_IDLE_BIT 0x04000000
+#define DBGPORT_MDBGCTRL1_COREMEM_IDLE_BIT 0x02000000
+#define DBGPORT_MDBGCTRL1_READY_BIT 0x01000000
+#ifdef METAC_2_1
+#define DBGPORT_MDBGCTRL1_DEFERRID_BITS 0x00E00000
+#define DBGPORT_MDBGCTRL1_DEFERRID_S 21
+#define DBGPORT_MDBGCTRL1_DEFERR_BIT 0x00100000
+#endif /* METAC_2_1 */
+#define DBGPORT_MDBGCTRL1_WR_ACTIVE_BIT 0x00040000
+#define DBGPORT_MDBGCTRL1_COND_LOCK2_BIT 0x00020000
+#define DBGPORT_MDBGCTRL1_LOCK2_BIT 0x00010000
+#define DBGPORT_MDBGCTRL1_DIAGNOSE_BIT 0x00008000
+#define DBGPORT_MDBGCTRL1_FORCEDIAG_BIT 0x00004000
+#define DBGPORT_MDBGCTRL1_MEMFAULT_BITS 0x00003000
+#define DBGPORT_MDBGCTRL1_MEMFAULT_S 12
+#define DBGPORT_MDBGCTRL1_TRIGGER_BIT 0x00000100
+#ifdef METAC_2_1
+#define DBGPORT_MDBGCTRL1_INTSPECIAL_BIT 0x00000080
+#define DBGPORT_MDBGCTRL1_INTRUSIVE_BIT 0x00000040
+#endif /* METAC_2_1 */
+#define DBGPORT_MDBGCTRL1_THREAD_BITS 0x00000030 /* Thread mask selects threads */
+#define DBGPORT_MDBGCTRL1_THREAD_S 4
+#define DBGPORT_MDBGCTRL1_TRANS_SIZE_BITS 0x0000000C
+#define DBGPORT_MDBGCTRL1_TRANS_SIZE_S 2
+#define DBGPORT_MDBGCTRL1_TRANS_SIZE_32_BIT 0x00000000
+#define DBGPORT_MDBGCTRL1_TRANS_SIZE_16_BIT 0x00000004
+#define DBGPORT_MDBGCTRL1_TRANS_SIZE_8_BIT 0x00000008
+#define DBGPORT_MDBGCTRL1_BYTE_ROUND_BITS 0x00000003
+#define DBGPORT_MDBGCTRL1_BYTE_ROUND_S 0
+#define DBGPORT_MDBGCTRL1_BYTE_ROUND_8_BIT 0x00000001
+#define DBGPORT_MDBGCTRL1_BYTE_ROUND_16_BIT 0x00000002
+
+
+/* L2 Cache registers */
+#define SYSC_L2C_INIT 0x048300C0
+#define SYSC_L2C_INIT_INIT 1
+#define SYSC_L2C_INIT_IN_PROGRESS 0
+#define SYSC_L2C_INIT_COMPLETE 1
+
+#define SYSC_L2C_ENABLE 0x048300D0
+#define SYSC_L2C_ENABLE_ENABLE_BIT 0x00000001
+#define SYSC_L2C_ENABLE_PFENABLE_BIT 0x00000002
+
+#define SYSC_L2C_PURGE 0x048300C8
+#define SYSC_L2C_PURGE_PURGE 1
+#define SYSC_L2C_PURGE_IN_PROGRESS 0
+#define SYSC_L2C_PURGE_COMPLETE 1
+
+#endif /* _ASM_METAG_MEM_H_ */
diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h
new file mode 100644
index 00000000000..acf4b8e6e9d
--- /dev/null
+++ b/arch/metag/include/asm/metag_regs.h
@@ -0,0 +1,1184 @@
+/*
+ * asm/metag_regs.h
+ *
+ * Copyright (C) 2000-2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Various defines for Meta core (non memory-mapped) registers.
+ */
+
+#ifndef _ASM_METAG_REGS_H_
+#define _ASM_METAG_REGS_H_
+
+/*
+ * CHIP Unit Identifiers and Valid/Global register number masks
+ * ------------------------------------------------------------
+ */
+#define TXUCT_ID 0x0 /* Control unit regs */
+#ifdef METAC_1_2
+#define TXUCT_MASK 0xFF0FFFFF /* Valid regs 0..31 */
+#else
+#define TXUCT_MASK 0xFF1FFFFF /* Valid regs 0..31 */
+#endif
+#define TGUCT_MASK 0x00000000 /* No global regs */
+#define TXUD0_ID 0x1 /* Data unit regs */
+#define TXUD1_ID 0x2
+#define TXUDX_MASK 0xFFFFFFFF /* Valid regs 0..31 */
+#define TGUDX_MASK 0xFFFF0000 /* Global regs for base inst */
+#define TXUDXDSP_MASK 0x0F0FFFFF /* Valid DSP regs */
+#define TGUDXDSP_MASK 0x0E0E0000 /* Global DSP ACC regs */
+#define TXUA0_ID 0x3 /* Address unit regs */
+#define TXUA1_ID 0x4
+#define TXUAX_MASK 0x0000FFFF /* Valid regs 0-15 */
+#define TGUAX_MASK 0x0000FF00 /* Global regs 8-15 */
+#define TXUPC_ID 0x5 /* PC registers */
+#define TXUPC_MASK 0x00000003 /* Valid regs 0- 1 */
+#define TGUPC_MASK 0x00000000 /* No global regs */
+#define TXUPORT_ID 0x6 /* Ports are not registers */
+#define TXUTR_ID 0x7
+#define TXUTR_MASK 0x0000005F /* Valid regs 0-3,4,6 */
+#define TGUTR_MASK 0x00000000 /* No global regs */
+#ifdef METAC_2_1
+#define TXUTT_ID 0x8
+#define TXUTT_MASK 0x0000000F /* Valid regs 0-3 */
+#define TGUTT_MASK 0x00000010 /* Global reg 4 */
+#define TXUFP_ID 0x9 /* FPU regs */
+#define TXUFP_MASK 0x0000FFFF /* Valid regs 0-15 */
+#define TGUFP_MASK 0x00000000 /* No global regs */
+#endif /* METAC_2_1 */
+
+#ifdef METAC_1_2
+#define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \
+ TXUAX_MASK, TXUPC_MASK, 0, TXUTR_MASK, \
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+#define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \
+ TGUAX_MASK, TGUPC_MASK, 0, TGUTR_MASK, \
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+#else /* METAC_1_2 */
+#define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \
+ TXUAX_MASK, TXUPC_MASK, 0, TXUTR_MASK, \
+ TXUTT_MASK, TXUFP_MASK, 0, 0, \
+ 0, 0, 0, 0 }
+#define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \
+ TGUAX_MASK, TGUPC_MASK, 0, TGUTR_MASK, \
+ TGUTT_MASK, TGUFP_MASK, 0, 0, \
+ 0, 0, 0, 0 }
+#endif /* !METAC_1_2 */
+
+#define TXUXXDSP_MASKS { 0, TXUDXDSP_MASK, TXUDXDSP_MASK, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+#define TGUXXDSP_MASKS { 0, TGUDXDSP_MASK, TGUDXDSP_MASK, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* -------------------------------------------------------------------------
+; DATA AND ADDRESS UNIT REGISTERS
+; -----------------------------------------------------------------------*/
+/*
+ Thread local D0 registers
+ */
+/* D0.0 ; Holds 32-bit result, can be used as scratch */
+#define D0Re0 D0.0
+/* D0.1 ; Used to pass Arg6_32 */
+#define D0Ar6 D0.1
+/* D0.2 ; Used to pass Arg4_32 */
+#define D0Ar4 D0.2
+/* D0.3 ; Used to pass Arg2_32 to a called routine (see D1.3 below) */
+#define D0Ar2 D0.3
+/* D0.4 ; Can be used as scratch; used to save A0FrP in entry sequences */
+#define D0FrT D0.4
+/* D0.5 ; C compiler assumes preservation, save with D1.5 if used */
+/* D0.6 ; C compiler assumes preservation, save with D1.6 if used */
+/* D0.7 ; C compiler assumes preservation, save with D1.7 if used */
+/* D0.8 ; Use of D0.8 and above is not encouraged */
+/* D0.9 */
+/* D0.10 */
+/* D0.11 */
+/* D0.12 */
+/* D0.13 */
+/* D0.14 */
+/* D0.15 */
+/*
+ Thread local D1 registers
+ */
+/* D1.0 ; Holds top 32-bits of 64-bit result, can be used as scratch */
+#define D1Re0 D1.0
+/* D1.1 ; Used to pass Arg5_32 */
+#define D1Ar5 D1.1
+/* D1.2 ; Used to pass Arg3_32 */
+#define D1Ar3 D1.2
+/* D1.3 ; Used to pass Arg1_32 (first 32-bit argument) to a called routine */
+#define D1Ar1 D1.3
+/* D1.4 ; Used for Return Pointer, save during entry with A0FrP (via D0.4) */
+#define D1RtP D1.4
+/* D1.5 ; C compiler assumes preservation, save if used */
+/* D1.6 ; C compiler assumes preservation, save if used */
+/* D1.7 ; C compiler assumes preservation, save if used */
+/* D1.8 ; Use of D1.8 and above is not encouraged */
+/* D1.9 */
+/* D1.10 */
+/* D1.11 */
+/* D1.12 */
+/* D1.13 */
+/* D1.14 */
+/* D1.15 */
+/*
+ Thread local A0 registers
+ */
+/* A0.0 ; Primary stack pointer */
+#define A0StP A0.0
+/* A0.1 ; Used as local frame pointer in C, save if used (via D0.4) */
+#define A0FrP A0.1
+/* A0.2 */
+/* A0.3 */
+/* A0.4 ; Use of A0.4 and above is not encouraged */
+/* A0.5 */
+/* A0.6 */
+/* A0.7 */
+/*
+ Thread local A1 registers
+ */
+/* A1.0 ; Global static chain pointer - do not modify */
+#define A1GbP A1.0
+/* A1.1 ; Local static chain pointer in C, can be used as scratch */
+#define A1LbP A1.1
+/* A1.2 */
+/* A1.3 */
+/* A1.4 ; Use of A1.4 and above is not encouraged */
+/* A1.5 */
+/* A1.6 */
+/* A1.7 */
+#ifdef METAC_2_1
+/* Renameable registers for use with Fast Interrupts */
+/* The interrupt stack pointer (usually a global register) */
+#define A0IStP A0IReg
+/* The interrupt global pointer (usually a global register) */
+#define A1IGbP A1IReg
+#endif
+/*
+ Further registers may be globally allocated via linkage/loading tools,
+ normally they are not used.
+ */
+/*-------------------------------------------------------------------------
+; STACK STRUCTURE and CALLING CONVENTION
+; -----------------------------------------------------------------------*/
+/*
+; Calling convention indicates that the following is the state of the
+; stack frame at the start of a routine-
+;
+; Arg9_32 [A0StP+#-12]
+; Arg8_32 [A0StP+#- 8]
+; Arg7_32 [A0StP+#- 4]
+; A0StP->
+;
+; Registers D1.3, D0.3, ..., to D0.1 are used to pass Arg1_32 to Arg6_32
+; respectively. If a routine needs to store them on the stack in order
+; to make sub-calls or because of the general complexity of the routine it
+; is best to dump these registers immediately at the start of a routine
+; using an MSETL or SETL instruction-
+;
+; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2; Only dump arguments expected
+;or SETL [A0StP+#8++],D0Ar2 ; Up to two 32-bit args expected
+;
+; For non-leaf routines it is always necessary to save and restore at least
+; the return address value D1RtP on the stack. Also by convention if the
+; frame is saved then a new A0FrP value must be set-up. So for non-leaf
+; routines at this point both these registers must be saved onto the stack
+; using a SETL instruction and the new A0FrP value is then set-up-
+;
+; MOV D0FrT,A0FrP
+; ADD A0FrP,A0StP,#0
+; SETL [A0StP+#8++],D0FrT,D1RtP
+;
+; Registers D0.5/D1.5 through D0.7/D1.7 are assumed to be preserved across calls so
+; a SETL or MSETL instruction can be used to save the current state
+; of these registers if they are modified by the current routine-
+;
+; MSETL [A0StP],D0.5,D0.6,D0.7 ; Only save registers modified
+;or SETL [A0StP+#8++],D0.5 ; Only D0.5 and/or D1.5 modified
+;
+; All of the above sequences can be combined into one maximal case-
+;
+; MOV D0FrT,A0FrP ; Save and calculate new frame pointer
+; ADD A0FrP,A0StP,#(ARS)
+; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
+;
+; Having completed the above sequence the only remaining task on routine
+; entry is to reserve any local and outgoing argument storage space on the
+; stack. This instruction may be omitted if the size of this region is zero-
+;
+; ADD A0StP,A0StP,#(LCS)
+;
+; LCS is the first example of a number of standard locally defined
+; values that can be created to make assembler code more readable and
+; potentially more robust-
+;
+; #define ARS 0x18 ; Register arg bytes saved on stack
+; #define FRS 0x20 ; Frame save area size in bytes
+; #define LCS 0x00 ; Locals and Outgoing arg size
+; #define ARO (LCS+FRS) ; Stack offset to access args
+;
+; All of the above defines should be undefined (#undef) at the end of each
+; routine to avoid accidental use in the next routine.
+;
+; Given all of the above the following stack structure is expected during
+; the body of a routine if all args passed in registers are saved during
+; entry-
+;
+; ; 'Incoming args area'
+; Arg10_32 [A0StP+#-((10*4)+ARO)] Arg9_32 [A0StP+#-(( 9*4)+ARO)]
+; Arg8_32 [A0StP+#-(( 8*4)+ARO)] Arg7_32 [A0StP+#-(( 7*4)+ARO)]
+;--- Call point
+; D0Ar6= Arg6_32 [A0StP+#-(( 6*4)+ARO)] D1Ar5=Arg5_32 [A0StP+#-(( 5*4)+ARO)]
+; D0Ar4= Arg4_32 [A0StP+#-(( 4*4)+ARO)] D1Ar3=Arg3_32 [A0StP+#-(( 3*4)+ARO)]
+; D0Ar2= Arg2_32 [A0StP+#-(( 2*4)+ARO)] D1Ar1=Arg1_32 [A0StP+#-(( 1*4)+ARO)]
+; ; 'Frame area'
+; A0FrP-> D0FrT, D1RtP,
+; D0.5, D1.5,
+; D0.6, D1.6,
+; D0.7, D1.7,
+; ; 'Locals area'
+; Loc0_32 [A0StP+# (( 0*4)-LCS)], Loc1_32 [A0StP+# (( 1*4)-LCS)]
+; .... other locals
+; Locn_32 [A0StP+# (( n*4)-LCS)]
+; ; 'Outgoing args area'
+; Outm_32 [A0StP+#- ( m*4)] .... other outgoing args
+; Out8_32 [A0StP+#- ( 2*4)] Out7_32 [A0StP+#- ( 1*4)]
+; A0StP-> (Out1_32-Out6_32 in regs D1Ar1-D0Ar6)
+;
+; The exit sequence for a non-leaf routine can use the frame pointer created
+; in the entry sequence to optimise the recovery of the full state-
+;
+; MGETL D0FrT,D0.5,D0.6,D0.7,[A0FrP]
+; SUB A0StP,A0FrP,#(ARS+FRS)
+; MOV A0FrP,D0FrT
+; MOV PC,D1RtP
+;
+; Having described the most complex non-leaf case above, it is worth noting
+; that if a routine is a leaf and does not use any of the caller-preserved
+; state, it can be implemented as-
+;
+; ADD A0StP,A0StP,#LCS
+; .... body of routine
+; SUB A0StP,A0StP,#LCS
+; MOV PC,D1RtP
+;
+; The stack adjustments can also be omitted if no local storage is required.
+;
+; Another exit sequence structure is more applicable to a leaf routine
+; with no local frame pointer saved/generated in which the call-saved
+; registers need to be saved and restored-
+;
+; MSETL [A0StP],D0.5,D0.6,D0.7 ; Hence FRS is 0x18, ARS is 0x00
+; ADD A0StP,A0StP,#LCS
+; .... body of routine
+; GETL D0.5,D1.5,[A0StP+#((0*8)-(FRS+LCS))]
+; GETL D0.6,D1.6,[A0StP+#((1*8)-(FRS+LCS))]
+; GETL D0.7,D1.7,[A0StP+#((2*8)-(FRS+LCS))]
+; SUB A0StP,A0StP,#(ARS+FRS+LCS)
+; MOV PC,D1RtP
+;
+; Lastly, to support profiling, assembler code should use a fixed entry/exit
+; sequence if the trigger define _GMON_ASM is defined-
+;
+; #ifndef _GMON_ASM
+; ... optimised entry code
+; #else
+; ; Profiling entry case
+; MOV D0FrT,A0FrP ; Save and calculate new frame pointer
+; ADD A0FrP,A0StP,#(ARS)
+; MSETL [A0StP],...,D0FrT,... or SETL [A0FrP],D0FrT,D1RtP
+; CALLR D0FrT,_mcount_wrapper
+; #endif
+; ... body of routine
+; #ifndef _GMON_ASM
+; ... optimised exit code
+; #else
+; ; Profiling exit case
+; MGETL D0FrT,...,[A0FrP] or GETL D0FrT,D1RtP,[A0FrP++]
+; SUB A0StP,A0FrP,#(ARS+FRS)
+; MOV A0FrP,D0FrT
+; MOV PC,D1RtP
+; #endif
+
+
+; -------------------------------------------------------------------------
+; CONTROL UNIT REGISTERS
+; -------------------------------------------------------------------------
+;
+; See the assembler guide, hardware documentation, or the field values
+; defined below for some details of the use of these registers.
+*/
+#define TXENABLE CT.0 /* Need to define bit-field values in these */
+#define TXMODE CT.1
+#define TXSTATUS CT.2 /* DEFAULT 0x00020000 */
+#define TXRPT CT.3
+#define TXTIMER CT.4
+#define TXL1START CT.5
+#define TXL1END CT.6
+#define TXL1COUNT CT.7
+#define TXL2START CT.8
+#define TXL2END CT.9
+#define TXL2COUNT CT.10
+#define TXBPOBITS CT.11
+#define TXMRSIZE CT.12
+#define TXTIMERI CT.13
+#define TXDRCTRL CT.14 /* DEFAULT 0x0XXXF0F0 */
+#define TXDRSIZE CT.15
+#define TXCATCH0 CT.16
+#define TXCATCH1 CT.17
+#define TXCATCH2 CT.18
+#define TXCATCH3 CT.19
+
+#ifdef METAC_2_1
+#define TXDEFR CT.20
+#define TXCPRS CT.21
+#endif
+
+#define TXINTERN0 CT.23
+#define TXAMAREG0 CT.24
+#define TXAMAREG1 CT.25
+#define TXAMAREG2 CT.26
+#define TXAMAREG3 CT.27
+#define TXDIVTIME CT.28 /* DEFAULT 0x00000001 */
+#define TXPRIVEXT CT.29 /* DEFAULT 0x003B0000 */
+#define TXTACTCYC CT.30
+#define TXIDLECYC CT.31
+
+/*****************************************************************************
+ * CONTROL UNIT REGISTER BITS
+ ****************************************************************************/
+/*
+ * The following registers and where appropriate the sub-fields of those
+ * registers are defined for pervasive use in controlling program flow.
+ */
+
+/*
+ * TXENABLE register fields - only the thread id is routinely useful
+ */
+#define TXENABLE_REGNUM 0
+#define TXENABLE_THREAD_BITS 0x00000700
+#define TXENABLE_THREAD_S 8
+#define TXENABLE_REV_STEP_BITS 0x000000F0
+#define TXENABLE_REV_STEP_S 4
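[Editor's note: for example, the thread id (0-7) is recovered from a TXENABLE value as in this sketch; the means of reading the control register itself is left abstract:

#include <stdint.h>

/* Extract the hardware thread id from a TXENABLE value. */
static inline unsigned int txenable_thread_id(uint32_t txenable)
{
	return (txenable & TXENABLE_THREAD_BITS) >> TXENABLE_THREAD_S;
}
]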
+
+/*
+ * TXMODE register - controls extensions of the instruction set
+ */
+#define TXMODE_REGNUM 1
+#define TXMODE_DEFAULT 0 /* All fields default to zero */
+
+/*
+ * TXSTATUS register - contains a couple of stable bits that can be used
+ * to determine the privilege processing level and interrupt
+ * processing level of the current thread.
+ */
+#define TXSTATUS_REGNUM 2
+#define TXSTATUS_PSTAT_BIT 0x00020000 /* -> Privilege active */
+#define TXSTATUS_PSTAT_S 17
+#define TXSTATUS_ISTAT_BIT 0x00010000 /* -> In interrupt state */
+#define TXSTATUS_ISTAT_S 16
+
+/*
+ * These are all relatively boring registers, mostly full 32-bit
+ */
+#define TXRPT_REGNUM 3 /* Repeat counter for XFR... instructions */
+#define TXTIMER_REGNUM 4 /* Timer-- causes timer trigger on overflow */
+#define TXL1START_REGNUM 5 /* Hardware Loop 1 Start-PC/End-PC/Count */
+#define TXL1END_REGNUM 6
+#define TXL1COUNT_REGNUM 7
+#define TXL2START_REGNUM 8 /* Hardware Loop 2 Start-PC/End-PC/Count */
+#define TXL2END_REGNUM 9
+#define TXL2COUNT_REGNUM 10
+#define TXBPOBITS_REGNUM 11 /* Branch predict override bits - tune perf */
+#define TXTIMERI_REGNUM 13 /* Timer-- time based interrupt trigger */
+
+/*
+ * TXDIVTIME register is routinely read to calculate the time-base for
+ * the TXTIMER register.
+ */
+#define TXDIVTIME_REGNUM 28
+#define TXDIVTIME_DIV_BITS 0x000000FF
+#define TXDIVTIME_DIV_S 0
+#define TXDIVTIME_DIV_MIN 0x00000001 /* Maximum resolution */
+#define TXDIVTIME_DIV_MAX 0x00000100 /* 1/1 -> 1/256 resolution */
+#define TXDIVTIME_BASE_HZ 1000000 /* Timers run at 1MHz @1/1 */
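[Editor's note: so at the minimum divide the timer ticks at 1MHz, and larger dividers scale that down linearly. A sketch of the conversion; since the 8-bit field cannot hold DIV_MAX (0x100), a stored value of 0 is assumed here to encode the maximum divide of 256:

#include <stdint.h>

/* Effective TXTIMER tick rate for a given TXDIVTIME value; assumes a
 * zero divider field encodes the maximum divide of 256. */
static inline uint32_t txtimer_hz(uint32_t txdivtime)
{
	uint32_t div = (txdivtime & TXDIVTIME_DIV_BITS) >> TXDIVTIME_DIV_S;

	return TXDIVTIME_BASE_HZ / (div ? div : 256);
}
]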
+
+/*
+ * TXPRIVEXT register can be consulted to decide if write access to a
+ * part of the thread's register set is not permitted when in
+ * unprivileged mode (PSTAT == 0).
+ */
+#define TXPRIVEXT_REGNUM 29
+#define TXPRIVEXT_COPRO_BITS 0xFF000000 /* Co-processor 0-7 */
+#define TXPRIVEXT_COPRO_S 24
+#ifndef METAC_1_2
+#define TXPRIVEXT_TXTIMER_BIT 0x00080000 /* TXTIMER priv */
+#define TXPRIVEXT_TRACE_BIT 0x00040000 /* TTEXEC|TTCTRL|GTEXEC */
+#endif
+#define TXPRIVEXT_TXTRIGGER_BIT 0x00020000 /* TXSTAT|TXMASK|TXPOLL */
+#define TXPRIVEXT_TXGBLCREG_BIT 0x00010000 /* Global common regs */
+#define TXPRIVEXT_CBPRIV_BIT 0x00008000 /* Mem i/f dump priv */
+#define TXPRIVEXT_ILOCK_BIT 0x00004000 /* LOCK inst priv */
+#define TXPRIVEXT_TXITACCYC_BIT 0x00002000 /* TXIDLECYC|TXTACTCYC */
+#define TXPRIVEXT_TXDIVTIME_BIT 0x00001000 /* TXDIVTIME priv */
+#define TXPRIVEXT_TXAMAREGX_BIT 0x00000800 /* TXAMAREGX priv */
+#define TXPRIVEXT_TXTIMERI_BIT 0x00000400 /* TXTIMERI priv */
+#define TXPRIVEXT_TXSTATUS_BIT 0x00000200 /* TXSTATUS priv */
+#define TXPRIVEXT_TXDISABLE_BIT 0x00000100 /* TXENABLE priv */
+#ifndef METAC_1_2
+#define TXPRIVEXT_MINIMON_BIT 0x00000080 /* Enable Minim features */
+#define TXPRIVEXT_OLDBCCON_BIT 0x00000020 /* Restore Static predictions */
+#define TXPRIVEXT_ALIGNREW_BIT 0x00000010 /* Align & precise checks */
+#endif
+#define TXPRIVEXT_KEEPPRI_BIT 0x00000008 /* Use AMA_Priority if ISTAT=1 */
+#define TXPRIVEXT_TXTOGGLEI_BIT 0x00000001 /* TX.....I priv */
+
+/*
+ * TXTACTCYC register - counts instructions issued for this thread
+ */
+#define TXTACTCYC_REGNUM 30
+#define TXTACTCYC_COUNT_MASK 0x00FFFFFF
+
+/*
+ * TXIDLECYC register - counts idle cycles
+ */
+#define TXIDLECYC_REGNUM 31
+#define TXIDLECYC_COUNT_MASK 0x00FFFFFF
+
+/*****************************************************************************
+ * DSP EXTENSIONS
+ ****************************************************************************/
+/*
+ * The following values relate to fields and controls that only a program
+ * using the DSP extensions of the META instruction set need to know.
+ */
+
+
+#ifndef METAC_1_2
+/*
+ * Allow co-processor hardware to replace the read pipeline data source in
+ * appropriate cases.
+ */
+#define TXMODE_RDCPEN_BIT 0x00800000
+#endif
+
+/*
+ * Address unit addressing modes
+ */
+#define TXMODE_A1ADDR_BITS 0x00007000
+#define TXMODE_A1ADDR_S 12
+#define TXMODE_A0ADDR_BITS 0x00000700
+#define TXMODE_A0ADDR_S 8
+#define TXMODE_AXADDR_MODULO 3
+#define TXMODE_AXADDR_REVB 4
+#define TXMODE_AXADDR_REVW 5
+#define TXMODE_AXADDR_REVD 6
+#define TXMODE_AXADDR_REVL 7
+
+/*
+ * Data unit OverScale select (default 0 -> normal, 1 -> top 16 bits)
+ */
+#define TXMODE_DXOVERSCALE_BIT 0x00000080
+
+/*
+ * Data unit MX mode select (default 0 -> MX16, 1 -> MX8)
+ */
+#define TXMODE_M8_BIT 0x00000040
+
+/*
+ * Data unit accumulator saturation point (default -> 40 bit accumulator)
+ */
+#define TXMODE_DXACCSAT_BIT 0x00000020 /* Set for 32-bit accumulator */
+
+/*
+ * Data unit accumulator saturation enable (default 0 -> no saturation)
+ */
+#define TXMODE_DXSAT_BIT 0x00000010
+
+/*
+ * Data unit master rounding control (default 0 -> normal, 1 -> convergent)
+ */
+#define TXMODE_DXROUNDING_BIT 0x00000008
+
+/*
+ * Data unit product shift for fractional arithmetic (default off)
+ */
+#define TXMODE_DXPRODSHIFT_BIT 0x00000004
+
+/*
+ * Select the arithmetic mode (multiply mostly) for both data units
+ */
+#define TXMODE_DXARITH_BITS 0x00000003
+#define TXMODE_DXARITH_32 3
+#define TXMODE_DXARITH_32H 2
+#define TXMODE_DXARITH_S16 1
+#define TXMODE_DXARITH_16 0
+
+/*
+ * TXMRSIZE register value is only relevant when DSP modulo addressing is active
+ */
+#define TXMRSIZE_REGNUM 12
+#define TXMRSIZE_MIN 0x0002 /* 0, 1 -> normal addressing logic */
+#define TXMRSIZE_MAX 0xFFFF
+
+/*
+ * TXDRCTRL register can be used to detect the actual size of the DSP RAM
+ * partitions allocated to this thread.
+ */
+#define TXDRCTRL_REGNUM 14
+#define TXDRCTRL_SINESIZE_BITS 0x0F000000
+#define TXDRCTRL_SINESIZE_S 24
+#define TXDRCTRL_RAMSZPOW_BITS 0x001F0000 /* Limit = (1<<RAMSZPOW)-1 */
+#define TXDRCTRL_RAMSZPOW_S 16
+#define TXDRCTRL_D1RSZAND_BITS 0x0000F000 /* Mask top 4 bits - D1 */
+#define TXDRCTRL_D1RSZAND_S 12
+#define TXDRCTRL_D0RSZAND_BITS 0x000000F0 /* Mask top 4 bits - D0 */
+#define TXDRCTRL_D0RSZAND_S 4
+/* Given extracted RAMSZPOW and DnRSZAND fields this returns the size */
+#define TXDRCTRL_DXSIZE(Pow, AndBits) \
+ ((((~(AndBits)) & 0x0f) + 1) << ((Pow)-4))
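[Editor's note: for example, the size of this thread's D0 partition falls out of the RAMSZPOW and D0RSZAND fields as in this sketch; the helper name is illustrative:

#include <stdint.h>

/* Size in bytes of this thread's D0 DSP RAM partition. */
static inline uint32_t txdrctrl_d0_size(uint32_t txdrctrl)
{
	uint32_t pow = (txdrctrl & TXDRCTRL_RAMSZPOW_BITS) >> TXDRCTRL_RAMSZPOW_S;
	uint32_t szand = (txdrctrl & TXDRCTRL_D0RSZAND_BITS) >> TXDRCTRL_D0RSZAND_S;

	return TXDRCTRL_DXSIZE(pow, szand);
}
]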
+
+/*
+ * TXDRSIZE register provides modulo addressing options for each DSP RAM
+ */
+#define TXDRSIZE_REGNUM 15
+#define TXDRSIZE_R1MOD_BITS 0xFFFF0000
+#define TXDRSIZE_R1MOD_S 16
+#define TXDRSIZE_R0MOD_BITS 0x0000FFFF
+#define TXDRSIZE_R0MOD_S 0
+
+#define TXDRSIZE_RBRAD_SCALE_BITS 0x70000000
+#define TXDRSIZE_RBRAD_SCALE_S 28
+#define TXDRSIZE_RBMODSIZE_BITS 0x0FFF0000
+#define TXDRSIZE_RBMODSIZE_S 16
+#define TXDRSIZE_RARAD_SCALE_BITS 0x00007000
+#define TXDRSIZE_RARAD_SCALE_S 12
+#define TXDRSIZE_RAMODSIZE_BITS 0x00000FFF
+#define TXDRSIZE_RAMODSIZE_S 0
+
+/*****************************************************************************
+ * DEFERRED and BUS ERROR EXTENSION
+ ****************************************************************************/
+
+/*
+ * TXDEFR register - Deferred exception control
+ */
+#define TXDEFR_REGNUM 20
+#define TXDEFR_DEFAULT 0 /* All fields default to zero */
+
+/*
+ * Bus error state is a multi-bit positive/negative event notification from
+ * the bus infrastructure.
+ */
+#define TXDEFR_BUS_ERR_BIT 0x80000000 /* Set if error (LSB STATE) */
+#define TXDEFR_BUS_ERRI_BIT 0x40000000 /* Fetch returned error */
+#define TXDEFR_BUS_STATE_BITS 0x3F000000 /* Bus event/state data */
+#define TXDEFR_BUS_STATE_S 24
+#define TXDEFR_BUS_TRIG_BIT 0x00800000 /* Set when bus error seen */
+
+/*
+ * Bus events are collected by background code in a deferred manner unless
+ * selected to trigger an extended interrupt HALT trigger when they occur.
+ */
+#define TXDEFR_BUS_ICTRL_BIT 0x00000080 /* Enable interrupt trigger */
+
+/*
+ * CHIP Automatic Mips Allocation control registers
+ * ------------------------------------------------
+ */
+
+/* CT Bank AMA Registers */
+#define TXAMAREG0_REGNUM 24
+#ifdef METAC_1_2
+#define TXAMAREG0_CTRL_BITS 0x07000000
+#else /* METAC_1_2 */
+#define TXAMAREG0_RCOFF_BIT 0x08000000
+#define TXAMAREG0_DLINEHLT_BIT 0x04000000
+#define TXAMAREG0_DLINEDIS_BIT 0x02000000
+#define TXAMAREG0_CYCSTRICT_BIT 0x01000000
+#define TXAMAREG0_CTRL_BITS (TXAMAREG0_RCOFF_BIT | \
+ TXAMAREG0_DLINEHLT_BIT | \
+ TXAMAREG0_DLINEDIS_BIT | \
+ TXAMAREG0_CYCSTRICT_BIT)
+#endif /* !METAC_1_2 */
+#define TXAMAREG0_CTRL_S 24
+#define TXAMAREG0_MDM_BIT 0x00400000
+#define TXAMAREG0_MPF_BIT 0x00200000
+#define TXAMAREG0_MPE_BIT 0x00100000
+#define TXAMAREG0_MASK_BITS (TXAMAREG0_MDM_BIT | \
+ TXAMAREG0_MPF_BIT | \
+ TXAMAREG0_MPE_BIT)
+#define TXAMAREG0_MASK_S 20
+#define TXAMAREG0_SDM_BIT 0x00040000
+#define TXAMAREG0_SPF_BIT 0x00020000
+#define TXAMAREG0_SPE_BIT 0x00010000
+#define TXAMAREG0_STATUS_BITS (TXAMAREG0_SDM_BIT | \
+ TXAMAREG0_SPF_BIT | \
+ TXAMAREG0_SPE_BIT)
+#define TXAMAREG0_STATUS_S 16
+#define TXAMAREG0_PRIORITY_BITS 0x0000FF00
+#define TXAMAREG0_PRIORITY_S 8
+#define TXAMAREG0_BVALUE_BITS 0x000000FF
+#define TXAMAREG0_BVALUE_S 0
+
+#define TXAMAREG1_REGNUM 25
+#define TXAMAREG1_DELAYC_BITS 0x07FFFFFF
+#define TXAMAREG1_DELAYC_S 0
+
+#define TXAMAREG2_REGNUM 26
+#ifdef METAC_1_2
+#define TXAMAREG2_DLINEC_BITS 0x00FFFFFF
+#define TXAMAREG2_DLINEC_S 0
+#else /* METAC_1_2 */
+#define TXAMAREG2_IRQPRIORITY_BIT 0xFF000000
+#define TXAMAREG2_IRQPRIORITY_S 24
+#define TXAMAREG2_DLINEC_BITS 0x00FFFFF0
+#define TXAMAREG2_DLINEC_S 4
+#endif /* !METAC_1_2 */
+
+#define TXAMAREG3_REGNUM 27
+#define TXAMAREG2_AMABLOCK_BIT 0x00080000
+#define TXAMAREG2_AMAC_BITS 0x0000FFFF
+#define TXAMAREG2_AMAC_S 0
+
+/*****************************************************************************
+ * FPU EXTENSIONS
+ ****************************************************************************/
+/*
+ * The following registers only exist in FPU enabled cores.
+ */
+
+/*
+ * TXMODE register - FPU rounding mode control/status fields
+ */
+#define TXMODE_FPURMODE_BITS 0x00030000
+#define TXMODE_FPURMODE_S 16
+#define TXMODE_FPURMODEWRITE_BIT 0x00040000 /* Set to change FPURMODE */
+
+/*
+ * TXDEFR register - FPU exception handling/state is a significant source
+ * of deferrable errors. Run-time S/W can move handling to interrupt level
+ * using the DEFR instruction to collect state.
+ */
+#define TXDEFR_FPE_FE_BITS 0x003F0000 /* Set by FPU_FE events */
+#define TXDEFR_FPE_FE_S 16
+
+#define TXDEFR_FPE_INEXACT_FE_BIT 0x010000
+#define TXDEFR_FPE_UNDERFLOW_FE_BIT 0x020000
+#define TXDEFR_FPE_OVERFLOW_FE_BIT 0x040000
+#define TXDEFR_FPE_DIVBYZERO_FE_BIT 0x080000
+#define TXDEFR_FPE_INVALID_FE_BIT 0x100000
+#define TXDEFR_FPE_DENORMAL_FE_BIT 0x200000
+
+#define TXDEFR_FPE_ICTRL_BITS 0x000003F /* Route to interrupts */
+#define TXDEFR_FPE_ICTRL_S 0
+
+#define TXDEFR_FPE_INEXACT_ICTRL_BIT 0x01
+#define TXDEFR_FPE_UNDERFLOW_ICTRL_BIT 0x02
+#define TXDEFR_FPE_OVERFLOW_ICTRL_BIT 0x04
+#define TXDEFR_FPE_DIVBYZERO_ICTRL_BIT 0x08
+#define TXDEFR_FPE_INVALID_ICTRL_BIT 0x10
+#define TXDEFR_FPE_DENORMAL_ICTRL_BIT 0x20
+
+/*
+ * DETAILED FPU RELATED VALUES
+ * ---------------------------
+ */
+
+/*
+ * Rounding mode field in TXMODE can hold a number of logical values
+ */
+#define METAG_FPURMODE_TONEAREST 0x0 /* Default */
+#define METAG_FPURMODE_TOWARDZERO 0x1
+#define METAG_FPURMODE_UPWARD 0x2
+#define METAG_FPURMODE_DOWNWARD 0x3
+
+/*
+ * In order to set the TXMODE register field that controls the rounding mode
+ * an extra bit must be set in the value written versus that read in order
+ * to gate writes to the rounding mode field. This allows other non-FPU code
+ * to modify TXMODE without knowledge of the FPU unit's presence and without
+ * influencing the FPU rounding mode. This macro adds the required bit so new
+ * rounding modes are accepted.
+ */
+#define TXMODE_FPURMODE_SET(FPURMode) \
+ (TXMODE_FPURMODEWRITE_BIT + ((FPURMode)<<TXMODE_FPURMODE_S))
+
+/*
+ * To successfully restore TXMODE to zero at the end of the function the
+ * following value (rather than zero) must be used.
+ */
+#define TXMODE_FPURMODE_RESET (TXMODE_FPURMODEWRITE_BIT)
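[Editor's note: usage is then symmetric: gate a new mode in, and gate the default back on exit. A minimal sketch of the two values an FPU-using routine would write to TXMODE:

#include <stdint.h>

/* Value to write to TXMODE to select round-toward-zero; the WRITE bit
 * gates the update so non-FPU users of TXMODE are unaffected. */
static inline uint32_t txmode_round_toward_zero(void)
{
	return TXMODE_FPURMODE_SET(METAG_FPURMODE_TOWARDZERO);
}

/* Value to write to restore the default rounding state on exit. */
static inline uint32_t txmode_rounding_restore(void)
{
	return TXMODE_FPURMODE_RESET;
}
]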
+
+/*
+ * In TXSTATUS a special bit exists to indicate if FPU H/W has been accessed
+ * since it was last reset.
+ */
+#define TXSTATUS_FPACTIVE_BIT 0x01000000
+
+/*
+ * Exception state (see TXDEFR_FPE_FE_*) and enabling for interrupt
+ * level processing (see TXDEFR_FPE_ICTRL_*) are controlled by similar
+ * bit mask locations within each field.
+ */
+#define METAG_FPU_FE_INEXACT 0x01
+#define METAG_FPU_FE_UNDERFLOW 0x02
+#define METAG_FPU_FE_OVERFLOW 0x04
+#define METAG_FPU_FE_DIVBYZERO 0x08
+#define METAG_FPU_FE_INVALID 0x10
+#define METAG_FPU_FE_DENORMAL 0x20
+#define METAG_FPU_FE_ALL_EXCEPT (METAG_FPU_FE_INEXACT | \
+ METAG_FPU_FE_UNDERFLOW | \
+ METAG_FPU_FE_OVERFLOW | \
+ METAG_FPU_FE_DIVBYZERO | \
+ METAG_FPU_FE_INVALID | \
+ METAG_FPU_FE_DENORMAL)
+
+/*****************************************************************************
+ * THREAD CONTROL, ERROR, OR INTERRUPT STATE EXTENSIONS
+ ****************************************************************************/
+/*
+ * The following values are only relevant to code that externally controls
+ * threads, handles errors/interrupts, and/or sets up interrupt/error handlers
+ * for subsequent use.
+ */
+
+/*
+ * TXENABLE register fields - only ENABLE_BIT is potentially read/write
+ */
+#define TXENABLE_MAJOR_REV_BITS 0xFF000000
+#define TXENABLE_MAJOR_REV_S 24
+#define TXENABLE_MINOR_REV_BITS 0x00FF0000
+#define TXENABLE_MINOR_REV_S 16
+#define TXENABLE_CLASS_BITS 0x0000F000
+#define TXENABLE_CLASS_S 12
+#define TXENABLE_CLASS_DSP 0x0 /* -> DSP Thread */
+#define TXENABLE_CLASS_LDSP 0x8 /* -> DSP LITE Thread */
+#define TXENABLE_CLASS_GP 0xC /* -> General Purpose Thread */
+#define TXENABLE_CLASSALT_LFPU 0x2 /* Set to indicate LITE FPU */
+#define TXENABLE_CLASSALT_FPUR8 0x1 /* Set to indicate 8xFPU regs */
+#define TXENABLE_MTXARCH_BIT 0x00000800
+#define TXENABLE_STEP_REV_BITS 0x000000F0
+#define TXENABLE_STEP_REV_S 4
+#define TXENABLE_STOPPED_BIT 0x00000004 /* TXOFF due to ENABLE->0 */
+#define TXENABLE_OFF_BIT 0x00000002 /* Thread is in off state */
+#define TXENABLE_ENABLE_BIT 0x00000001 /* Set if running */
+
+/*
+ * TXSTATUS register - used by external/internal interrupt/error handler
+ */
+#define TXSTATUS_CB1MARKER_BIT 0x00800000 /* -> int level mem state */
+#define TXSTATUS_CBMARKER_BIT 0x00400000 /* -> mem i/f state dumped */
+#define TXSTATUS_MEM_FAULT_BITS 0x00300000
+#define TXSTATUS_MEM_FAULT_S 20
+#define TXSTATUS_MEMFAULT_NONE 0x0 /* -> No memory fault */
+#define TXSTATUS_MEMFAULT_GEN 0x1 /* -> General fault */
+#define TXSTATUS_MEMFAULT_PF 0x2 /* -> Page fault */
+#define TXSTATUS_MEMFAULT_RO 0x3 /* -> Read only fault */
+#define TXSTATUS_MAJOR_HALT_BITS 0x000C0000
+#define TXSTATUS_MAJOR_HALT_S 18
+#define TXSTATUS_MAJHALT_TRAP 0x0 /* -> SWITCH inst used */
+#define TXSTATUS_MAJHALT_INST 0x1 /* -> Unknown inst or fetch */
+#define TXSTATUS_MAJHALT_PRIV 0x2 /* -> Internal privilege */
+#define TXSTATUS_MAJHALT_MEM 0x3 /* -> Memory i/f fault */
+#define TXSTATUS_L_STEP_BITS 0x00000800 /* -> Progress of L oper */
+#define TXSTATUS_LSM_STEP_BITS 0x00000700 /* -> Progress of L/S mult */
+#define TXSTATUS_LSM_STEP_S 8
+#define TXSTATUS_FLAG_BITS 0x0000001F /* -> All the flags */
+#define TXSTATUS_SCC_BIT 0x00000010 /* -> Split-16 flags ... */
+#define TXSTATUS_SCF_LZ_BIT 0x00000008 /* -> Split-16 Low Z flag */
+#define TXSTATUS_SCF_HZ_BIT 0x00000004 /* -> Split-16 High Z flag */
+#define TXSTATUS_SCF_HC_BIT 0x00000002 /* -> Split-16 High C flag */
+#define TXSTATUS_SCF_LC_BIT 0x00000001 /* -> Split-16 Low C flag */
+#define TXSTATUS_CF_Z_BIT 0x00000008 /* -> Condition Z flag */
+#define TXSTATUS_CF_N_BIT 0x00000004 /* -> Condition N flag */
+#define TXSTATUS_CF_O_BIT 0x00000002 /* -> Condition O flag */
+#define TXSTATUS_CF_C_BIT 0x00000001 /* -> Condition C flag */
+
+/*
+ * TXCATCH0-3 register contents may store information on a memory operation
+ * that has failed if the bit TXSTATUS_CBMARKER_BIT is set.
+ */
+#define TXCATCH0_REGNUM 16
+#define TXCATCH1_REGNUM 17
+#define TXCATCH1_ADDR_BITS 0xFFFFFFFF /* TXCATCH1 is Addr 0-31 */
+#define TXCATCH1_ADDR_S 0
+#define TXCATCH2_REGNUM 18
+#define TXCATCH2_DATA0_BITS 0xFFFFFFFF /* TXCATCH2 is Data 0-31 */
+#define TXCATCH2_DATA0_S 0
+#define TXCATCH3_REGNUM 19
+#define TXCATCH3_DATA1_BITS 0xFFFFFFFF /* TXCATCH3 is Data 32-63 */
+#define TXCATCH3_DATA1_S 0
+
+/*
+ * Detailed catch state information
+ * --------------------------------
+ */
+
+/* Contents of TXCATCH0 register */
+#define TXCATCH0_LDRXX_BITS 0xF8000000 /* Load destination reg 0-31 */
+#define TXCATCH0_LDRXX_S 27
+#define TXCATCH0_LDDST_BITS 0x07FF0000 /* Load destination bits */
+#define TXCATCH0_LDDST_S 16
+#define TXCATCH0_LDDST_D1DSP 0x400 /* One bit set if it's a LOAD */
+#define TXCATCH0_LDDST_D0DSP 0x200
+#define TXCATCH0_LDDST_TMPLT 0x100
+#define TXCATCH0_LDDST_TR 0x080
+#ifdef METAC_2_1
+#define TXCATCH0_LDDST_FPU 0x040
+#endif
+#define TXCATCH0_LDDST_PC 0x020
+#define TXCATCH0_LDDST_A1 0x010
+#define TXCATCH0_LDDST_A0 0x008
+#define TXCATCH0_LDDST_D1 0x004
+#define TXCATCH0_LDDST_D0 0x002
+#define TXCATCH0_LDDST_CT 0x001
+#ifdef METAC_2_1
+#define TXCATCH0_WATCHSTOP_BIT 0x00004000 /* Set if Data Watch set fault */
+#endif
+#define TXCATCH0_WATCHS_BIT 0x00004000 /* Set if Data Watch set fault */
+#define TXCATCH0_WATCH1_BIT 0x00002000 /* Set if Data Watch 1 matches */
+#define TXCATCH0_WATCH0_BIT 0x00001000 /* Set if Data Watch 0 matches */
+#define TXCATCH0_FAULT_BITS 0x00000C00 /* See TXSTATUS_MEMFAULT_* */
+#define TXCATCH0_FAULT_S 10
+#define TXCATCH0_PRIV_BIT 0x00000200 /* Privilege of transaction */
+#define TXCATCH0_READ_BIT 0x00000100 /* Set for Read or Load cases */
+
+#ifdef METAC_2_1
+/* LNKGET Marker bit in TXCATCH0 */
+#define TXCATCH0_LNKGET_MARKER_BIT 0x00000008
+#define TXCATCH0_PREPROC_BIT 0x00000004
+#endif
+
+/* Loads are indicated by one of the LDDST bits being set */
+#define TXCATCH0_LDM16_BIT 0x00000004 /* Load M16 flag */
+#define TXCATCH0_LDL2L1_BITS 0x00000003 /* Load data size L2,L1 */
+#define TXCATCH0_LDL2L1_S 0
+
+/* Reads are indicated by the READ bit being set without LDDST bits */
+#define TXCATCH0_RAXX_BITS 0x0000001F /* RAXX issue port for read */
+#define TXCATCH0_RAXX_S 0
+
+/* Write operations are all that remain if READ bit is not set */
+#define TXCATCH0_WMASK_BITS 0x000000FF /* Write byte lane mask */
+#define TXCATCH0_WMASK_S 0
+
+#ifdef METAC_2_1
+
+/* When a FPU exception is signalled then FPUSPEC == FPUSPEC_TAG */
+#define TXCATCH0_FPURDREG_BITS 0xF8000000
+#define TXCATCH0_FPURDREG_S 27
+#define TXCATCH0_FPUR1REG_BITS 0x07C00000
+#define TXCATCH0_FPUR1REG_S 22
+#define TXCATCH0_FPUSPEC_BITS 0x000F0000
+#define TXCATCH0_FPUSPEC_S 16
+#define TXCATCH0_FPUSPEC_TAG 0xF
+#define TXCATCH0_FPUINSTA_BIT 0x00001000
+#define TXCATCH0_FPUINSTQ_BIT 0x00000800
+#define TXCATCH0_FPUINSTZ_BIT 0x00000400
+#define TXCATCH0_FPUINSTN_BIT 0x00000200
+#define TXCATCH0_FPUINSTO3O_BIT 0x00000100
+#define TXCATCH0_FPUWIDTH_BITS 0x000000C0
+#define TXCATCH0_FPUWIDTH_S 6
+#define TXCATCH0_FPUWIDTH_FLOAT 0
+#define TXCATCH0_FPUWIDTH_DOUBLE 1
+#define TXCATCH0_FPUWIDTH_PAIRED 2
+#define TXCATCH0_FPUOPENC_BITS 0x0000003F
+#define TXCATCH0_FPUOPENC_S 0
+#define TXCATCH0_FPUOPENC_ADD 0 /* rop1=Rs1, rop3=Rs2 */
+#define TXCATCH0_FPUOPENC_SUB 1 /* rop1=Rs1, rop3=Rs2 */
+#define TXCATCH0_FPUOPENC_MUL 2 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_ATOI 3 /* rop3=Rs */
+#define TXCATCH0_FPUOPENC_ATOX 4 /* rop3=Rs, uses #Imm */
+#define TXCATCH0_FPUOPENC_ITOA 5 /* rop3=Rs */
+#define TXCATCH0_FPUOPENC_XTOA 6 /* rop3=Rs, uses #Imm */
+#define TXCATCH0_FPUOPENC_ATOH 7 /* rop2=Rs */
+#define TXCATCH0_FPUOPENC_HTOA 8 /* rop2=Rs */
+#define TXCATCH0_FPUOPENC_DTOF 9 /* rop3=Rs */
+#define TXCATCH0_FPUOPENC_FTOD 10 /* rop3=Rs */
+#define TXCATCH0_FPUOPENC_DTOL 11 /* rop3=Rs */
+#define TXCATCH0_FPUOPENC_LTOD 12 /* rop3=Rs */
+#define TXCATCH0_FPUOPENC_DTOXL 13 /* rop3=Rs, uses #imm */
+#define TXCATCH0_FPUOPENC_XLTOD 14 /* rop3=Rs, uses #imm */
+#define TXCATCH0_FPUOPENC_CMP 15 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_MIN 16 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_MAX 17 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_ADDRE 18 /* rop1=Rs1, rop3=Rs2 */
+#define TXCATCH0_FPUOPENC_SUBRE 19 /* rop1=Rs1, rop3=Rs2 */
+#define TXCATCH0_FPUOPENC_MULRE 20 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_MXA 21 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define TXCATCH0_FPUOPENC_MXAS 22 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define TXCATCH0_FPUOPENC_MAR 23 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_MARS 24 /* rop1=Rs1, rop2=Rs2 */
+#define TXCATCH0_FPUOPENC_MUZ 25 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define TXCATCH0_FPUOPENC_MUZS 26 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define TXCATCH0_FPUOPENC_RCP 27 /* rop2=Rs */
+#define TXCATCH0_FPUOPENC_RSQ 28 /* rop2=Rs */
+
+/* For floating point exceptions TXCATCH1 is used to carry extra data */
+#define TXCATCH1_FPUR2REG_BITS 0xF8000000
+#define TXCATCH1_FPUR2REG_S 27
+#define TXCATCH1_FPUR3REG_BITS 0x07C00000 /* Undefined if O3O set */
+#define TXCATCH1_FPUR3REG_S 22
+#define TXCATCH1_FPUIMM16_BITS 0x0000FFFF
+#define TXCATCH1_FPUIMM16_S 0
+
+#endif /* METAC_2_1 */
+
+/*
+ * TXDIVTIME register used to hold the partial base address of memory i/f
+ * state dump area. Now deprecated.
+ */
+#define TXDIVTIME_CBBASE_MASK 0x03FFFE00
+#define TXDIVTIME_CBBASE_LINBASE 0x80000000
+#define TXDIVTIME_CBBASE_LINBOFF 0x00000000 /* BGnd state */
+#define TXDIVTIME_CBBASE_LINIOFF 0x00000100 /* Int state */
+
+/*
+ * TXDIVTIME register used to indicate if the read pipeline was dirty when a
+ * thread was interrupted, halted, or generated an exception. It is invalid
+ * to attempt to issue a further pipeline read address while the read
+ * pipeline is in the dirty state.
+ */
+#define TXDIVTIME_RPDIRTY_BIT 0x80000000
+
+/*
+ * Further bits in the TXDIVTIME register allow interrupt handling code to
+ * short-cut the discovery of the most significant bit last read from TXSTATI.
+ *
+ * This is the bit number of the trigger line that a low level interrupt
+ * handler should acknowledge, and perhaps also the index of a corresponding
+ * handler function.
+ */
+#define TXDIVTIME_IRQENC_BITS 0x0F000000
+#define TXDIVTIME_IRQENC_S 24
+
+/*
+ * If TXDIVTIME_RPVALID_BIT is set the read pipeline contained significant
+ * information when the thread was interrupted|halted|exceptioned. Each slot
+ * containing data is indicated by a one bit in the corresponding
+ * TXDIVTIME_RPMASK_BITS bit (the least significant bit relates to the first
+ * location in the read pipeline - the most likely to hold data). Empty slots
+ * contain zeroes, with no interlock applied on reads even if RPDIRTY is
+ * currently set; RPMASK itself is read-only state.
+ */
+#define TXDIVTIME_RPMASK_BITS 0x003F0000 /* -> Full (1) Empty (0) */
+#define TXDIVTIME_RPMASK_S 16
+
+/*
+ * TXPRIVEXT register can be used to single step thread execution and
+ * enforce synchronous memory i/f address checking for debugging purposes.
+ */
+#define TXPRIVEXT_TXSTEP_BIT 0x00000004
+#define TXPRIVEXT_MEMCHECK_BIT 0x00000002
+
+/*
+ * TXINTERNx registers holds internal state information for H/W debugging only
+ */
+#define TXINTERN0_REGNUM 23
+#define TXINTERN0_LOCK2_BITS 0xF0000000
+#define TXINTERN0_LOCK2_S 28
+#define TXINTERN0_LOCK1_BITS 0x0F000000
+#define TXINTERN0_LOCK1_S 24
+#define TXINTERN0_TIFDF_BITS 0x0000F000
+#define TXINTERN0_TIFDF_S 12
+#define TXINTERN0_TIFIB_BITS 0x00000F00
+#define TXINTERN0_TIFIB_S 8
+#define TXINTERN0_TIFAF_BITS 0x000000F0
+#define TXINTERN0_TIFAF_S 4
+#define TXINTERN0_MSTATE_BITS 0x0000000F
+#define TXINTERN0_MSTATE_S 0
+
+/*
+ * TXSTAT, TXMASK, TXPOLL, TXSTATI, TXMASKI, TXPOLLI registers from trigger
+ * bank all have similar contents (upper kick count bits not in MASK regs)
+ */
+#define TXSTAT_REGNUM 0
+#define TXSTAT_TIMER_BIT 0x00000001
+#define TXSTAT_TIMER_S 0
+#define TXSTAT_KICK_BIT 0x00000002
+#define TXSTAT_KICK_S 1
+#define TXSTAT_DEFER_BIT 0x00000008
+#define TXSTAT_DEFER_S 3
+#define TXSTAT_EXTTRIG_BITS 0x0000FFF0
+#define TXSTAT_EXTTRIG_S 4
+#define TXSTAT_FPE_BITS 0x003F0000
+#define TXSTAT_FPE_S 16
+#define TXSTAT_FPE_DENORMAL_BIT 0x00200000
+#define TXSTAT_FPE_DENORMAL_S 21
+#define TXSTAT_FPE_INVALID_BIT 0x00100000
+#define TXSTAT_FPE_INVALID_S 20
+#define TXSTAT_FPE_DIVBYZERO_BIT 0x00080000
+#define TXSTAT_FPE_DIVBYZERO_S 19
+#define TXSTAT_FPE_OVERFLOW_BIT 0x00040000
+#define TXSTAT_FPE_OVERFLOW_S 18
+#define TXSTAT_FPE_UNDERFLOW_BIT 0x00020000
+#define TXSTAT_FPE_UNDERFLOW_S 17
+#define TXSTAT_FPE_INEXACT_BIT 0x00010000
+#define TXSTAT_FPE_INEXACT_S 16
+#define TXSTAT_BUSERR_BIT 0x00800000 /* Set if bus error/ack state */
+#define TXSTAT_BUSERR_S 23
+#define TXSTAT_BUSSTATE_BITS 0xFF000000 /* Read only */
+#define TXSTAT_BUSSTATE_S 24
+#define TXSTAT_KICKCNT_BITS 0xFFFF0000
+#define TXSTAT_KICKCNT_S 16
+#define TXMASK_REGNUM 1
+#define TXSTATI_REGNUM 2
+#define TXSTATI_BGNDHALT_BIT 0x00000004
+#define TXMASKI_REGNUM 3
+#define TXPOLL_REGNUM 4
+#define TXPOLLI_REGNUM 6
+
+/*
+ * TXDRCTRL register can be used to partition the DSP RAM space available to
+ * this thread at startup. This is achieved by offsetting the region allocated
+ * to each thread.
+ */
+#define TXDRCTRL_D1PARTOR_BITS 0x00000F00 /* OR's into top 4 bits */
+#define TXDRCTRL_D1PARTOR_S 8
+#define TXDRCTRL_D0PARTOR_BITS 0x0000000F /* OR's into top 4 bits */
+#define TXDRCTRL_D0PARTOR_S 0
+/* Given extracted Pow and Or fields this is threads base within DSP RAM */
+#define TXDRCTRL_DXBASE(Pow, Or) ((Or)<<((Pow)-4))
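[Editor's note: combined with TXDRCTRL_DXSIZE above, a thread can locate its own window in DSP RAM. A sketch for the D0 side; the helper name is illustrative:

#include <stdint.h>

/* Base offset of this thread's D0 partition within DSP RAM. */
static inline uint32_t txdrctrl_d0_base(uint32_t txdrctrl)
{
	uint32_t pow = (txdrctrl & TXDRCTRL_RAMSZPOW_BITS) >> TXDRCTRL_RAMSZPOW_S;
	uint32_t partor = (txdrctrl & TXDRCTRL_D0PARTOR_BITS) >> TXDRCTRL_D0PARTOR_S;

	return TXDRCTRL_DXBASE(pow, partor);
}
]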
+
+/*****************************************************************************
+ * RUN TIME TRACE CONTROL REGISTERS
+ ****************************************************************************/
+/*
+ * The following values are only relevant to code that implements run-time
+ * trace features within the META Core
+ */
+#define TTEXEC TT.0
+#define TTCTRL TT.1
+#define TTMARK TT.2
+#define TTREC TT.3
+#define GTEXEC TT.4
+
+#define TTEXEC_REGNUM 0
+#define TTEXEC_EXTTRIGAND_BITS 0x7F000000
+#define TTEXEC_EXTTRIGAND_S 24
+#define TTEXEC_EXTTRIGEN_BIT 0x00008000
+#define TTEXEC_EXTTRIGMATCH_BITS 0x00007F00
+#define TTEXEC_EXTTRIGMATCH_S 8
+#define TTEXEC_TCMODE_BITS 0x00000003
+#define TTEXEC_TCMODE_S 0
+
+#define TTCTRL_REGNUM 1
+#define TTCTRL_TRACETT_BITS 0x00008000
+#define TTCTRL_TRACETT_S 15
+#define TTCTRL_TRACEALL_BITS 0x00002000
+#define TTCTRL_TRACEALL_S 13
+#ifdef METAC_2_1
+#define TTCTRL_TRACEALLTAG_BITS 0x00000400
+#define TTCTRL_TRACEALLTAG_S 10
+#endif /* METAC_2_1 */
+#define TTCTRL_TRACETAG_BITS 0x00000200
+#define TTCTRL_TRACETAG_S 9
+#define TTCTRL_TRACETTPC_BITS 0x00000080
+#define TTCTRL_TRACETTPC_S 7
+#define TTCTRL_TRACEMPC_BITS 0x00000020
+#define TTCTRL_TRACEMPC_S 5
+#define TTCTRL_TRACEEN_BITS 0x00000008
+#define TTCTRL_TRACEEN_S 3
+#define TTCTRL_TRACEEN1_BITS 0x00000004
+#define TTCTRL_TRACEEN1_S 2
+#define TTCTRL_TRACEPC_BITS 0x00000002
+#define TTCTRL_TRACEPC_S 1
+
+#ifdef METAC_2_1
+#define TTMARK_REGNUM 2
+#define TTMARK_BITS 0xFFFFFFFF
+#define TTMARK_S 0x0
+
+#define TTREC_REGNUM 3
+#define TTREC_BITS 0xFFFFFFFFFFFFFFFF
+#define TTREC_S 0x0
+#endif /* METAC_2_1 */
+
+#define GTEXEC_REGNUM 4
+#define GTEXEC_DCRUN_BITS 0x80000000
+#define GTEXEC_DCRUN_S 31
+#define GTEXEC_ICMODE_BITS 0x0C000000
+#define GTEXEC_ICMODE_S 26
+#define GTEXEC_TCMODE_BITS 0x03000000
+#define GTEXEC_TCMODE_S 24
+#define GTEXEC_PERF1CMODE_BITS 0x00040000
+#define GTEXEC_PERF1CMODE_S 18
+#define GTEXEC_PERF0CMODE_BITS 0x00010000
+#define GTEXEC_PERF0CMODE_S 16
+#define GTEXEC_REFMSEL_BITS 0x0000F000
+#define GTEXEC_REFMSEL_S 12
+#define GTEXEC_METRICTH_BITS 0x000003FF
+#define GTEXEC_METRICTH_S 0
+
+#ifdef METAC_2_1
+/*
+ * Clock Control registers
+ * -----------------------
+ */
+#define TXCLKCTRL_REGNUM 22
+
+/*
+ * The default setting has clocks always on (DEFON); turning all clocks off
+ * can only be done from external devices (OFF), while enabling automatic
+ * clock gating allows clocks to stop as units fall idle.
+ */
+#define TXCLKCTRL_ALL_OFF 0x02222222
+#define TXCLKCTRL_ALL_DEFON 0x01111111
+#define TXCLKCTRL_ALL_AUTO 0x02222222
+
+/*
+ * Individual fields control caches, floating point and main data/addr units
+ */
+#define TXCLKCTRL_CLOCKIC_BITS 0x03000000
+#define TXCLKCTRL_CLOCKIC_S 24
+#define TXCLKCTRL_CLOCKDC_BITS 0x00300000
+#define TXCLKCTRL_CLOCKDC_S 20
+#define TXCLKCTRL_CLOCKFP_BITS 0x00030000
+#define TXCLKCTRL_CLOCKFP_S 16
+#define TXCLKCTRL_CLOCKD1_BITS 0x00003000
+#define TXCLKCTRL_CLOCKD1_S 12
+#define TXCLKCTRL_CLOCKD0_BITS 0x00000300
+#define TXCLKCTRL_CLOCKD0_S 8
+#define TXCLKCTRL_CLOCKA1_BITS 0x00000030
+#define TXCLKCTRL_CLOCKA1_S 4
+#define TXCLKCTRL_CLOCKA0_BITS 0x00000003
+#define TXCLKCTRL_CLOCKA0_S 0
+
+/*
+ * Individual settings for each field are common
+ */
+#define TXCLKCTRL_CLOCKxx_OFF 0
+#define TXCLKCTRL_CLOCKxx_DEFON 1
+#define TXCLKCTRL_CLOCKxx_AUTO 2
+
+#endif /* METAC_2_1 */
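A minimal sketch of composing a TXCLKCTRL value from the per-field settings
above, auto-gating the caches and forcing everything else on (an
illustrative choice, not a recommended configuration):

    /* Caches auto-gated, all other units forced on: 0x02211111 */
    unsigned int clkctrl =
        (TXCLKCTRL_CLOCKxx_AUTO  << TXCLKCTRL_CLOCKIC_S) |
        (TXCLKCTRL_CLOCKxx_AUTO  << TXCLKCTRL_CLOCKDC_S) |
        (TXCLKCTRL_CLOCKxx_DEFON << TXCLKCTRL_CLOCKFP_S) |
        (TXCLKCTRL_CLOCKxx_DEFON << TXCLKCTRL_CLOCKD1_S) |
        (TXCLKCTRL_CLOCKxx_DEFON << TXCLKCTRL_CLOCKD0_S) |
        (TXCLKCTRL_CLOCKxx_DEFON << TXCLKCTRL_CLOCKA1_S) |
        (TXCLKCTRL_CLOCKxx_DEFON << TXCLKCTRL_CLOCKA0_S);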
+
+#ifdef METAC_2_1
+/*
+ * Fast interrupt new bits
+ * -----------------------
+ */
+#define TXSTATUS_IPTOGGLE_BIT 0x80000000 /* Prev PToggle of TXPRIVEXT */
+#define TXSTATUS_ISTATE_BIT 0x40000000 /* IState bit */
+#define TXSTATUS_IWAIT_BIT 0x20000000 /* Wait indefinitely in decision step */
+#define TXSTATUS_IEXCEPT_BIT 0x10000000 /* Indicates an exception occurred */
+#define TXSTATUS_IRPCOUNT_BITS 0x0E000000 /* Number of 'dirty' data entries */
+#define TXSTATUS_IRPCOUNT_S 25
+#define TXSTATUS_IRQSTAT_BITS 0x0000F000 /* IRQEnc bits, trigger or interrupts */
+#define TXSTATUS_IRQSTAT_S 12
+#define TXSTATUS_LNKSETOK_BIT 0x00000020 /* LNKSetOK bit, successful LNKSET */
+
+/* New fields in TXDIVTIME for the fast interrupt system */
+#define TXDIVTIME_IACTIVE_BIT 0x00008000 /* Enable new interrupt system */
+#define TXDIVTIME_INONEST_BIT 0x00004000 /* Gate nested interrupt */
+#define TXDIVTIME_IREGIDXGATE_BIT 0x00002000 /* Gate of the IRegIdx field */
+#define TXDIVTIME_IREGIDX_BITS 0x00001E00 /* Index of A0.0/1 replaces */
+#define TXDIVTIME_IREGIDX_S 9
+#define TXDIVTIME_NOST_BIT 0x00000100 /* disable superthreading bit */
+#endif
+
+#endif /* _ASM_METAG_REGS_H_ */
diff --git a/arch/metag/include/asm/mman.h b/arch/metag/include/asm/mman.h
new file mode 100644
index 00000000000..17999dba927
--- /dev/null
+++ b/arch/metag/include/asm/mman.h
@@ -0,0 +1,11 @@
+#ifndef __METAG_MMAN_H__
+#define __METAG_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+#ifndef __ASSEMBLY__
+#define arch_mmap_check metag_mmap_check
+int metag_mmap_check(unsigned long addr, unsigned long len,
+ unsigned long flags);
+#endif
+#endif /* __METAG_MMAN_H__ */
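The implementation of metag_mmap_check() lives elsewhere in the tree; a
minimal sketch of what such a hook might do, assuming its job is simply to
reject fixed mappings below the usable user range (TASK_UNMAPPED_BASE is
defined in asm/processor.h), could look like this:

    #include <linux/errno.h>
    #include <linux/mman.h>

    int metag_mmap_check(unsigned long addr, unsigned long len,
                         unsigned long flags)
    {
            /* Refuse MAP_FIXED requests below the user mapping base. */
            if ((flags & MAP_FIXED) && addr < TASK_UNMAPPED_BASE)
                    return -EINVAL;
            return 0;
    }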
diff --git a/arch/metag/include/asm/mmu.h b/arch/metag/include/asm/mmu.h
new file mode 100644
index 00000000000..9c321147c0b
--- /dev/null
+++ b/arch/metag/include/asm/mmu.h
@@ -0,0 +1,77 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+#ifdef CONFIG_METAG_USER_TCM
+#include <linux/list.h>
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#include <asm/page.h>
+#endif
+
+typedef struct {
+ /* Software pgd base pointer used for Meta 1.x MMU. */
+ unsigned long pgd_base;
+#ifdef CONFIG_METAG_USER_TCM
+ struct list_head tcm;
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+#if HPAGE_SHIFT < HUGEPT_SHIFT
+ /* last partially filled huge page table address */
+ unsigned long part_huge;
+#endif
+#endif
+} mm_context_t;
+
+/* Given a virtual address, return the pte for the top level 4MB entry
+ * that maps that address.
+ * Returns 0 (an empty pte) if that range is not mapped.
+ */
+unsigned long mmu_read_first_level_page(unsigned long vaddr);
+
+/* Given a linear (virtual) address, return the second level 4k pte
+ * that maps that address. Returns 0 if the address is not mapped.
+ */
+unsigned long mmu_read_second_level_page(unsigned long vaddr);
+
+/* Get the virtual base address of the MMU */
+unsigned long mmu_get_base(void);
+
+/* Initialize the MMU. */
+void mmu_init(unsigned long mem_end);
+
+#ifdef CONFIG_METAG_META21_MMU
+/*
+ * For cpu "cpu" calculate and return the address of the
+ * MMCU_TnLOCAL_TABLE_PHYS0 if running in local-space or
+ * MMCU_TnGLOBAL_TABLE_PHYS0 if running in global-space.
+ */
+static inline unsigned long mmu_phys0_addr(unsigned int cpu)
+{
+ unsigned long phys0;
+
+ phys0 = (MMCU_T0LOCAL_TABLE_PHYS0 +
+ (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
+ (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
+
+ return phys0;
+}
+
+/*
+ * For cpu "cpu" calculate and return the address of the
+ * MMCU_TnLOCAL_TABLE_PHYS1 if running in local-space or
+ * MMCU_TnGLOBAL_TABLE_PHYS1 if running in global-space.
+ */
+static inline unsigned long mmu_phys1_addr(unsigned int cpu)
+{
+ unsigned long phys1;
+
+ phys1 = (MMCU_T0LOCAL_TABLE_PHYS1 +
+ (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
+ (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
+
+ return phys1;
+}
+#endif /* CONFIG_METAG_META21_MMU */
+
+#endif
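As an example of how the software table walkers declared above might be
used, here is a hedged sketch that resolves a virtual address to a physical
one, assuming a normal PAGE_SIZE mapping and that <asm/page.h> is available
for PAGE_MASK:

    static unsigned long vaddr_to_paddr(unsigned long vaddr)
    {
            unsigned long pte = mmu_read_second_level_page(vaddr);

            if (!pte)                       /* empty pte: not mapped */
                    return 0;

            /* Page frame base plus the offset within the page. */
            return (pte & PAGE_MASK) | (vaddr & ~PAGE_MASK);
    }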
diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h
new file mode 100644
index 00000000000..ae2a71b5e0b
--- /dev/null
+++ b/arch/metag/include/asm/mmu_context.h
@@ -0,0 +1,113 @@
+#ifndef __METAG_MMU_CONTEXT_H
+#define __METAG_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+#include <linux/io.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+#ifndef CONFIG_METAG_META21_MMU
+ /* We use context to store a pointer to the page holding the
+ * pgd of a process while it is running. While a process is not
+ * running the pgd and context fields should be equal.
+ */
+ mm->context.pgd_base = (unsigned long) mm->pgd;
+#endif
+#ifdef CONFIG_METAG_USER_TCM
+ INIT_LIST_HEAD(&mm->context.tcm);
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_METAG_USER_TCM
+
+#include <linux/slab.h>
+#include <asm/tcm.h>
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+ struct tcm_allocation *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
+ tcm_free(pos->tag, pos->addr, pos->size);
+ list_del(&pos->list);
+ kfree(pos);
+ }
+}
+#else
+#define destroy_context(mm) do { } while (0)
+#endif
+
+#ifdef CONFIG_METAG_META21_MMU
+static inline void load_pgd(pgd_t *pgd, int thread)
+{
+ unsigned long phys0 = mmu_phys0_addr(thread);
+ unsigned long phys1 = mmu_phys1_addr(thread);
+
+ /*
+ * 0x900 2Gb address space
+ * The permission bits apply to the MMU table region, which gives a 2MB
+ * window into physical memory. We especially don't want userland to be
+ * able to access this.
+ */
+ metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
+ _PAGE_PRESENT, phys0);
+ /* Set new MMU base address */
+ metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
+}
+#endif
+
+static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifdef CONFIG_METAG_META21_MMU
+ load_pgd(next->pgd, hard_processor_id());
+#else
+ unsigned int i;
+
+ /* prev->context == prev->pgd in the case where we are initially
+ switching from the init task to the first process. */
+ if (prev->context.pgd_base != (unsigned long) prev->pgd) {
+ for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
+ ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
+ } else
+ prev->pgd = (pgd_t *)mmu_get_base();
+
+ next->pgd = prev->pgd;
+ prev->pgd = (pgd_t *) prev->context.pgd_base;
+
+ for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
+ next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];
+
+ flush_cache_all();
+#endif
+ flush_tlb_all();
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ if (prev != next)
+ switch_mmu(prev, next);
+}
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+ switch_mmu(prev_mm, next_mm);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+#endif
diff --git a/arch/metag/include/asm/mmzone.h b/arch/metag/include/asm/mmzone.h
new file mode 100644
index 00000000000..9c88a9c65f5
--- /dev/null
+++ b/arch/metag/include/asm/mmzone.h
@@ -0,0 +1,42 @@
+#ifndef __ASM_METAG_MMZONE_H
+#define __ASM_METAG_MMZONE_H
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+#include <linux/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+ int nid;
+
+ for (nid = 0; nid < MAX_NUMNODES; nid++)
+ if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
+ break;
+
+ return nid;
+}
+
+static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
+{
+ return NODE_DATA(pfn_to_nid(pfn));
+}
+
+/* arch/metag/mm/numa.c */
+void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end);
+#else
+static inline void
+setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+{
+}
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+
+#ifdef CONFIG_NUMA
+/* SoC specific mem init */
+void __init soc_mem_setup(void);
+#else
+static inline void __init soc_mem_setup(void) {}
+#endif
+
+#endif /* __ASM_METAG_MMZONE_H */
diff --git a/arch/metag/include/asm/module.h b/arch/metag/include/asm/module.h
new file mode 100644
index 00000000000..e47e60941b2
--- /dev/null
+++ b/arch/metag/include/asm/module.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_METAG_MODULE_H
+#define _ASM_METAG_MODULE_H
+
+#include <asm-generic/module.h>
+
+struct metag_plt_entry {
+ /* Indirect jump instruction sequence. */
+ unsigned long tramp[2];
+};
+
+struct mod_arch_specific {
+ /* Indices of PLT sections within module. */
+ unsigned int core_plt_section, init_plt_section;
+};
+
+#if defined CONFIG_METAG_META12
+#define MODULE_PROC_FAMILY "META 1.2 "
+#elif defined CONFIG_METAG_META21
+#define MODULE_PROC_FAMILY "META 2.1 "
+#else
+#define MODULE_PROC_FAMILY ""
+#endif
+
+#ifdef CONFIG_4KSTACKS
+#define MODULE_STACKSIZE "4KSTACKS "
+#else
+#define MODULE_STACKSIZE ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
+
+#ifdef MODULE
+asm(".section .plt,\"ax\",@progbits; .balign 8; .previous");
+asm(".section .init.plt,\"ax\",@progbits; .balign 8; .previous");
+#endif
+
+#endif /* _ASM_METAG_MODULE_H */
diff --git a/arch/metag/include/asm/page.h b/arch/metag/include/asm/page.h
new file mode 100644
index 00000000000..1e8e281b8bb
--- /dev/null
+++ b/arch/metag/include/asm/page.h
@@ -0,0 +1,128 @@
+#ifndef _METAG_PAGE_H
+#define _METAG_PAGE_H
+
+#include <linux/const.h>
+
+#include <asm/metag_mem.h>
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define PAGE_SHIFT 13
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#endif
+
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define HPAGE_SHIFT 13
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define HPAGE_SHIFT 14
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define HPAGE_SHIFT 15
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define HPAGE_SHIFT 16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define HPAGE_SHIFT 17
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define HPAGE_SHIFT 18
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define HPAGE_SHIFT 19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define HPAGE_SHIFT 20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define HPAGE_SHIFT 21
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define HPAGE_SHIFT 22
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+# define HPAGE_MASK (~(HPAGE_SIZE-1))
+# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
+/*
+ * We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level
+ * page tables with normal pages in them.
+ */
+# define HUGEPT_SHIFT (22)
+# define HUGEPT_ALIGN (1 << HUGEPT_SHIFT)
+# define HUGEPT_MASK (HUGEPT_ALIGN - 1)
+# define ALIGN_HUGEPT(x) ALIGN(x, HUGEPT_ALIGN)
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* On the Meta, we would like to know if the address (heap) we have is
+ * in local or global space.
+ */
+#define is_global_space(addr) ((addr) > 0x7fffffff)
+#define is_local_space(addr) (!is_global_space(addr))
+
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+/* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that
+ * being either global or local space.
+ */
+#define PAGE_OFFSET (CONFIG_PAGE_OFFSET)
+
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+#define META_MEMORY_BASE LINGLOBAL_BASE
+#define META_MEMORY_LIMIT LINGLOBAL_LIMIT
+#else
+#define META_MEMORY_BASE LINLOCAL_BASE
+#define META_MEMORY_LIMIT LINLOCAL_LIMIT
+#endif
+
+/* Offset between physical and virtual mapping of kernel memory. */
+extern unsigned int meta_memoffset;
+
+#define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset))
+#define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset)))
+
+extern unsigned long pfn_base;
+#define ARCH_PFN_OFFSET (pfn_base)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#ifdef CONFIG_FLATMEM
+extern unsigned long max_pfn;
+extern unsigned long min_low_pfn;
+#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_pfn)
+#endif
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _METAG_PAGE_H */
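To make the linear-mapping macros concrete, a short sketch of the round
trips they are expected to satisfy (some_kernel_address is a hypothetical
lowmem kernel address; meta_memoffset and pfn_base are assumed to have been
initialised by boot code):

    void *kaddr = some_kernel_address;       /* hypothetical input */
    unsigned long phys = __pa(kaddr);        /* kaddr - meta_memoffset */
    void *virt = __va(phys);                 /* == kaddr again */
    struct page *pg = virt_to_page(kaddr);   /* pfn_to_page(phys >> PAGE_SHIFT) */
    unsigned long pfn = page_to_pfn(pg);     /* >= ARCH_PFN_OFFSET */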
diff --git a/arch/metag/include/asm/perf_event.h b/arch/metag/include/asm/perf_event.h
new file mode 100644
index 00000000000..105bbff0149
--- /dev/null
+++ b/arch/metag/include/asm/perf_event.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_METAG_PERF_EVENT_H
+#define __ASM_METAG_PERF_EVENT_H
+
+#endif /* __ASM_METAG_PERF_EVENT_H */
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
new file mode 100644
index 00000000000..275d9285141
--- /dev/null
+++ b/arch/metag/include/asm/pgalloc.h
@@ -0,0 +1,79 @@
+#ifndef _METAG_PGALLOC_H
+#define _METAG_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/mm.h>
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE | page_to_phys(pte)))
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Allocate and free page tables.
+ */
+#ifdef CONFIG_METAG_META21_MMU
+static inline void pgd_ctor(pgd_t *pgd)
+{
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+#else
+#define pgd_ctor(x) do { } while (0)
+#endif
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ if (pgd)
+ pgd_ctor(pgd);
+ return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
+ __GFP_ZERO);
+ return pte;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+ pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ if (pte)
+ pgtable_page_ctor(pte);
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+#define __pte_free_tlb(tlb, pte, addr) \
+ do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+ } while (0)
+
+#define check_pgt_cache() do { } while (0)
+
+#endif
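A sketch of how the generic memory-management code is expected to use these
helpers to populate a pmd slot with a freshly allocated page table (locking
and failure paths omitted; mm, pmd and addr are assumed to come from the
caller):

    pgd_t *pgd = pgd_alloc(mm);                   /* new top-level table */
    pte_t *kpte = pte_alloc_one_kernel(mm, addr); /* zeroed second level */

    if (pgd && kpte)
            /* pmd entry becomes _PAGE_TABLE | __pa(kpte) */
            pmd_populate_kernel(mm, pmd, kpte);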
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
new file mode 100644
index 00000000000..1cd13d59519
--- /dev/null
+++ b/arch/metag/include/asm/pgtable.h
@@ -0,0 +1,370 @@
+/*
+ * Macros and functions to manipulate Meta page tables.
+ */
+
+#ifndef _METAG_PGTABLE_H
+#define _METAG_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+#define CONSISTENT_START 0xF7000000
+#define CONSISTENT_END 0xF73FFFFF
+#define VMALLOC_START 0xF8000000
+#define VMALLOC_END 0xFFFEFFFF
+#else
+#define CONSISTENT_START 0x77000000
+#define CONSISTENT_END 0x773FFFFF
+#define VMALLOC_START 0x78000000
+#define VMALLOC_END 0x7FFFFFFF
+#endif
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1 0x020
+#define _PAGE_CACHE_CTRL0 0x040
+#define _PAGE_CACHE_CTRL1 0x080
+#define _PAGE_ALWAYS_ZERO_2 0x100
+#define _PAGE_ALWAYS_ZERO_3 0x200
+#define _PAGE_ALWAYS_ZERO_4 0x400
+#define _PAGE_ALWAYS_ZERO_5 0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used. Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
+#define _PAGE_FILE _PAGE_ALWAYS_ZERO_3
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL _PAGE_PRIV
+
+/* No caching of this page */
+#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* Burst caching - good for data streaming */
+#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full caching */
+#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+ _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK (PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT 1
+#define _PAGE_SZ_4K (0x0)
+#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ (_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ (_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ (_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE (_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE (_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE (_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE (_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE (_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE (_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE (_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE (_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE (_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE (_PAGE_SZ_4M)
+#endif
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * Meta, we use that, but "fold" the mid level into the top-level page
+ * table.
+ */
+
+/* PGDIR_SHIFT determines the size of the area a second-level page table can
+ * map. This is always 4MB.
+ */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically. First level tables
+ * always map 2Gb (local or global) at a granularity of 4MB, second-level
+ * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
+ * 1024 entries).
+ */
+#define PTRS_PER_PTE (PGDIR_SIZE/PAGE_SIZE)
+#define HPTRS_PER_PTE (PGDIR_SIZE/HPAGE_SIZE)
+#define PTRS_PER_PGD 512
+
+#define USER_PTRS_PER_PGD 256
+#define FIRST_USER_ADDRESS META_MEMORY_BASE
+#define FIRST_USER_PGD_NR pgd_index(FIRST_USER_ADDRESS)
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_CACHEABLE)
+
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+ _PAGE_ACCESSED | _PAGE_CACHEABLE)
+#define PAGE_SHARED_C PAGE_SHARED
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_CACHEABLE)
+#define PAGE_COPY_C PAGE_COPY
+
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_CACHEABLE)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_WRITE | \
+ _PAGE_CACHEABLE | _PAGE_KERNEL)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY_C
+#define __P111 PAGE_COPY_C
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED_C
+#define __S111 PAGE_SHARED_C
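The __Pxxx/__Sxxx tables are indexed by the low three mmap protection bits
(read, write, execute); private mappings use the __P table so that writable
pages start out copy-on-write, while shared mappings use __S. For example:

    /* PROT_READ | PROT_WRITE, MAP_PRIVATE: COW semantics */
    pgprot_t private_rw = __P011;   /* == PAGE_COPY (no _PAGE_WRITE yet) */
    /* PROT_READ | PROT_WRITE, MAP_SHARED: immediately writable */
    pgprot_t shared_rw  = __S011;   /* == PAGE_SHARED */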
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+/* zero page used for uninitialized stuff */
+extern unsigned long empty_zero_page;
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+/* Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
+ != (_PAGE_TABLE & ~_PAGE_SZ_MASK))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
+
+/*
+ * Macros to mark a page protection as write-combining or uncacheable.
+ */
+#define pgprot_writecombine(prot) \
+ __pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))
+
+#define pgprot_noncached(prot) \
+ __pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
+ if (!paddr)
+ return 0;
+ return (unsigned long)__va(paddr);
+}
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_shift(pmd) (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
+ >> _PAGE_SZ_SHIFT))
+#define pmd_num_ptrs(pmd) (PGDIR_SIZE >> pmd_page_shift(pmd))
+
+/*
+ * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global
+ * space drop the top bit before indexing the pgd.
+ */
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+#define pgd_index(address) ((((address) & ~0x80000000) >> PGDIR_SHIFT) \
+ & (PTRS_PER_PGD-1))
+#else
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#endif
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/* Find an entry in the second-level page table.. */
+#if !defined(CONFIG_HUGETLB_PAGE)
+ /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
+# define pte_index(pmd, address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#else
+ /* some pages are huge, so read 1st level pt to find out */
+# define pte_index(pmd, address) \
+ (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
+#endif
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
+#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
+#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Meta doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte)
+{
+}
+
+/*
+ * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e))
+ * Since PAGE_PRESENT is bit 1, we can use the bits above that.
+ */
+#define __swp_type(x) (((x).val >> 1) & 0xff)
+#define __swp_offset(x) ((x).val >> 10)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
+ ((offset) << 10) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define PTE_FILE_MAX_BITS 22
+#define pte_to_pgoff(x) (pte_val(x) >> 10)
+#define pgoff_to_pte(x) __pte(((x) << 10) | _PAGE_FILE)
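A quick round trip through the swap encoding above; the type occupies bits
1..8 and the offset starts at bit 10, so bit 0 (_PAGE_PRESENT) is
guaranteed clear:

    swp_entry_t e = __swp_entry(5, 0x1234);  /* val = (5 << 1) | (0x1234 << 10) */
    BUG_ON(__swp_type(e) != 5);
    BUG_ON(__swp_offset(e) != 0x1234);
    pte_t swp_pte = __swp_entry_to_pte(e);   /* !pte_present(), !pte_none() */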
+
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+void paging_init(unsigned long mem_end);
+
+#ifdef CONFIG_METAG_META12
+/* This is a workaround for an issue in Meta 1 cores. These cores cache
+ * invalid entries in the TLB so we always need to flush whenever we add
+ * a new pte. Unfortunately we can only flush the whole TLB not shoot down
+ * single entries so this is sub-optimal. This implementation ensures that
+ * we will get a flush at the second attempt, so we may still get repeated
+ * faults, we just don't overflow the kernel stack handling them.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ } \
+ flush_tlb_page(__vma, __address); \
+ __changed; \
+})
+#endif
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _METAG_PGTABLE_H */
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
new file mode 100644
index 00000000000..9b029a7911c
--- /dev/null
+++ b/arch/metag/include/asm/processor.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 Imagination Technologies
+ */
+
+#ifndef __ASM_METAG_PROCESSOR_H
+#define __ASM_METAG_PROCESSOR_H
+
+#include <linux/atomic.h>
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/metag_regs.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/* The task stops where the kernel starts */
+#define TASK_SIZE PAGE_OFFSET
+/* Add an extra page of padding at the top of the stack for the guard page. */
+#define STACK_TOP (TASK_SIZE - PAGE_SIZE)
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE META_MEMORY_BASE
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#ifdef CONFIG_METAG_FPU
+struct meta_fpu_context {
+ TBICTXEXTFPU fpstate;
+ union {
+ struct {
+ TBICTXEXTBB4 fx8_15;
+ TBICTXEXTFPACC fpacc;
+ } fx8_15;
+ struct {
+ TBICTXEXTFPACC fpacc;
+ TBICTXEXTBB4 unused;
+ } nofx8_15;
+ } extfpstate;
+ bool needs_restore;
+};
+#else
+struct meta_fpu_context {};
+#endif
+
+#ifdef CONFIG_METAG_DSP
+struct meta_ext_context {
+ struct {
+ TBIEXTCTX ctx;
+ TBICTXEXTBB8 bb8;
+ TBIDUAL ax[TBICTXEXTAXX_BYTES / sizeof(TBIDUAL)];
+ TBICTXEXTHL2 hl2;
+ TBICTXEXTTDPR ext;
+ TBICTXEXTRP6 rp;
+ } regs;
+
+ /* DSPRAM A and B save areas. */
+ void *ram[2];
+
+ /* ECH encoded size of DSPRAM save areas. */
+ unsigned int ram_sz[2];
+};
+#else
+struct meta_ext_context {};
+#endif
+
+struct thread_struct {
+ PTBICTX kernel_context;
+ /* A copy of the user process Sig.SaveMask. */
+ unsigned int user_flags;
+ struct meta_fpu_context *fpu_context;
+ void __user *tls_ptr;
+ unsigned short int_depth;
+ unsigned short txdefr_failure;
+ struct meta_ext_context *dsp_context;
+};
+
+#define INIT_THREAD { \
+ NULL, /* kernel_context */ \
+ 0, /* user_flags */ \
+ NULL, /* fpu_context */ \
+ NULL, /* tls_ptr */ \
+ 1, /* int_depth - we start in kernel */ \
+ 0, /* txdefr_failure */ \
+ NULL, /* dsp_context */ \
+}
+
+/* This must be a #define because it references 'current', which is not
+ * visible yet at this point.
+ *
+ * Stack layout is as below.
+
+ argc argument counter (integer)
+ argv[0] program name (pointer)
+ argv[1...N] program args (pointers)
+ argv[argc-1] end of args (integer)
+ NULL
+ env[0...N] environment variables (pointers)
+ NULL
+
+ */
+#define start_thread(regs, pc, usp) do { \
+ unsigned int *argc = (unsigned int *) bprm->exec; \
+ set_fs(USER_DS); \
+ current->thread.int_depth = 1; \
+ /* Force this process down to user land */ \
+ regs->ctx.SaveMask = TBICTX_PRIV_BIT; \
+ regs->ctx.CurrPC = pc; \
+ regs->ctx.AX[0].U0 = usp; \
+ regs->ctx.DX[3].U1 = *((int *)argc); /* argc */ \
+ regs->ctx.DX[3].U0 = (int)((int *)argc + 1); /* argv */ \
+ regs->ctx.DX[2].U1 = (int)((int *)argc + \
+ regs->ctx.DX[3].U1 + 2); /* envp */ \
+ regs->ctx.DX[2].U0 = 0; /* rtld_fini */ \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+extern void exit_thread(void);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+#define thread_saved_pc(tsk) \
+ ((unsigned long)(tsk)->thread.kernel_context->CurrPC)
+#define thread_saved_sp(tsk) \
+ ((unsigned long)(tsk)->thread.kernel_context->AX[0].U0)
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(tsk)->thread.kernel_context->AX[1].U0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC)
+#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0)
+
+#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
+
+#define cpu_relax() barrier()
+
+extern void setup_priv(void);
+
+static inline unsigned int hard_processor_id(void)
+{
+ unsigned int id;
+
+ asm volatile ("MOV %0, TXENABLE\n"
+ "AND %0, %0, %1\n"
+ "LSR %0, %0, %2\n"
+ : "=&d" (id)
+ : "I" (TXENABLE_THREAD_BITS),
+ "K" (TXENABLE_THREAD_S)
+ );
+
+ return id;
+}
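The inline asm above amounts to the following field extraction, shown in C
for clarity (TXENABLE itself can only be read with the MOV, which is why
the asm is needed):

    /* Equivalent C for the extraction performed by the asm: */
    id = (txenable & TXENABLE_THREAD_BITS) >> TXENABLE_THREAD_S;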
+
+#define OP3_EXIT 0
+
+#define HALT_OK 0
+#define HALT_PANIC -1
+
+/*
+ * Halt (stop) the hardware thread. This instruction sequence is the
+ * standard way to cause a Meta hardware thread to exit. The exit code
+ * is pushed onto the stack, where it is interpreted by the debug adapter.
+ */
+static inline void hard_processor_halt(int exit_code)
+{
+ asm volatile ("MOV D1Ar1, %0\n"
+ "MOV D0Ar6, %1\n"
+ "MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2\n"
+ "1:\n"
+ "SWITCH #0xC30006\n"
+ "B 1b\n"
+ : : "r" (exit_code), "K" (OP3_EXIT));
+}
+
+/* Set these hooks to call SoC specific code to restart/halt/power off. */
+extern void (*soc_restart)(char *cmd);
+extern void (*soc_halt)(void);
+
+extern void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs);
+
+#endif
diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h
new file mode 100644
index 00000000000..d2aa35d2228
--- /dev/null
+++ b/arch/metag/include/asm/prom.h
@@ -0,0 +1,23 @@
+/*
+ * arch/metag/include/asm/prom.h
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * Based on ARM version:
+ * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_METAG_PROM_H
+#define __ASM_METAG_PROM_H
+
+#include <asm/setup.h>
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void copy_fdt(void);
+
+#endif /* __ASM_METAG_PROM_H */
diff --git a/arch/metag/include/asm/ptrace.h b/arch/metag/include/asm/ptrace.h
new file mode 100644
index 00000000000..fcabc18daf2
--- /dev/null
+++ b/arch/metag/include/asm/ptrace.h
@@ -0,0 +1,60 @@
+#ifndef _METAG_PTRACE_H
+#define _METAG_PTRACE_H
+
+#include <linux/compiler.h>
+#include <uapi/asm/ptrace.h>
+#include <asm/tbx.h>
+
+#ifndef __ASSEMBLY__
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ TBICTX ctx;
+ TBICTXEXTCB0 extcb0[5];
+};
+
+#define user_mode(regs) (((regs)->ctx.SaveMask & TBICTX_PRIV_BIT) > 0)
+
+#define instruction_pointer(regs) ((unsigned long)(regs)->ctx.CurrPC)
+#define profile_pc(regs) instruction_pointer(regs)
+
+#define task_pt_regs(task) \
+ ((struct pt_regs *)(task_stack_page(task) + \
+ sizeof(struct thread_info)))
+
+#define current_pt_regs() \
+ ((struct pt_regs *)((char *)current_thread_info() + \
+ sizeof(struct thread_info)))
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_leave(struct pt_regs *regs);
+
+/* copy a struct user_gp_regs out to user */
+int metag_gp_regs_copyout(const struct pt_regs *regs,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf);
+/* copy a struct user_gp_regs in from user */
+int metag_gp_regs_copyin(struct pt_regs *regs,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+/* copy a struct user_cb_regs out to user */
+int metag_cb_regs_copyout(const struct pt_regs *regs,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf);
+/* copy a struct user_cb_regs in from user */
+int metag_cb_regs_copyin(struct pt_regs *regs,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+/* copy a struct user_rp_state out to user */
+int metag_rp_state_copyout(const struct pt_regs *regs,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf);
+/* copy a struct user_rp_state in from user */
+int metag_rp_state_copyin(struct pt_regs *regs,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _METAG_PTRACE_H */
diff --git a/arch/metag/include/asm/setup.h b/arch/metag/include/asm/setup.h
new file mode 100644
index 00000000000..e13083b15dd
--- /dev/null
+++ b/arch/metag/include/asm/setup.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_METAG_SETUP_H
+#define _ASM_METAG_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+void per_cpu_trap_init(unsigned long);
+extern void __init dump_machine_table(void);
+#endif /* _ASM_METAG_SETUP_H */
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h
new file mode 100644
index 00000000000..e0373f81a11
--- /dev/null
+++ b/arch/metag/include/asm/smp.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/cpumask.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+enum ipi_msg_type {
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_RESCHEDULE,
+};
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+
+asmlinkage void secondary_start_kernel(void);
+
+extern void secondary_startup(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
+extern void cpu_die(void);
+#endif
+
+extern void smp_init_cpus(void);
+#endif /* __ASM_SMP_H */
diff --git a/arch/metag/include/asm/sparsemem.h b/arch/metag/include/asm/sparsemem.h
new file mode 100644
index 00000000000..03fe255d697
--- /dev/null
+++ b/arch/metag/include/asm/sparsemem.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_METAG_SPARSEMEM_H
+#define __ASM_METAG_SPARSEMEM_H
+
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 26
+#define MAX_PHYSADDR_BITS 32
+#define MAX_PHYSMEM_BITS 32
+
+#endif /* __ASM_METAG_SPARSEMEM_H */
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
new file mode 100644
index 00000000000..86a7cf3d138
--- /dev/null
+++ b/arch/metag/include/asm/spinlock.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifdef CONFIG_METAG_ATOMICITY_LOCK1
+#include <asm/spinlock_lock1.h>
+#else
+#include <asm/spinlock_lnkget.h>
+#endif
+
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
new file mode 100644
index 00000000000..ad8436feed8
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lnkget.h
@@ -0,0 +1,249 @@
+#ifndef __ASM_SPINLOCK_LNKGET_H
+#define __ASM_SPINLOCK_LNKGET_H
+
+/*
+ * None of these asm statements clobber memory as LNKSET writes around
+ * the cache so the memory it modifies cannot safely be read by any means
+ * other than these accessors.
+ */
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ int ret;
+
+ asm volatile ("LNKGETD %0, [%1]\n"
+ "TST %0, #1\n"
+ "MOV %0, #1\n"
+ "XORZ %0, %0, %0\n"
+ : "=&d" (ret)
+ : "da" (&lock->lock)
+ : "cc");
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ int tmp;
+
+ asm volatile ("1: LNKGETD %0,[%1]\n"
+ " TST %0, #1\n"
+ " ADD %0, %0, #1\n"
+ " LNKSETDZ [%1], %0\n"
+ " BNZ 1b\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (tmp)
+ : "da" (&lock->lock)
+ : "cc");
+
+ smp_mb();
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int tmp;
+
+ asm volatile (" LNKGETD %0,[%1]\n"
+ " TST %0, #1\n"
+ " ADD %0, %0, #1\n"
+ " LNKSETDZ [%1], %0\n"
+ " BNZ 1f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " MOV %0, #1\n"
+ "1: XORNZ %0, %0, %0\n"
+ : "=&d" (tmp)
+ : "da" (&lock->lock)
+ : "cc");
+
+ smp_mb();
+
+ return tmp;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ asm volatile (" SETD [%0], %1\n"
+ :
+ : "da" (&lock->lock), "da" (0)
+ : "memory");
+}
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31. When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ int tmp;
+
+ asm volatile ("1: LNKGETD %0,[%1]\n"
+ " CMP %0, #0\n"
+ " ADD %0, %0, %2\n"
+ " LNKSETDZ [%1], %0\n"
+ " BNZ 1b\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (tmp)
+ : "da" (&rw->lock), "bd" (0x80000000)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int tmp;
+
+ asm volatile (" LNKGETD %0,[%1]\n"
+ " CMP %0, #0\n"
+ " ADD %0, %0, %2\n"
+ " LNKSETDZ [%1], %0\n"
+ " BNZ 1f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " MOV %0,#1\n"
+ "1: XORNZ %0, %0, %0\n"
+ : "=&d" (tmp)
+ : "da" (&rw->lock), "bd" (0x80000000)
+ : "cc");
+
+ smp_mb();
+
+ return tmp;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ asm volatile (" SETD [%0], %1\n"
+ :
+ : "da" (&rw->lock), "da" (0)
+ : "memory");
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+ int ret;
+
+ asm volatile ("LNKGETD %0, [%1]\n"
+ "CMP %0, #0\n"
+ "MOV %0, #1\n"
+ "XORNZ %0, %0, %0\n"
+ : "=&d" (ret)
+ : "da" (&rw->lock)
+ : "cc");
+ return ret;
+}
+
+/*
+ * Read locks are a bit more hairy:
+ * - Exclusively load the lock value.
+ * - Increment it.
+ * - Store new lock value if positive, and we still own this location.
+ * If the value is negative, we've already failed.
+ * - If we failed to store the value, we want a negative result.
+ * - If we failed, try again.
+ * Unlocking is similarly hairy. We may have multiple read locks
+ * currently active. However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ int tmp;
+
+ asm volatile ("1: LNKGETD %0,[%1]\n"
+ " ADDS %0, %0, #1\n"
+ " LNKSETDPL [%1], %0\n"
+ " BMI 1b\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (tmp)
+ : "da" (&rw->lock)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ int tmp;
+
+ smp_mb();
+
+ asm volatile ("1: LNKGETD %0,[%1]\n"
+ " SUB %0, %0, #1\n"
+ " LNKSETD [%1], %0\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " BNZ 1b\n"
+ : "=&d" (tmp)
+ : "da" (&rw->lock)
+ : "cc", "memory");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int tmp;
+
+ asm volatile (" LNKGETD %0,[%1]\n"
+ " ADDS %0, %0, #1\n"
+ " LNKSETDPL [%1], %0\n"
+ " BMI 1f\n"
+ " DEFR %0, TXSTAT\n"
+ " ANDT %0, %0, #HI(0x3f000000)\n"
+ " CMPT %0, #HI(0x02000000)\n"
+ " MOV %0,#1\n"
+ " BZ 2f\n"
+ "1: MOV %0,#0\n"
+ "2:\n"
+ : "=&d" (tmp)
+ : "da" (&rw->lock)
+ : "cc");
+
+ smp_mb();
+
+ return tmp;
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+ int tmp;
+
+ asm volatile ("LNKGETD %0, [%1]\n"
+ "CMP %0, %2\n"
+ "MOV %0, #1\n"
+ "XORZ %0, %0, %0\n"
+ : "=&d" (tmp)
+ : "da" (&rw->lock), "bd" (0x80000000)
+ : "cc");
+ return tmp;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_LNKGET_H */
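For readers unfamiliar with the LNKGET/LNKSET pairing, the loops above
follow the classic load-linked/store-conditional pattern. The sketch below
shows the shape of arch_spin_lock in C, using hypothetical load_linked()
and store_conditional() helpers; it cannot actually be written this way,
since the LNKSET completion status must be read back from TXSTAT in asm:

    for (;;) {
            old = load_linked(&lock->lock);              /* LNKGETD            */
            if (old & 1)
                    continue;                            /* already held, spin */
            if (store_conditional(&lock->lock, old + 1)) /* LNKSETDZ + TXSTAT  */
                    break;                               /* store stuck: done  */
    }
    smp_mb();                                            /* acquire barrier    */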
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
new file mode 100644
index 00000000000..c630444cffe
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -0,0 +1,184 @@
+#ifndef __ASM_SPINLOCK_LOCK1_H
+#define __ASM_SPINLOCK_LOCK1_H
+
+#include <asm/bug.h>
+#include <asm/global_lock.h>
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ int ret;
+
+ barrier();
+ ret = lock->lock;
+ WARN_ON(ret != 0 && ret != 1);
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int we_won = 0;
+ unsigned long flags;
+
+again:
+ __global_lock1(flags);
+ if (lock->lock == 0) {
+ fence();
+ lock->lock = 1;
+ we_won = 1;
+ }
+ __global_unlock1(flags);
+ if (we_won == 0)
+ goto again;
+ WARN_ON(lock->lock != 1);
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ ret = lock->lock;
+ if (ret == 0) {
+ fence();
+ lock->lock = 1;
+ }
+ __global_unlock1(flags);
+ return (ret == 0);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ barrier();
+ WARN_ON(!lock->lock);
+ lock->lock = 0;
+}
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31. When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int we_won = 0;
+
+again:
+ __global_lock1(flags);
+ if (rw->lock == 0) {
+ fence();
+ rw->lock = 0x80000000;
+ we_won = 1;
+ }
+ __global_unlock1(flags);
+ if (we_won == 0)
+ goto again;
+ WARN_ON(rw->lock != 0x80000000);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ ret = rw->lock;
+ if (ret == 0) {
+ fence();
+ rw->lock = 0x80000000;
+ }
+ __global_unlock1(flags);
+
+ return (ret == 0);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ barrier();
+ WARN_ON(rw->lock != 0x80000000);
+ rw->lock = 0;
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+ unsigned int ret;
+
+ barrier();
+ ret = rw->lock;
+ return (ret == 0);
+}
+
+/*
+ * Read locks are a bit more hairy:
+ * - Exclusively load the lock value.
+ * - Increment it.
+ * - Store new lock value if positive, and we still own this location.
+ * If the value is negative, we've already failed.
+ * - If we failed to store the value, we want a negative result.
+ * - If we failed, try again.
+ * Unlocking is similarly hairy. We may have multiple read locks
+ * currently active. However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int we_won = 0, ret;
+
+again:
+ __global_lock1(flags);
+ ret = rw->lock;
+ if (ret < 0x80000000) {
+ fence();
+ rw->lock = ret + 1;
+ we_won = 1;
+ }
+ __global_unlock1(flags);
+ if (!we_won)
+ goto again;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ fence();
+ ret = rw->lock--;
+ __global_unlock1(flags);
+ WARN_ON(ret == 0);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ ret = rw->lock;
+ if (ret < 0x80000000) {
+ fence();
+ rw->lock = ret + 1;
+ }
+ __global_unlock1(flags);
+ return (ret < 0x80000000);
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+ unsigned int ret;
+
+ barrier();
+ ret = rw->lock;
+ return (ret < 0x80000000);
+}
+
+#endif /* __ASM_SPINLOCK_LOCK1_H */
diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h
new file mode 100644
index 00000000000..b76391405fe
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_METAG_SPINLOCK_TYPES_H
+#define _ASM_METAG_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* _ASM_METAG_SPINLOCK_TYPES_H */
diff --git a/arch/metag/include/asm/stacktrace.h b/arch/metag/include/asm/stacktrace.h
new file mode 100644
index 00000000000..2830a0fe7ac
--- /dev/null
+++ b/arch/metag/include/asm/stacktrace.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long sp;
+ unsigned long lr;
+ unsigned long pc;
+};
+
+struct metag_frame {
+ unsigned long fp;
+ unsigned long lr;
+};
+
+extern int unwind_frame(struct stackframe *frame);
+extern void walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data);
+
+#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/metag/include/asm/string.h b/arch/metag/include/asm/string.h
new file mode 100644
index 00000000000..53e3806eee0
--- /dev/null
+++ b/arch/metag/include/asm/string.h
@@ -0,0 +1,13 @@
+#ifndef _METAG_STRING_H_
+#define _METAG_STRING_H_
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#endif /* _METAG_STRING_H_ */
diff --git a/arch/metag/include/asm/switch.h b/arch/metag/include/asm/switch.h
new file mode 100644
index 00000000000..1fd6a587c84
--- /dev/null
+++ b/arch/metag/include/asm/switch.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_METAG_SWITCH_H
+#define _ASM_METAG_SWITCH_H
+
+/* metag SWITCH codes */
+#define __METAG_SW_PERM_BREAK 0x400002 /* compiled in breakpoint */
+#define __METAG_SW_SYS_LEGACY 0x440000 /* legacy system calls */
+#define __METAG_SW_SYS 0x440001 /* system calls */
+
+/* metag SWITCH instruction encoding */
+#define __METAG_SW_ENCODING(TYPE) (0xaf000000 | (__METAG_SW_##TYPE))
+
+#endif /* _ASM_METAG_SWITCH_H */
diff --git a/arch/metag/include/asm/syscall.h b/arch/metag/include/asm/syscall.h
new file mode 100644
index 00000000000..24fc97939f7
--- /dev/null
+++ b/arch/metag/include/asm/syscall.h
@@ -0,0 +1,104 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Imagination Technologies Ltd.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_METAG_SYSCALL_H
+#define _ASM_METAG_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+
+#include <asm/switch.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long insn;
+
+ /*
+ * FIXME there's no way to find out how we got here other than to
+ * examine the memory at the PC to see if it is a syscall
+ * SWITCH instruction.
+ */
+ if (get_user(insn, (unsigned long *)(regs->ctx.CurrPC - 4)))
+ return -1;
+
+ if (insn == __METAG_SW_ENCODING(SYS))
+ return regs->ctx.DX[0].U1;
+ else
+ return -1L;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->ctx.DX[0].U0;
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->ctx.DX[0].U0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->ctx.DX[0].U0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ unsigned int reg, j;
+ BUG_ON(i + n > 6);
+
+ for (j = i, reg = 6 - i; j < (i + n); j++, reg--) {
+ if (reg % 2)
+ args[j] = regs->ctx.DX[(reg + 1) / 2].U0;
+ else
+ args[j] = regs->ctx.DX[reg / 2].U1;
+ }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ unsigned int reg, j;
+ BUG_ON(i + n > 6);
+
+ for (j = i, reg = 6 - i; j < (i + n); j++, reg--) {
+ if (reg % 2)
+ regs->ctx.DX[(reg + 1) / 2].U0 = args[j];
+ else
+ regs->ctx.DX[reg / 2].U1 = args[j];
+ }
+}
+
+#define NR_syscalls __NR_syscalls
+
+/* generic syscall table */
+extern const void *sys_call_table[];
+
+#endif /* _ASM_METAG_SYSCALL_H */
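Putting the register arithmetic in syscall_get_arguments() into concrete
terms, a full six-argument call (i = 0, n = 6) resolves as follows:

    args[0] <- regs->ctx.DX[3].U1      args[3] <- regs->ctx.DX[2].U0
    args[1] <- regs->ctx.DX[3].U0      args[4] <- regs->ctx.DX[1].U1
    args[2] <- regs->ctx.DX[2].U1      args[5] <- regs->ctx.DX[1].U0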
diff --git a/arch/metag/include/asm/syscalls.h b/arch/metag/include/asm/syscalls.h
new file mode 100644
index 00000000000..a02b9555652
--- /dev/null
+++ b/arch/metag/include/asm/syscalls.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_METAG_SYSCALLS_H
+#define _ASM_METAG_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+
+/* kernel/signal.c */
+#define sys_rt_sigreturn sys_rt_sigreturn
+asmlinkage long sys_rt_sigreturn(void);
+
+#include <asm-generic/syscalls.h>
+
+/* kernel/sys_metag.c */
+asmlinkage int sys_metag_setglobalbit(char __user *, int);
+asmlinkage void sys_metag_set_fpu_flags(unsigned int);
+asmlinkage int sys_metag_set_tls(void __user *);
+asmlinkage void *sys_metag_get_tls(void);
+
+asmlinkage long sys_truncate64_metag(const char __user *, unsigned long,
+ unsigned long);
+asmlinkage long sys_ftruncate64_metag(unsigned int, unsigned long,
+ unsigned long);
+asmlinkage long sys_fadvise64_64_metag(int, unsigned long, unsigned long,
+ unsigned long, unsigned long, int);
+asmlinkage long sys_readahead_metag(int, unsigned long, unsigned long, size_t);
+asmlinkage ssize_t sys_pread64_metag(unsigned long, char __user *, size_t,
+ unsigned long, unsigned long);
+asmlinkage ssize_t sys_pwrite64_metag(unsigned long, char __user *, size_t,
+ unsigned long, unsigned long);
+asmlinkage long sys_sync_file_range_metag(int, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned int);
+
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+ int syscall);
+
+#endif /* _ASM_METAG_SYSCALLS_H */
diff --git a/arch/metag/include/asm/tbx.h b/arch/metag/include/asm/tbx.h
new file mode 100644
index 00000000000..287b36ff8ad
--- /dev/null
+++ b/arch/metag/include/asm/tbx.h
@@ -0,0 +1,1425 @@
+/*
+ * asm/tbx.h
+ *
+ * Copyright (C) 2000-2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Thread binary interface header
+ */
+
+#ifndef _ASM_METAG_TBX_H_
+#define _ASM_METAG_TBX_H_
+
+/* for CACHEW_* values */
+#include <asm/metag_isa.h>
+/* for LINSYSEVENT_* addresses */
+#include <asm/metag_mem.h>
+
+#ifdef TBI_1_4
+#ifndef TBI_MUTEXES_1_4
+#define TBI_MUTEXES_1_4
+#endif
+#ifndef TBI_SEMAPHORES_1_4
+#define TBI_SEMAPHORES_1_4
+#endif
+#ifndef TBI_ASYNC_SWITCH_1_4
+#define TBI_ASYNC_SWITCH_1_4
+#endif
+#ifndef TBI_FASTINT_1_4
+#define TBI_FASTINT_1_4
+#endif
+#endif
+
+
+/* Id values in the TBI system describe a segment using an arbitrary
+ integer value and flags in the bottom 8 bits. The SIGPOLL value is
+ used in cases where control over blocking or polling behaviour is
+ needed. */
+#define TBID_SIGPOLL_BIT 0x02 /* Set bit in an Id value to poll vs block */
+/* Extended segment identifiers use strings in the string table */
+#define TBID_IS_SEGSTR( Id ) (((Id) & (TBID_SEGTYPE_BITS>>1)) == 0)
+
+/* Segment identifiers contain the following related bit-fields */
+#define TBID_SEGTYPE_BITS 0x0F /* One of the predefined segment types */
+#define TBID_SEGTYPE_S 0
+#define TBID_SEGSCOPE_BITS 0x30 /* Indicates the scope of the segment */
+#define TBID_SEGSCOPE_S 4
+#define TBID_SEGGADDR_BITS 0xC0 /* Indicates access possible via pGAddr */
+#define TBID_SEGGADDR_S 6
+
+/* Segments of memory can only really contain a few types of data */
+#define TBID_SEGTYPE_TEXT 0x02 /* Code segment */
+#define TBID_SEGTYPE_DATA 0x04 /* Data segment */
+#define TBID_SEGTYPE_STACK 0x06 /* Stack segment */
+#define TBID_SEGTYPE_HEAP 0x0A /* Heap segment */
+#define TBID_SEGTYPE_ROOT 0x0C /* Root block segments */
+#define TBID_SEGTYPE_STRING 0x0E /* String table segment */
+
+/* Segments have one of three possible scopes */
+#define TBID_SEGSCOPE_INIT 0 /* Temporary area for initialisation phase */
+#define TBID_SEGSCOPE_LOCAL 1 /* Private to this thread */
+#define TBID_SEGSCOPE_GLOBAL 2 /* Shared globally throughout the system */
+#define TBID_SEGSCOPE_SHARED 3 /* Limited sharing between local/global */
+
+/* For a segment specifier a further field in two of the remaining bits
+ indicates the usefulness of the pGAddr field in the segment
+ descriptor. */
+#define TBID_SEGGADDR_NULL 0 /* pGAddr is NULL -> SEGSCOPE_(LOCAL|INIT) */
+#define TBID_SEGGADDR_READ 1 /* Only read via pGAddr */
+#define TBID_SEGGADDR_WRITE 2 /* Full access via pGAddr */
+#define TBID_SEGGADDR_EXEC 3 /* Only execute via pGAddr */
+
+/* The following values are common to both segment and signal Id values and
+ live in the top 8 bits of the Id values. */
+
+/* The ISTAT bit indicates if segments are related to interrupt vs
+ background level interfaces. A thread can still handle all triggers at
+ either level, but can also split these up if it wants to. */
+#define TBID_ISTAT_BIT 0x01000000
+#define TBID_ISTAT_S 24
+
+/* Privilege needed to access a segment is indicated by the next bit.
+
+ This bit is set to mirror the current privilege level when starting a
+ search for a segment - setting it yourself toggles the automatically
+ generated state which is only useful to emulate unprivileged behaviour
+ or access unprivileged areas of memory while at privileged level. */
+#define TBID_PSTAT_BIT 0x02000000
+#define TBID_PSTAT_S 25
+
+/* The top six bits of a signal/segment specifier identify a thread within
+ the system. This represents a segment's owner. */
+#define TBID_THREAD_BITS 0xFC000000
+#define TBID_THREAD_S 26
+
+/* Special thread id values */
+#define TBID_THREAD_NULL (-32) /* Never matches any thread/segment id used */
+#define TBID_THREAD_GLOBAL (-31) /* Things global to all threads */
+#define TBID_THREAD_HOST ( -1) /* Host interface */
+#define TBID_THREAD_EXTIO (TBID_THREAD_HOST) /* Host based ExtIO i/f */
+
+/* Virtual Ids are used for external thread interface structures or the
+ above special Ids */
+#define TBID_IS_VIRTTHREAD( Id ) ((Id) < 0)
+
+/* Real Ids are used for actual hardware threads that are local */
+#define TBID_IS_REALTHREAD( Id ) ((Id) >= 0)
+
+/* Generate a segment Id given Thread, Scope, and Type */
+#define TBID_SEG( Thread, Scope, Type ) (\
+ ((Thread)<<TBID_THREAD_S) + ((Scope)<<TBID_SEGSCOPE_S) + (Type))
+
+/* Generate a signal Id given Thread and SigNum */
+#define TBID_SIG( Thread, SigNum ) (\
+ ((Thread)<<TBID_THREAD_S) + ((SigNum)<<TBID_SIGNUM_S) + TBID_SIGNAL_BIT)
+
+/* Generate an Id that solely represents a thread - useful for cache ops */
+#define TBID_THD( Thread ) ((Thread)<<TBID_THREAD_S)
+#define TBID_THD_NULL ((TBID_THREAD_NULL) <<TBID_THREAD_S)
+#define TBID_THD_GLOBAL ((TBID_THREAD_GLOBAL)<<TBID_THREAD_S)
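+
+/* For illustration only (not part of the original interface definition): a
+   globally scoped heap segment owned by hardware thread 1 could be
+   described by composing the fields above as
+
+       int SegId = TBID_SEG( 1, TBID_SEGSCOPE_GLOBAL, TBID_SEGTYPE_HEAP );
+ */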
+
+/* Common exception handler (see TBID_SIGNUM_XXF below) receives hardware
+ generated fault codes TBIXXF_SIGNUM_xxF in its SigNum parameter */
+#define TBIXXF_SIGNUM_IIF 0x01 /* General instruction fault */
+#define TBIXXF_SIGNUM_PGF 0x02 /* Privilege general fault */
+#define TBIXXF_SIGNUM_DHF 0x03 /* Data access watchpoint HIT */
+#define TBIXXF_SIGNUM_IGF 0x05 /* Code fetch general read failure */
+#define TBIXXF_SIGNUM_DGF 0x07 /* Data access general read/write fault */
+#define TBIXXF_SIGNUM_IPF 0x09 /* Code fetch page fault */
+#define TBIXXF_SIGNUM_DPF 0x0B /* Data access page fault */
+#define TBIXXF_SIGNUM_IHF 0x0D /* Instruction breakpoint HIT */
+#define TBIXXF_SIGNUM_DWF 0x0F /* Data access read-only fault */
+
+/* Hardware signals communicate events between processing levels within a
+ single thread. All the _xxF cases are exceptions and are routed via a
+ common exception handler, _SWx are software trap events and kicks (including
+ __TBISignal generated kicks), and finally _TRx are hardware triggers */
+#define TBID_SIGNUM_SW0 0x00 /* SWITCH GROUP 0 - Per thread user */
+#define TBID_SIGNUM_SW1 0x01 /* SWITCH GROUP 1 - Per thread system */
+#define TBID_SIGNUM_SW2 0x02 /* SWITCH GROUP 2 - Internal global request */
+#define TBID_SIGNUM_SW3 0x03 /* SWITCH GROUP 3 - External global request */
+#ifdef TBI_1_4
+#define TBID_SIGNUM_FPE 0x04 /* Deferred exception - Any IEEE 754 exception */
+#define TBID_SIGNUM_FPD 0x05 /* Deferred exception - Denormal exception */
+/* Reserved 0x6 for a reserved deferred exception */
+#define TBID_SIGNUM_BUS 0x07 /* Deferred exception - Bus Error */
+/* Reserved 0x08-0x09 */
+#else
+/* Reserved 0x04-0x09 */
+#endif
+#define TBID_SIGNUM_SWS 0x0A /* KICK received with SigMask != 0 */
+#define TBID_SIGNUM_SWK 0x0B /* KICK received with SigMask == 0 */
+/* Reserved 0x0C-0x0F */
+#define TBID_SIGNUM_TRT 0x10 /* Timer trigger */
+#define TBID_SIGNUM_LWK 0x11 /* Low level kick (handler provided by TBI) */
+#define TBID_SIGNUM_XXF 0x12 /* Fault handler - receives ALL _xxF sigs */
+#ifdef TBI_1_4
+#define TBID_SIGNUM_DFR 0x13 /* Deferred Exception handler */
+#else
+#define TBID_SIGNUM_FPE 0x13 /* FPE Exception handler */
+#endif
+/* External trigger one group 0x14 to 0x17 - per thread */
+#define TBID_SIGNUM_TR1(Thread) (0x14+(Thread))
+#define TBID_SIGNUM_T10 0x14
+#define TBID_SIGNUM_T11 0x15
+#define TBID_SIGNUM_T12 0x16
+#define TBID_SIGNUM_T13 0x17
+/* External trigger two group 0x18 to 0x1b - per thread */
+#define TBID_SIGNUM_TR2(Thread) (0x18+(Thread))
+#define TBID_SIGNUM_T20 0x18
+#define TBID_SIGNUM_T21 0x19
+#define TBID_SIGNUM_T22 0x1A
+#define TBID_SIGNUM_T23 0x1B
+#define TBID_SIGNUM_TR3 0x1C /* External trigger N-4 (global) */
+#define TBID_SIGNUM_TR4 0x1D /* External trigger N-3 (global) */
+#define TBID_SIGNUM_TR5 0x1E /* External trigger N-2 (global) */
+#define TBID_SIGNUM_TR6 0x1F /* External trigger N-1 (global) */
+#define TBID_SIGNUM_MAX 0x1F
+
+/* Return the trigger register (TXMASK[I]/TXSTAT[I]) bits related to
+ each hardware signal; sometimes this is a many-to-one relationship. */
+#define TBI_TRIG_BIT(SigNum) (\
+ ((SigNum) >= TBID_SIGNUM_TRT) ? 1<<((SigNum)-TBID_SIGNUM_TRT) :\
+ ( ((SigNum) == TBID_SIGNUM_SWS) || \
+ ((SigNum) == TBID_SIGNUM_SWK) ) ? \
+ TXSTAT_KICK_BIT : TXSTATI_BGNDHALT_BIT )
+
+/* Return the hardware trigger vector number for entries in the
+ HWVEC0EXT table that will generate the required internal trigger. */
+#define TBI_TRIG_VEC(SigNum) (\
+ ((SigNum) >= TBID_SIGNUM_T10) ? ((SigNum)-TBID_SIGNUM_TRT) : -1)
+
+/* Default trigger masks for each thread at background/interrupt level */
+#define TBI_TRIGS_INIT( Thread ) (\
+ TXSTAT_KICK_BIT + TBI_TRIG_BIT(TBID_SIGNUM_TR1(Thread)) )
+#define TBI_INTS_INIT( Thread ) (\
+ TXSTAT_KICK_BIT + TXSTATI_BGNDHALT_BIT \
+ + TBI_TRIG_BIT(TBID_SIGNUM_TR2(Thread)) )
+
+#ifndef __ASSEMBLY__
+/* A spin-lock location is a zero-initialised location in memory */
+typedef volatile int TBISPIN, *PTBISPIN;
+
+/* A kick location is a hardware location you can write to
+ * in order to cause a kick
+ */
+typedef volatile int *PTBIKICK;
+
+#if defined(METAC_1_0) || defined(METAC_1_1)
+/* Macro to perform a kick */
+#define TBI_KICK( pKick ) do { pKick[0] = 1; } while (0)
+#else
+/* #define METAG_LIN_VALUES before including machine.h if required */
+#ifdef LINSYSEVENT_WR_COMBINE_FLUSH
+/* Macro to perform a kick - write combiners must be flushed */
+#define TBI_KICK( pKick ) do {\
+ volatile int *pFlush = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH; \
+ pFlush[0] = 0; \
+ pKick[0] = 1; } while (0)
+#endif
+#endif /* if defined(METAC_1_0) || defined(METAC_1_1) */
+#endif /* ifndef __ASSEMBLY__ */
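+
+/* A minimal usage sketch (illustrative only; assumes a PTBI pointer for the
+   target thread, obtained via the __TBI routine declared later in this
+   header):
+
+       TBI_KICK( pTBI->pKick );
+
+   Which TBI_KICK variant is in effect depends on the METAC_* core macros
+   defined at build time. */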
+
+#ifndef __ASSEMBLY__
+/* 64-bit dual unit state value */
+typedef struct _tbidual_tag_ {
+ /* 32-bit value from a pair of registers in data or address units */
+ int U0, U1;
+} TBIDUAL, *PTBIDUAL;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBIDUAL */
+#define TBIDUAL_U0 (0)
+#define TBIDUAL_U1 (4)
+
+#define TBIDUAL_BYTES (8)
+
+#define TBICTX_CRIT_BIT 0x0001 /* ASync state saved in TBICTX */
+#define TBICTX_SOFT_BIT 0x0002 /* Sync state saved in TBICTX (other bits 0) */
+#ifdef TBI_FASTINT_1_4
+#define TBICTX_FINT_BIT 0x0004 /* Using Fast Interrupts */
+#endif
+#define TBICTX_FPAC_BIT 0x0010 /* FPU state in TBICTX, FPU active on entry */
+#define TBICTX_XMCC_BIT 0x0020 /* Bit to identify a MECC task */
+#define TBICTX_CBUF_BIT 0x0040 /* Hardware catch buffer flag from TXSTATUS */
+#define TBICTX_CBRP_BIT 0x0080 /* Read pipeline dirty from TXDIVTIME */
+#define TBICTX_XDX8_BIT 0x0100 /* Saved DX.8 to DX.15 too */
+#define TBICTX_XAXX_BIT 0x0200 /* Save remaining AX registers to AX.7 */
+#define TBICTX_XHL2_BIT 0x0400 /* Saved hardware loop registers too */
+#define TBICTX_XTDP_BIT 0x0800 /* Saved DSP registers too */
+#define TBICTX_XEXT_BIT 0x1000 /* Set if TBICTX.Ext.Ctx contains extended
+ state save area, otherwise TBICTX.Ext.AX2
+ just holds normal A0.2 and A1.2 states */
+#define TBICTX_WAIT_BIT 0x2000 /* Causes wait for trigger - sticky toggle */
+#define TBICTX_XCBF_BIT 0x4000 /* Catch buffer or RD extracted into TBICTX */
+#define TBICTX_PRIV_BIT 0x8000 /* Set if system uses 'privileged' model */
+
+#ifdef METAC_1_0
+#define TBICTX_XAX3_BIT 0x0200 /* Saved AX.5 to AX.7 for XAXX */
+#define TBICTX_AX_REGS 5 /* Ax.0 to Ax.4 are core GP regs on CHORUS */
+#else
+#define TBICTX_XAX4_BIT 0x0200 /* Saved AX.4 to AX.7 for XAXX */
+#define TBICTX_AX_REGS 4 /* Default is Ax.0 to Ax.3 */
+#endif
+
+#ifdef TBI_1_4
+#define TBICTX_CFGFPU_FX16_BIT 0x00010000 /* Save FX.8 to FX.15 too */
+
+/* The METAC_CORE_ID_CONFIG field indicates omitted DSP resources */
+#define METAC_COREID_CFGXCTX_MASK( Value ) (\
+ ( (((Value & METAC_COREID_CFGDSP_BITS)>> \
+ METAC_COREID_CFGDSP_S ) == METAC_COREID_CFGDSP_MIN) ? \
+ ~(TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+ \
+ TBICTX_XAXX_BIT+TBICTX_XDX8_BIT ) : ~0U ) )
+#endif
+
+/* Extended context state provides a standardised method for registering the
+ arguments required by __TBICtxSave to save the additional register states
+ currently in use by non general purpose code. The state of the __TBIExtCtx
+ variable in the static space of the thread forms an extension of the base
+ context of the thread.
+
+ If ( __TBIExtCtx.Ctx.SaveMask == 0 ) then pExt is assumed to be NULL and
+ the empty state of __TBIExtCtx is represented by the fact that
+ TBICTX.SaveMask does not have the bit TBICTX_XEXT_BIT set.
+
+ If ( __TBIExtCtx.Ctx.SaveMask != 0 ) then pExt should point at a suitably
+ sized extended context save area (usually at the end of the stack space
+ allocated by the current routine). This space should allow for the
+ displaced state of A0.2 and A1.2 to be saved along with the other extended
+ states indicated via __TBIExtCtx.Ctx.SaveMask. */
+#ifndef __ASSEMBLY__
+typedef union _tbiextctx_tag_ {
+ long long Val;
+ TBIDUAL AX2;
+ struct _tbiextctxext_tag {
+#ifdef TBI_1_4
+ short DspramSizes; /* DSPRAM sizes. Encoding varies between
+ TBICtxAlloc and the ECH scheme. */
+#else
+ short Reserved0;
+#endif
+ short SaveMask; /* Flag bits for state saved */
+ PTBIDUAL pExt; /* AX[2] state saved first plus Xxxx state */
+
+ } Ctx;
+
+} TBIEXTCTX, *PTBIEXTCTX;
+
+/* Automatic registration of extended context save for __TBINestInts */
+extern TBIEXTCTX __TBIExtCtx;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBIEXTCTX */
+#define TBIEXTCTX_AX2 (0)
+#define TBIEXTCTX_Ctx (0)
+#define TBIEXTCTX_Ctx_SaveMask (TBIEXTCTX_Ctx + 2)
+#define TBIEXTCTX_Ctx_pExt (TBIEXTCTX_Ctx + 2 + 2)
+
+/* Extended context data size calculation constants */
+#define TBICTXEXT_BYTES (8)
+#define TBICTXEXTBB8_BYTES (8*8)
+#define TBICTXEXTAX3_BYTES (3*8)
+#define TBICTXEXTAX4_BYTES (4*8)
+#ifdef METAC_1_0
+#define TBICTXEXTAXX_BYTES TBICTXEXTAX3_BYTES
+#else
+#define TBICTXEXTAXX_BYTES TBICTXEXTAX4_BYTES
+#endif
+#define TBICTXEXTHL2_BYTES (3*8)
+#define TBICTXEXTTDR_BYTES (27*8)
+#define TBICTXEXTTDP_BYTES TBICTXEXTTDR_BYTES
+
+#ifdef TBI_1_4
+#define TBICTXEXTFX8_BYTES (4*8)
+#define TBICTXEXTFPAC_BYTES (1*4 + 2*2 + 4*8)
+#define TBICTXEXTFACF_BYTES (3*8)
+#endif
+
+/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
+#define TBICTXEXT_MAXBITS (TBICTX_XEXT_BIT| \
+ TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
+ TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )
+
+/* Maximum size of the extended context save area for current variant */
+#define TBICTXEXT_MAXBYTES (TBICTXEXT_BYTES+TBICTXEXTBB8_BYTES+\
+ TBICTXEXTAXX_BYTES+TBICTXEXTHL2_BYTES+\
+ TBICTXEXTTDP_BYTES )
+
+#ifdef TBI_FASTINT_1_4
+/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
+#define TBICTX2EXT_MAXBITS (TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
+ TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )
+
+/* Maximum size of the extended context save area for current variant */
+#define TBICTX2EXT_MAXBYTES (TBICTXEXTBB8_BYTES+TBICTXEXTAXX_BYTES\
+ +TBICTXEXTHL2_BYTES+TBICTXEXTTDP_BYTES )
+#endif
+
+/* Specify extended resources being used by the current routine; code must be
+ assembler generated to utilise extended resources-
+
+ MOV D0xxx,A0StP ; Perform alloca - routine should
+ ADD A0StP,A0StP,#SaveSize ; setup/use A0FrP to access locals
+ MOVT D1xxx,#SaveMask ; TBICTX_XEXT_BIT MUST be set
+ SETL [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
+
+ NB: OG(___TBIExtCtx) is a special case supported for SETL/GETL operations
+ on 64-bit sized structures only; other accesses must be based on use
+ of OGA(___TBIExtCtx).
+
+ At exit of routine-
+
+ MOV D0xxx,#0 ; Clear extended context save state
+ MOV D1xxx,#0
+ SETL [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
+ SUB A0StP,A0StP,#SaveSize ; If original A0StP required
+
+ NB: Both the setting and clearing of the whole __TBIExtCtx MUST be done
+ atomically in one 64-bit write operation.
+
+ For simple interrupt handling only via __TBINestInts there should be no
+ impact of the __TBIExtCtx system. If pre-emptive scheduling is being
+ performed however (assuming __TBINestInts has already been called earlier
+ on) then the following logic will correctly call __TBICtxSave if required
+ and clear out the currently selected background task-
+
+ if ( __TBIExtCtx.Ctx.SaveMask & TBICTX_XEXT_BIT )
+ {
+ / * Store extended states in pCtx * /
+ State.Sig.SaveMask |= __TBIExtCtx.Ctx.SaveMask;
+
+ (void) __TBICtxSave( State, (void *) __TBIExtCtx.Ctx.pExt );
+ __TBIExtCtx.Val = 0;
+ }
+
+ and when restoring task states call __TBICtxRestore-
+
+ / * Restore state from pCtx * /
+ State.Sig.pCtx = pCtx;
+ State.Sig.SaveMask = pCtx->SaveMask;
+
+ if ( State.Sig.SaveMask & TBICTX_XEXT_BIT )
+ {
+ / * Restore extended states from pCtx * /
+ __TBIExtCtx.Val = pCtx->Ext.Val;
+
+ (void) __TBICtxRestore( State, (void *) __TBIExtCtx.Ctx.pExt );
+ }
+
+ */
+
+/* Critical thread state save area */
+#ifndef __ASSEMBLY__
+typedef struct _tbictx_tag_ {
+ /* TXSTATUS_FLAG_BITS and TXSTATUS_LSM_STEP_BITS from TXSTATUS */
+ short Flags;
+ /* Mask indicates any extended context state saved; 0 -> Never run */
+ short SaveMask;
+ /* Saved PC value */
+ int CurrPC;
+ /* Saved critical register states */
+ TBIDUAL DX[8];
+ /* Background control register states - for cores without catch buffer
+ base in DIVTIME the TXSTATUS bits RPVALID and RPMASK are stored with
+ the real state TXDIVTIME in CurrDIVTIME */
+ int CurrRPT, CurrBPOBITS, CurrMODE, CurrDIVTIME;
+ /* Saved AX register states */
+ TBIDUAL AX[2];
+ TBIEXTCTX Ext;
+ TBIDUAL AX3[TBICTX_AX_REGS-3];
+
+ /* Any CBUF state to be restored by a handler return must be stored here.
+ Other extended state can be stored anywhere - see __TBICtxSave and
+ __TBICtxRestore. */
+
+} TBICTX, *PTBICTX;
+
+#ifdef TBI_FASTINT_1_4
+typedef struct _tbictx2_tag_ {
+ TBIDUAL AX[2]; /* AU.0, AU.1 */
+ TBIDUAL DX[2]; /* DU.0, DU.4 */
+ int CurrMODE;
+ int CurrRPT;
+ int CurrSTATUS;
+ void *CurrPC; /* PC in PC address space */
+} TBICTX2, *PTBICTX2;
+/* TBICTX2 is followed by:
+ * TBICTXEXTCB0 if TXSTATUS.CBMarker
+ * TBIDUAL * TXSTATUS.IRPCount if TXSTATUS.IRPCount > 0
+ * TBICTXGP if using __TBIStdRootIntHandler or __TBIStdCtxSwitchRootIntHandler
+ */
+
+typedef struct _tbictxgp_tag_ {
+ short DspramSizes;
+ short SaveMask;
+ void *pExt;
+ TBIDUAL DX[6]; /* DU.1-DU.3, DU.5-DU.7 */
+ TBIDUAL AX[2]; /* AU.2-AU.3 */
+} TBICTXGP, *PTBICTXGP;
+
+#define TBICTXGP_DspramSizes (0)
+#define TBICTXGP_SaveMask (TBICTXGP_DspramSizes + 2)
+#define TBICTXGP_MAX_BYTES (2 + 2 + 4 + 8*(6+2))
+
+#endif
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBICTX */
+#define TBICTX_Flags (0)
+#define TBICTX_SaveMask (2)
+#define TBICTX_CurrPC (4)
+#define TBICTX_DX (2 + 2 + 4)
+#define TBICTX_CurrRPT (2 + 2 + 4 + 8 * 8)
+#define TBICTX_CurrMODE (2 + 2 + 4 + 8 * 8 + 4 + 4)
+#define TBICTX_AX (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4)
+#define TBICTX_Ext (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4 + 2 * 8)
+#define TBICTX_Ext_AX2 (TBICTX_Ext + TBIEXTCTX_AX2)
+#define TBICTX_Ext_AX2_U0 (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U0)
+#define TBICTX_Ext_AX2_U1 (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U1)
+#define TBICTX_Ext_Ctx_pExt (TBICTX_Ext + TBIEXTCTX_Ctx_pExt)
+#define TBICTX_Ext_Ctx_SaveMask (TBICTX_Ext + TBIEXTCTX_Ctx_SaveMask)
+
+#ifdef TBI_FASTINT_1_4
+#define TBICTX2_BYTES (8 * 2 + 8 * 2 + 4 + 4 + 4 + 4)
+#define TBICTXEXTCB0_BYTES (4 + 4 + 8)
+
+#define TBICTX2_CRIT_MAX_BYTES (TBICTX2_BYTES + TBICTXEXTCB0_BYTES + 6 * TBIDUAL_BYTES)
+#define TBI_SWITCH_NEXT_PC(PC, EXTRA) ((PC) + (((EXTRA) & 1) ? 8 : 4))
+#endif
+
+#ifndef __ASSEMBLY__
+/* Extended thread state save areas - catch buffer state element */
+typedef struct _tbictxextcb0_tag_ {
+ /* Flags data and address value - see METAC_CATCH_VALUES in machine.h */
+ unsigned long CBFlags, CBAddr;
+ /* 64-bit data */
+ TBIDUAL CBData;
+
+} TBICTXEXTCB0, *PTBICTXEXTCB0;
+
+/* Read pipeline state saved on later cores after single catch buffer slot */
+typedef struct _tbictxextrp6_tag_ {
+ /* RPMask is TXSTATUS_RPMASK_BITS only, reserved is undefined */
+ unsigned long RPMask, Reserved0;
+ TBIDUAL CBData[6];
+
+} TBICTXEXTRP6, *PTBICTXEXTRP6;
+
+/* Extended thread state save areas - 8 DU register pairs */
+typedef struct _tbictxextbb8_tag_ {
+ /* Remaining Data unit registers in 64-bit pairs */
+ TBIDUAL UX[8];
+
+} TBICTXEXTBB8, *PTBICTXEXTBB8;
+
+/* Extended thread state save areas - 3 AU register pairs */
+typedef struct _tbictxextbb3_tag_ {
+ /* Remaining Address unit registers in 64-bit pairs */
+ TBIDUAL UX[3];
+
+} TBICTXEXTBB3, *PTBICTXEXTBB3;
+
+/* Extended thread state save areas - 4 AU register pairs or 4 FX pairs */
+typedef struct _tbictxextbb4_tag_ {
+ /* Remaining Address unit or FPU registers in 64-bit pairs */
+ TBIDUAL UX[4];
+
+} TBICTXEXTBB4, *PTBICTXEXTBB4;
+
+/* Extended thread state save areas - Hardware loop states (max 2) */
+typedef struct _tbictxexthl2_tag_ {
+ /* Hardware looping register states */
+ TBIDUAL Start, End, Count;
+
+} TBICTXEXTHL2, *PTBICTXEXTHL2;
+
+/* Extended thread state save areas - DSP register states */
+typedef struct _tbictxexttdp_tag_ {
+ /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
+ TBIDUAL Acc32[1];
+ /* DSP > 32-bit accumulator bits 63:32 of ACX.0 (zero-extended) */
+ TBIDUAL Acc64[1];
+ /* Twiddle register state, and three phase increment states */
+ TBIDUAL PReg[4];
+ /* Modulo region size, padded to 64-bits */
+ int CurrMRSIZE, Reserved0;
+
+} TBICTXEXTTDP, *PTBICTXEXTTDP;
+
+/* Extended thread state save areas - DSP register states including DSP RAM */
+typedef struct _tbictxexttdpr_tag_ {
+ /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
+ TBIDUAL Acc32[1];
+ /* DSP 40-bit accumulator register state (Bits 39:8 of ACX.0) */
+ TBIDUAL Acc40[1];
+ /* DSP RAM Pointers */
+ TBIDUAL RP0[2], WP0[2], RP1[2], WP1[2];
+ /* DSP RAM Increments */
+ TBIDUAL RPI0[2], WPI0[2], RPI1[2], WPI1[2];
+ /* Template registers */
+ unsigned long Tmplt[16];
+ /* Modulo address region size and DSP RAM module region sizes */
+ int CurrMRSIZE, CurrDRSIZE;
+
+} TBICTXEXTTDPR, *PTBICTXEXTTDPR;
+
+#ifdef TBI_1_4
+/* The METAC_ID_CORE register state is a marker for the FPU
+ state that is then stored after this core header structure. */
+#define TBICTXEXTFPU_CONFIG_MASK ( (METAC_COREID_NOFPACC_BIT+ \
+ METAC_COREID_CFGFPU_BITS ) << \
+ METAC_COREID_CONFIG_BITS )
+
+/* Recorded FPU exception state from TXDEFR in DefrFpu */
+#define TBICTXEXTFPU_DEFRFPU_MASK (TXDEFR_FPU_FE_BITS)
+
+/* Extended thread state save areas - FPU register states */
+typedef struct _tbictxextfpu_tag_ {
+ /* Stored METAC_CORE_ID CONFIG */
+ int CfgFpu;
+ /* Stored deferred TXDEFR bits related to FPU
+ *
+ * This is encoded as follows in order to fit into 16-bits:
+ * DefrFPU:15 - 14 <= 0
+ * :13 - 8 <= TXDEFR:21-16
+ * : 7 - 6 <= 0
+ * : 5 - 0 <= TXDEFR:5-0
+ */
+ short DefrFpu;
+
+ /* TXMODE bits related to FPU */
+ short ModeFpu;
+
+ /* FPU Even/Odd register states */
+ TBIDUAL FX[4];
+
+ /* if CfgFpu & TBICTX_CFGFPU_FX16_BIT -> 1 then TBICTXEXTBB4 holds FX.8-15 */
+ /* if CfgFpu & TBICTX_CFGFPU_NOACF_BIT -> 0 then TBICTXEXTFPACC holds state */
+} TBICTXEXTFPU, *PTBICTXEXTFPU;
+
+/* Extended thread state save areas - FPU accumulator state */
+typedef struct _tbictxextfpacc_tag_ {
+ /* FPU accumulator register state - three 64-bit parts */
+ TBIDUAL FAcc32[3];
+
+} TBICTXEXTFPACC, *PTBICTXEXTFPACC;
+#endif
+
+/* Prototype TBI structure */
+struct _tbi_tag_ ;
+
+/* A 64-bit return value used commonly in the TBI APIs */
+typedef union _tbires_tag_ {
+ /* Save and load this value to get/set the whole result quickly */
+ long long Val;
+
+ /* Parameter of a fnSigs or __TBICtx* call */
+ struct _tbires_sig_tag_ {
+ /* TXMASK[I] bits zeroed up to and including current trigger level */
+ unsigned short TrigMask;
+ /* Control bits for handlers - see PTBIAPIFN documentation below */
+ unsigned short SaveMask;
+ /* Pointer to the base register context save area of the thread */
+ PTBICTX pCtx;
+ } Sig;
+
+ /* Result of TBIThrdPrivId call */
+ struct _tbires_thrdprivid_tag_ {
+ /* Basic thread identifier; just TBID_THREAD_BITS */
+ int Id;
+ /* Non-thread-number bits; TBID_ISTAT_BIT+TBID_PSTAT_BIT */
+ int Priv;
+ } Thrd;
+
+ /* Parameter and Result of a __TBISwitch call */
+ struct _tbires_switch_tag_ {
+ /* Parameter passed across context switch */
+ void *pPara;
+ /* Thread context of other Thread including restore flags */
+ PTBICTX pCtx;
+ } Switch;
+
+ /* For extended S/W events only */
+ struct _tbires_ccb_tag_ {
+ void *pCCB;
+ int COff;
+ } CCB;
+
+ struct _tbires_tlb_tag_ {
+ int Leaf; /* TLB Leaf data */
+ int Flags; /* TLB Flags */
+ } Tlb;
+
+#ifdef TBI_FASTINT_1_4
+ struct _tbires_intr_tag_ {
+ short TrigMask;
+ short SaveMask;
+ PTBICTX2 pCtx;
+ } Intr;
+#endif
+
+} TBIRES, *PTBIRES;
+#endif /* ifndef __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+/* Prototype for all signal handler functions, called via ___TBISyncTrigger or
+ ___TBIASyncTrigger.
+
+ State.Sig.TrigMask will indicate the bits set within TXMASKI at
+ the time of the handler call that have all been cleared to prevent
+ nested interrupts occurring immediately.
+
+ State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger
+ occurs at background level and TBICTX_CRIT_BIT and optionally
+ TBICTX_CBUF_BIT when a trigger occurs at interrupt level.
+
+ TBICTX_CBUF_BIT reflects the state of TXSTATUS_CBMARKER_BIT for
+ the interrupted background thread.
+
+ State.Sig.pCtx will point at a TBICTX structure generated to hold the
+ critical state of the interrupted thread at interrupt level and
+ should be set to NULL when called at background level.
+
+ Triggers will indicate the status of TXSTAT or TXSTATI sampled by the
+ code that called the handler.
+
+ InstOrSWSId is defined firstly as 'Inst' if the SigNum is TBID_SIGNUM_SWx
+ and holds the actual SWITCH instruction detected; secondly, if SigNum
+ is TBID_SIGNUM_SWS, 'SWSId' is defined to hold the Id of the
+ software signal detected. In other cases the value of this
+ parameter is undefined.
+
+ pTBI points at the PTBI structure related to the thread and processing
+ level involved.
+
+ TBIRES return value at both processing levels is similar in terms of any
+ changes that the handler makes. By default the State argument value
+ passed in should be returned.
+
+ Sig.TrigMask value is bits to OR back into TXMASKI when the handler
+ completes to enable currently disabled interrupts.
+
+ Sig.SaveMask value is ignored.
+
+ Sig.pCtx is ignored.
+
+ */
+typedef TBIRES (*PTBIAPIFN)( TBIRES State, int SigNum,
+ int Triggers, int InstOrSWSId,
+ volatile struct _tbi_tag_ *pTBI );
+#endif /* ifndef __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+/* The global memory map is described by a list of segment descriptors */
+typedef volatile struct _tbiseg_tag_ {
+ volatile struct _tbiseg_tag_ *pLink;
+ int Id; /* Id of the segment */
+ TBISPIN Lock; /* Spin-lock for struct (normally 0) */
+ unsigned int Bytes; /* Size of region in bytes */
+ void *pGAddr; /* Base addr of region in global space */
+ void *pLAddr; /* Base addr of region in local space */
+ int Data[2]; /* Segment specific data (may be extended) */
+
+} TBISEG, *PTBISEG;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Offsets of fields in TBISEG structure */
+#define TBISEG_pLink ( 0)
+#define TBISEG_Id ( 4)
+#define TBISEG_Lock ( 8)
+#define TBISEG_Bytes (12)
+#define TBISEG_pGAddr (16)
+#define TBISEG_pLAddr (20)
+#define TBISEG_Data (24)
+
+#ifndef __ASSEMBLY__
+typedef volatile struct _tbi_tag_ {
+ int SigMask; /* Bits set to represent S/W events */
+ PTBIKICK pKick; /* Kick addr for S/W events */
+ void *pCCB; /* Extended S/W events */
+ PTBISEG pSeg; /* Related segment structure */
+ PTBIAPIFN fnSigs[TBID_SIGNUM_MAX+1];/* Signal handler API table */
+} *PTBI, TBI;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBI */
+#define TBI_SigMask (0)
+#define TBI_pKick (4)
+#define TBI_pCCB (8)
+#define TBI_pSeg (12)
+#define TBI_fnSigs (16)
+
+#ifdef TBI_1_4
+#ifndef __ASSEMBLY__
+/* This handler should be used for TBID_SIGNUM_DFR */
+extern TBIRES __TBIHandleDFR ( TBIRES State, int SigNum,
+ int Triggers, int InstOrSWSId,
+ volatile struct _tbi_tag_ *pTBI );
+#endif
+#endif
+
+/* String table entry - special values */
+#define METAG_TBI_STRS (0x5300) /* Tag : If entry is valid */
+#define METAG_TBI_STRE (0x4500) /* Tag : If entry is end of table */
+#define METAG_TBI_STRG (0x4700) /* Tag : If entry is a gap */
+#define METAG_TBI_STRX (0x5A00) /* TransLen : If no translation present */
+
+#ifndef __ASSEMBLY__
+typedef volatile struct _tbistr_tag_ {
+ short Bytes; /* Length of entry in Bytes */
+ short Tag; /* Normally METAG_TBI_STRS(0x5300) */
+ short Len; /* Length of the string entry (incl null) */
+ short TransLen; /* Normally METAG_TBI_STRX(0x5A00) */
+ char String[8]; /* Zero terminated (may be bigger) */
+
+} TBISTR, *PTBISTR;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Cache size information - available as fields of Data[1] of global heap
+ segment */
+#define METAG_TBI_ICACHE_SIZE_S 0 /* see comments below */
+#define METAG_TBI_ICACHE_SIZE_BITS 0x0000000F
+#define METAG_TBI_ICACHE_FILL_S 4
+#define METAG_TBI_ICACHE_FILL_BITS 0x000000F0
+#define METAG_TBI_DCACHE_SIZE_S 8
+#define METAG_TBI_DCACHE_SIZE_BITS 0x00000F00
+#define METAG_TBI_DCACHE_FILL_S 12
+#define METAG_TBI_DCACHE_FILL_BITS 0x0000F000
+
+/* METAG_TBI_xCACHE_SIZE
+ Describes the physical cache size rounded up to the next power of 2
+ relative to a 16K (2^14) cache. These sizes are encoded as a signed addend
+ to this base power of 2, for example
+ 4K -> 2^12 -> -2 (i.e. 12-14)
+ 8K -> 2^13 -> -1
+ 16K -> 2^14 -> 0
+ 32K -> 2^15 -> +1
+ 64K -> 2^16 -> +2
+ 128K -> 2^17 -> +3
+
+ METAG_TBI_xCACHE_FILL
+ Describes the physical cache size within the power of 2 area given by
+ the value above. For example a 10K cache may be represented as having
+ nearest size 16K with a fill of 10 sixteenths. This is encoded as the
+ number of unused 1/16ths, for example
+ 0000 -> 0 -> 16/16
+ 0001 -> 1 -> 15/16
+ 0010 -> 2 -> 14/16
+ ...
+ 1111 -> 15 -> 1/16
+ */
+
+#define METAG_TBI_CACHE_SIZE_BASE_LOG2 14
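+
+/* A minimal decoding sketch (not part of the original interface); it assumes
+   the 4-bit SIZE field is a sign-extended addend as described above, with
+   CacheInfo holding Data[1] of the global heap segment:
+
+       int SzF   = (CacheInfo & METAG_TBI_DCACHE_SIZE_BITS)
+                                       >> METAG_TBI_DCACHE_SIZE_S;
+       int Log2  = METAG_TBI_CACHE_SIZE_BASE_LOG2 + ((SzF ^ 8) - 8);
+       int Fill  = (CacheInfo & METAG_TBI_DCACHE_FILL_BITS)
+                                       >> METAG_TBI_DCACHE_FILL_S;
+       int Bytes = (1 << Log2) - (Fill << (Log2 - 4));
+
+   e.g. a 10K data cache encodes as SIZE 0 (16K) and FILL 6, giving
+   16384 - 6*1024 = 10240 bytes. */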
+
+/* Each declaration made by this macro generates a TBISTR entry */
+#ifndef __ASSEMBLY__
+#define TBISTR_DECL( Name, Str ) \
+ __attribute__ ((__section__ (".tbistr") )) const char Name[] = #Str
+#endif
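+
+/* Illustrative use only: TBISTR_DECL( MyStr, Hello ) emits the characters
+   "Hello" into the .tbistr section as the const char array MyStr, where
+   the string table search routines declared below can find it. */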
+
+/* META timer values - see below for Timer support routines */
+#define TBI_TIMERWAIT_MIN (-16) /* Minimum 'recommended' period */
+#define TBI_TIMERWAIT_MAX (-0x7FFFFFFF) /* Maximum 'recommended' period */
+
+#ifndef __ASSEMBLY__
+/* These macros allow direct access from C to any register known to the
+ assembler or defined in machine.h. Example candidates are TXTACTCYC,
+ TXIDLECYC, and TXPRIVEXT. Note that the higher level macros and routines,
+ like the timer and trigger handling features below, should be used in
+ preference to this direct low-level access mechanism where available. */
+#define TBI_GETREG( Reg ) __extension__ ({\
+ int __GRValue; \
+ __asm__ volatile ("MOV\t%0," #Reg "\t/* (*TBI_GETREG OK) */" : \
+ "=r" (__GRValue) ); \
+ __GRValue; })
+
+#define TBI_SETREG( Reg, Value ) do {\
+ int __SRValue = Value; \
+ __asm__ volatile ("MOV\t" #Reg ",%0\t/* (*TBI_SETREG OK) */" : \
+ : "r" (__SRValue) ); } while (0)
+
+#define TBI_SWAPREG( Reg, Value ) do {\
+ int __XRValue = (Value); \
+ __asm__ volatile ("SWAP\t" #Reg ",%0\t/* (*TBI_SWAPREG OK) */" : \
+ "=r" (__XRValue) : "0" (__XRValue) ); \
+ Value = __XRValue; } while (0)
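+
+/* Illustrative use only, with the example candidates named above (whether a
+   given register may be written depends on the core and privilege level):
+
+       int Cycles = TBI_GETREG( TXTACTCYC );
+       TBI_SETREG( TXIDLECYC, 0 );
+ */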
+
+/* Obtain and/or release global critical section lock given that interrupts
+ are already disabled and/or should remain disabled. */
+#define TBI_NOINTSCRITON do {\
+ __asm__ volatile ("LOCK1\t\t/* (*TBI_NOINTSCRITON OK) */");} while (0)
+#define TBI_NOINTSCRITOFF do {\
+ __asm__ volatile ("LOCK0\t\t/* (*TBI_NOINTSCRITOFF OK) */");} while (0)
+/* Optimised in-lining versions of the above macros */
+
+#define TBI_LOCK( TrigState ) do {\
+ int __TRValue; \
+ int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+ __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_LOCK ... */\n\t" \
+ "SWAP\t%0,TXMASKI\t/* ... */\n\t" \
+ "LOCK2\t\t/* ... */\n\t" \
+ "SETD\t[%1+#0x40],D1RtP /* ... OK) */" : \
+ "=r&" (__TRValue) : "u" (__ALOCKHI) ); \
+ TrigState = __TRValue; } while (0)
+#define TBI_CRITON( TrigState ) do {\
+ int __TRValue; \
+ __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_CRITON ... */\n\t" \
+ "SWAP\t%0,TXMASKI\t/* ... */\n\t" \
+ "LOCK1\t\t/* ... OK) */" : \
+ "=r" (__TRValue) ); \
+ TrigState = __TRValue; } while (0)
+
+#define TBI_INTSX( TrigState ) do {\
+ int __TRValue = TrigState; \
+ __asm__ volatile ("SWAP\t%0,TXMASKI\t/* (*TBI_INTSX OK) */" : \
+ "=r" (__TRValue) : "0" (__TRValue) ); \
+ TrigState = __TRValue; } while (0)
+
+#define TBI_UNLOCK( TrigState ) do {\
+ int __TRValue = TrigState; \
+ int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+ __asm__ volatile ("SETD\t[%1+#0x00],D1RtP\t/* (*TBI_UNLOCK ... */\n\t" \
+ "LOCK0\t\t/* ... */\n\t" \
+ "MOV\tTXMASKI,%0\t/* ... OK) */" : \
+ : "r" (__TRValue), "u" (__ALOCKHI) ); } while (0)
+
+#define TBI_CRITOFF( TrigState ) do {\
+ int __TRValue = TrigState; \
+ __asm__ volatile ("LOCK0\t\t/* (*TBI_CRITOFF ... */\n\t" \
+ "MOV\tTXMASKI,%0\t/* ... OK) */" : \
+ : "r" (__TRValue) ); } while (0)
+
+#define TBI_TRIGSX( SrcDst ) do { TBI_SWAPREG( TXMASK, SrcDst );} while (0)
+
+/* Composite macros to perform logic ops on INTS or TRIGS masks */
+#define TBI_INTSOR( Bits ) do {\
+ int __TT = 0; TBI_INTSX(__TT); \
+ __TT |= (Bits); TBI_INTSX(__TT); } while (0)
+
+#define TBI_INTSAND( Bits ) do {\
+ int __TT = 0; TBI_INTSX(__TT); \
+ __TT &= (Bits); TBI_INTSX(__TT); } while (0)
+
+#ifdef TBI_1_4
+#define TBI_DEFRICTRLSOR( Bits ) do {\
+ int __TT = TBI_GETREG( CT.20 ); \
+ __TT |= (Bits); TBI_SETREG( CT.20, __TT); } while (0)
+
+#define TBI_DEFRICTRLSAND( Bits ) do {\
+ int __TT = TBI_GETREG( TXDEFR ); \
+ __TT &= (Bits); TBI_SETREG( CT.20, __TT); } while (0)
+#endif
+
+#define TBI_TRIGSOR( Bits ) do {\
+ int __TT = TBI_GETREG( TXMASK ); \
+ __TT |= (Bits); TBI_SETREG( TXMASK, __TT); } while (0)
+
+#define TBI_TRIGSAND( Bits ) do {\
+ int __TT = TBI_GETREG( TXMASK ); \
+ __TT &= (Bits); TBI_SETREG( TXMASK, __TT); } while (0)
+
+/* Macros to disable and re-enable interrupts using TBI_INTSX; deliberate
+ traps and exceptions can still be handled within the critical section. */
+#define TBI_STOPINTS( Value ) do {\
+ int __TT = TBI_GETREG( TXMASKI ); \
+ __TT &= TXSTATI_BGNDHALT_BIT; TBI_INTSX( __TT ); \
+ Value = __TT; } while (0)
+#define TBI_RESTINTS( Value ) do {\
+ int __TT = Value; TBI_INTSX( __TT ); } while (0)
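+
+/* A minimal sketch of the intended pairing (illustrative only):
+
+       int OldInts;
+       TBI_STOPINTS( OldInts );    / * mask interrupts, save old state * /
+       ... critical section ...
+       TBI_RESTINTS( OldInts );    / * restore previous TXMASKI state * /
+ */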
+
+/* Return pointer to segment list at current privilege level */
+PTBISEG __TBISegList( void );
+
+/* Search the segment list for a match given Id, pStart can be NULL */
+PTBISEG __TBIFindSeg( PTBISEG pStart, int Id );
+
+/* Prepare a new segment structure using space from within another */
+PTBISEG __TBINewSeg( PTBISEG pFromSeg, int Id, unsigned int Bytes );
+
+/* Prepare a new segment using any global or local heap segments available */
+PTBISEG __TBIMakeNewSeg( int Id, unsigned int Bytes );
+
+/* Insert a new segment into the segment list so __TBIFindSeg can locate it */
+void __TBIAddSeg( PTBISEG pSeg );
+#define __TBIADDSEG_DEF /* Some versions failed to define this */
+
+/* Return Id of current thread; TBID_ISTAT_BIT+TBID_THREAD_BITS */
+int __TBIThreadId( void );
+
+/* Return TBIRES.Thrd data for current thread */
+TBIRES __TBIThrdPrivId( void );
+
+/* Return pointer to the current thread's TBI root block.
+ Id implies whether Int or Background root block is required */
+PTBI __TBI( int Id );
+
+/* Try to set Mask bit using the spin-lock protocol, return 0 if fails and
+ new state if succeeds */
+int __TBIPoll( PTBISPIN pLock, int Mask );
+
+/* Set Mask bits via the spin-lock protocol in *pLock, return new state */
+int __TBISpin( PTBISPIN pLock, int Mask );
+
+/* Default handler set up for all TBI.fnSigs entries during initialisation */
+TBIRES __TBIUnExpXXX( TBIRES State, int SigNum,
+ int Triggers, int Inst, PTBI pTBI );
+
+/* Call this routine to service triggers at background processing level. The
+ TBID_SIGPOLL_BIT of the Id parameter value will be used to indicate that the
+ routine should return if no triggers need to be serviced initially. If this
+ bit is not set the routine will block until one trigger handler is serviced
+ and then behave like the poll case servicing any remaining triggers
+ actually outstanding before returning. Normally the State parameter should
+ be simply initialised to zero and the result should be ignored, other
+ values/options are for internal use only. */
+TBIRES __TBISyncTrigger( TBIRES State, int Id );
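+
+/* A minimal background dispatch loop might look like this (an illustrative
+   sketch only, following the initialisation advice above):
+
+       TBIRES State;
+       for (;;) {
+           State.Val = 0;
+           (void) __TBISyncTrigger( State, __TBIThreadId() );
+       }
+ */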
+
+/* Call this routine to enable processing of triggers by signal handlers at
+ interrupt level. The State parameter value passed is returned by this
+ routine. The State.Sig.TrigMask field also specifies the initial
+ state of the interrupt mask register TXMASKI to be setup by the call.
+ The other parts of the State parameter are ignored unless the PRIV bit is
+ set in the SaveMask field. In this case the State.Sig.pCtx field specifies
+ the base of the stack to which the interrupt system should switch
+ as it saves the state of the previously executing code. In this case the
+ thread will be unprivileged as it continues execution at the return
+ point of this routine and its future state will effectively never be
+ trusted to be valid. */
+TBIRES __TBIASyncTrigger( TBIRES State );
+
+/* Call this to swap soft threads executing at the background processing level.
+ The TBIRES returned to the new thread will be the same as the NextThread
+ value specified to the call. The NextThread.Switch.pCtx value specifies
+ which thread context to restore and the NextThread.Switch.Para value can
+ hold an arbitrary expression to be passed between the threads. The saved
+ state of the previous thread will be stored in a TBICTX descriptor created
+ on its stack and the address of this will be stored into the *rpSaveCtx
+ location specified. */
+TBIRES __TBISwitch( TBIRES NextThread, PTBICTX *rpSaveCtx );
+
+/* Call this to initialise a stack frame ready for further use, up to four
+ 32-bit arguments may be specified after the fixed args to be passed via
+ the new stack pStack to the routine specified via fnMain. If the
+ main-line routine ever returns the thread will operate as if main itself
+ had returned and terminate with the return code given. */
+typedef int (*PTBIMAINFN)( TBIRES Arg /*, <= 4 additional 32-bit args */ );
+PTBICTX __TBISwitchInit( void *pStack, PTBIMAINFN fnMain, ... );
+
+/* Call this to resume a thread from a saved synchronous TBICTX state.
+ The TBIRES returned to the new thread will be the same as the NextThread
+ value specified to the call. The NextThread.Switch.pCtx value specifies
+ which thread context to restore and the NextThread.Switch.Para value can
+ hold an arbitrary expression to be passed between the threads. The context
+ of the calling thread is lost and this routine never returns to the
+ caller. The TrigsMask value supplied is ORed into TXMASKI to enable
+ interrupts after the context of the new thread is established. */
+void __TBISyncResume( TBIRES NextThread, int TrigsMask );
+
+/* Call these routines to save and restore the extended states of
+ scheduled tasks. */
+void *__TBICtxSave( TBIRES State, void *pExt );
+void *__TBICtxRestore( TBIRES State, void *pExt );
+
+#ifdef TBI_1_4
+#ifdef TBI_FASTINT_1_4
+/* Call these routines to copy the GP state to a separate buffer
+ * Only necessary for context switching.
+ */
+PTBICTXGP __TBICtx2SaveCrit( PTBICTX2 pCurrentCtx, PTBICTX2 pSaveCtx );
+void *__TBICtx2SaveGP( PTBICTXGP pCurrentCtxGP, PTBICTXGP pSaveCtxGP );
+
+/* Call these routines to save and restore the extended states of
+ scheduled tasks. */
+void *__TBICtx2Save( PTBICTXGP pCtxGP, short SaveMask, void *pExt );
+void *__TBICtx2Restore( PTBICTX2 pCtx, short SaveMask, void *pExt );
+#endif
+
+/* If FPAC flag is set then significant FPU context exists. Call these routine
+ to save and restore it */
+void *__TBICtxFPUSave( TBIRES State, void *pExt );
+void *__TBICtxFPURestore( TBIRES State, void *pExt );
+
+#ifdef TBI_FASTINT_1_4
+extern void *__TBICtx2FPUSave (PTBICTXGP, short, void*);
+extern void *__TBICtx2FPURestore (PTBICTXGP, short, void*);
+#endif
+#endif
+
+#ifdef TBI_1_4
+/* Call these routines to save and restore DSPRAM. */
+void *__TBIDspramSaveA (short DspramSizes, void *pExt);
+void *__TBIDspramSaveB (short DspramSizes, void *pExt);
+void *__TBIDspramRestoreA (short DspramSizes, void *pExt);
+void *__TBIDspramRestoreB (short DspramSizes, void *pExt);
+#endif
+
+/* This routine should be used at the entrypoint of interrupt handlers to
+ re-enable higher priority interrupts and/or save state from the previously
+ executing background code. State is a TBIRES.Sig parameter with NoNestMask
+ indicating the triggers (if any) that should remain disabled and SaveMask
+ CBUF bit indicating if the hardware catch buffer is dirty. Optionally
+ any number of extended state bits X??? including XCBF can be specified to
+ force a nested state save call to __TBICtxSave before the current routine
+ continues. (In the latter case __TBICtxRestore should be called to restore
+ any extended states before the background thread of execution is resumed)
+
+ By default (no X??? bits specified in SaveMask) this routine performs a
+ sub-call to __TBICtxSave with the pExt and State parameters specified IF
+ some triggers could be serviced while the current interrupt handler
+ executes and the hardware catch buffer is actually dirty. In this case
+ this routine provides the XCBF bit in State.Sig.SaveMask to force the
+ __TBICtxSave to extract the current catch state.
+
+ The NoNestMask parameter should normally indicate that the same or lower
+ triggers than those provoking the current handler call should not be
+ serviced in nested calls, zero may be specified if all possible interrupts
+ are to be allowed.
+
+ The TBIRES.Sig value returned will be similar to the State parameter
+ specified with the XCBF bit ORed into its SaveMask if a context save was
+ required and fewer bits set in its TrigMask corresponding to the same/lower
+ priority interrupt triggers still not enabled. */
+TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask );
+
+/* This routine causes the TBICTX structure specified in State.Sig.pCtx to
+ be restored. This implies that execution will not return to the caller.
+ The State.Sig.TrigMask field will be restored during the context switch
+ such that any immediately occurring interrupts occur in the context of the
+ newly specified task. The State.Sig.SaveMask parameter is ignored. */
+void __TBIASyncResume( TBIRES State );
+
+/* Call this routine to enable fastest possible processing of one or more
+ interrupt triggers via a unified signal handler. The handler concerned
+ must simply return after servicing the related hardware.
+ The State.Sig.TrigMask parameter indicates the interrupt triggers to be
+ enabled and the Thin.Thin.fnHandler specifies the routine to call and
+ the whole Thin parameter value will be passed to this routine unaltered as
+ its first parameter. */
+void __TBIASyncThin( TBIRES State, TBIRES Thin );
+
+/* Do this before performing your own direct spin-lock access - use TBI_LOCK */
+int __TBILock( void );
+
+/* Do this after performing your own direct spin-lock access - use TBI_UNLOCK */
+void __TBIUnlock( int TrigState );
+
+/* Obtain and release global critical section lock - only stops execution
+ of interrupts on this thread and similar critical section code on other
+ local threads - use TBI_CRITON or TBI_CRITOFF */
+int __TBICritOn( void );
+void __TBICritOff( int TrigState );
+
+/* Change INTS (TXMASKI) - return old state - use TBI_INTSX */
+int __TBIIntsX( int NewMask );
+
+/* Change TRIGS (TXMASK) - return old state - use TBI_TRIGSX */
+int __TBITrigsX( int NewMask );
+
+/* This function initialises a timer for first use, only the TBID_ISTAT_BIT
+ of the Id parameter is used to indicate which timer is to be modified. The
+ Wait value should either be zero to disable the timer concerned or be in
+ the recommended TBI_TIMERWAIT_* range to specify the delay required before
+ the first timer trigger occurs.
+
+ The TBID_ISTAT_BIT of the Id parameter similarly affects all other timer
+ support functions (see below). */
+void __TBITimerCtrl( int Id, int Wait );
+
+/* This routine returns a 64-bit time stamp value that is initialised to zero
+ via a __TBITimerCtrl timer enabling call. */
+long long __TBITimeStamp( int Id );
+
+/* To manage a periodic timer each period elapsed should be subtracted from
+ the current timer value to attempt to set up the next timer trigger. The
+ Wait parameter should be a value in the recommended TBI_TIMERWAIT_* range.
+ The return value is the new aggregate value that the timer was updated to,
+ if this is less than zero then a timer trigger is guaranteed to be
+ generated after the number of ticks implied, if a positive result is
+ returned either iterative or step-wise corrective action must be taken to
+ resynchronise the timer and hence provoke a future timer trigger. */
+int __TBITimerAdd( int Id, int Wait );
+
+/* String table search function, pStart is first entry to check or NULL,
+ pStr is string data to search for and MatchLen is either length of string
+ to compare for an exact match or negative length to compare for partial
+ match. */
+const TBISTR *__TBIFindStr( const TBISTR *pStart,
+ const char *pStr, int MatchLen );
+
+/* String table translate function, pStr is text to translate and Len is
+ its length. The value returned may not be a string pointer if the
+ translation value is really some other type, 64-bit alignment of the return
+ pointer is guaranteed so almost any type including a structure could be
+ located with this routine. */
+const void *__TBITransStr( const char *pStr, int Len );
+
+
+
+/* Arbitrary physical memory access windows, use different Channels to avoid
+ conflict/thrashing within a single piece of code. */
+void *__TBIPhysAccess( int Channel, int PhysAddr, int Bytes );
+void __TBIPhysRelease( int Channel, void *pLinAddr );
+
+#ifdef METAC_1_0
+/* Data cache function nullified because data cache is off */
+#define TBIDCACHE_FLUSH( pAddr )
+#define TBIDCACHE_PRELOAD( Type, pAddr ) ((Type) (pAddr))
+#define TBIDCACHE_REFRESH( Type, pAddr ) ((Type) (pAddr))
+#endif
+#ifdef METAC_1_1
+/* To flush a single cache line from the data cache using a linear address */
+#define TBIDCACHE_FLUSH( pAddr ) ((volatile char *) \
+ (((unsigned int) (pAddr))>>LINSYSLFLUSH_S))[0] = 0
+
+extern void * __builtin_dcache_preload (void *);
+
+/* Try to ensure that the data at the address concerned is in the cache */
+#define TBIDCACHE_PRELOAD( Type, Addr ) \
+ ((Type) __builtin_dcache_preload ((void *)(Addr)))
+
+extern void * __builtin_dcache_refresh (void *);
+
+/* Flush any old version of data from address and re-load a new copy */
+#define TBIDCACHE_REFRESH( Type, Addr ) __extension__ ({ \
+ Type __addr = (Type)(Addr); \
+ (void)__builtin_dcache_refresh ((void *)(((unsigned int)(__addr))>>6)); \
+ __addr; })
+
+#endif
+#ifndef METAC_1_0
+#ifndef METAC_1_1
+/* Support for DCACHE builtin */
+extern void __builtin_dcache_flush (void *);
+
+/* To flush a single cache line from the data cache using a linear address */
+#define TBIDCACHE_FLUSH( Addr ) \
+ __builtin_dcache_flush ((void *)(Addr))
+
+extern void * __builtin_dcache_preload (void *);
+
+/* Try to ensure that the data at the address concerned is in the cache */
+#define TBIDCACHE_PRELOAD( Type, Addr ) \
+ ((Type) __builtin_dcache_preload ((void *)(Addr)))
+
+extern void * __builtin_dcache_refresh (void *);
+
+/* Flush any old version of data from address and re-load a new copy */
+#define TBIDCACHE_REFRESH( Type, Addr ) \
+ ((Type) __builtin_dcache_refresh ((void *)(Addr)))
+
+#endif
+#endif
+
+/* Flush the MMCU cache */
+#define TBIMCACHE_FLUSH() { ((volatile int *) LINSYSCFLUSH_MMCU)[0] = 0; }
+
+#ifdef METAC_2_1
+/* Obtain the MMU table entry for the specified address */
+#define TBIMTABLE_LEAFDATA(ADDR) TBIXCACHE_RD((int)(ADDR) & (-1<<6))
+
+#ifndef __ASSEMBLY__
+/* Obtain the full MMU table entry for the specified address */
+#define TBIMTABLE_DATA(ADDR) __extension__ ({ TBIRES __p; \
+ __p.Val = TBIXCACHE_RL((int)(ADDR) & (-1<<6)); \
+ __p; })
+#endif
+#endif
+
+/* Combine a physical base address, and a linear address
+ * Internal use only
+ */
+#define _TBIMTABLE_LIN2PHYS(PHYS, LIN, LMASK) (void*)(((int)(PHYS)&0xFFFFF000)\
+ +((int)(LIN)&(LMASK)))
+
+/* Convert a linear to a physical address */
+#define TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR) \
+ (((LEAFDATA) & CRLINPHY0_VAL_BIT) \
+ ? _TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR, 0x00000FFF) \
+ : 0)
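+
+/* Illustrative use only (TBIMTABLE_LEAFDATA above requires a METAC_2_1
+   core):
+
+       int Leaf = TBIMTABLE_LEAFDATA( pLinAddr );
+       void *pPhys = TBIMTABLE_LIN2PHYS( Leaf, pLinAddr );
+
+   pPhys is 0 when the leaf entry is not marked valid. */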
+
+/* Debug support - using external debugger or host */
+void __TBIDumpSegListEntries( void );
+void __TBILogF( const char *pFmt, ... );
+void __TBIAssert( const char *pFile, int LineNum, const char *pExp );
+void __TBICont( const char *pMsg, ... ); /* TBIAssert -> 'wait for continue' */
+
+/* Array of signal name data for debug messages */
+extern const char __TBISigNames[];
+#endif /* ifndef __ASSEMBLY__ */
+
+
+
+/* Scale of sub-strings in the __TBISigNames string list */
+#define TBI_SIGNAME_SCALE 4
+#define TBI_SIGNAME_SCALE_S 2
+
+#define TBI_1_3
+
+#ifdef TBI_1_3
+
+#ifndef __ASSEMBLY__
+#define TBIXCACHE_RD(ADDR) __extension__ ({\
+ void * __Addr = (void *)(ADDR); \
+ int __Data; \
+ __asm__ volatile ( "CACHERD\t%0,[%1+#0]" : \
+ "=r" (__Data) : "r" (__Addr) ); \
+ __Data; })
+
+#define TBIXCACHE_RL(ADDR) __extension__ ({\
+ void * __Addr = (void *)(ADDR); \
+ long long __Data; \
+ __asm__ volatile ( "CACHERL\t%0,%t0,[%1+#0]" : \
+ "=d" (__Data) : "r" (__Addr) ); \
+ __Data; })
+
+#define TBIXCACHE_WD(ADDR, DATA) do {\
+ void * __Addr = (void *)(ADDR); \
+ int __Data = DATA; \
+ __asm__ volatile ( "CACHEWD\t[%0+#0],%1" : \
+ : "r" (__Addr), "r" (__Data) ); } while(0)
+
+#define TBIXCACHE_WL(ADDR, DATA) do {\
+ void * __Addr = (void *)(ADDR); \
+ long long __Data = DATA; \
+ __asm__ volatile ( "CACHEWL\t[%0+#0],%1,%t1" : \
+ : "r" (__Addr), "r" (__Data) ); } while(0)
+
+#ifdef TBI_4_0
+
+#define TBICACHE_FLUSH_L1D_L2(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_FLUSH_L1D_L2)
+#define TBICACHE_WRITEBACK_L1D_L2(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_WRITEBACK_L1D_L2)
+#define TBICACHE_INVALIDATE_L1D(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D)
+#define TBICACHE_INVALIDATE_L1D_L2(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D_L2)
+#define TBICACHE_INVALIDATE_L1DTLB(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1DTLB)
+#define TBICACHE_INVALIDATE_L1I(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1I)
+#define TBICACHE_INVALIDATE_L1ITLB(ADDR) \
+ TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1ITLB)
+
+#endif /* TBI_4_0 */
+#endif /* ifndef __ASSEMBLY__ */
+
+/*
+ * Calculate linear PC value from real PC and Minim mode control, the LSB of
+ * the result returned indicates if address compression has occurred.
+ */
+#ifndef __ASSEMBLY__
+#define METAG_LINPC( PCVal ) (\
+ ( (TBI_GETREG(TXPRIVEXT) & TXPRIVEXT_MINIMON_BIT) != 0 ) ? ( \
+ ( ((PCVal) & 0x00900000) == 0x00900000 ) ? \
+ (((PCVal) & 0xFFE00000) + (((PCVal) & 0x001FFFFC)>>1) + 1) : \
+ ( ((PCVal) & 0x00800000) == 0x00000000 ) ? \
+ (((PCVal) & 0xFF800000) + (((PCVal) & 0x007FFFFC)>>1) + 1) : \
+ (PCVal) ) \
+ : (PCVal) )
+#define METAG_LINPC_X2BIT 0x00000001 /* Make (Size>>1) if compressed */
+
+/* Convert an arbitrary Linear address into a valid Minim PC or return 0 */
+#define METAG_PCMINIM( LinVal ) (\
+ (((LinVal) & 0x00980000) == 0x00880000) ? \
+ (((LinVal) & 0xFFE00000) + (((LinVal) & 0x000FFFFE)<<1)) : \
+ (((LinVal) & 0x00C00000) == 0x00000000) ? \
+ (((LinVal) & 0xFF800000) + (((LinVal) & 0x003FFFFE)<<1)) : 0 )
+
+/* Reverse a METAG_LINPC conversion step to return the original PCVal */
+#define METAG_PCLIN( LinVal ) ( 0xFFFFFFFC & (\
+ ( (LinVal & METAG_LINPC_X2BIT) != 0 ) ? METAG_PCMINIM( LinVal ) : \
+ (LinVal) ))
+
+/*
+ * Flush the MMCU Table cache privately for each thread. On cores that do not
+ * support per-thread flushing it will flush all threads mapping data.
+ */
+#define TBIMCACHE_TFLUSH(Thread) do {\
+ ((volatile int *)( LINSYSCFLUSH_TxMMCU_BASE + \
+ (LINSYSCFLUSH_TxMMCU_STRIDE*(Thread)) ))[0] = 0; \
+ } while(0)
+
+/*
+ * To flush a single linear-matched cache line from the code cache. In
+ * cases where Minim is possible the METAG_LINPC operation must be used
+ * to pre-process the address being flushed.
+ */
+#define TBIICACHE_FLUSH( pAddr ) TBIXCACHE_WD (pAddr, CACHEW_ICACHE_BIT)
+
+/* To flush a single linear-matched mapping from code/data MMU table cache */
+#define TBIMCACHE_AFLUSH( pAddr, SegType ) \
+ TBIXCACHE_WD(pAddr, CACHEW_TLBFLUSH_BIT + ( \
+ ((SegType) == TBID_SEGTYPE_TEXT) ? CACHEW_ICACHE_BIT : 0 ))
+
+/*
+ * To flush translation data corresponding to a range of addresses without
+ * using TBIMCACHE_TFLUSH to flush all of this thread's translation data. It
+ * is necessary to know what stride (>= 4K) must be used to flush a specific
+ * region.
+ *
+ * For example direct mapped regions use the maximum page size (512K) which may
+ * mean that only one flush is needed to cover the sub-set of the direct
+ * mapped area used since it was setup.
+ *
+ * The function returns the stride on which flushes should be performed.
+ *
+ * If 0 is returned then the region is not subject to MMU caching, if -1 is
+ * returned then this indicates that only TBIMCACHE_TFLUSH can be used to
+ * flush the region concerned rather than TBIMCACHE_AFLUSH which this
+ * function is designed to support.
+ */
+int __TBIMMUCacheStride( const void *pStart, int Bytes );
+
+/*
+ * This function will use the above lower level functions to achieve a MMU
+ * table data flush in as optimal a fashion as possible. On a system that
+ * supports linear address based caching this function will also call the
+ * code or data cache flush functions to maintain address/data coherency.
+ *
+ * SegType should be TBID_SEGTYPE_TEXT if the address range is for code or
+ * any other value such as TBID_SEGTYPE_DATA for data. If an area is
+ * used in both ways then call this function twice; once for each.
+ */
+void __TBIMMUCacheFlush( const void *pStart, int Bytes, int SegType );
+
+/*
+ * Cached Core mode setup and flush functions allow one code and one data
+ * region of the corresponding global or local cache partition size to be
+ * locked into the corresponding cache memory. This prevents normal LRU
+ * logic discarding the code or data and avoids write-thru bandwidth in
+ * data areas. Code mappings are selected by specifying TBID_SEGTYPE_TEXT
+ * for SegType, otherwise data mappings are created.
+ *
+ * Mode supplied should always contain the VALID bit and WINx selection data.
+ * Data areas will be mapped read-only if the WRITE bit is not added.
+ *
+ * The address returned by the Opt function will either be the same as that
+ * passed in (if optimisation cannot be supported) or the base of the new core
+ * cached region in linear address space. The returned address must be passed
+ * into the End function to remove the mapping when required. If a non-core
+ * cached memory address is passed into it the End function has no effect.
+ * Note that the region accessed MUST be flushed from the appropriate cache
+ * before the End function is called to deliver correct operation.
+ */
+void *__TBICoreCacheOpt( const void *pStart, int Bytes, int SegType, int Mode );
+void __TBICoreCacheEnd( const void *pOpt, int Bytes, int SegType );
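+
+/*
+ * Illustrative pairing of the Opt/End calls above (sketch only, not built;
+ * the buffer and Mode value are hypothetical, and __TBIMMUCacheFlush is one
+ * plausible way to perform the required cache flush before the End call).
+ */
+#if 0
+static void ExampleCoreCacheData(void *pBuf, int Bytes, int Mode)
+{
+	void *pOpt = __TBICoreCacheOpt(pBuf, Bytes, TBID_SEGTYPE_DATA, Mode);
+
+	/* ... access the data via pOpt rather than pBuf ... */
+
+	/* The region accessed MUST be flushed before removing the mapping */
+	__TBIMMUCacheFlush(pOpt, Bytes, TBID_SEGTYPE_DATA);
+	__TBICoreCacheEnd(pOpt, Bytes, TBID_SEGTYPE_DATA);
+}
+#endif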
+
+/*
+ * Optimise a physical access channel and flush side effects before releasing
+ * the channel. If pStart is NULL the whole region must be flushed, and this
+ * is done automatically by the channel release function if optimisation is
+ * enabled. Flushing the specific region that may have been accessed before
+ * release should optimise this process. On physically cached systems we do
+ * not flush the code/data caches; only the MMU table data needs flushing.
+ */
+void __TBIPhysOptim( int Channel, int IMode, int DMode );
+void __TBIPhysFlush( int Channel, const void *pStart, int Bytes );
+#endif
+#endif /* ifdef TBI_1_3 */
+
+#endif /* _ASM_METAG_TBX_H_ */
diff --git a/arch/metag/include/asm/tcm.h b/arch/metag/include/asm/tcm.h
new file mode 100644
index 00000000000..7711c317b1d
--- /dev/null
+++ b/arch/metag/include/asm/tcm.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_TCM_H__
+#define __ASM_TCM_H__
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+
+struct tcm_allocation {
+ struct list_head list;
+ unsigned int tag;
+ unsigned long addr;
+ unsigned long size;
+};
+
+/*
+ * TCM memory region descriptor.
+ */
+struct tcm_region {
+ unsigned int tag;
+ struct resource res;
+};
+
+#define TCM_INVALID_TAG 0xffffffff
+
+unsigned long tcm_alloc(unsigned int tag, size_t len);
+void tcm_free(unsigned int tag, unsigned long addr, size_t len);
+unsigned int tcm_lookup_tag(unsigned long p);
+
+int tcm_add_region(struct tcm_region *reg);
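+
+/*
+ * Illustrative usage (sketch only, not built; the tag and length are
+ * hypothetical, and tcm_alloc() is assumed to return 0 on failure).
+ */
+#if 0
+static int example_tcm_use(unsigned int tag)
+{
+	unsigned long addr = tcm_alloc(tag, 256);
+
+	if (!addr)
+		return -ENOMEM;
+	/* ... place hot code/data in the TCM at addr ... */
+	tcm_free(tag, addr, 256);
+	return 0;
+}
+#endif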
+
+#endif
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
new file mode 100644
index 00000000000..0ecd34d8b5f
--- /dev/null
+++ b/arch/metag/include/asm/thread_info.h
@@ -0,0 +1,155 @@
+/* thread_info.h: Meta low-level thread information
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ *
+ * Meta port by Imagination Technologies
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#include <linux/compiler.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must
+ * also be changed
+ */
+#ifndef __ASSEMBLY__
+
+/* This must be 8 byte aligned so we can ensure stack alignment. */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+
+ mm_segment_t addr_limit; /* thread address space */
+ struct restart_block restart_block;
+
+ u8 supervisor_stack[0];
+};
+
+#else /* !__ASSEMBLY__ */
+
+#include <generated/asm-offsets.h>
+
+#endif
+
+#define PREEMPT_ACTIVE 0x10000000
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_SHIFT 12
+#else
+#define THREAD_SHIFT 13
+#endif
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
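+
+/*
+ * For example, with 4K pages (PAGE_SHIFT == 12): CONFIG_4KSTACKS gives
+ * THREAD_SIZE_ORDER == 0 and a 4K stack, otherwise THREAD_SIZE_ORDER == 1
+ * and an 8K stack.
+ */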
+
+#define STACK_WARN (THREAD_SIZE/8)
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("A0StP") __used;
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ return (struct thread_info *)(current_stack_pointer &
+ ~(THREAD_SIZE - 1));
+}
+
+#define __HAVE_ARCH_KSTACK_END
+static inline int kstack_end(void *addr)
+{
+ return addr == (void *) (((unsigned long) addr & ~(THREAD_SIZE - 1))
+ + sizeof(struct thread_info));
+}
+
+#endif
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to user
+ mode */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SECCOMP 5 /* secure computing */
+#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
+#define TIF_POLLING_NRFLAG 8 /* true if poll_idle() is polling
+ TIF_NEED_RESCHED */
+#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint instrumentation */
+
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+
+/* work to do in syscall trace */
+#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
+ _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
+ _TIF_SYSCALL_TRACEPOINT)
+
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \
+ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \
+ _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \
+ _TIF_NOTIFY_RESUME)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
+
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/include/asm/tlb.h b/arch/metag/include/asm/tlb.h
new file mode 100644
index 00000000000..048282f1cc1
--- /dev/null
+++ b/arch/metag/include/asm/tlb.h
@@ -0,0 +1,36 @@
+#ifndef __ASM_METAG_TLB_H
+#define __ASM_METAG_TLB_H
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+/* Note, read http://lkml.org/lkml/2004/1/15/6 */
+
+#ifdef CONFIG_METAG_META12
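+
+/*
+ * Meta 1 caches are virtually tagged, so a VMA's pages must be flushed
+ * from the cache before its mappings are torn down.
+ */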
+
+#define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+#define tlb_end_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+
+#else
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+
+#endif
+
+#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif
diff --git a/arch/metag/include/asm/tlbflush.h b/arch/metag/include/asm/tlbflush.h
new file mode 100644
index 00000000000..566acf918a6
--- /dev/null
+++ b/arch/metag/include/asm/tlbflush.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_METAG_TLBFLUSH_H
+#define __ASM_METAG_TLBFLUSH_H
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <asm/metag_mem.h>
+#include <asm/pgalloc.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes' TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLBs
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * FIXME: Meta 2 can flush single TLB entries.
+ *
+ */
+
+#if defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP)
+static inline void __flush_tlb(void)
+{
+ /* flush TLB entries for just the current hardware thread */
+ int thread = hard_processor_id();
+ metag_out32(0, (LINSYSCFLUSH_TxMMCU_BASE +
+ LINSYSCFLUSH_TxMMCU_STRIDE * thread));
+}
+#else
+static inline void __flush_tlb(void)
+{
+ /* flush TLB entries for all hardware threads */
+ metag_out32(0, LINSYSCFLUSH_MMCU);
+}
+#endif /* defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) */
+
+#define flush_tlb() __flush_tlb()
+
+#define flush_tlb_all() __flush_tlb()
+
+#define local_flush_tlb_all() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+#endif /* __ASM_METAG_TLBFLUSH_H */
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
new file mode 100644
index 00000000000..23f5118f58d
--- /dev/null
+++ b/arch/metag/include/asm/topology.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_METAG_TOPOLOGY_H
+#define _ASM_METAG_TOPOLOGY_H
+
+#ifdef CONFIG_NUMA
+
+/* sched_domains SD_NODE_INIT for Meta machines */
+#define SD_NODE_INIT (struct sched_domain) { \
+ .parent = NULL, \
+ .child = NULL, \
+ .groups = NULL, \
+ .min_interval = 8, \
+ .max_interval = 32, \
+ .busy_factor = 32, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 2, \
+ .busy_idx = 3, \
+ .idle_idx = 2, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_FORK \
+ | SD_BALANCE_EXEC \
+ | SD_BALANCE_NEWIDLE \
+ | SD_SERIALIZE, \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .nr_balance_failed = 0, \
+}
+
+#define cpu_to_node(cpu) ((void)(cpu), 0)
+#define parent_node(node) ((void)(node), 0)
+
+#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
+
+#define pcibus_to_node(bus) ((void)(bus), -1)
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+
+#endif
+
+#define mc_capable() (1)
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_METAG_TOPOLOGY_H */
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h
new file mode 100644
index 00000000000..ac808740bd8
--- /dev/null
+++ b/arch/metag/include/asm/traps.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2005,2008 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _METAG_TBIVECTORS_H
+#define _METAG_TBIVECTORS_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/tbx.h>
+
+typedef TBIRES (*kick_irq_func_t)(TBIRES, int, int, int, PTBI, int *);
+
+extern TBIRES kick_handler(TBIRES, int, int, int, PTBI);
+struct kick_irq_handler {
+ struct list_head list;
+ kick_irq_func_t func;
+};
+
+extern void kick_register_func(struct kick_irq_handler *);
+extern void kick_unregister_func(struct kick_irq_handler *);
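+
+/*
+ * Illustrative registration (sketch only, not built; the handler body and
+ * the meaning given to the final int * argument are assumptions).
+ */
+#if 0
+static TBIRES example_kick(TBIRES State, int SigNum, int Triggers, int Inst,
+			   PTBI pTBI, int *handled)
+{
+	*handled = 1;		/* Assumed: mark the kick as consumed */
+	return State;
+}
+
+static struct kick_irq_handler example_kick_handler = {
+	.func = example_kick,
+};
+
+/* ... kick_register_func(&example_kick_handler); ... */
+#endif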
+
+extern void head_end(TBIRES, unsigned long);
+extern void restart_critical_section(TBIRES State);
+extern TBIRES tail_end_sys(TBIRES, int, int *);
+static inline TBIRES tail_end(TBIRES state)
+{
+ return tail_end_sys(state, -1, NULL);
+}
+
+DECLARE_PER_CPU(PTBI, pTBI);
+extern PTBI pTBI_get(unsigned int);
+
+extern int ret_from_fork(TBIRES arg);
+
+extern int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned int write_access, unsigned int trapno);
+
+extern TBIRES __TBIUnExpXXX(TBIRES State, int SigNum, int Triggers, int Inst,
+ PTBI pTBI);
+
+#endif
+
+#endif /* _METAG_TBIVECTORS_H */
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
new file mode 100644
index 00000000000..0748b0a9798
--- /dev/null
+++ b/arch/metag/include/asm/uaccess.h
@@ -0,0 +1,241 @@
+#ifndef __METAG_UACCESS_H
+#define __METAG_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+/*
+ * Explicitly allow NULL pointers here. Parts of the kernel such
+ * as readv/writev use access_ok to validate pointers, but want
+ * to allow NULL pointers for various reasons. NULL pointers are
+ * safe to allow through because the first page is not mappable on
+ * Meta.
+ *
+ * We also wish to avoid letting user code access the system area
+ * and the kernel half of the address space.
+ */
+#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
+ ((addr) > PAGE_OFFSET && \
+ (addr) < LINCORE_BASE))
+
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+ return __kernel_ok || !__user_bad(addr, size);
+}
+
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
+ (unsigned long)(size))
+
+static inline int verify_area(int type, const void *addr, unsigned long size)
+{
+ return access_ok(type, addr, size) ? 0 : -EFAULT;
+}
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
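+
+/*
+ * A minimal sketch of the usual fixup flow (not built; assumes the generic
+ * search_exception_tables() helper and that the faulting PC lives in
+ * regs->ctx.CurrPC).
+ */
+#if 0
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	fixup = search_exception_tables(regs->ctx.CurrPC);
+	if (fixup) {
+		/* Resume at the out-of-line fixup code */
+		regs->ctx.CurrPC = fixup->fixup;
+		return 1;
+	}
+	return 0;
+}
+#endif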
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern void __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+extern long __put_user_asm_b(unsigned int x, void __user *addr);
+extern long __put_user_asm_w(unsigned int x, void __user *addr);
+extern long __put_user_asm_d(unsigned int x, void __user *addr);
+extern long __put_user_asm_l(unsigned long long x, void __user *addr);
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ retval = __put_user_asm_b((unsigned int)x, ptr); break; \
+ case 2: \
+ retval = __put_user_asm_w((unsigned int)x, ptr); break; \
+ case 4: \
+ retval = __put_user_asm_d((unsigned int)x, ptr); break; \
+ case 8: \
+ retval = __put_user_asm_l((unsigned long long)x, ptr); break; \
+ default: \
+ __put_user_bad(); \
+ } \
+} while (0)
+
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern long __get_user_bad(void);
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
+extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
+extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ x = __get_user_asm_b(ptr, &retval); break; \
+ case 2: \
+ x = __get_user_asm_w(ptr, &retval); break; \
+ case 4: \
+ x = __get_user_asm_d(ptr, &retval); break; \
+ default: \
+ (x) = __get_user_bad(); \
+ } \
+} while (0)
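+
+/*
+ * Illustrative use of the transfer routines above (sketch only, not built;
+ * the function and its argument are hypothetical). Both macros return 0 on
+ * success and a negative error on a fault.
+ */
+#if 0
+static long example_bump(int __user *uptr)
+{
+	int val;
+
+	if (get_user(val, uptr))
+		return -EFAULT;
+	return put_user(val + 1, uptr);
+}
+#endif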
+
+/*
+ * Copy a null-terminated string from userspace.
+ *
+ * Must return:
+ * -EFAULT for an exception
+ * count if we hit the buffer limit
+ * bytes copied if we hit a null byte
+ * (without the null byte)
+ */
+
+extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
+ long count);
+
+#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than count if too long
+ */
+extern long __must_check strnlen_user(const char __user *src, long count);
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+extern unsigned long __must_check __copy_user_zeroing(void *to,
+ const void __user *from,
+ unsigned long n);
+
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_user_zeroing(to, from, n);
+ return n;
+}
+
+#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user_inatomic __copy_from_user
+
+extern unsigned long __must_check __copy_user(void __user *to,
+ const void *from,
+ unsigned long n);
+
+static inline unsigned long copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_user(to, from, n);
+ return n;
+}
+
+#define __copy_to_user(to, from, n) __copy_user(to, from, n)
+#define __copy_to_user_inatomic __copy_to_user
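+
+/*
+ * Illustrative bulk copy (sketch only, not built; the struct and names are
+ * hypothetical). As above, these routines return the number of bytes that
+ * could NOT be copied, so zero means success.
+ */
+#if 0
+static int example_fetch(struct example_args *dst,
+			 const struct example_args __user *src)
+{
+	if (copy_from_user(dst, src, sizeof(*dst)))
+		return -EFAULT;
+	return 0;
+}
+#endif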
+
+/*
+ * Zero Userspace
+ */
+
+extern unsigned long __must_check __do_clear_user(void __user *to,
+ unsigned long n);
+
+static inline unsigned long clear_user(void __user *to, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __do_clear_user(to, n);
+ return n;
+}
+
+#define __clear_user(to, n) __do_clear_user(to, n)
+
+#endif /* _METAG_UACCESS_H */
diff --git a/arch/metag/include/asm/unistd.h b/arch/metag/include/asm/unistd.h
new file mode 100644
index 00000000000..32955a18fb3
--- /dev/null
+++ b/arch/metag/include/asm/unistd.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/metag/include/asm/user_gateway.h b/arch/metag/include/asm/user_gateway.h
new file mode 100644
index 00000000000..e404c09e3b7
--- /dev/null
+++ b/arch/metag/include/asm/user_gateway.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies
+ */
+
+#ifndef __ASM_METAG_USER_GATEWAY_H
+#define __ASM_METAG_USER_GATEWAY_H
+
+#include <asm/page.h>
+
+/* Page of kernel code accessible to userspace. */
+#define USER_GATEWAY_PAGE 0x6ffff000
+/* Offset of TLS pointer array in gateway page. */
+#define USER_GATEWAY_TLS 0x100
+
+#ifndef __ASSEMBLY__
+
+extern char __user_gateway_start;
+extern char __user_gateway_end;
+
+/* Kernel mapping of the gateway page. */
+extern void *gateway_page;
+
+static inline void set_gateway_tls(void __user *tls_ptr)
+{
+ void **gateway_tls = (void **)(gateway_page + USER_GATEWAY_TLS +
+ hard_processor_id() * sizeof(void *));
+
+ *gateway_tls = (__force void *)tls_ptr;
+#ifdef CONFIG_METAG_META12
+ /* Avoid cache aliases on virtually tagged cache. */
+ __builtin_dcache_flush((void *)USER_GATEWAY_PAGE + USER_GATEWAY_TLS +
+ hard_processor_id() * sizeof(void *));
+#endif
+}
+
+extern int __kuser_get_tls(void);
+extern char *__kuser_get_tls_end[];
+
+extern int __kuser_cmpxchg(int, int, unsigned long *);
+extern char *__kuser_cmpxchg_end[];
+
+#endif
+
+#endif
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
new file mode 100644
index 00000000000..876c71f866d
--- /dev/null
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -0,0 +1,13 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+header-y += byteorder.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += swab.h
+header-y += unistd.h
+
+generic-y += mman.h
+generic-y += setup.h
diff --git a/arch/metag/include/uapi/asm/byteorder.h b/arch/metag/include/uapi/asm/byteorder.h
new file mode 100644
index 00000000000..9558416d578
--- /dev/null
+++ b/arch/metag/include/uapi/asm/byteorder.h
@@ -0,0 +1 @@
+#include <linux/byteorder/little_endian.h>
diff --git a/arch/metag/include/uapi/asm/ptrace.h b/arch/metag/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..45d97809d33
--- /dev/null
+++ b/arch/metag/include/uapi/asm/ptrace.h
@@ -0,0 +1,113 @@
+#ifndef _UAPI_METAG_PTRACE_H
+#define _UAPI_METAG_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * These are the layouts of the regsets returned by the GETREGSET ptrace call
+ */
+
+/* user_gp_regs::status */
+
+/* CBMarker bit (indicates catch state / catch replay) */
+#define USER_GP_REGS_STATUS_CATCH_BIT (1 << 22)
+#define USER_GP_REGS_STATUS_CATCH_S 22
+/* LSM_STEP field (load/store multiple step) */
+#define USER_GP_REGS_STATUS_LSM_STEP_BITS (0x7 << 8)
+#define USER_GP_REGS_STATUS_LSM_STEP_S 8
+/* SCC bit (indicates split 16x16 condition flags) */
+#define USER_GP_REGS_STATUS_SCC_BIT (1 << 4)
+#define USER_GP_REGS_STATUS_SCC_S 4
+
+/* normal condition flags */
+/* CF_Z bit (Zero flag) */
+#define USER_GP_REGS_STATUS_CF_Z_BIT (1 << 3)
+#define USER_GP_REGS_STATUS_CF_Z_S 3
+/* CF_N bit (Negative flag) */
+#define USER_GP_REGS_STATUS_CF_N_BIT (1 << 2)
+#define USER_GP_REGS_STATUS_CF_N_S 2
+/* CF_V bit (oVerflow flag) */
+#define USER_GP_REGS_STATUS_CF_V_BIT (1 << 1)
+#define USER_GP_REGS_STATUS_CF_V_S 1
+/* CF_C bit (Carry flag) */
+#define USER_GP_REGS_STATUS_CF_C_BIT (1 << 0)
+#define USER_GP_REGS_STATUS_CF_C_S 0
+
+/* split 16x16 condition flags */
+/* SCF_LZ bit (Low Zero flag) */
+#define USER_GP_REGS_STATUS_SCF_LZ_BIT (1 << 3)
+#define USER_GP_REGS_STATUS_SCF_LZ_S 3
+/* SCF_HZ bit (High Zero flag) */
+#define USER_GP_REGS_STATUS_SCF_HZ_BIT (1 << 2)
+#define USER_GP_REGS_STATUS_SCF_HZ_S 2
+/* SCF_HC bit (High Carry flag) */
+#define USER_GP_REGS_STATUS_SCF_HC_BIT (1 << 1)
+#define USER_GP_REGS_STATUS_SCF_HC_S 1
+/* SCF_LC bit (Low Carry flag) */
+#define USER_GP_REGS_STATUS_SCF_LC_BIT (1 << 0)
+#define USER_GP_REGS_STATUS_SCF_LC_S 0
+
+/**
+ * struct user_gp_regs - User general purpose registers
+ * @dx: GP data unit regs (dx[reg][unit] = D{unit:0-1}.{reg:0-7})
+ * @ax: GP address unit regs (ax[reg][unit] = A{unit:0-1}.{reg:0-3})
+ * @pc: PC register
+ * @status: TXSTATUS register (condition flags, LSM_STEP etc)
+ * @rpt: TXRPT registers (branch repeat counter)
+ * @bpobits: TXBPOBITS register ("branch prediction other" bits)
+ * @mode: TXMODE register
+ * @_pad1: Reserved padding to make the struct size obviously 64-bit aligned
+ *
+ * This is the user-visible general purpose register state structure.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS.
+ *
+ * It is also used in the signal context.
+ */
+struct user_gp_regs {
+ unsigned long dx[8][2];
+ unsigned long ax[4][2];
+ unsigned long pc;
+ unsigned long status;
+ unsigned long rpt;
+ unsigned long bpobits;
+ unsigned long mode;
+ unsigned long _pad1;
+};
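+
+/*
+ * Illustrative userspace fetch of this regset (sketch only, not built;
+ * error handling omitted). Under the layout above, regs.dx[3][1] is D1.3,
+ * and fields such as LSM_STEP can be extracted from regs.status with the
+ * masks above.
+ */
+#if 0
+#include <elf.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+
+static unsigned long example_get_pc(pid_t pid)
+{
+	struct user_gp_regs regs;
+	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
+
+	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
+	return regs.pc;
+}
+#endif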
+
+/**
+ * struct user_cb_regs - User catch buffer registers
+ * @flags: TXCATCH0 register (fault flags)
+ * @addr: TXCATCH1 register (fault address)
+ * @data: TXCATCH2 and TXCATCH3 registers (low and high data word)
+ *
+ * This is the user-visible catch buffer register state structure containing
+ * information about a failed memory access, and allowing the access to be
+ * modified and replayed.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_METAG_CBUF.
+ */
+struct user_cb_regs {
+ unsigned long flags;
+ unsigned long addr;
+ unsigned long long data;
+};
+
+/**
+ * struct user_rp_state - User read pipeline state
+ * @entries: Read pipeline entries
+ * @mask: Mask of valid pipeline entries (RPMask from TXDIVTIME register)
+ *
+ * This is the user-visible read pipeline state structure containing the entries
+ * currently in the read pipeline and the mask of valid entries.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_METAG_RPIPE.
+ */
+struct user_rp_state {
+ unsigned long long entries[6];
+ unsigned long mask;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_METAG_PTRACE_H */
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h
new file mode 100644
index 00000000000..526d23cc305
--- /dev/null
+++ b/arch/metag/include/uapi/asm/resource.h
@@ -0,0 +1,7 @@
+#ifndef _UAPI_METAG_RESOURCE_H
+#define _UAPI_METAG_RESOURCE_H
+
+#define _STK_LIM_MAX (1 << 28)
+#include <asm-generic/resource.h>
+
+#endif /* _UAPI_METAG_RESOURCE_H */
diff --git a/arch/metag/include/uapi/asm/sigcontext.h b/arch/metag/include/uapi/asm/sigcontext.h
new file mode 100644
index 00000000000..ef79a910c1c
--- /dev/null
+++ b/arch/metag/include/uapi/asm/sigcontext.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_METAG_SIGCONTEXT_H
+#define _ASM_METAG_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * In a sigcontext structure we need to store the active state of the
+ * user process so that it does not get trashed when we call the signal
+ * handler. That is not really the same as a user context that we are
+ * going to store on syscall etc.
+ */
+struct sigcontext {
+ struct user_gp_regs regs; /* needs to be first */
+
+ /*
+ * Catch registers describing a memory fault.
+ * If USER_GP_REGS_STATUS_CATCH_BIT is set in regs.status then catch
+ * buffers have been saved and will be replayed on sigreturn.
+ * Clear that bit to discard the catch state instead of replaying it.
+ */
+ struct user_cb_regs cb;
+
+ /*
+ * Read pipeline state. This will get restored on sigreturn.
+ */
+ struct user_rp_state rp;
+
+ unsigned long oldmask;
+};
+
+#endif
diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h
new file mode 100644
index 00000000000..b2e0c8b62ae
--- /dev/null
+++ b/arch/metag/include/uapi/asm/siginfo.h
@@ -0,0 +1,8 @@
+#ifndef _METAG_SIGINFO_H
+#define _METAG_SIGINFO_H
+
+#define __ARCH_SI_TRAPNO
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/arch/metag/include/uapi/asm/swab.h b/arch/metag/include/uapi/asm/swab.h
new file mode 100644
index 00000000000..1076b3a6387
--- /dev/null
+++ b/arch/metag/include/uapi/asm/swab.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_METAG_SWAB_H
+#define __ASM_METAG_SWAB_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm-generic/swab.h>
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ return __builtin_metag_bswaps(x);
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ return __builtin_metag_bswap(x);
+}
+#define __arch_swab32 __arch_swab32
+
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+ return __builtin_metag_bswapll(x);
+}
+#define __arch_swab64 __arch_swab64
+
+#endif /* __ASM_METAG_SWAB_H */
diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..b80b8e899d2
--- /dev/null
+++ b/arch/metag/include/uapi/asm/unistd.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* Use the standard ABI for syscalls. */
+#include <asm-generic/unistd.h>
+
+/* metag-specific syscalls. */
+#define __NR_metag_setglobalbit (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_metag_setglobalbit, sys_metag_setglobalbit)
+#define __NR_metag_set_fpu_flags (__NR_arch_specific_syscall + 2)
+__SYSCALL(__NR_metag_set_fpu_flags, sys_metag_set_fpu_flags)
+#define __NR_metag_set_tls (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_metag_set_tls, sys_metag_set_tls)
+#define __NR_metag_get_tls (__NR_arch_specific_syscall + 4)
+__SYSCALL(__NR_metag_get_tls, sys_metag_get_tls)
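+
+/*
+ * Illustrative userspace invocation (sketch; assumes the C library's
+ * generic syscall(2) wrapper):
+ *
+ *	#include <sys/syscall.h>
+ *	#include <unistd.h>
+ *
+ *	syscall(__NR_metag_set_tls, tls_ptr);
+ *	void *tls = (void *)syscall(__NR_metag_get_tls);
+ */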