aboutsummaryrefslogtreecommitdiff
path: root/arch/m68k/platform/68000/entry.S
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-12-16 17:42:21 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-16 17:42:21 -0800
commitaed606e3bc1f10753254db308d3fd8c053c41328 (patch)
treebd7c3860417e83181ec43c3c7c224858f2652fa0 /arch/m68k/platform/68000/entry.S
parent123df7ae0d0ed90d01ef4cb7316fa0b7ef0ec8a8 (diff)
parent280ef31a00073b4092bb47814fcb0a1f1f27b2bd (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu
Pull m68knommu updates from Greg Ungerer: "This one has a major restructuring of the non-mmu 68000 support. It merges all the related SoC types that use the original 68000 cpu core internally so they can share the same core code. It also allows for supporting the original stand alone 68000 cpu in its own right. There is also a generalization of the clock support of the ColdFire parts, some merging of common ColdFire code, and a couple of bug fixes as well." * 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: m68knommu: modify clock code so it can be used by all ColdFire CPU types m68knommu: add clock definitions for 54xx ColdFire CPU types m68knommu: add clock definitions for 5407 ColdFire CPU types m68knommu: add clock definitions for 5307 ColdFire CPU types m68knommu: add clock definitions for 528x ColdFire CPU types m68knommu: add clock definitions for 527x ColdFire CPU types m68knommu: add clock definitions for 5272 ColdFire CPU types m68knommu: add clock definitions for 525x ColdFire CPU types m68knommu: add clock definitions for 5249 ColdFire CPU types m68knommu: add clock definitions for 523x ColdFire CPU types m68knommu: add clock definitions for 5206 ColdFire CPU types m68knommu: add clock creation support macro for other ColdFire CPUs m68k: fix unused variable warning in mempcy.c m68knommu: make non-MMU page_to_virt() return a void * m68knommu: merge ColdFire 5249 and 525x definitions m68knommu: disable MC68000 cpu target when MMU is selected m68knommu: allow for configuration of true 68000 based systems m68knommu: platform code merge for 68000 core cpus
Diffstat (limited to 'arch/m68k/platform/68000/entry.S')
-rw-r--r--arch/m68k/platform/68000/entry.S261
1 file changed, 261 insertions, 0 deletions
diff --git a/arch/m68k/platform/68000/entry.S b/arch/m68k/platform/68000/entry.S
new file mode 100644
index 00000000000..7f91c2fde50
--- /dev/null
+++ b/arch/m68k/platform/68000/entry.S
@@ -0,0 +1,261 @@
+/*
+ * linux/arch/m68k/platform/68000/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ */
+
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/traps.h>
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+
+.text
+
+.globl system_call
+.globl resume
+.globl ret_from_exception
+.globl ret_from_signal
+.globl sys_call_table
+.globl ret_from_interrupt
+.globl bad_interrupt
+.globl inthandler1
+.globl inthandler2
+.globl inthandler3
+.globl inthandler4
+.globl inthandler5
+.globl inthandler6
+.globl inthandler7
+
+badsys:
+ movel #-ENOSYS,%sp@(PT_OFF_D0)
+ jra ret_from_exception
+
+/*
+ * Traced system call path: notify the tracer on entry, re-dispatch the
+ * (possibly tracer-modified) syscall number, then notify the tracer on
+ * exit.  Falls through to ret_from_signal to unwind and return.
+ */
+do_trace:
+ movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace */
+ subql #4,%sp /* dummy return address for switch-stack layout */
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace_enter
+ RESTORE_SWITCH_STACK
+ addql #4,%sp /* pop the dummy return address */
+ movel %sp@(PT_OFF_ORIG_D0),%d1 /* reload syscall nr (tracer may have changed it) */
+ movel #-ENOSYS,%d0 /* default result if nr is out of range */
+ cmpl #NR_syscalls,%d1
+ jcc 1f /* unsigned nr >= NR_syscalls: skip dispatch */
+ lsl #2,%d1 /* scale nr to a 4-byte table offset */
+ lea sys_call_table, %a0
+ /* NOTE(review): the untraced path in system_call loads the table
+ * entry first (movel %a0@(%d0),%a0 then jbsr %a0@), while this calls
+ * the indexed address directly - confirm the addressing mode here
+ * is the intended indirection. */
+ jbsr %a0@(%d1)
+
+1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
+ subql #4,%sp /* dummy return address */
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace_leave
+
+/*
+ * Return here after signal delivery / syscall tracing: unwind the
+ * switch stack and dummy return address, then take the common exit.
+ */
+ret_from_signal:
+ RESTORE_SWITCH_STACK
+ addql #4,%sp /* pop the dummy return address */
+ jra ret_from_exception
+
+/*
+ * System call entry point.  Builds the pt_regs frame, records the frame
+ * top for ptrace via set_esp0, then dispatches through sys_call_table.
+ * Falls through to ret_from_exception with the result in regs->d0.
+ */
+ENTRY(system_call)
+ SAVE_ALL_SYS
+
+ /* save top of frame so ptrace can find the registers */
+ pea %sp@
+ jbsr set_esp0
+ addql #4,%sp
+
+ movel %sp@(PT_OFF_ORIG_D0),%d0 /* d0 = syscall number */
+
+ movel %sp,%d1 /* get thread_info pointer */
+ andl #-THREAD_SIZE,%d1 /* thread_info lives at the stack base */
+ movel %d1,%a2
+ /* test the TIF_SYSCALL_TRACE bit inside the big-endian flags word */
+ btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
+ jne do_trace
+ cmpl #NR_syscalls,%d0
+ jcc badsys /* unsigned nr >= NR_syscalls: bad syscall */
+ lsl #2,%d0 /* scale nr to a 4-byte table offset */
+ lea sys_call_table,%a0
+ movel %a0@(%d0), %a0 /* a0 = sys_call_table[nr] */
+ jbsr %a0@ /* call the handler */
+ movel %d0,%sp@(PT_OFF_D0) /* save the return value */
+
+/*
+ * Common exception/syscall exit.  A return to the kernel just restores
+ * the frame; a return to user space re-enables interrupts and loops
+ * until no work flags remain set in thread_info->flags.
+ */
+ret_from_exception:
+ btst #5,%sp@(PT_OFF_SR) /* S bit of saved SR: clear = user frame */
+ jeq Luser_return /* returning to user: check resched/signals */
+
+Lkernel_return:
+ RESTORE_ALL
+
+Luser_return:
+ /* only allow interrupts when we are really the last one on the */
+ /* kernel stack, otherwise stack overflow can occur during */
+ /* heavy interrupt load */
+ andw #ALLOWINT,%sr
+
+ movel %sp,%d1 /* get thread_info pointer */
+ andl #-THREAD_SIZE,%d1
+ movel %d1,%a2
+1:
+ move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
+ jne Lwork_to_do /* any work bit set? */
+ RESTORE_ALL
+
+/*
+ * Pending work on the way back to user space: reschedule takes
+ * priority, otherwise deliver signals / notify-resume and then
+ * re-check the flags (jra 1b into the Luser_return loop).
+ */
+Lwork_to_do:
+ movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
+ btst #TIF_NEED_RESCHED,%d1
+ jne reschedule
+
+Lsignal_return:
+ subql #4,%sp /* dummy return address */
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE) /* argument: pt_regs pointer */
+ bsrw do_notify_resume
+ addql #4,%sp /* pop the pt_regs argument */
+ RESTORE_SWITCH_STACK
+ addql #4,%sp /* pop the dummy return address */
+ jra 1b /* re-check the work flags */
+
+/*
+ * Fixed-vector interrupt handler, vector 65: push the pt_regs pointer
+ * and the vector number and hand off to process_int().
+ * NOTE(review): the masked vector computed in %d0 is not used in the
+ * fixed-vector handlers - the vector number pushed is hard-wired.
+ */
+inthandler1:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #65,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/* Fixed-vector interrupt handler, vector 66 (see inthandler1). */
+inthandler2:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits; value unused here */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #66,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/* Fixed-vector interrupt handler, vector 67 (see inthandler1). */
+inthandler3:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits; value unused here */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #67,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/* Fixed-vector interrupt handler, vector 68 (see inthandler1). */
+inthandler4:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits; value unused here */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #68,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/* Fixed-vector interrupt handler, vector 69 (see inthandler1). */
+inthandler5:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits; value unused here */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #69,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/* Fixed-vector interrupt handler, vector 70 (see inthandler1). */
+inthandler6:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits; value unused here */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #70,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/* Fixed-vector interrupt handler, vector 71 (see inthandler1). */
+inthandler7:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* mask off the frame format bits; value unused here */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel #71,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/*
+ * Generic interrupt handler: extracts the vector number from the
+ * exception frame's format/vector word and passes it to process_int().
+ */
+inthandler:
+ SAVE_ALL_INT
+ movew %sp@(PT_OFF_FORMATVEC), %d0
+ and #0x3ff, %d0 /* d0 = vector number (mask off format bits) */
+
+ movel %sp,%sp@- /* push pt_regs pointer */
+ movel %d0,%sp@- /* put vector # on stack*/
+ jbsr process_int /* process the IRQ*/
+3: addql #8,%sp /* pop parameters off stack*/
+ bra ret_from_interrupt
+
+/*
+ * Common interrupt exit.  If the interrupted context itself had a
+ * non-zero interrupt priority level (nested interrupt), just restore
+ * the frame; otherwise leave via ret_from_exception so that pending
+ * work (resched/signals) gets handled.
+ * NOTE(review): the entry jeq tests condition codes left over from
+ * the calling handler's tail sequence - confirm this is intentional.
+ */
+ret_from_interrupt:
+ jeq 1f
+2:
+ RESTORE_ALL
+1:
+ moveb %sp@(PT_OFF_SR), %d0 /* high byte of saved SR (holds IPL) */
+ and #7, %d0 /* d0 = interrupted context's IPL */
+ jhi 2b /* nested interrupt: plain restore */
+
+ /* check if we need to do software interrupts */
+ jeq ret_from_exception
+
+ pea ret_from_exception /* return path for do_softirq */
+ jra do_softirq
+
+
+/*
+ * Handler for uninitialized and spurious interrupts: just count the
+ * event in irq_err_count and return from the exception.
+ */
+ENTRY(bad_interrupt)
+ addql #1,irq_err_count
+ rte
+
+/*
+ * Task switch: save the outgoing task's SR, kernel stack pointer and
+ * user stack pointer into its thread struct, then load the same state
+ * for the incoming task and return on its kernel stack.
+ *
+ * Beware - when entering resume, prev (the current task) is
+ * in a0, next (the new task) is in a1, so don't change these
+ * registers until their contents are no longer needed.
+ */
+ENTRY(resume)
+ movel %a0,%d1 /* save prev thread in d1 (NOTE(review): not used below) */
+ movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
+ SAVE_SWITCH_STACK
+ movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
+ movel %usp,%a3 /* save usp */
+ movel %a3,%a0@(TASK_THREAD+THREAD_USP)
+
+ movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
+ movel %a3,%usp
+ movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
+ RESTORE_SWITCH_STACK
+ movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
+ rts
+