author     Jean-Paul Etienne <fractalclone@gmail.com>    2017-01-11 00:24:30 +0100
committer  Andrew Boie <andrew.p.boie@intel.com>         2017-01-13 19:52:23 +0000
commit     cd83e85edc5d741f6b52c6b5995303c30bda443a (patch)
tree       e321b5f31613f123017f945c8ae3f360ad5f865e /arch
parent     ba776a1fd15c24dc1ba014c3abe19e07346e663f (diff)
arch: added support for the riscv32 architecture
RISC-V is an open-source instruction set architecture. This commit adds
support for the 32-bit version of RISC-V to Zephyr.

1) Exceptions/interrupts/faults are handled at the architecture level via
   the __irq_wrapper handler. Context saving/restoring of registers can be
   handled at both the architecture and SOC levels. If SOC-specific
   registers need to be saved, the SOC level must provide
   __soc_save_context and __soc_restore_context functions, which are
   accounted for by the architecture level when the corresponding config
   variable RISCV_SOC_CONTEXT_SAVE is set.

2) As the RISC-V architecture does not provide a clear ISA specification
   about interrupt handling, each RISC-V SOC handles it in its own way.
   Hence, at the architecture level, the __irq_wrapper handler expects the
   following functions to be provided by the SOC level:
   __soc_is_irq: check whether the exception is the result of an interrupt
   or not.
   __soc_handle_irq: handle the pending IRQ at the SOC level (e.g. clear
   the pending IRQ in the SOC-specific IRQ register).

3) Thread/task scheduling, as well as IRQ offloading, are handled via the
   RISC-V system call ("ecall"), which is also handled via the
   __irq_wrapper handler. The _Swap asm function just calls "ecall" to
   generate an exception.

4) As there is no conventional way of handling CPU power save in RISC-V,
   the default nano_cpu_idle and nano_cpu_atomic_idle functions just unlock
   interrupts and return to the caller, without issuing any CPU power
   saving instruction. Nonetheless, to allow the SOC level to implement
   proper CPU power save, nano_cpu_idle and nano_cpu_atomic_idle are
   defined as __weak at the architecture level.

Change-Id: I980a161d0009f3f404ad22b226a6229fbb492389
Signed-off-by: Jean-Paul Etienne <fractalclone@gmail.com>
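For illustration of the SOC-level contract described in point 2 above, here is a minimal C sketch of the two hooks __irq_wrapper expects. This is not part of the patch: real implementations live at the SOC level (often in assembly), and the mcause convention and the pending-IRQ register address used below are assumptions for a hypothetical SOC.

/* Hypothetical SOC-level hooks, sketched in C for clarity only. */
#include <stdint.h>

/* __irq_wrapper expects the result in a0: 1 for interrupt, 0 for exception. */
int __soc_is_irq(void)
{
	uint32_t mcause;

	__asm__ volatile("csrr %0, mcause" : "=r" (mcause));

	/* Assumes the SOC follows the standard convention where the MSB of
	 * mcause flags an interrupt.
	 */
	return (mcause & 0x80000000) != 0;
}

/* __irq_wrapper passes the pending IRQ number in a0. */
void __soc_handle_irq(uint32_t irq)
{
	/* Clear the pending bit in an assumed SOC-specific interrupt
	 * controller register; the address and layout are placeholders.
	 */
	volatile uint32_t *irq_pending = (volatile uint32_t *)0x40000000;

	*irq_pending = (uint32_t)1 << irq;
}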
Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig                                    |   3
-rw-r--r--  arch/riscv32/Kbuild                             |   6
-rw-r--r--  arch/riscv32/Kconfig                            |  80
-rw-r--r--  arch/riscv32/Makefile                           |  16
-rw-r--r--  arch/riscv32/core/Makefile                      |   5
-rw-r--r--  arch/riscv32/core/cpu_idle.c                    |  69
-rw-r--r--  arch/riscv32/core/fatal.c                       | 211
-rw-r--r--  arch/riscv32/core/irq_manage.c                  |  34
-rw-r--r--  arch/riscv32/core/irq_offload.c                 |  54
-rw-r--r--  arch/riscv32/core/isr.S                         | 415
-rw-r--r--  arch/riscv32/core/offsets/offsets.c             |  92
-rw-r--r--  arch/riscv32/core/prep_c.c                      |  46
-rw-r--r--  arch/riscv32/core/reset.S                       |  70
-rw-r--r--  arch/riscv32/core/sw_isr_table.S                |  61
-rw-r--r--  arch/riscv32/core/swap.S                        |  85
-rw-r--r--  arch/riscv32/core/thread.c                      | 113
-rw-r--r--  arch/riscv32/defconfig                          |   0
-rw-r--r--  arch/riscv32/include/kernel_arch_data.h         |  90
-rw-r--r--  arch/riscv32/include/kernel_arch_func.h         |  72
-rw-r--r--  arch/riscv32/include/kernel_event_logger_arch.h |  54
-rw-r--r--  arch/riscv32/include/offsets_short_arch.h       |  74
21 files changed, 1650 insertions(+), 0 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 1fd294fc0..0af160817 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -37,6 +37,9 @@ config NIOS2
bool "Nios II Gen 2 architecture"
select ATOMIC_OPERATIONS_C
+config RISCV32
+ bool "RISCV32 architecture"
+
endchoice
#
diff --git a/arch/riscv32/Kbuild b/arch/riscv32/Kbuild
new file mode 100644
index 000000000..157372617
--- /dev/null
+++ b/arch/riscv32/Kbuild
@@ -0,0 +1,6 @@
+subdir-ccflags-y +=-I$(srctree)/include/drivers
+subdir-ccflags-y +=-I$(srctree)/drivers
+subdir-asflags-y += $(subdir-ccflags-y)
+
+obj-y += soc/$(SOC_PATH)/
+obj-y += core/
diff --git a/arch/riscv32/Kconfig b/arch/riscv32/Kconfig
new file mode 100644
index 000000000..48fdf13ac
--- /dev/null
+++ b/arch/riscv32/Kconfig
@@ -0,0 +1,80 @@
+#
+# Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+choice
+ prompt "RISCV32 configuration selection"
+ depends on RISCV32
+ source "arch/riscv32/soc/*/Kconfig.soc"
+endchoice
+
+menu "RISCV32 Options"
+ depends on RISCV32
+
+config ARCH
+ string
+ default "riscv32"
+
+config ARCH_DEFCONFIG
+ string
+ default "arch/riscv32/defconfig"
+
+menu "RISCV32 Processor Options"
+
+config INCLUDE_RESET_VECTOR
+ bool "Include Reset vector"
+ default n
+ help
+ Include the reset vector stub that initializes the CPU and then jumps to __start
+
+config IRQ_OFFLOAD
+ bool "Enable IRQ offload"
+ default n
+ help
+ Enable irq_offload() API which allows functions to be synchronously
+ run in interrupt context. Mainly useful for test cases.
+
+config RISCV_SOC_CONTEXT_SAVE
+ bool "Enable SOC-based context saving in IRQ handler"
+ default n
+ help
+ Enable SOC-based context saving, for SOCs which require saving of
+ extra registers when entering an interrupt/exception
+
+config RISCV_SOC_INTERRUPT_INIT
+ bool "Enable SOC-based interrupt initialization"
+ default n
+ help
+ Enable SOC-based interrupt initialization
+ (call soc_interrupt_init, within _IntLibInit when enabled)
+
+config RISCV_GENERIC_TOOLCHAIN
+ bool "Compile using generic riscv32 toolchain"
+ default y
+ help
+ Compile using the generic riscv32 toolchain.
+ This allows SOCs that have a custom extended riscv ISA to still
+ compile with the generic riscv32 toolchain.
+
+config RISCV_HAS_CPU_IDLE
+ bool "Does SOC has CPU IDLE instruction"
+ default n
+ help
+ Does SOC has CPU IDLE instruction
+endmenu
+
+source "arch/riscv32/soc/*/Kconfig"
+
+endmenu
diff --git a/arch/riscv32/Makefile b/arch/riscv32/Makefile
new file mode 100644
index 000000000..90cfb57f0
--- /dev/null
+++ b/arch/riscv32/Makefile
@@ -0,0 +1,16 @@
+-include $(srctree)/arch/$(ARCH)/soc/$(SOC_PATH)/Makefile
+
+# Put functions and data in their own binary sections so that ld can
+# garbage collect them
+arch_cflags += $(call cc-option,-ffunction-sections) \
+ $(call cc-option,-fdata-sections)
+
+KBUILD_AFLAGS += $(arch_cflags)
+KBUILD_CFLAGS += $(arch_cflags)
+KBUILD_CXXFLAGS += $(arch_cflags)
+
+soc-cxxflags ?= $(soc-cflags)
+soc-aflags ?= $(soc-cflags)
+KBUILD_CFLAGS += $(soc-cflags)
+KBUILD_CXXFLAGS += $(soc-cxxflags)
+KBUILD_AFLAGS += $(soc-aflags)
diff --git a/arch/riscv32/core/Makefile b/arch/riscv32/core/Makefile
new file mode 100644
index 000000000..a4367cb86
--- /dev/null
+++ b/arch/riscv32/core/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/kernel/unified/include
+ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
+
+obj-y += isr.o reset.o sw_isr_table.o fatal.o irq_manage.o \
+ prep_c.o cpu_idle.o swap.o thread.o irq_offload.o
diff --git a/arch/riscv32/core/cpu_idle.c b/arch/riscv32/core/cpu_idle.c
new file mode 100644
index 000000000..fe7762aeb
--- /dev/null
+++ b/arch/riscv32/core/cpu_idle.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <irq.h>
+
+/*
+ * In RISC-V there is no conventional way to handle CPU power save.
+ * Each RISC-V SOC handles it in its own way.
+ * Hence, by default, the k_cpu_idle and k_cpu_atomic_idle functions just
+ * unlock interrupts and return to the caller, without issuing any CPU power
+ * saving instruction.
+ *
+ * Nonetheless, define the default k_cpu_idle and k_cpu_atomic_idle
+ * functions as weak functions, so that they can be replaced at the SOC level.
+ */
+
+/**
+ *
+ * @brief Power save idle routine
+ *
+ * This function will be called by the kernel idle loop or possibly within
+ * an implementation of _sys_power_save_idle in the kernel when the
+ * '_sys_power_save_flag' variable is non-zero.
+ *
+ * @return N/A
+ */
+void __weak k_cpu_idle(void)
+{
+ irq_unlock(SOC_MSTATUS_IEN);
+}
+
+/**
+ *
+ * @brief Atomically re-enable interrupts and enter low power mode
+ *
+ * This function is utilized by the nanokernel object "wait" APIs for tasks,
+ * e.g. nano_task_lifo_get(), nano_task_sem_take(),
+ * nano_task_stack_pop(), and nano_task_fifo_get().
+ *
+ * INTERNAL
+ * The requirements for k_cpu_atomic_idle() are as follows:
+ * 1) The enablement of interrupts and entering a low-power mode needs to be
+ * atomic, i.e. there should be no period of time where interrupts are
+ * enabled before the processor enters a low-power mode. See the comments
+ * in k_lifo_get() for an example of the race condition that occurs
+ * if this requirement is not met.
+ *
+ * 2) After waking up from the low-power mode, the interrupt lockout state
+ * must be restored as indicated in the 'imask' input parameter.
+ *
+ * @return N/A
+ */
+void __weak k_cpu_atomic_idle(unsigned int key)
+{
+ irq_unlock(key);
+}
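
Because k_cpu_idle and k_cpu_atomic_idle are weak, a SOC can supply real power-save implementations. A minimal sketch, assuming the SOC implements the standard RISC-V WFI instruction (not part of the patch):

/* Hypothetical SOC-level replacement of the weak defaults, assuming the
 * SOC implements the standard RISC-V WFI instruction.
 */
#include <irq.h>

void k_cpu_idle(void)
{
	/* Re-enable interrupts, then stall until the next interrupt. */
	irq_unlock(SOC_MSTATUS_IEN);
	__asm__ volatile("wfi");
}

void k_cpu_atomic_idle(unsigned int key)
{
	/* WFI wakes on a pending interrupt even with interrupts locked,
	 * so the wait and the subsequent unlock stay race-free.
	 */
	__asm__ volatile("wfi");
	irq_unlock(key);
}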
diff --git a/arch/riscv32/core/fatal.c b/arch/riscv32/core/fatal.c
new file mode 100644
index 000000000..e4108ea14
--- /dev/null
+++ b/arch/riscv32/core/fatal.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <nanokernel.h>
+#include <arch/cpu.h>
+#include <kernel_structs.h>
+#include <inttypes.h>
+
+#ifdef CONFIG_PRINTK
+#include <misc/printk.h>
+#define PRINTK(...) printk(__VA_ARGS__)
+#else
+#define PRINTK(...)
+#endif
+
+const NANO_ESF _default_esf = {
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+#if defined(CONFIG_SOC_RISCV32_PULPINO)
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+ 0xdeadbaad,
+#endif
+};
+
+
+/**
+ *
+ * @brief Nanokernel fatal error handler
+ *
+ * This routine is called when a fatal error condition is detected by either
+ * hardware or software.
+ *
+ * The caller is expected to always provide a usable ESF. In the event that the
+ * fatal error does not have a hardware generated ESF, the caller should either
+ * create its own or call _Fault instead.
+ *
+ * @param reason the reason that the handler was called
+ * @param esf pointer to the exception stack frame
+ *
+ * @return This function does not return.
+ */
+FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+ const NANO_ESF *esf)
+{
+ switch (reason) {
+ case _NANO_ERR_CPU_EXCEPTION:
+ case _NANO_ERR_SPURIOUS_INT:
+ break;
+
+ case _NANO_ERR_INVALID_TASK_EXIT:
+ PRINTK("***** Invalid Exit Software Error! *****\n");
+ break;
+
+#if defined(CONFIG_STACK_CANARIES)
+ case _NANO_ERR_STACK_CHK_FAIL:
+ PRINTK("***** Stack Check Fail! *****\n");
+ break;
+#endif /* CONFIG_STACK_CANARIES */
+
+ case _NANO_ERR_ALLOCATION_FAIL:
+ PRINTK("**** Kernel Allocation Failure! ****\n");
+ break;
+
+ default:
+ PRINTK("**** Unknown Fatal Error %d! ****\n", reason);
+ break;
+ }
+
+ PRINTK("Current thread ID = %p\n"
+ "Faulting instruction address = 0x%" PRIx32 "\n"
+ " ra: 0x%" PRIx32 " gp: 0x%" PRIx32
+ " tp: 0x%" PRIx32 " t0: 0x%" PRIx32 "\n"
+ " t1: 0x%" PRIx32 " t2: 0x%" PRIx32
+ " t3: 0x%" PRIx32 " t4: 0x%" PRIx32 "\n"
+ " t5: 0x%" PRIx32 " t6: 0x%" PRIx32
+ " a0: 0x%" PRIx32 " a1: 0x%" PRIx32 "\n"
+ " a2: 0x%" PRIx32 " a3: 0x%" PRIx32
+ " a4: 0x%" PRIx32 " a5: 0x%" PRIx32 "\n"
+ " a6: 0x%" PRIx32 " a7: 0x%" PRIx32 "\n",
+ k_current_get(),
+ (esf->mepc == 0xdeadbaad) ? 0xdeadbaad : esf->mepc - 4,
+ esf->ra, esf->gp, esf->tp, esf->t0,
+ esf->t1, esf->t2, esf->t3, esf->t4,
+ esf->t5, esf->t6, esf->a0, esf->a1,
+ esf->a2, esf->a3, esf->a4, esf->a5,
+ esf->a6, esf->a7);
+
+ _SysFatalErrorHandler(reason, esf);
+ /* spin forever */
+ for (;;)
+ __asm__ volatile("nop");
+}
+
+
+/**
+ *
+ * @brief Fatal error handler
+ *
+ * This routine implements the corrective action to be taken when the system
+ * detects a fatal error.
+ *
+ * This sample implementation attempts to abort the current thread and allow
+ * the system to continue executing, which may permit the system to continue
+ * functioning with degraded capabilities.
+ *
+ * System designers may wish to enhance or substitute this sample
+ * implementation to take other actions, such as logging error (or debug)
+ * information to a persistent repository and/or rebooting the system.
+ *
+ * @param reason fatal error reason
+ * @param esf pointer to exception stack frame
+ *
+ * @return N/A
+ */
+void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
+{
+ ARG_UNUSED(reason);
+ ARG_UNUSED(esf);
+
+#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
+ if (k_is_in_isr() || _is_thread_essential()) {
+ PRINTK("Fatal fault in %s! Spinning...\n",
+ k_is_in_isr() ? "ISR" : "essential thread");
+ /* spin forever */
+ for (;;)
+ __asm__ volatile("nop");
+ }
+ PRINTK("Fatal fault in thread! Aborting.\n");
+ k_thread_abort(_current);
+
+#else
+ for (;;) {
+ k_cpu_idle();
+ }
+
+#endif
+
+ CODE_UNREACHABLE;
+}
+
+
+#ifdef CONFIG_PRINTK
+static char *cause_str(uint32_t cause)
+{
+ switch (cause) {
+ case 0:
+ return "Instruction address misaligned";
+ case 1:
+ return "Instruction Access fault";
+ case 2:
+ return "Illegal instruction";
+ case 3:
+ return "Breakpoint";
+ case 4:
+ return "Load address misaligned";
+ case 5:
+ return "Load access fault";
+ default:
+ return "unknown";
+ }
+}
+#endif
+
+
+FUNC_NORETURN void _Fault(const NANO_ESF *esf)
+{
+ uint32_t mcause;
+
+ __asm__ volatile("csrr %0, mcause" : "=r" (mcause));
+
+ mcause &= SOC_MCAUSE_IRQ_MASK;
+
+ PRINTK("Exception cause %s (%d)\n", cause_str(mcause), (int)mcause);
+
+ _NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
+}
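
As the comment above notes, system designers may substitute this sample policy. A hedged sketch of one possible substitute, assuming it replaces the body of _SysFatalErrorHandler in this file: log the reason and spin unconditionally so the failure can be examined with a debugger.

/* Sketch of a substitute policy: always log the reason and halt. */
void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
{
	ARG_UNUSED(esf);

	PRINTK("Fatal error %u, halting system\n", reason);

	/* spin forever so a debugger can examine the failure */
	for (;;) {
		__asm__ volatile("nop");
	}
}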
diff --git a/arch/riscv32/core/irq_manage.c b/arch/riscv32/core/irq_manage.c
new file mode 100644
index 000000000..007f76026
--- /dev/null
+++ b/arch/riscv32/core/irq_manage.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <toolchain.h>
+#include <kernel_structs.h>
+#include <misc/printk.h>
+
+void _irq_spurious(void *unused)
+{
+ uint32_t mcause;
+
+ ARG_UNUSED(unused);
+
+ __asm__ volatile("csrr %0, mcause" : "=r" (mcause));
+
+ mcause &= SOC_MCAUSE_IRQ_MASK;
+
+ printk("Spurious interrupt detected! IRQ: %d\n", (int)mcause);
+
+ _NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
+}
diff --git a/arch/riscv32/core/irq_offload.c b/arch/riscv32/core/irq_offload.c
new file mode 100644
index 000000000..17be4c505
--- /dev/null
+++ b/arch/riscv32/core/irq_offload.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <irq.h>
+#include <irq_offload.h>
+#include <misc/printk.h>
+
+volatile irq_offload_routine_t _offload_routine;
+static volatile void *offload_param;
+
+/*
+ * Called by _enter_irq
+ *
+ * Just in case the offload routine itself generates an unhandled
+ * exception, clear the offload_routine global before executing.
+ */
+void _irq_do_offload(void)
+{
+ irq_offload_routine_t tmp;
+
+ if (!_offload_routine)
+ return;
+
+ tmp = _offload_routine;
+ _offload_routine = NULL;
+
+ tmp((void *)offload_param);
+}
+
+void irq_offload(irq_offload_routine_t routine, void *parameter)
+{
+ int key;
+
+ key = irq_lock();
+ _offload_routine = routine;
+ offload_param = parameter;
+
+ __asm__ volatile ("ecall");
+
+ irq_unlock(key);
+}
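
For context, a typical use of this API from a test running in thread context might look as follows; the handler then runs in interrupt context via the ecall path described in the commit message (names are illustrative, not part of the patch):

/* Illustrative usage of irq_offload(): run a handler synchronously in
 * interrupt context from a thread.
 */
#include <irq_offload.h>

static void my_offload_handler(void *param)
{
	/* Executes with _is_in_isr() true; param is the pointer below. */
	int *counter = param;

	(*counter)++;
}

void run_offload_example(void)
{
	static int counter;

	irq_offload(my_offload_handler, &counter);
	/* By the time irq_offload() returns, the handler has already run. */
}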
diff --git a/arch/riscv32/core/isr.S b/arch/riscv32/core/isr.S
new file mode 100644
index 000000000..ff5a327a7
--- /dev/null
+++ b/arch/riscv32/core/isr.S
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _ASMLANGUAGE
+
+#include <toolchain.h>
+#include <sections.h>
+#include <kernel_structs.h>
+#include <offsets_short.h>
+
+/* imports */
+GDATA(_sw_isr_table)
+GTEXT(__soc_save_context)
+GTEXT(__soc_restore_context)
+GTEXT(__soc_is_irq)
+GTEXT(__soc_handle_irq)
+GTEXT(_Fault)
+
+GTEXT(_k_neg_eagain)
+GTEXT(_is_next_thread_current)
+GTEXT(_get_next_ready_thread)
+
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
+GTEXT(_sys_k_event_logger_context_switch)
+#endif
+
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
+GTEXT(_sys_k_event_logger_exit_sleep)
+#endif
+
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
+GTEXT(_sys_k_event_logger_interrupt)
+#endif
+
+#ifdef CONFIG_IRQ_OFFLOAD
+GTEXT(_offload_routine)
+#endif
+
+/* exports */
+GTEXT(__irq_wrapper)
+
+/* use ABI name of registers for the sake of simplicity */
+
+/*
+ * ISR is handled at both ARCH and SOC levels.
+ * At the ARCH level, ISR handles basic context saving/restore of registers
+ * onto/from the thread stack and calls corresponding IRQ function registered
+ * at driver level.
+
+ * At SOC level, ISR handles saving/restoring of SOC-specific registers
+ * onto/from the thread stack (handled via __soc_save_context and
+ * __soc_restore_context functions). SOC level save/restore context
+ * is accounted for only if CONFIG_RISCV_SOC_CONTEXT_SAVE variable is set
+ *
+ * Moreover, given that RISC-V architecture does not provide a clear ISA
+ * specification about interrupt handling, each RISC-V SOC handles it in
+ * its own way. Hence, the generic RISC-V ISR handler expects the following
+ * functions to be provided at the SOC level:
+ * __soc_is_irq: to check if the exception is the result of an interrupt or not.
+ * __soc_handle_irq: handle pending IRQ at SOC level (ex: clear pending IRQ in
+ * SOC-specific IRQ register)
+ */
+
+/*
+ * Handler called upon each exception/interrupt/fault
+ * In this architecture, system call (ECALL) is used to perform context
+ * switching or IRQ offloading (when enabled).
+ */
+SECTION_FUNC(exception.entry, __irq_wrapper)
+ /* Allocate space on thread stack to save registers */
+ addi sp, sp, -__NANO_ESF_SIZEOF
+
+ /*
+ * Save caller-saved registers on current thread stack.
+ * NOTE: need to be updated to account for floating-point registers
+ * floating-point registers should be accounted for when corresponding
+ * config variable is set
+ */
+ sw ra, __NANO_ESF_ra_OFFSET(sp)
+ sw gp, __NANO_ESF_gp_OFFSET(sp)
+ sw tp, __NANO_ESF_tp_OFFSET(sp)
+ sw t0, __NANO_ESF_t0_OFFSET(sp)
+ sw t1, __NANO_ESF_t1_OFFSET(sp)
+ sw t2, __NANO_ESF_t2_OFFSET(sp)
+ sw t3, __NANO_ESF_t3_OFFSET(sp)
+ sw t4, __NANO_ESF_t4_OFFSET(sp)
+ sw t5, __NANO_ESF_t5_OFFSET(sp)
+ sw t6, __NANO_ESF_t6_OFFSET(sp)
+ sw a0, __NANO_ESF_a0_OFFSET(sp)
+ sw a1, __NANO_ESF_a1_OFFSET(sp)
+ sw a2, __NANO_ESF_a2_OFFSET(sp)
+ sw a3, __NANO_ESF_a3_OFFSET(sp)
+ sw a4, __NANO_ESF_a4_OFFSET(sp)
+ sw a5, __NANO_ESF_a5_OFFSET(sp)
+ sw a6, __NANO_ESF_a6_OFFSET(sp)
+ sw a7, __NANO_ESF_a7_OFFSET(sp)
+
+ /* Save MEPC register */
+ csrr t0, mepc
+ sw t0, __NANO_ESF_mepc_OFFSET(sp)
+
+ /* Save SOC-specific MSTATUS register */
+ csrr t0, SOC_MSTATUS_REG
+ sw t0, __NANO_ESF_mstatus_OFFSET(sp)
+
+#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
+ /* Handle context saving at SOC level. */
+ jal ra, __soc_save_context
+#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
+
+ /*
+ * Check if exception is the result of an interrupt or not.
+ * (SOC dependent). Following the RISC-V architecture spec, the MSB
+ * of the mcause register is used to indicate whether an exception
+ * is the result of an interrupt or an exception/fault. But for some
+ * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
+ * interrupt. Hence, check for interrupt/exception via the __soc_is_irq
+ * function (that needs to be implemented by each SOC). The result is
+ * returned via register a0 (1: interrupt, 0: exception).
+ */
+ jal ra, __soc_is_irq
+
+ /* If a0 != 0, jump to is_interrupt */
+ addi t1, x0, 0
+ bnez a0, is_interrupt
+
+ /*
+ * If exception is not an interrupt, MEPC will contain
+ * the instruction address, which has caused the exception.
+ * Increment saved MEPC by 4 to prevent running into the
+ * exception again, upon exiting the ISR.
+ */
+ lw t0, __NANO_ESF_mepc_OFFSET(sp)
+ addi t0, t0, 4
+ sw t0, __NANO_ESF_mepc_OFFSET(sp)
+
+ /*
+ * If the exception is the result of an ECALL, check whether to
+ * perform a context-switch or an IRQ offload. Otherwise call _Fault
+ * to report the exception.
+ */
+ csrr t0, mcause
+ li t2, SOC_MCAUSE_IRQ_MASK
+ and t0, t0, t2
+ li t1, SOC_MCAUSE_ECALL_EXP
+
+ /*
+ * If mcause == SOC_MCAUSE_ECALL_EXP, handle system call,
+ * otherwise handle fault
+ */
+#ifdef CONFIG_IRQ_OFFLOAD
+ /* If not system call, jump to is_fault */
+ bne t0, t1, is_fault
+
+ /*
+ * Determine if the system call is the result of an IRQ offloading.
+ * Done by checking if _offload_routine is not pointing to NULL.
+ * If NULL, jump to reschedule to perform a context-switch, otherwise,
+ * jump to is_interrupt to handle the IRQ offload.
+ */
+ la t0, _offload_routine
+ lw t1, 0x00(t0)
+ beqz t1, reschedule
+ bnez t1, is_interrupt
+
+is_fault:
+#else
+ /*
+ * Go to reschedule to handle context-switch if system call,
+ * otherwise call _Fault to handle exception
+ */
+ beq t0, t1, reschedule
+#endif
+
+ /*
+ * Call _Fault to handle exception.
+ * Stack pointer is pointing to a NANO_ESF structure, pass it
+ * to _Fault (via register a0).
+ * If _Fault shall return, set return address to no_reschedule
+ * to restore stack.
+ */
+ addi a0, sp, 0
+ la ra, no_reschedule
+ tail _Fault
+
+is_interrupt:
+ /*
+ * Save current thread stack pointer and switch
+ * stack pointer to interrupt stack.
+ */
+
+ /* Save thread stack pointer to temp register t0 */
+ addi t0, sp, 0
+
+ /* Switch to interrupt stack */
+ la t2, _kernel
+ lw sp, _kernel_offset_to_irq_stack(t2)
+
+ /*
+ * Save thread stack pointer on interrupt stack
+ * In RISC-V, stack pointer needs to be 16-byte aligned
+ */
+ addi sp, sp, -16
+ sw t0, 0x00(sp)
+
+on_irq_stack:
+ /* Increment _kernel.nested variable */
+ lw t3, _kernel_offset_to_nested(t2)
+ addi t3, t3, 1
+ sw t3, _kernel_offset_to_nested(t2)
+
+ /*
+ * If we are here due to a system call, t1 register should != 0.
+ * In this case, perform IRQ offloading, otherwise jump to call_irq
+ */
+ beqz t1, call_irq
+
+ /*
+ * Call _irq_do_offload to handle IRQ offloading.
+ * Set return address to on_thread_stack in order to jump there
+ * upon returning from _irq_do_offload
+ */
+ la ra, on_thread_stack
+ tail _irq_do_offload
+
+call_irq:
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
+ call _sys_k_event_logger_exit_sleep
+#endif
+
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
+ call _sys_k_event_logger_interrupt
+#endif
+
+ /* Get IRQ causing interrupt */
+ csrr a0, mcause
+ li t0, SOC_MCAUSE_IRQ_MASK
+ and a0, a0, t0
+
+ /*
+ * Clear pending IRQ generating the interrupt at SOC level
+ * Pass IRQ number to __soc_handle_irq via register a0
+ */
+ jal ra, __soc_handle_irq
+
+ /*
+ * Call corresponding registered function in _sw_isr_table.
+ * (table is 8-bytes wide, we should shift index by 3)
+ */
+ la t0, _sw_isr_table
+ slli a0, a0, 3
+ add t0, t0, a0
+
+ /* Load argument in a0 register */
+ lw a0, 0x00(t0)
+
+ /* Load ISR function address in register t1 */
+ lw t1, 0x04(t0)
+
+ /* Call ISR function */
+ jalr ra, t1
+
+on_thread_stack:
+ /* Get reference to _kernel */
+ la t1, _kernel
+
+ /* Decrement _kernel.nested variable */
+ lw t2, _kernel_offset_to_nested(t1)
+ addi t2, t2, -1
+ sw t2, _kernel_offset_to_nested(t1)
+
+ /* Restore thread stack pointer */
+ lw t0, 0x00(sp)
+ addi sp, t0, 0
+
+#ifdef CONFIG_PREEMPT_ENABLED
+ /*
+ * Check if we need to perform a reschedule
+ */
+
+ /* Get pointer to _kernel.current */
+ lw t2, _kernel_offset_to_current(t1)
+
+ /*
+ * If non-preemptible thread, do not schedule
+ * (see explanation of preempt field in kernel_structs.h
+ */
+ lhu t3, _thread_offset_to_preempt(t2)
+ li t4, _NON_PREEMPT_THRESHOLD
+ bgeu t3, t4, no_reschedule
+
+ /*
+ * Check if next thread to schedule is current thread.
+ * If yes do not perform a reschedule
+ */
+ lw t3, _kernel_offset_to_ready_q_cache(t1)
+ beq t3, t2, no_reschedule
+#else
+ j no_reschedule
+#endif /* CONFIG_PREEMPT_ENABLED */
+
+reschedule:
+#if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
+ call _sys_k_event_logger_context_switch
+#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
+
+ /* Get reference to _kernel */
+ la t0, _kernel
+
+ /* Get pointer to _kernel.current */
+ lw t1, _kernel_offset_to_current(t0)
+
+ /*
+ * Save callee-saved registers of current thread
+ * prior to handling the context switch
+ */
+ sw s0, _thread_offset_to_s0(t1)
+ sw s1, _thread_offset_to_s1(t1)
+ sw s2, _thread_offset_to_s2(t1)
+ sw s3, _thread_offset_to_s3(t1)
+ sw s4, _thread_offset_to_s4(t1)
+ sw s5, _thread_offset_to_s5(t1)
+ sw s6, _thread_offset_to_s6(t1)
+ sw s7, _thread_offset_to_s7(t1)
+ sw s8, _thread_offset_to_s8(t1)
+ sw s9, _thread_offset_to_s9(t1)
+ sw s10, _thread_offset_to_s10(t1)
+ sw s11, _thread_offset_to_s11(t1)
+
+ /*
+ * Save stack pointer of current thread and set the default return value
+ * of _Swap to _k_neg_eagain for the thread.
+ */
+ sw sp, _thread_offset_to_sp(t1)
+ la t2, _k_neg_eagain
+ lw t3, 0x00(t2)
+ sw t3, _thread_offset_to_swap_return_value(t1)
+
+ /* Get next thread to schedule. */
+ lw t1, _kernel_offset_to_ready_q_cache(t0)
+
+ /*
+ * Set _kernel.current to new thread loaded in t1
+ */
+ sw t1, _kernel_offset_to_current(t0)
+
+ /* Switch to new thread stack */
+ lw sp, _thread_offset_to_sp(t1)
+
+ /* Restore callee-saved registers of new thread */
+ lw s0, _thread_offset_to_s0(t1)
+ lw s1, _thread_offset_to_s1(t1)
+ lw s2, _thread_offset_to_s2(t1)
+ lw s3, _thread_offset_to_s3(t1)
+ lw s4, _thread_offset_to_s4(t1)
+ lw s5, _thread_offset_to_s5(t1)
+ lw s6, _thread_offset_to_s6(t1)
+ lw s7, _thread_offset_to_s7(t1)
+ lw s8, _thread_offset_to_s8(t1)
+ lw s9, _thread_offset_to_s9(t1)
+ lw s10, _thread_offset_to_s10(t1)
+ lw s11, _thread_offset_to_s11(t1)
+
+no_reschedule:
+#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
+ /* Restore context at SOC level */
+ jal ra, __soc_restore_context
+#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
+
+ /* Restore caller-saved registers from thread stack */
+ lw ra, __NANO_ESF_ra_OFFSET(sp)
+ lw gp, __NANO_ESF_gp_OFFSET(sp)
+ lw tp, __NANO_ESF_tp_OFFSET(sp)
+ lw t0, __NANO_ESF_t0_OFFSET(sp)
+ lw t1, __NANO_ESF_t1_OFFSET(sp)
+ lw t2, __NANO_ESF_t2_OFFSET(sp)
+ lw t3, __NANO_ESF_t3_OFFSET(sp)
+ lw t4, __NANO_ESF_t4_OFFSET(sp)
+ lw t5, __NANO_ESF_t5_OFFSET(sp)
+ lw t6, __NANO_ESF_t6_OFFSET(sp)
+ lw a0, __NANO_ESF_a0_OFFSET(sp)
+ lw a1, __NANO_ESF_a1_OFFSET(sp)
+ lw a2, __NANO_ESF_a2_OFFSET(sp)
+ lw a3, __NANO_ESF_a3_OFFSET(sp)
+ lw a4, __NANO_ESF_a4_OFFSET(sp)
+ lw a5, __NANO_ESF_a5_OFFSET(sp)
+ lw a6, __NANO_ESF_a6_OFFSET(sp)
+ lw a7, __NANO_ESF_a7_OFFSET(sp)
+
+ /* Restore MEPC register */
+ lw t0, __NANO_ESF_mepc_OFFSET(sp)
+ csrw mepc, t0
+
+ /* Restore SOC-specific MSTATUS register */
+ lw t0, __NANO_ESF_mstatus_OFFSET(sp)
+ csrw SOC_MSTATUS_REG, t0
+
+ /* Release stack space */
+ addi sp, sp, __NANO_ESF_SIZEOF
+
+ /* Call SOC_ERET to exit ISR */
+ SOC_ERET
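
The call_irq dispatch above is the assembly equivalent of the following C, assuming the generic Zephyr software ISR table layout of one argument pointer plus one handler pointer per entry (hence the 8-byte stride and the shift by 3):

/* C rendering of the call_irq table dispatch, for clarity only. */
#include <stdint.h>

struct _isr_table_entry {
	void *arg;              /* loaded from offset 0x00 into a0 */
	void (*isr)(void *arg); /* loaded from offset 0x04 and called */
};

extern struct _isr_table_entry _sw_isr_table[];

static void dispatch_irq(uint32_t irq)
{
	/* Equivalent of the shift-by-3 indexing into the 8-byte entries. */
	_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
}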
diff --git a/arch/riscv32/core/offsets/offsets.c b/arch/riscv32/core/offsets/offsets.c
new file mode 100644
index 000000000..682ca247f
--- /dev/null
+++ b/arch/riscv32/core/offsets/offsets.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief RISCV32 kernel structure member offset definition file
+ *
+ * This module is responsible for the generation of the absolute symbols whose
+ * value represents the member offsets for various RISCV32 kernel
+ * structures.
+ */
+
+#include <gen_offset.h>
+#include <kernel_structs.h>
+#include <kernel_offsets.h>
+
+/* thread_arch_t member offsets */
+GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
+
+/* struct coop member offsets */
+GEN_OFFSET_SYM(_callee_saved_t, sp);
+GEN_OFFSET_SYM(_callee_saved_t, s0);
+GEN_OFFSET_SYM(_callee_saved_t, s1);
+GEN_OFFSET_SYM(_callee_saved_t, s2);
+GEN_OFFSET_SYM(_callee_saved_t, s3);
+GEN_OFFSET_SYM(_callee_saved_t, s4);
+GEN_OFFSET_SYM(_callee_saved_t, s5);
+GEN_OFFSET_SYM(_callee_saved_t, s6);
+GEN_OFFSET_SYM(_callee_saved_t, s7);
+GEN_OFFSET_SYM(_callee_saved_t, s8);
+GEN_OFFSET_SYM(_callee_saved_t, s9);
+GEN_OFFSET_SYM(_callee_saved_t, s10);
+GEN_OFFSET_SYM(_callee_saved_t, s11);
+
+/* esf member offsets */
+GEN_OFFSET_SYM(NANO_ESF, ra);
+GEN_OFFSET_SYM(NANO_ESF, gp);
+GEN_OFFSET_SYM(NANO_ESF, tp);
+GEN_OFFSET_SYM(NANO_ESF, t0);
+GEN_OFFSET_SYM(NANO_ESF, t1);
+GEN_OFFSET_SYM(NANO_ESF, t2);
+GEN_OFFSET_SYM(NANO_ESF, t3);
+GEN_OFFSET_SYM(NANO_ESF, t4);
+GEN_OFFSET_SYM(NANO_ESF, t5);
+GEN_OFFSET_SYM(NANO_ESF, t6);
+GEN_OFFSET_SYM(NANO_ESF, a0);
+GEN_OFFSET_SYM(NANO_ESF, a1);
+GEN_OFFSET_SYM(NANO_ESF, a2);
+GEN_OFFSET_SYM(NANO_ESF, a3);
+GEN_OFFSET_SYM(NANO_ESF, a4);
+GEN_OFFSET_SYM(NANO_ESF, a5);
+GEN_OFFSET_SYM(NANO_ESF, a6);
+GEN_OFFSET_SYM(NANO_ESF, a7);
+
+GEN_OFFSET_SYM(NANO_ESF, mepc);
+GEN_OFFSET_SYM(NANO_ESF, mstatus);
+
+#if defined(CONFIG_SOC_RISCV32_PULPINO)
+GEN_OFFSET_SYM(NANO_ESF, lpstart0);
+GEN_OFFSET_SYM(NANO_ESF, lpend0);
+GEN_OFFSET_SYM(NANO_ESF, lpcount0);
+GEN_OFFSET_SYM(NANO_ESF, lpstart1);
+GEN_OFFSET_SYM(NANO_ESF, lpend1);
+GEN_OFFSET_SYM(NANO_ESF, lpcount1);
+#endif
+
+/*
+ * RISC-V requires the stack to be 16-byte aligned, hence the SP needs to grow
+ * or shrink by a size that follows the RISC-V stack alignment requirements.
+ * Hence, ensure that the __NANO_ESF_SIZEOF and _K_THREAD_NO_FLOAT_SIZEOF
+ * sizes generated below are rounded up accordingly.
+ */
+GEN_ABSOLUTE_SYM(__NANO_ESF_SIZEOF, STACK_ROUND_UP(sizeof(NANO_ESF)));
+
+/* size of the struct tcs structure sans save area for floating point regs */
+GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
+ STACK_ROUND_UP(sizeof(struct k_thread)));
+
+GEN_ABS_SYM_END
diff --git a/arch/riscv32/core/prep_c.c b/arch/riscv32/core/prep_c.c
new file mode 100644
index 000000000..83de38322
--- /dev/null
+++ b/arch/riscv32/core/prep_c.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Full C support initialization
+ *
+ *
+ * Initialization of full C support: zero the .bss and call _Cstart().
+ *
+ * Stack is available in this module, but not the global data/bss until their
+ * initialization is performed.
+ */
+
+#include <stddef.h>
+#include <toolchain.h>
+#include <nano_internal.h>
+
+/**
+ *
+ * @brief Prepare to and run C code
+ *
+ * This routine prepares for the execution of and runs C code.
+ *
+ * @return N/A
+ */
+
+void _PrepC(void)
+{
+ _bss_zero();
+ _Cstart();
+ CODE_UNREACHABLE;
+}
diff --git a/arch/riscv32/core/reset.S b/arch/riscv32/core/reset.S
new file mode 100644
index 000000000..e45f86345
--- /dev/null
+++ b/arch/riscv32/core/reset.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _ASMLANGUAGE
+#include <kernel_structs.h>
+
+/* exports */
+GTEXT(__start)
+GTEXT(__reset)
+
+/* imports */
+GTEXT(_PrepC)
+
+#if CONFIG_INCLUDE_RESET_VECTOR
+SECTION_FUNC(reset, __reset)
+ /*
+ * jump to __start
+ * use call opcode in case __start is far away.
+ * This will be dependent on linker.ld configuration.
+ */
+ call __start
+#endif /* CONFIG_INCLUDE_RESET_VECTOR */
+
+/* use ABI name of registers for the sake of simplicity */
+
+/*
+ * Remainder of asm-land initialization code before we can jump into
+ * the C domain
+ */
+SECTION_FUNC(TEXT, __start)
+#ifdef CONFIG_INIT_STACKS
+ /* Pre-populate all bytes in _interrupt_stack with 0xAA */
+ la t0, _interrupt_stack
+ li t1, CONFIG_ISR_STACK_SIZE
+ add t1, t1, t0
+
+ /* Populate _interrupt_stack with 0xaaaaaaaa */
+ li t2, 0xaaaaaaaa
+aa_loop:
+ sw t2, 0x00(t0)
+ addi t0, t0, 4
+ blt t0, t1, aa_loop
+#endif
+
+ /*
+ * Initially, setup stack pointer to
+ * _interrupt_stack + CONFIG_ISR_STACK_SIZE
+ */
+ la sp, _interrupt_stack
+ li t0, CONFIG_ISR_STACK_SIZE
+ add sp, sp, t0
+
+ /*
+ * Jump into C domain. _PrepC zeroes BSS and then enters the kernel
+ * via _Cstart
+ */
+ call _PrepC
diff --git a/arch/riscv32/core/sw_isr_table.S b/arch/riscv32/core/sw_isr_table.S
new file mode 100644
index 000000000..8fe00f629
--- /dev/null
+++ b/arch/riscv32/core/sw_isr_table.S
@@ -0,0 +1,61 @@
+/* sw_isr_table.S - ISR table for static ISR declarations for RISCV32 */
+
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _ASMLANGUAGE
+
+#include <toolchain.h>
+#include <sections.h>
+#include <arch/cpu.h>
+
+/*
+ * enable preprocessor features, such
+ * as %expr - evaluate the expression and use it as a string
+ */
+.altmacro
+
+/*
+ * Define an ISR table entry
+ * Define symbol as weak and give the section .gnu.linkonce
+ * prefix. This allows the linker to override the symbol and the
+ * whole section with the one defined by a device driver
+ */
+.macro _isr_table_entry_declare index
+ WDATA(_isr_irq\index)
+ .section .gnu.linkonce.isr_irq\index
+ _isr_irq\index: .word 0xABAD1DEA, _irq_spurious
+.endm
+
+/*
+ * Declare the ISR table
+ */
+.macro _isr_table_declare from, to
+ counter = \from
+ .rept (\to - \from)
+ _isr_table_entry_declare %counter
+ counter = counter + 1
+ .endr
+.endm
+
+GTEXT(_irq_spurious)
+GDATA(_sw_isr_table)
+
+.section .isr_irq0
+.align
+_sw_isr_table:
+
+_isr_table_declare 0 CONFIG_NUM_IRQS
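
Each weak .gnu.linkonce.isr_irqN entry above is intended to be replaced at link time when a driver registers a static handler, which in Zephyr normally happens through IRQ_CONNECT(). An illustrative registration (IRQ number, priority and handler names are placeholders, not part of the patch):

/* Illustrative static ISR registration; the weak _isr_irqN entry for
 * MY_DEV_IRQ is then replaced at link time. All names and numbers here
 * are placeholders.
 */
#include <irq.h>

#define MY_DEV_IRQ      5  /* placeholder IRQ number */
#define MY_DEV_IRQ_PRIO 1  /* placeholder priority */

static void my_dev_isr(void *arg)
{
	/* device-specific interrupt handling goes here */
}

static void my_dev_irq_config(void)
{
	IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_IRQ_PRIO, my_dev_isr, NULL, 0);
	irq_enable(MY_DEV_IRQ);
}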
diff --git a/arch/riscv32/core/swap.S b/arch/riscv32/core/swap.S
new file mode 100644
index 000000000..78b5148cf
--- /dev/null
+++ b/arch/riscv32/core/swap.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _ASMLANGUAGE
+#include <irq.h>
+#include <kernel_structs.h>
+#include <offsets_short.h>
+
+/* exports */
+GTEXT(_Swap)
+GTEXT(_thread_entry_wrapper)
+
+/* Use ABI name of registers for the sake of simplicity */
+
+/*
+ * unsigned int _Swap(unsigned int key)
+ *
+ * Always called with interrupts locked
+ * key is stored in a0 register
+ */
+SECTION_FUNC(exception.other, _Swap)
+
+ /* Make a system call to perform context switch */
+ ecall
+
+ /*
+ * when thread is rescheduled, unlock irq and return.
+ * Restored register a0 contains IRQ lock state of thread.
+ *
+ * Prior to unlocking irq, load return value of
+ * _Swap to temp register t2 (from _thread_offset_to_swap_return_value).
+ * Normally, it should be -EAGAIN, unless someone has previously
+ * called _set_thread_return_value(..).
+ */
+ la t0, _kernel
+
+ /* Get pointer to _kernel.current */
+ lw t1, _kernel_offset_to_current(t0)
+
+ /* Load return value of _Swap function in temp register t2 */
+ lw t2, _thread_offset_to_swap_return_value(t1)
+
+ /*
+ * Unlock irq, following IRQ lock state in a0 register.
+ * Use atomic instruction csrrs to do so.
+ */
+ andi a0, a0, SOC_MSTATUS_IEN
+ csrrs t0, mstatus, a0
+
+ /* Set value of return register a0 to value of register t2 */
+ addi a0, t2, 0
+
+ /* Return */
+ jalr x0, ra
+
+
+/*
+ * void _thread_entry_wrapper(_thread_entry_t, void *, void *, void *)
+ */
+SECTION_FUNC(TEXT, _thread_entry_wrapper)
+ /*
+ * _thread_entry_wrapper is called for every new thread upon return from
+ * _Swap or an ISR. Its address, as well as its input function arguments
+ * thread_entry_t, void *, void *, void *, are restored from the thread
+ * stack (initialized via the _new_thread function).
+ * In this case, thread_entry_t, void *, void * and void * are stored
+ * in registers a0, a1, a2 and a3. These registers are used as arguments
+ * to function _thread_entry. Hence, just call _thread_entry with
+ * return address set to 0 to indicate a non-returning function call.
+ */
+
+ jal x0, _thread_entry
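
From the caller's side the contract is: lock interrupts, then call _Swap() with the returned key; the value that comes back is -EAGAIN unless _set_thread_return_value() was used while the thread was pended. A hedged usage sketch (not part of the patch), using the prototype documented above:

/* Sketch of the caller-side pattern for _Swap(). */
#include <irq.h>

extern unsigned int _Swap(unsigned int key);

int wait_for_resource_example(void)
{
	unsigned int key = irq_lock();

	/* ... pend the current thread on a wait queue here ... */

	/* Give up the CPU; returns once this thread runs again, with
	 * -EAGAIN unless a wake-up path set another value via
	 * _set_thread_return_value().
	 */
	return _Swap(key);
}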
diff --git a/arch/riscv32/core/thread.c b/arch/riscv32/core/thread.c
new file mode 100644
index 000000000..2ec9b9755
--- /dev/null
+++ b/arch/riscv32/core/thread.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <kernel.h>
+#include <arch/cpu.h>
+#include <kernel_structs.h>
+#include <wait_q.h>
+#include <string.h>
+
+#if defined(CONFIG_THREAD_MONITOR)
+/*
+ * Add a thread to the kernel's list of active threads.
+ */
+static ALWAYS_INLINE void thread_monitor_init(struct k_thread *thread)
+{
+ unsigned int key;
+
+ key = irq_lock();
+ thread->next_thread = _kernel.threads;
+ _kernel.threads = thread;
+ irq_unlock(key);
+}
+#else
+#define thread_monitor_init(thread) \
+ do {/* do nothing */ \
+ } while ((0))
+#endif /* CONFIG_THREAD_MONITOR */
+
+void _thread_entry_wrapper(_thread_entry_t thread,
+ void *arg1,
+ void *arg2,
+ void *arg3);
+
+void _new_thread(char *stack_memory, size_t stack_size,
+ _thread_entry_t thread_func,
+ void *arg1, void *arg2, void *arg3,
+ int priority, unsigned options)
+{
+ _ASSERT_VALID_PRIO(priority, thread_func);
+
+ struct k_thread *thread;
+ struct __esf *stack_init;
+
+#ifdef CONFIG_INIT_STACKS
+ memset(stack_memory, 0xaa, stack_size);
+#endif
+ /* Initial stack frame for thread */
+ stack_init = (struct __esf *)
+ STACK_ROUND_DOWN(stack_memory +
+ stack_size - sizeof(struct __esf));
+
+ /* Setup the initial stack frame */
+ stack_init->a0 = (uint32_t)thread_func;
+ stack_init->a1 = (uint32_t)arg1;
+ stack_init->a2 = (uint32_t)arg2;
+ stack_init->a3 = (uint32_t)arg3;
+ /*
+ * Following the RISC-V architecture,
+ * the MSTATUS register (used to globally enable/disable interrupts),
+ * as well as the MEPC register (used by the core to save the
+ * value of the program counter at which an interrupt/exception occurs),
+ * need to be saved on the stack upon an interrupt/exception
+ * and restored prior to returning from the interrupt/exception.
+ * This allows nested interrupts to be handled.
+ *
+ * Given that context switching is performed via a system call exception
+ * within the RISCV32 architecture implementation, initially set:
+ * 1) MSTATUS to SOC_MSTATUS_DEF_RESTORE in the thread stack to enable
+ * interrupts when the newly created thread will be scheduled;
+ * 2) MEPC to the address of the _thread_entry_wrapper in the thread
+ * stack.
+ * Hence, when going out of an interrupt/exception/context-switch,
+ * after scheduling the newly created thread:
+ * 1) interrupts will be enabled, as the MSTATUS register will be
+ * restored following the MSTATUS value set within the thread stack;
+ * 2) the core will jump to _thread_entry_wrapper, as the program
+ * counter will be restored following the MEPC value set within the
+ * thread stack.
+ */
+ stack_init->mstatus = SOC_MSTATUS_DEF_RESTORE;
+ stack_init->mepc = (uint32_t)_thread_entry_wrapper;
+
+ /* Initialize various struct k_thread members */
+ thread = (struct k_thread *)stack_memory;
+
+ _init_thread_base(&thread->base, priority, K_PRESTART, options);
+
+ /* static threads overwrite it afterwards with real value */
+ thread->init_data = NULL;
+ thread->fn_abort = NULL;
+
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+ /* Initialize custom data field (value is opaque to kernel) */
+ thread->custom_data = NULL;
+#endif
+
+ thread->callee_saved.sp = (uint32_t)stack_init;
+
+ thread_monitor_init(thread);
+}
diff --git a/arch/riscv32/defconfig b/arch/riscv32/defconfig
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/arch/riscv32/defconfig
diff --git a/arch/riscv32/include/kernel_arch_data.h b/arch/riscv32/include/kernel_arch_data.h
new file mode 100644
index 000000000..fa5b99e33
--- /dev/null
+++ b/arch/riscv32/include/kernel_arch_data.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Private kernel definitions
+ *
+ * This file contains private kernel structures definitions and various
+ * other definitions for the RISCV32 processor architecture.
+ */
+
+#ifndef _kernel_arch_data_h_
+#define _kernel_arch_data_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <toolchain.h>
+#include <sections.h>
+#include <arch/cpu.h>
+
+#ifndef _ASMLANGUAGE
+#include <kernel.h>
+#include <stdint.h>
+#include <misc/util.h>
+#include <misc/dlist.h>
+#include <nano_internal.h>
+
+/*
+ * The following structure defines the list of registers that need to be
+ * saved/restored when a cooperative context switch occurs.
+ */
+struct _callee_saved {
+ uint32_t sp; /* Stack pointer, (x2 register) */
+
+ uint32_t s0; /* saved register/frame pointer */
+ uint32_t s1; /* saved register */
+ uint32_t s2; /* saved register */
+ uint32_t s3; /* saved register */
+ uint32_t s4; /* saved register */
+ uint32_t s5; /* saved register */
+ uint32_t s6; /* saved register */
+ uint32_t s7; /* saved register */
+ uint32_t s8; /* saved register */
+ uint32_t s9; /* saved register */
+ uint32_t s10; /* saved register */
+ uint32_t s11; /* saved register */
+};
+typedef struct _callee_saved _callee_saved_t;
+
+struct _caller_saved {
+ /*
+ * Nothing here, the exception code puts all the caller-saved
+ * registers onto the stack.
+ */
+};
+
+typedef struct _caller_saved _caller_saved_t;
+
+struct _thread_arch {
+ uint32_t swap_return_value; /* Return value of _Swap() */
+};
+
+typedef struct _thread_arch _thread_arch_t;
+
+struct _kernel_arch {
+ /* nothing for now */
+};
+
+typedef struct _kernel_arch _kernel_arch_t;
+
+extern char _interrupt_stack[CONFIG_ISR_STACK_SIZE];
+
+#endif /* _ASMLANGUAGE */
+
+#endif /* _kernel_arch_data_h_ */
diff --git a/arch/riscv32/include/kernel_arch_func.h b/arch/riscv32/include/kernel_arch_func.h
new file mode 100644
index 000000000..fb03d6c92
--- /dev/null
+++ b/arch/riscv32/include/kernel_arch_func.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Private kernel definitions
+ *
+ * This file contains private kernel function/macro definitions and various
+ * other definitions for the RISCV32 processor architecture.
+ */
+
+#ifndef _kernel_arch_func__h_
+#define _kernel_arch_func__h_
+
+#include <soc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _ASMLANGUAGE
+void nano_cpu_idle(void);
+void nano_cpu_atomic_idle(unsigned int key);
+
+static ALWAYS_INLINE void nanoArchInit(void)
+{
+ _kernel.irq_stack = _interrupt_stack + CONFIG_ISR_STACK_SIZE;
+}
+
+static ALWAYS_INLINE void
+_set_thread_return_value(struct k_thread *thread, unsigned int value)
+{
+ thread->arch.swap_return_value = value;
+}
+
+static inline void _IntLibInit(void)
+{
+#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
+ soc_interrupt_init();
+#endif
+}
+
+FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
+ const NANO_ESF *esf);
+
+
+#define _is_in_isr() (_kernel.nested != 0)
+
+#ifdef CONFIG_IRQ_OFFLOAD
+void _irq_do_offload(void);
+#endif
+
+#endif /* _ASMLANGUAGE */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _kernel_arch_func__h_ */
diff --git a/arch/riscv32/include/kernel_event_logger_arch.h b/arch/riscv32/include/kernel_event_logger_arch.h
new file mode 100644
index 000000000..4a4481f73
--- /dev/null
+++ b/arch/riscv32/include/kernel_event_logger_arch.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Kernel event logger support for RISCV32
+ */
+
+#ifndef __KERNEL_EVENT_LOGGER_ARCH_H__
+#define __KERNEL_EVENT_LOGGER_ARCH_H__
+
+#include <arch/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Get the identification of the current interrupt.
+ *
+ * This routine obtains the key of the interrupt that is currently being
+ * processed, if it is called from an IRQ context.
+ *
+ * @return The key of the interrupt that is currently being processed.
+ */
+static inline int _sys_current_irq_key_get(void)
+{
+ uint32_t mcause;
+
+ __asm__ volatile("csrr %0, mcause" : "=r" (mcause));
+
+ mcause &= SOC_MCAUSE_IRQ_MASK;
+
+ return mcause;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __KERNEL_EVENT_LOGGER_ARCH_H__ */
diff --git a/arch/riscv32/include/offsets_short_arch.h b/arch/riscv32/include/offsets_short_arch.h
new file mode 100644
index 000000000..e47ed0d48
--- /dev/null
+++ b/arch/riscv32/include/offsets_short_arch.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _offsets_short_arch__h_
+#define _offsets_short_arch__h_
+
+#include <offsets.h>
+
+/* kernel */
+
+/* nothing for now */
+
+/* end - kernel */
+
+/* threads */
+
+#define _thread_offset_to_sp \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
+
+#define _thread_offset_to_s0 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s0_OFFSET)
+
+#define _thread_offset_to_s1 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s1_OFFSET)
+
+#define _thread_offset_to_s2 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s2_OFFSET)
+
+#define _thread_offset_to_s3 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s3_OFFSET)
+
+#define _thread_offset_to_s4 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s4_OFFSET)
+
+#define _thread_offset_to_s5 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s5_OFFSET)
+
+#define _thread_offset_to_s6 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s6_OFFSET)
+
+#define _thread_offset_to_s7 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s7_OFFSET)
+
+#define _thread_offset_to_s8 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s8_OFFSET)
+
+#define _thread_offset_to_s9 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s9_OFFSET)
+
+#define _thread_offset_to_s10 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s10_OFFSET)
+
+#define _thread_offset_to_s11 \
+ (___thread_t_callee_saved_OFFSET + ___callee_saved_t_s11_OFFSET)
+
+#define _thread_offset_to_swap_return_value \
+ (___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
+
+/* end - threads */
+
+#endif /* _offsets_short_arch__h_ */