summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorBenjamin Walsh <walsh.benj@gmail.com>2017-01-22 12:07:29 -0500
committerAnas Nashif <nashif@linux.intel.com>2017-01-24 13:34:50 +0000
commit4b655024486bf0b39328e04c9fc1a5d1ff986207 (patch)
treeeed2e4abdc83c2f66e1a2e89afac1f6220f4ee0f /arch
parent867f8ee371cf720f72af234fa7592b558de7c5c7 (diff)
kernel/x86: move INT_ACTIVE/EXC_ACTIVE to thread_state
They are internal states, not user-facing. Also prepend an underscore since they are kernel internal symbols.

Change-Id: I53740e0d04a796ba1ccc409b5809438cdb189332
Signed-off-by: Benjamin Walsh <walsh.benj@gmail.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/core/excstub.S10
-rw-r--r--arch/x86/core/float.c2
-rw-r--r--arch/x86/core/intstub.S8
-rw-r--r--arch/x86/core/swap.S6
-rw-r--r--arch/x86/include/kernel_arch_data.h16
5 files changed, 24 insertions, 18 deletions
diff --git a/arch/x86/core/excstub.S b/arch/x86/core/excstub.S
index 6f24ede11..aa22c27f3 100644
--- a/arch/x86/core/excstub.S
+++ b/arch/x86/core/excstub.S
@@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _exception_enter)
* registers and the stack of the preempted thread.
*/
- testb $EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
+ testb $_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)
jne alreadyInException
movl %esp, _thread_offset_to_esf(%edx)
@@ -134,14 +134,14 @@ alreadyInException:
#endif /* CONFIG_GDB_INFO */
/*
- * Set the EXC_ACTIVE bit in the TCS of the current thread.
+ * Set the _EXC_ACTIVE state bit of the current thread.
* This enables _Swap() to preserve the thread's FP registers
* (where needed) if the exception handler causes a context switch.
* It also indicates to debug tools that an exception is being
* handled in the event of a context switch.
*/
- orb $EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
+ orb $_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
@@ -195,12 +195,12 @@ allDone:
jne nestedException
/*
- * Clear the EXC_ACTIVE bit in the k_thread of the current execution
+ * Clear the _EXC_ACTIVE bit in the k_thread of the current execution
* context if we are not in a nested exception (ie, when we exit the
* outermost exception).
*/
- andb $~EXC_ACTIVE, _thread_offset_to_execution_flags (%ecx)
+ andb $~_EXC_ACTIVE, _thread_offset_to_thread_state(%ecx)
nestedException:
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
diff --git a/arch/x86/core/float.c b/arch/x86/core/float.c
index 0fb0737c0..13039d991 100644
--- a/arch/x86/core/float.c
+++ b/arch/x86/core/float.c
@@ -122,7 +122,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
fp_owner = _kernel.current_fp;
if (fp_owner) {
- if (fp_owner->base.execution_flags & INT_OR_EXC_MASK) {
+ if (fp_owner->base.thread_state & _INT_OR_EXC_MASK) {
_FpCtxSave(fp_owner);
}
}
diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S
index 2a43066da..ddd8b04dc 100644
--- a/arch/x86/core/intstub.S
+++ b/arch/x86/core/intstub.S
@@ -290,14 +290,14 @@ alreadyOnIntStack:
je noReschedule
/*
- * Set the INT_ACTIVE bit in the k_thread to allow the upcoming call to
+ * Set the _INT_ACTIVE bit in the k_thread to allow the upcoming call to
* _Swap() to determine whether non-floating registers need to be
* preserved using the lazy save/restore algorithm, or to indicate to
* debug tools that a preemptive context switch has occurred.
*/
#if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
- orb $INT_ACTIVE, _thread_offset_to_execution_flags(%edx)
+ orb $_INT_ACTIVE, _thread_offset_to_thread_state(%edx)
#endif
/*
@@ -340,12 +340,12 @@ alreadyOnIntStack:
defined(CONFIG_GDB_INFO) )
/*
* _Swap() has restored the floating point registers, if needed.
- * Clear the INT_ACTIVE bit of the interrupted thread's TCS
+ * Clear the _INT_ACTIVE bit in the interrupted thread's state
* since it has served its purpose.
*/
movl _kernel + _kernel_offset_to_current, %eax
- andb $~INT_ACTIVE, _thread_offset_to_execution_flags(%eax)
+ andb $~_INT_ACTIVE, _thread_offset_to_thread_state(%eax)
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
/* Restore volatile registers and return to the interrupted thread */
diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S
index c23c7c8bd..98f2d61a6 100644
--- a/arch/x86/core/swap.S
+++ b/arch/x86/core/swap.S
@@ -44,7 +44,7 @@
* the non-volatile integer registers need to be saved in the TCS of the
* outgoing thread. The restoration of the integer registers of the incoming
* thread depends on whether that thread was preemptively context switched out.
- * The INT_ACTIVE and EXC_ACTIVE bits in the k_thread->execution_flags field
+ * The _INT_ACTIVE and _EXC_ACTIVE bits in the k_thread->thread_state field
* will signify that the thread was preemptively context switched out, and thus
* both the volatile and non-volatile integer registers need to be restored.
*
@@ -187,7 +187,7 @@ SECTION_FUNC(TEXT, _Swap)
* was preemptively context switched.
*/
- testb $INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%ebx)
+ testb $_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%ebx)
je restoreContext_NoFloatSave
@@ -227,7 +227,7 @@ restoreContext_NoFloatSave:
* was previously preemptively context switched out.
*/
- testb $INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%eax)
+ testb $_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%eax)
je restoreContext_NoFloatRestore
#ifdef CONFIG_SSE
diff --git a/arch/x86/include/kernel_arch_data.h b/arch/x86/include/kernel_arch_data.h
index 51b3f2e52..36888448e 100644
--- a/arch/x86/include/kernel_arch_data.h
+++ b/arch/x86/include/kernel_arch_data.h
@@ -42,21 +42,27 @@
#define STACK_ALIGN_SIZE 4
-/* x86 Bitmask definitions for struct k_thread->execution_flags */
+/* x86 Bitmask definitions for struct k_thread->thread_state */
/* executing context is interrupt handler */
-#define INT_ACTIVE (1 << 7)
+#define _INT_ACTIVE (1 << 7)
/* executing context is exception handler */
-#define EXC_ACTIVE (1 << 6)
+#define _EXC_ACTIVE (1 << 6)
+
+#define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)
-#define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)
+/* end - states */
+
+/* x86 Bitmask definitions for struct k_thread->execution_flags */
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
/* thread uses SSEx (and also FP) registers */
-#define K_SSE_REGS (1 << 5)
+#define K_SSE_REGS (1 << 7)
#endif
+/* end - execution flags */
+
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
#define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
#elif defined(CONFIG_FP_SHARING)