Diffstat (limited to 'libgo/runtime/runtime.h')
-rw-r--r--  libgo/runtime/runtime.h | 233
1 file changed, 160 insertions(+), 73 deletions(-)
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 7f061cbbe31..818465cb897 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -8,6 +8,7 @@
#define _GNU_SOURCE
#include "go-assert.h"
+#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -17,17 +18,17 @@
#include <fcntl.h>
#include <pthread.h>
#include <semaphore.h>
+#include <ucontext.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
+#include "array.h"
#include "go-alloc.h"
#include "go-panic.h"
#include "go-string.h"
-typedef struct __go_string String;
-
/* This file supports C files copied from the 6g runtime library.
This is a version of the 6g runtime.h rewritten for gccgo's version
of the code. */
@@ -49,69 +50,82 @@ typedef unsigned int uintptr __attribute__ ((mode (pointer)));
typedef uint8 bool;
typedef uint8 byte;
typedef struct G G;
+typedef union Lock Lock;
typedef struct M M;
+typedef union Note Note;
typedef struct MCache MCache;
typedef struct FixAlloc FixAlloc;
-typedef struct Lock Lock;
typedef struct __go_defer_stack Defer;
typedef struct __go_panic_stack Panic;
+typedef struct __go_open_array Slice;
+typedef struct __go_string String;
-/* We use mutexes for locks. 6g uses futexes directly, and perhaps
- someday we will do that too. */
-
-struct Lock
-{
- uint32 key;
- sem_t sem;
-};
-
-/* A Note. */
-
-typedef struct Note Note;
-
-struct Note {
- int32 woken;
-};
-
-/* Per CPU declarations. */
-
-#ifdef __rtems__
-#define __thread
-#endif
-
-extern __thread G* g;
-extern __thread M* m;
+/*
+ * per-cpu declaration.
+ */
+extern M* runtime_m(void);
+extern G* runtime_g(void);
extern M runtime_m0;
extern G runtime_g0;
-#ifdef __rtems__
-#undef __thread
-#endif
-
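
A minimal sketch of the new accessor style, assuming a hypothetical helper; with g and m no longer exposed as __thread variables, runtime C code reaches the current goroutine and thread through runtime_g() and runtime_m():

    static bool
    current_is_running(void)
    {
            G *gp = runtime_g();    // goroutine running on this thread
            return gp->status == Grunning;
    }
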
-/* Constants. */
-
+/*
+ * defined constants
+ */
+enum
+{
+ // G status
+ //
+ // If you add to this list, add to the list
+ // of "okay during garbage collection" status
+ // in mgc0.c too.
+ Gidle,
+ Grunnable,
+ Grunning,
+ Gsyscall,
+ Gwaiting,
+ Gmoribund,
+ Gdead,
+};
enum
{
true = 1,
false = 0,
};
-/* Structures. */
-
+/*
+ * structures
+ */
+union Lock
+{
+ uint32 key; // futex-based impl
+ M* waitm; // linked list of waiting M's (sema-based impl)
+};
+union Note
+{
+ uint32 key; // futex-based impl
+ M* waitm; // waiting M (sema-based impl)
+};
struct G
{
Defer* defer;
Panic* panic;
void* exception; // current exception being thrown
bool is_foreign; // whether current exception from other language
+ void *gcstack; // if status==Gsyscall, gcstack = stackbase to use during gc
+ uintptr gcstack_size;
+ void* gcnext_segment;
+ void* gcnext_sp;
+ void* gcinitial_sp;
+ jmp_buf gcregs;
byte* entry; // initial function
G* alllink; // on allg
void* param; // passed parameter on wakeup
+ bool fromgogo; // reached from gogo
int16 status;
int32 goid;
- int8* waitreason; // if status==Gwaiting
+ const char* waitreason; // if status==Gwaiting
G* schedlink;
bool readyonstop;
bool ispanic;
@@ -122,85 +136,144 @@ struct G
// uintptr sigcode0;
// uintptr sigcode1;
// uintptr sigpc;
- // uintptr gopc; // pc of go statement that created this goroutine
+ uintptr gopc; // pc of go statement that created this goroutine
+
+ ucontext_t context;
+ void* stack_context[10];
};
struct M
{
+ G* g0; // goroutine with scheduling stack
+ G* gsignal; // signal-handling G
G* curg; // current running goroutine
int32 id;
int32 mallocing;
int32 gcing;
int32 locks;
int32 nomemprof;
- int32 gcing_for_prof;
- int32 holds_finlock;
- int32 gcing_for_finlock;
+ int32 waitnextg;
+ int32 dying;
int32 profilehz;
+ int32 helpgc;
uint32 fastrand;
+ Note havenextg;
+ G* nextg;
+ M* alllink; // on allm
+ M* schedlink;
MCache *mcache;
+ G* lockedg;
+ G* idleg;
+ M* nextwaitm; // next M waiting for lock
+ uintptr waitsema; // semaphore for parking on locks
+ uint32 waitsemacount;
+ uint32 waitsemalock;
+};
- /* For the list of all threads. */
- struct __go_thread_id *list_entry;
+/* Macros. */
- /* For the garbage collector. */
- void *gc_sp;
- size_t gc_len;
- void *gc_next_segment;
- void *gc_next_sp;
- void *gc_initial_sp;
+#ifdef __WINDOWS__
+enum {
+ Windows = 1
+};
+#else
+enum {
+ Windows = 0
};
+#endif
-/* Macros. */
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
#define nil ((void*)0)
#define USED(v) ((void) v)
-/* We map throw to assert. */
-#define runtime_throw(s) __go_assert(s == 0)
+/*
+ * external data
+ */
+G* runtime_allg;
+G* runtime_lastg;
+M* runtime_allm;
+extern int32 runtime_gomaxprocs;
+extern bool runtime_singleproc;
+extern uint32 runtime_panicking;
+extern int32 runtime_gcwaiting; // gc is waiting to run
+int32 runtime_ncpu;
+/*
+ * common functions and data
+ */
+int32 runtime_findnull(const byte*);
+
+/*
+ * very low level c-called
+ */
+void runtime_args(int32, byte**);
+void runtime_osinit();
+void runtime_goargs(void);
+void runtime_goenvs(void);
+void runtime_throw(const char*);
void* runtime_mal(uintptr);
+void runtime_schedinit(void);
+void runtime_initsig(int32);
+String runtime_gostringnocopy(byte*);
+void* runtime_mstart(void*);
+G* runtime_malg(int32, byte**, size_t*);
+void runtime_minit(void);
void runtime_mallocinit(void);
-void runtime_initfintab(void);
+void runtime_gosched(void);
+void runtime_goexit(void);
+void runtime_entersyscall(void) __asm__("libgo_syscall.syscall.entersyscall");
+void runtime_exitsyscall(void) __asm__("libgo_syscall.syscall.exitsyscall");
void siginit(void);
bool __go_sigsend(int32 sig);
int64 runtime_nanotime(void);
void runtime_stoptheworld(void);
void runtime_starttheworld(bool);
-void __go_go(void (*pfn)(void*), void*);
-void __go_gc_goroutine_init(void*);
-void __go_enable_gc(void);
-int __go_run_goroutine_gc(int);
-void __go_scanstacks(void (*scan)(byte *, int64));
-void __go_stealcache(void);
-void __go_cachestats(void);
+G* __go_go(void (*pfn)(void*), void*);
/*
* mutual exclusion locks. in the uncontended case,
* as fast as spin locks (just a few user-level instructions),
* but on the contention path they sleep in the kernel.
+ * a zeroed Lock is unlocked (no need to initialize each lock).
*/
-void runtime_initlock(Lock*);
void runtime_lock(Lock*);
void runtime_unlock(Lock*);
-void runtime_destroylock(Lock*);
-
-void runtime_semacquire (uint32 *) asm ("libgo_runtime.runtime.Semacquire");
-void runtime_semrelease (uint32 *) asm ("libgo_runtime.runtime.Semrelease");
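
A minimal usage sketch, assuming a hypothetical counter guarded by one of these locks; because a zeroed Lock is already unlocked, a static Lock needs no setup before the first runtime_lock call:

    static Lock counterlock;    // zero value is an unlocked Lock
    static int64 counter;

    static void
    counter_incr(void)
    {
            runtime_lock(&counterlock);     // fast user-level path when uncontended
            counter++;
            runtime_unlock(&counterlock);   // hands the lock to a waiting M, if any
    }
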
/*
* sleep and wakeup on one-time events.
* before any calls to notesleep or notewakeup,
* must call noteclear to initialize the Note.
- * then, any number of threads can call notesleep
+ * then, exactly one thread can call notesleep
* and exactly one thread can call notewakeup (once).
- * once notewakeup has been called, all the notesleeps
- * will return. future notesleeps will return immediately.
+ * once notewakeup has been called, the notesleep
+ * will return. future notesleep will return immediately.
+ * subsequent noteclear must be called only after
+ * previous notesleep has returned, e.g. it's disallowed
+ * to call noteclear straight after notewakeup.
+ *
+ * notetsleep is like notesleep but wakes up after
+ * a given number of nanoseconds even if the event
+ * has not yet happened. if a goroutine uses notetsleep to
+ * wake up early, it must wait to call noteclear until it
+ * can be sure that no other goroutine is calling
+ * notewakeup.
*/
void runtime_noteclear(Note*);
void runtime_notesleep(Note*);
void runtime_notewakeup(Note*);
+void runtime_notetsleep(Note*, int64);
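
A minimal sketch of the one-time-event protocol described above, with a hypothetical Note and helpers; the Note is cleared first, one thread sleeps, and another performs the single wakeup:

    static Note donenote;

    static void
    setup_done(void)
    {
            runtime_noteclear(&donenote);   // must precede any notesleep/notewakeup
    }

    static void
    wait_for_done(void)
    {
            runtime_notesleep(&donenote);   // the single sleeper; returns after the wakeup
    }

    static void
    signal_done(void)
    {
            runtime_notewakeup(&donenote);  // the single wakeup for this Note
    }
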
+
+/*
+ * low-level synchronization for implementing the above
+ */
+uintptr runtime_semacreate(void);
+int32 runtime_semasleep(int64);
+void runtime_semawakeup(M*);
+// or
+void runtime_futexsleep(uint32*, uint32, int64);
+void runtime_futexwakeup(uint32*, uint32);
+
/* Functions. */
#define runtime_printf printf
@@ -208,10 +281,9 @@ void runtime_notewakeup(Note*);
#define runtime_free(p) __go_free(p)
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
-#define runtime_getenv(s) getenv(s)
-#define runtime_atoi(s) atoi(s)
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
+#define runtime_exit(s) _exit(s)
MCache* runtime_allocmcache(void);
void free(void *v);
struct __go_func_type;
@@ -221,12 +293,24 @@ bool runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
-
-void runtime_sigprof(uint8 *pc, uint8 *sp, uint8 *lr);
-void runtime_cpuprofinit(void);
+#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
+#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
+#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
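
A small sketch of the compare-and-swap wrapper, using a hypothetical claim-once flag; runtime_cas expands to the GCC __sync_bool_compare_and_swap builtin, so exactly one caller wins the transition from 0 to 1:

    static uint32 gate;         // 0 = free, 1 = claimed

    static bool
    claim_gate(void)
    {
            return runtime_cas(&gate, 0, 1);        // true for exactly one claimant
    }

    static void
    release_gate(void)
    {
            runtime_atomicstore(&gate, 0);          // sequentially consistent release
    }
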
+
+void runtime_dopanic(int32) __attribute__ ((noreturn));
+void runtime_startpanic(void);
+void runtime_ready(G*);
+const byte* runtime_getenv(const char*);
+int32 runtime_atoi(const byte*);
+void runtime_sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp);
void runtime_resetcpuprofiler(int32);
void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
uint32 runtime_fastrand1(void);
+void runtime_semacquire(uint32 volatile *);
+void runtime_semrelease(uint32 volatile *);
+int32 runtime_gomaxprocsfunc(int32 n);
void runtime_procyield(uint32);
void runtime_osyield(void);
void runtime_usleep(uint32);
@@ -239,3 +323,6 @@ void reflect_call(const struct __go_func_type *, const void *, _Bool, _Bool,
#ifdef __rtems__
void __wrap_rtems_task_variable_add(void **);
#endif
+
+/* Temporary. */
+void runtime_cond_wait(pthread_cond_t*, pthread_mutex_t*);