path: root/libgo/runtime/lock_sema.c
Diffstat (limited to 'libgo/runtime/lock_sema.c')
-rw-r--r--	libgo/runtime/lock_sema.c	217
1 file changed, 217 insertions(+), 0 deletions(-)
diff --git a/libgo/runtime/lock_sema.c b/libgo/runtime/lock_sema.c
new file mode 100644
index 00000000000..2fa837d8b0e
--- /dev/null
+++ b/libgo/runtime/lock_sema.c
@@ -0,0 +1,217 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+// This implementation depends on OS-specific implementations of
+//
+// uintptr runtime.semacreate(void)
+// Create a semaphore, which will be assigned to m->waitsema.
+// The zero value is treated as absence of any semaphore,
+// so be sure to return a non-zero value.
+//
+// int32 runtime.semasleep(int64 ns)
+// If ns < 0, acquire m->waitsema and return 0.
+// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
+// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
+//
+// int32 runtime.semawakeup(M *mp)
+// Wake up mp, which is or will soon be sleeping on mp->waitsema.
+//
+
+enum
+{
+	LOCKED = 1,	// low bit of l->waitm: the lock is held
+
+	ACTIVE_SPIN = 4,	// busy-spin attempts before yielding (SMP only)
+	ACTIVE_SPIN_CNT = 30,	// procyield iterations per busy-spin attempt
+	PASSIVE_SPIN = 1,	// osyield attempts before sleeping on the semaphore
+};
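+
+// The word l->waitm does double duty: it holds the head of the list
+// of M's waiting on this lock, with the LOCKED bit or'ed into the low
+// bit.  M structures are word-aligned, so the low bit of an M pointer
+// is otherwise always zero.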
+
+void
+runtime_lock(Lock *l)
+{
+ uintptr v;
+ uint32 i, spin;
+
+ if(m->locks++ < 0)
+ runtime_throw("runtime_lock: lock count");
+
+ // Speculative grab for lock.
+ if(runtime_casp(&l->waitm, nil, (void*)LOCKED))
+ return;
+
+ if(m->waitsema == 0)
+ m->waitsema = runtime_semacreate();
+
+	// On uniprocessors, no point spinning.
+ // On multiprocessors, spin for ACTIVE_SPIN attempts.
+ spin = 0;
+ if(runtime_ncpu > 1)
+ spin = ACTIVE_SPIN;
+
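+	// Each iteration retries the CAS.  The first `spin` failures
+	// busy-wait with runtime_procyield, the next PASSIVE_SPIN yield
+	// the OS thread with runtime_osyield, and after that this M
+	// queues itself on l->waitm and sleeps on its semaphore until
+	// runtime_unlock wakes it.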
+ for(i=0;; i++) {
+ v = (uintptr)runtime_atomicloadp(&l->waitm);
+ if((v&LOCKED) == 0) {
+unlocked:
+ if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED)))
+ return;
+ i = 0;
+ }
+ if(i<spin)
+ runtime_procyield(ACTIVE_SPIN_CNT);
+ else if(i<spin+PASSIVE_SPIN)
+ runtime_osyield();
+ else {
+ // Someone else has it.
+ // l->waitm points to a linked list of M's waiting
+ // for this lock, chained through m->nextwaitm.
+ // Queue this M.
+ for(;;) {
+ m->nextwaitm = (void*)(v&~LOCKED);
+ if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED)))
+ break;
+ v = (uintptr)runtime_atomicloadp(&l->waitm);
+ if((v&LOCKED) == 0)
+ goto unlocked;
+ }
+ if(v&LOCKED) {
+ // Queued. Wait.
+ runtime_semasleep(-1);
+ i = 0;
+ }
+ }
+ }
+}
+
+void
+runtime_unlock(Lock *l)
+{
+ uintptr v;
+ M *mp;
+
+ if(--m->locks < 0)
+ runtime_throw("runtime_unlock: lock count");
+
+ for(;;) {
+ v = (uintptr)runtime_atomicloadp(&l->waitm);
+ if(v == LOCKED) {
+ if(runtime_casp(&l->waitm, (void*)LOCKED, nil))
+ break;
+ } else {
+ // Other M's are waiting for the lock.
+ // Dequeue an M.
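+			// The CAS below also releases the lock: nextwaitm was
+			// stored with the LOCKED bit clear, so the woken M must
+			// still compete in runtime_lock to acquire it.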
+ mp = (void*)(v&~LOCKED);
+ if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) {
+ // Dequeued an M. Wake it.
+ runtime_semawakeup(mp);
+ break;
+ }
+ }
+ }
+}
+
+// One-time notifications.
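+//
+// A Note is a one-shot event: after runtime_noteclear(n), one thread
+// calls runtime_notesleep or runtime_notetsleep while another calls
+// runtime_notewakeup, at most once per clear.  The sleeping call
+// returns once the wakeup has happened, regardless of which call
+// comes first.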
+void
+runtime_noteclear(Note *n)
+{
+ n->waitm = nil;
+}
+
+void
+runtime_notewakeup(Note *n)
+{
+ M *mp;
+
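+	// Atomically mark the note as woken by storing LOCKED into
+	// n->waitm, remembering the previous value.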
+ do
+ mp = runtime_atomicloadp(&n->waitm);
+ while(!runtime_casp(&n->waitm, mp, (void*)LOCKED));
+
+ // Successfully set waitm to LOCKED.
+ // What was it before?
+ if(mp == nil) {
+ // Nothing was waiting. Done.
+ } else if(mp == (M*)LOCKED) {
+ // Two notewakeups! Not allowed.
+ runtime_throw("notewakeup - double wakeup");
+ } else {
+ // Must be the waiting m. Wake it up.
+ runtime_semawakeup(mp);
+ }
+}
+
+void
+runtime_notesleep(Note *n)
+{
+ if(m->waitsema == 0)
+ m->waitsema = runtime_semacreate();
+ if(!runtime_casp(&n->waitm, nil, m)) { // must be LOCKED (got wakeup)
+ if(n->waitm != (void*)LOCKED)
+ runtime_throw("notesleep - waitm out of sync");
+ return;
+ }
+ // Queued. Sleep.
+ runtime_semasleep(-1);
+}
+
+void
+runtime_notetsleep(Note *n, int64 ns)
+{
+ M *mp;
+ int64 deadline, now;
+
+ if(ns < 0) {
+ runtime_notesleep(n);
+ return;
+ }
+
+ if(m->waitsema == 0)
+ m->waitsema = runtime_semacreate();
+
+ // Register for wakeup on n->waitm.
+ if(!runtime_casp(&n->waitm, nil, m)) { // must be LOCKED (got wakeup already)
+ if(n->waitm != (void*)LOCKED)
+ runtime_throw("notetsleep - waitm out of sync");
+ return;
+ }
+
+ deadline = runtime_nanotime() + ns;
+ for(;;) {
+ // Registered. Sleep.
+ if(runtime_semasleep(ns) >= 0) {
+ // Acquired semaphore, semawakeup unregistered us.
+ // Done.
+ return;
+ }
+
+ // Interrupted or timed out. Still registered. Semaphore not acquired.
+ now = runtime_nanotime();
+ if(now >= deadline)
+ break;
+
+ // Deadline hasn't arrived. Keep sleeping.
+ ns = deadline - now;
+ }
+
+ // Deadline arrived. Still registered. Semaphore not acquired.
+ // Want to give up and return, but have to unregister first,
+ // so that any notewakeup racing with the return does not
+ // try to grant us the semaphore when we don't expect it.
+ for(;;) {
+ mp = runtime_atomicloadp(&n->waitm);
+ if(mp == m) {
+ // No wakeup yet; unregister if possible.
+ if(runtime_casp(&n->waitm, mp, nil))
+ return;
+ } else if(mp == (M*)LOCKED) {
+ // Wakeup happened so semaphore is available.
+ // Grab it to avoid getting out of sync.
+ if(runtime_semasleep(-1) < 0)
+ runtime_throw("runtime: unable to acquire - semaphore out of sync");
+ return;
+ } else {
+ runtime_throw("runtime: unexpected waitm - semaphore out of sync");
+ }
+ }
+}
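
The header comment in this file specifies three OS hooks (runtime.semacreate, runtime.semasleep, runtime.semawakeup) that each target supplies separately. As a rough illustration of that contract, here is a minimal sketch on top of POSIX unnamed semaphores. It is an assumption for exposition only, not the gccgo code for any platform: the names sema_create, sema_sleep, and sema_wakeup are hypothetical, and the signatures are simplified to take a sem_t* directly rather than going through m->waitsema.

#include <errno.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

// Create a semaphore; the contract requires a non-zero return value.
uintptr_t
sema_create(void)
{
	sem_t *sem;

	sem = malloc(sizeof *sem);
	if(sem == NULL || sem_init(sem, 0, 0) != 0)
		abort();
	return (uintptr_t)sem;
}

// Acquire sem.  ns < 0: wait forever and return 0.  ns >= 0: wait at
// most ns nanoseconds; return 0 if acquired, -1 if interrupted or
// timed out.
int
sema_sleep(sem_t *sem, long long ns)
{
	struct timespec ts;

	if(ns < 0) {
		// Must not fail: retry if interrupted by a signal.
		while(sem_wait(sem) != 0) {
			if(errno != EINTR)
				abort();
		}
		return 0;
	}

	// sem_timedwait takes an absolute CLOCK_REALTIME deadline.
	if(clock_gettime(CLOCK_REALTIME, &ts) != 0)
		abort();
	ts.tv_sec += ns / 1000000000;
	ts.tv_nsec += ns % 1000000000;
	if(ts.tv_nsec >= 1000000000) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000;
	}
	return sem_timedwait(sem, &ts) == 0 ? 0 : -1;
}

// Wake one sleeper on sem.
void
sema_wakeup(sem_t *sem)
{
	sem_post(sem);
}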