author     Jakub Jelinek <jakub@redhat.com>    2008-03-25 09:48:16 +0000
committer  Jakub Jelinek <jakub@redhat.com>    2008-03-25 09:48:16 +0000
commit     7d68ffced7e31dd5a34de2962c92d15e5c936dbd (patch)
tree       992709e727bac57aa59f5aad45cd48228c6c7872
parent     e5dc0cb473026fdc6dac019e4dae24d655acceb0 (diff)
* team.c (gomp_thread_start): Don't clear ts.static_trip here.
* loop.c (gomp_loop_static_start, gomp_loop_dynamic_start): Clear
ts.static_trip here.
* work.c (gomp_work_share_start): Don't clear ts.static_trip here.
* libgomp.h: Include ptrlock.h.
(struct gomp_work_share): Reshuffle fields.  Add next_alloc,
next_ws, next_free and inline_ordered_team_ids fields, change
ordered_team_ids into pointer from flexible array member.
(struct gomp_team_state): Add last_work_share field, remove
work_share_generation.
(struct gomp_team): Remove work_share_lock, generation_mask,
oldest_live_gen, num_live_gen and init_work_shares fields, add
work_share_list_alloc, work_share_list_free and work_share_chunk
fields.  Change work_shares from pointer to pointers into an array.
(gomp_new_team): New prototype.
(gomp_team_start): Change type of last argument.
(gomp_new_work_share): Removed.
(gomp_init_work_share, gomp_fini_work_share): New prototypes.
(gomp_work_share_init_done): New static inline.
* team.c (gomp_thread_start): Clear ts.last_work_share, don't clear
ts.work_share_generation.
(new_team): Removed.
(gomp_new_team): New function.
(free_team): Free gomp_work_share blocks chained through next_alloc,
instead of freeing work_shares and destroying work_share_lock.
(gomp_team_start): Change last argument from ws to team, don't create
new team, set ts.work_share to &team->work_shares[0] and clear
ts.last_work_share.  Don't clear ts.work_share_generation.
(gomp_team_end): Call gomp_fini_work_share.
* work.c (gomp_new_work_share): Removed.
(alloc_work_share, gomp_init_work_share, gomp_fini_work_share): New
functions.
(free_work_share): Add team argument.  Call gomp_fini_work_share
and then either free ws if orphaned, or put it into
work_share_list_free list of the current team.
(gomp_work_share_start, gomp_work_share_end,
gomp_work_share_end_nowait): Rewritten.
* sections.c (GOMP_sections_start): Call gomp_work_share_init_done
after gomp_sections_init.  If HAVE_SYNC_BUILTINS, call
gomp_iter_dynamic_next instead of the _locked variant and don't take
lock around it, otherwise acquire it before calling
gomp_iter_dynamic_next_locked.
(GOMP_sections_next): If HAVE_SYNC_BUILTINS, call
gomp_iter_dynamic_next instead of the _locked variant and don't take
lock around it.
(GOMP_parallel_sections_start): Call gomp_new_team instead of
gomp_new_work_share.  Call gomp_sections_init on &team->work_shares[0].
Adjust gomp_team_start caller.
* loop.c (gomp_loop_static_start, gomp_loop_ordered_static_start): Call
gomp_work_share_init_done after gomp_loop_init.  Don't unlock ws->lock.
(gomp_loop_dynamic_start, gomp_loop_guided_start): Call
gomp_work_share_init_done after gomp_loop_init.  If HAVE_SYNC_BUILTINS,
don't unlock ws->lock, otherwise lock it.
(gomp_loop_ordered_dynamic_start, gomp_loop_ordered_guided_start): Call
gomp_work_share_init_done after gomp_loop_init.  Lock ws->lock.
(gomp_parallel_loop_start): Call gomp_new_team instead of
gomp_new_work_share.  Call gomp_loop_init on &team->work_shares[0].
Adjust gomp_team_start caller.
* single.c (GOMP_single_start, GOMP_single_copy_start): Call
gomp_work_share_init_done if gomp_work_share_start returned true.
Don't unlock ws->lock.
* parallel.c (gomp_resolve_num_threads): Fix non-HAVE_SYNC_BUILTINS
case.
(GOMP_parallel_start): Call gomp_new_team and pass that as last
argument to gomp_team_start.
* config/linux/ptrlock.c: New file.
* config/linux/ptrlock.h: New file.
* config/posix/ptrlock.c: New file.
* config/posix/ptrlock.h: New file.
* Makefile.am (libgomp_la_SOURCES): Add ptrlock.c.
* Makefile.in: Regenerated.
* testsuite/Makefile.in: Regenerated.
* testsuite/libgomp.c++/collapse-1.C (main): Make k private in second
omp for loop.

git-svn-id: https://gcc.gnu.org/svn/gcc/branches/gomp-3_0-branch@133506 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  libgomp/ChangeLog.gomp                      81
-rw-r--r--  libgomp/Makefile.am                          2
-rw-r--r--  libgomp/Makefile.in                         13
-rw-r--r--  libgomp/config/linux/ptrlock.c              70
-rw-r--r--  libgomp/config/linux/ptrlock.h              65
-rw-r--r--  libgomp/config/posix/ptrlock.c               1
-rw-r--r--  libgomp/config/posix/ptrlock.h              69
-rw-r--r--  libgomp/libgomp.h                          143
-rw-r--r--  libgomp/loop.c                              58
-rw-r--r--  libgomp/parallel.c                           9
-rw-r--r--  libgomp/sections.c                          32
-rw-r--r--  libgomp/single.c                            11
-rw-r--r--  libgomp/team.c                              55
-rw-r--r--  libgomp/testsuite/Makefile.in                6
-rw-r--r--  libgomp/testsuite/libgomp.c++/collapse-1.C   2
-rw-r--r--  libgomp/work.c                             220
16 files changed, 616 insertions, 221 deletions
diff --git a/libgomp/ChangeLog.gomp b/libgomp/ChangeLog.gomp
index 7f5987578e4..3919787141c 100644
--- a/libgomp/ChangeLog.gomp
+++ b/libgomp/ChangeLog.gomp
@@ -1,3 +1,84 @@
+2008-03-25 Jakub Jelinek <jakub@redhat.com>
+
+ * team.c (gomp_thread_start): Don't clear ts.static_trip here.
+ * loop.c (gomp_loop_static_start, gomp_loop_dynamic_start): Clear
+ ts.static_trip here.
+ * work.c (gomp_work_share_start): Don't clear ts.static_trip here.
+
+2008-03-21 Jakub Jelinek <jakub@redhat.com>
+
+ * libgomp.h: Include ptrlock.h.
+ (struct gomp_work_share): Reshuffle fields. Add next_alloc,
+ next_ws, next_free and inline_ordered_team_ids fields, change
+ ordered_team_ids into pointer from flexible array member.
+ (struct gomp_team_state): Add last_work_share field, remove
+ work_share_generation.
+ (struct gomp_team): Remove work_share_lock, generation_mask,
+ oldest_live_gen, num_live_gen and init_work_shares fields, add
+ work_share_list_alloc, work_share_list_free and work_share_chunk
+ fields. Change work_shares from pointer to pointers into an array.
+ (gomp_new_team): New prototype.
+ (gomp_team_start): Change type of last argument.
+ (gomp_new_work_share): Removed.
+ (gomp_init_work_share, gomp_fini_work_share): New prototypes.
+ (gomp_work_share_init_done): New static inline.
+ * team.c (gomp_thread_start): Clear ts.last_work_share, don't clear
+ ts.work_share_generation.
+ (new_team): Removed.
+ (gomp_new_team): New function.
+ (free_team): Free gomp_work_share blocks chained through next_alloc,
+ instead of freeing work_shares and destroying work_share_lock.
+ (gomp_team_start): Change last argument from ws to team, don't create
+ new team, set ts.work_share to &team->work_shares[0] and clear
+ ts.last_work_share. Don't clear ts.work_share_generation.
+ (gomp_team_end): Call gomp_fini_work_share.
+ * work.c (gomp_new_work_share): Removed.
+ (alloc_work_share, gomp_init_work_share, gomp_fini_work_share): New
+ functions.
+ (free_work_share): Add team argument. Call gomp_fini_work_share
+ and then either free ws if orphaned, or put it into
+ work_share_list_free list of the current team.
+ (gomp_work_share_start, gomp_work_share_end,
+ gomp_work_share_end_nowait): Rewritten.
+ * sections.c (GOMP_sections_start): Call gomp_work_share_init_done
+ after gomp_sections_init. If HAVE_SYNC_BUILTINS, call
+ gomp_iter_dynamic_next instead of the _locked variant and don't take
+ lock around it, otherwise acquire it before calling
+ gomp_iter_dynamic_next_locked.
+ (GOMP_sections_next): If HAVE_SYNC_BUILTINS, call
+ gomp_iter_dynamic_next instead of the _locked variant and don't take
+ lock around it.
+ (GOMP_parallel_sections_start): Call gomp_new_team instead of
+ gomp_new_work_share. Call gomp_sections_init on &team->work_shares[0].
+ Adjust gomp_team_start caller.
+ * loop.c (gomp_loop_static_start, gomp_loop_ordered_static_start): Call
+ gomp_work_share_init_done after gomp_loop_init. Don't unlock ws->lock.
+ (gomp_loop_dynamic_start, gomp_loop_guided_start): Call
+ gomp_work_share_init_done after gomp_loop_init. If HAVE_SYNC_BUILTINS,
+ don't unlock ws->lock, otherwise lock it.
+ (gomp_loop_ordered_dynamic_start, gomp_loop_ordered_guided_start): Call
+ gomp_work_share_init_done after gomp_loop_init. Lock ws->lock.
+ (gomp_parallel_loop_start): Call gomp_new_team instead of
+ gomp_new_work_share. Call gomp_loop_init on &team->work_shares[0].
+ Adjust gomp_team_start caller.
+ * single.c (GOMP_single_start, GOMP_single_copy_start): Call
+ gomp_work_share_init_done if gomp_work_share_start returned true.
+ Don't unlock ws->lock.
+ * parallel.c (gomp_resolve_num_threads): Fix non-HAVE_SYNC_BUILTINS
+ case.
+ (GOMP_parallel_start): Call gomp_new_team and pass that as last
+ argument to gomp_team_start.
+ * config/linux/ptrlock.c: New file.
+ * config/linux/ptrlock.h: New file.
+ * config/posix/ptrlock.c: New file.
+ * config/posix/ptrlock.h: New file.
+ * Makefile.am (libgomp_la_SOURCES): Add ptrlock.c.
+ * Makefile.in: Regenerated.
+ * testsuite/Makefile.in: Regenerated.
+
+ * testsuite/libgomp.c++/collapse-1.C (main): Make k private in second
+ omp for loop.
+
2008-03-19 Jakub Jelinek <jakub@redhat.com>
* libgomp.h (gomp_active_wait_policy): Remove decl.
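
The centerpiece of this rewrite is the new gomp_ptrlock: a pointer-sized
once-initialization lock.  NULL means the next work share has not been
created yet, 1 means one thread has claimed the right to create it, 2 means
other threads are blocked on a futex waiting for it, and any value above 2
is the published gomp_work_share pointer itself.  Below is a minimal
single-threaded sketch of the two fast paths; the names mirror
config/linux/ptrlock.h further down, but the contended slow paths are
elided and this is an illustration, not the library code:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef void *gomp_ptrlock_t;

static void *
ptrlock_get (gomp_ptrlock_t *ptrlock)
{
  if ((uintptr_t) *ptrlock > 2)
    return *ptrlock;            /* Already published: one plain load.  */
  if (__sync_bool_compare_and_swap (ptrlock, NULL, (void *) 1))
    return NULL;                /* This caller must initialize.  */
  /* Contended case elided: libgomp CASes 1 -> 2 and futex-waits.  */
  return *ptrlock;
}

static void
ptrlock_set (gomp_ptrlock_t *ptrlock, void *ptr)
{
  /* If the value is still 1 (no waiters), one CAS publishes PTR;
     otherwise libgomp stores PTR and futex-wakes the waiters.  */
  __sync_bool_compare_and_swap (ptrlock, (void *) 1, ptr);
}

int
main (void)
{
  static int payload;
  gomp_ptrlock_t lock = NULL;

  assert (ptrlock_get (&lock) == NULL);      /* First thread: initialize.  */
  ptrlock_set (&lock, &payload);             /* Publish the work share.  */
  assert (ptrlock_get (&lock) == &payload);  /* Everyone else: fast path.  */
  return 0;
}

The payoff is that a thread advancing to the next work share usually does a
single load and compare, where the old scheme took team->work_share_lock
around a generation-counter lookup.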
diff --git a/libgomp/Makefile.am b/libgomp/Makefile.am
index 0101001fe89..7b77a8dde64 100644
--- a/libgomp/Makefile.am
+++ b/libgomp/Makefile.am
@@ -31,7 +31,7 @@ libgomp_la_LDFLAGS = $(libgomp_version_info) $(libgomp_version_script)
libgomp_la_SOURCES = alloc.c barrier.c critical.c env.c error.c iter.c \
loop.c ordered.c parallel.c sections.c single.c task.c team.c work.c \
- lock.c mutex.c proc.c sem.c bar.c time.c fortran.c affinity.c
+ lock.c mutex.c proc.c sem.c bar.c ptrlock.c time.c fortran.c affinity.c
nodist_noinst_HEADERS = libgomp_f.h
nodist_libsubinclude_HEADERS = omp.h
diff --git a/libgomp/Makefile.in b/libgomp/Makefile.in
index 3df46c55748..20221ca68a2 100644
--- a/libgomp/Makefile.in
+++ b/libgomp/Makefile.in
@@ -82,8 +82,8 @@ LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
libgomp_la_LIBADD =
am_libgomp_la_OBJECTS = alloc.lo barrier.lo critical.lo env.lo \
error.lo iter.lo loop.lo ordered.lo parallel.lo sections.lo \
- single.lo task.lo team.lo work.lo lock.lo mutex.lo proc.lo sem.lo \
- bar.lo time.lo fortran.lo affinity.lo
+ single.lo task.lo team.lo work.lo lock.lo mutex.lo proc.lo \
+ sem.lo bar.lo ptrlock.lo time.lo fortran.lo affinity.lo
libgomp_la_OBJECTS = $(am_libgomp_la_OBJECTS)
DEFAULT_INCLUDES = -I. -I$(srcdir) -I.
depcomp = $(SHELL) $(top_srcdir)/../depcomp
@@ -191,9 +191,15 @@ MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
MAKEINFO = @MAKEINFO@
NM = @NM@
OBJEXT = @OBJEXT@
+OMP_LOCK_25_ALIGN = @OMP_LOCK_25_ALIGN@
+OMP_LOCK_25_KIND = @OMP_LOCK_25_KIND@
+OMP_LOCK_25_SIZE = @OMP_LOCK_25_SIZE@
OMP_LOCK_ALIGN = @OMP_LOCK_ALIGN@
OMP_LOCK_KIND = @OMP_LOCK_KIND@
OMP_LOCK_SIZE = @OMP_LOCK_SIZE@
+OMP_NEST_LOCK_25_ALIGN = @OMP_NEST_LOCK_25_ALIGN@
+OMP_NEST_LOCK_25_KIND = @OMP_NEST_LOCK_25_KIND@
+OMP_NEST_LOCK_25_SIZE = @OMP_NEST_LOCK_25_SIZE@
OMP_NEST_LOCK_ALIGN = @OMP_NEST_LOCK_ALIGN@
OMP_NEST_LOCK_KIND = @OMP_NEST_LOCK_KIND@
OMP_NEST_LOCK_SIZE = @OMP_NEST_LOCK_SIZE@
@@ -288,7 +294,7 @@ libgomp_version_info = -version-info $(libtool_VERSION)
libgomp_la_LDFLAGS = $(libgomp_version_info) $(libgomp_version_script)
libgomp_la_SOURCES = alloc.c barrier.c critical.c env.c error.c iter.c \
loop.c ordered.c parallel.c sections.c single.c task.c team.c work.c \
- lock.c mutex.c proc.c sem.c bar.c time.c fortran.c affinity.c
+ lock.c mutex.c proc.c sem.c bar.c ptrlock.c time.c fortran.c affinity.c
nodist_noinst_HEADERS = libgomp_f.h
nodist_libsubinclude_HEADERS = omp.h
@@ -430,6 +436,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ordered.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/parallel.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ptrlock.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sections.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sem.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/single.Plo@am__quote@
diff --git a/libgomp/config/linux/ptrlock.c b/libgomp/config/linux/ptrlock.c
new file mode 100644
index 00000000000..8faa1b2287d
--- /dev/null
+++ b/libgomp/config/linux/ptrlock.c
@@ -0,0 +1,70 @@
+/* Copyright (C) 2008 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>.
+
+ This file is part of the GNU OpenMP Library (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with libgomp; see the file COPYING.LIB. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* As a special exception, if you link this library with other files, some
+ of which are compiled with GCC, to produce an executable, this library
+ does not by itself cause the resulting executable to be covered by the
+ GNU General Public License. This exception does not however invalidate
+ any other reasons why the executable file might be covered by the GNU
+ General Public License. */
+
+/* This is a Linux specific implementation of a mutex synchronization
+ mechanism for libgomp. This type is private to the library. This
+ implementation uses atomic instructions and the futex syscall. */
+
+#include <endian.h>
+#include <limits.h>
+#include "wait.h"
+
+void *
+gomp_ptrlock_get_slow (gomp_ptrlock_t *ptrlock)
+{
+ int *intptr;
+ __sync_bool_compare_and_swap (ptrlock, 1, 2);
+
+ /* futex works on ints, not pointers.
+ But a valid work share pointer will be at least
+ 8 byte aligned, so it is safe to assume the low
+ 32-bits of the pointer won't contain values 1 or 2. */
+ __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
+#if __BYTE_ORDER == __BIG_ENDIAN
+ if (sizeof (*ptrlock) > sizeof (int))
+ intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
+#endif
+ do
+ do_wait (intptr, 2);
+ while (*intptr == 2);
+ __asm volatile ("" : : : "memory");
+ return *ptrlock;
+}
+
+void
+gomp_ptrlock_set_slow (gomp_ptrlock_t *ptrlock, void *ptr)
+{
+ int *intptr;
+
+ *ptrlock = ptr;
+ __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
+#if __BYTE_ORDER == __BIG_ENDIAN
+ if (sizeof (*ptrlock) > sizeof (int))
+ intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
+#endif
+ futex_wake (intptr, INT_MAX);
+}
diff --git a/libgomp/config/linux/ptrlock.h b/libgomp/config/linux/ptrlock.h
new file mode 100644
index 00000000000..bb5441676a4
--- /dev/null
+++ b/libgomp/config/linux/ptrlock.h
@@ -0,0 +1,65 @@
+/* Copyright (C) 2008 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>.
+
+ This file is part of the GNU OpenMP Library (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with libgomp; see the file COPYING.LIB. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* As a special exception, if you link this library with other files, some
+ of which are compiled with GCC, to produce an executable, this library
+ does not by itself cause the resulting executable to be covered by the
+ GNU General Public License. This exception does not however invalidate
+ any other reasons why the executable file might be covered by the GNU
+ General Public License. */
+
+/* This is a Linux specific implementation of a mutex synchronization
+ mechanism for libgomp. This type is private to the library. This
+ implementation uses atomic instructions and the futex syscall. */
+
+#ifndef GOMP_PTRLOCK_H
+#define GOMP_PTRLOCK_H 1
+
+typedef void *gomp_ptrlock_t;
+
+static inline void gomp_ptrlock_init (gomp_ptrlock_t *ptrlock, void *ptr)
+{
+ *ptrlock = ptr;
+}
+
+extern void *gomp_ptrlock_get_slow (gomp_ptrlock_t *ptrlock);
+static inline void *gomp_ptrlock_get (gomp_ptrlock_t *ptrlock)
+{
+ if ((uintptr_t) *ptrlock > 2)
+ return *ptrlock;
+
+ if (__sync_bool_compare_and_swap (ptrlock, NULL, (uintptr_t) 1))
+ return NULL;
+
+ return gomp_ptrlock_get_slow (ptrlock);
+}
+
+extern void gomp_ptrlock_set_slow (gomp_ptrlock_t *ptrlock, void *ptr);
+static inline void gomp_ptrlock_set (gomp_ptrlock_t *ptrlock, void *ptr)
+{
+ if (!__sync_bool_compare_and_swap (ptrlock, (uintptr_t) 1, ptr))
+ gomp_ptrlock_set_slow (ptrlock, ptr);
+}
+
+static inline void gomp_ptrlock_destroy (gomp_ptrlock_t *ptrlock)
+{
+}
+
+#endif /* GOMP_PTRLOCK_H */
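
A subtlety in gomp_ptrlock_get_slow above: futexes operate on 32-bit ints,
but the lock word is a pointer.  The empty __asm merely launders the
pointer into an int *, and on a 64-bit big-endian target the int holding
the low-order bits, where the 1/2 state values live, is the last int of the
word, hence the offset adjustment.  A standalone illustration of that
aliasing, assuming glibc's <endian.h> as the file above does:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  void *ptrlock = (void *) 2;    /* The "threads are waiting" state.  */
  int *intptr = (int *) &ptrlock;

  /* Pick the int that aliases the low-order pointer bits: the first one
     on little-endian, the last one on 64-bit big-endian.  */
#if __BYTE_ORDER == __BIG_ENDIAN
  if (sizeof (ptrlock) > sizeof (int))
    intptr += (sizeof (ptrlock) / sizeof (int)) - 1;
#endif

  /* Prints 2 on either byte order; the futex wait compares this value.  */
  printf ("*intptr == %d\n", *intptr);
  return 0;
}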
diff --git a/libgomp/config/posix/ptrlock.c b/libgomp/config/posix/ptrlock.c
new file mode 100644
index 00000000000..39bb64da0f9
--- /dev/null
+++ b/libgomp/config/posix/ptrlock.c
@@ -0,0 +1 @@
+/* Everything is in the header. */
diff --git a/libgomp/config/posix/ptrlock.h b/libgomp/config/posix/ptrlock.h
new file mode 100644
index 00000000000..1271ebb227b
--- /dev/null
+++ b/libgomp/config/posix/ptrlock.h
@@ -0,0 +1,69 @@
+/* Copyright (C) 2008 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>.
+
+ This file is part of the GNU OpenMP Library (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with libgomp; see the file COPYING.LIB. If not, write to the
+ Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* As a special exception, if you link this library with other files, some
+ of which are compiled with GCC, to produce an executable, this library
+ does not by itself cause the resulting executable to be covered by the
+ GNU General Public License. This exception does not however invalidate
+ any other reasons why the executable file might be covered by the GNU
+ General Public License. */
+
+/* This is a generic POSIX implementation of a pointer lock for
+   libgomp.  This type is private to the library.  This implementation
+   uses a mutex instead of atomic instructions and the futex syscall.  */
+
+#ifndef GOMP_PTRLOCK_H
+#define GOMP_PTRLOCK_H 1
+
+typedef struct { void *ptr; gomp_mutex_t lock; } gomp_ptrlock_t;
+
+static inline void gomp_ptrlock_init (gomp_ptrlock_t *ptrlock, void *ptr)
+{
+ ptrlock->ptr = ptr;
+ gomp_mutex_init (&ptrlock->lock);
+}
+
+static inline void *gomp_ptrlock_get (gomp_ptrlock_t *ptrlock)
+{
+ if (ptrlock->ptr != NULL)
+ return ptrlock->ptr;
+
+ gomp_mutex_lock (&ptrlock->lock);
+ if (ptrlock->ptr != NULL)
+ {
+ gomp_mutex_unlock (&ptrlock->lock);
+ return ptrlock->ptr;
+ }
+
+ return NULL;
+}
+
+static inline void gomp_ptrlock_set (gomp_ptrlock_t *ptrlock, void *ptr)
+{
+ ptrlock->ptr = ptr;
+ gomp_mutex_unlock (&ptrlock->lock);
+}
+
+static inline void gomp_ptrlock_destroy (gomp_ptrlock_t *ptrlock)
+{
+ gomp_mutex_destroy (&ptrlock->lock);
+}
+
+#endif /* GOMP_PTRLOCK_H */
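
Note the contract of this mutex fallback: a NULL return from
gomp_ptrlock_get means the caller now holds ptrlock->lock and must
initialize the pointer, and gomp_ptrlock_set both publishes it and releases
the lock.  A self-contained pthread analogue of that double-checked pattern
(gomp_mutex_t wraps the platform mutex; this toy uses pthread_mutex_t
directly and is not the library code):

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

typedef struct { void *ptr; pthread_mutex_t lock; } ptrlock_toy_t;

static void *
ptrlock_toy_get (ptrlock_toy_t *pl)
{
  if (pl->ptr != NULL)
    return pl->ptr;             /* Fast path: no locking at all.  */
  pthread_mutex_lock (&pl->lock);
  if (pl->ptr != NULL)          /* Initialized while we waited.  */
    {
      pthread_mutex_unlock (&pl->lock);
      return pl->ptr;
    }
  return NULL;                  /* Caller now holds pl->lock.  */
}

static void
ptrlock_toy_set (ptrlock_toy_t *pl, void *ptr)
{
  pl->ptr = ptr;                     /* Publish...  */
  pthread_mutex_unlock (&pl->lock);  /* ...and unlock in one call.  */
}

int
main (void)
{
  static int payload;
  ptrlock_toy_t pl = { NULL, PTHREAD_MUTEX_INITIALIZER };

  assert (ptrlock_toy_get (&pl) == NULL);    /* pl.lock is held here.  */
  ptrlock_toy_set (&pl, &payload);
  assert (ptrlock_toy_get (&pl) == &payload);
  return 0;
}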
diff --git a/libgomp/libgomp.h b/libgomp/libgomp.h
index e57077c3209..95cab09a1bd 100644
--- a/libgomp/libgomp.h
+++ b/libgomp/libgomp.h
@@ -50,6 +50,7 @@
#include "sem.h"
#include "mutex.h"
#include "bar.h"
+#include "ptrlock.h"
/* This structure contains the data to control one work-sharing construct,
@@ -84,12 +85,45 @@ struct gomp_work_share
is always 1. */
long incr;
- /* The above fields are written once during gomp_loop_init. Make sure
- the following fields are in a different cache line. */
+ /* This is a circular queue that details which threads will be allowed
+ into the ordered region and in which order. When a thread allocates
+ iterations on which it is going to work, it also registers itself at
+ the end of the array. When a thread reaches the ordered region, it
+ checks to see if it is the one at the head of the queue. If not, it
+ blocks on its RELEASE semaphore. */
+ unsigned *ordered_team_ids;
+
+ /* This is the number of threads that have registered themselves in
+ the circular queue ordered_team_ids. */
+ unsigned ordered_num_used;
+
+ /* This is the team_id of the currently acknowledged owner of the ordered
+ section, or -1u if the ordered section has not been acknowledged by
+ any thread. This is distinguished from the thread that is *allowed*
+ to take the section next. */
+ unsigned ordered_owner;
+
+ /* This is the index into the circular queue ordered_team_ids of the
+ current thread that's allowed into the ordered reason. */
+ unsigned ordered_cur;
+
+ /* This is a chain of allocated gomp_work_share blocks, valid only
+ in the first gomp_work_share struct in the block. */
+ struct gomp_work_share *next_alloc;
+
+ /* The above fields are written once during workshare initialization,
+ or related to ordered worksharing. Make sure the following fields
+ are in a different cache line. */
/* This lock protects the update of the following members. */
gomp_mutex_t lock __attribute__((aligned (64)));
+ /* This is the count of the number of threads that have exited the work
+ share construct. If the construct was marked nowait, they have moved on
+ to other work; otherwise they're blocked on a barrier. The last member
+ of the team to exit the work share construct must deallocate it. */
+ unsigned threads_completed;
+
union {
/* This is the next iteration value to be allocated. In the case of
GFS_STATIC loops, this the iteration start point and never changes. */
@@ -99,33 +133,19 @@ struct gomp_work_share
void *copyprivate;
};
- /* This is the count of the number of threads that have exited the work
- share construct. If the construct was marked nowait, they have moved on
- to other work; otherwise they're blocked on a barrier. The last member
- of the team to exit the work share construct must deallocate it. */
- unsigned threads_completed;
-
- /* This is the index into the circular queue ordered_team_ids of the
- current thread that's allowed into the ordered reason. */
- unsigned ordered_cur;
-
- /* This is the number of threads that have registered themselves in
- the circular queue ordered_team_ids. */
- unsigned ordered_num_used;
+ union {
+ /* Link to gomp_work_share struct for next work sharing construct
+ encountered after this one. */
+ gomp_ptrlock_t next_ws;
- /* This is the team_id of the currently acknoledged owner of the ordered
- section, or -1u if the ordered section has not been acknowledged by
- any thread. This is distinguished from the thread that is *allowed*
- to take the section next. */
- unsigned ordered_owner;
+ /* gomp_work_share structs are chained in the free work share cache
+ through this. */
+ struct gomp_work_share *next_free;
+ };
- /* This is a circular queue that details which threads will be allowed
- into the ordered region and in which order. When a thread allocates
- iterations on which it is going to work, it also registers itself at
- the end of the array. When a thread reaches the ordered region, it
- checks to see if it is the one at the head of the queue. If not, it
- blocks on its RELEASE semaphore. */
- unsigned ordered_team_ids[];
+ /* If only a few threads are in the team, ordered_team_ids can point
+ to this array which fills the padding at the end of this struct. */
+ unsigned inline_ordered_team_ids[0];
};
/* This structure contains all of the thread-local data associated with
@@ -139,10 +159,15 @@ struct gomp_team_state
/* This is the work share construct which this thread is currently
processing. Recall that with NOWAIT, not all threads may be
- processing the same construct. This value is NULL when there
- is no construct being processed. */
+ processing the same construct. */
struct gomp_work_share *work_share;
+ /* This is the previous work share construct or NULL if there wasn't any.
+ When all threads are done with the current work sharing construct,
+ the previous one can be freed. The current one can't, as its
+ next_ws field is used. */
+ struct gomp_work_share *last_work_share;
+
/* This is the ID of this thread within the team. This value is
guaranteed to be between 0 and N-1, where N is the number of
threads in the team. */
@@ -154,13 +179,6 @@ struct gomp_team_state
/* Active nesting level. Only active parallel regions are counted. */
unsigned active_level;
- /* The work share "generation" is a number that increases by one for
- each work share construct encountered in the dynamic flow of the
- program. It is used to find the control data for the work share
- when encountering it for the first time. This particular number
- reflects the generation of the work_share member of this struct. */
- unsigned work_share_generation;
-
/* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
trip number through the loop. So first time a particular loop
is encountered this number is 0, the second time through the loop
@@ -209,27 +227,13 @@ struct gomp_task
struct gomp_team
{
- /* This lock protects access to the following work shares data structures. */
- gomp_mutex_t work_share_lock;
-
- /* This is a dynamically sized array containing pointers to the control
- structs for all "live" work share constructs. Here "live" means that
- the construct has been encountered by at least one thread, and not
- completed by all threads. */
- struct gomp_work_share **work_shares;
-
- /* The work_shares array is indexed by "generation & generation_mask".
- The mask will be 2**N - 1, where 2**N is the size of the array. */
- unsigned generation_mask;
-
- /* These two values define the bounds of the elements of the work_shares
- array that are currently in use. */
- unsigned oldest_live_gen;
- unsigned num_live_gen;
-
/* This is the number of threads in the current team. */
unsigned nthreads;
+ /* This is the number of gomp_work_share structs that have been allocated
+ as a block last time. */
+ unsigned work_share_chunk;
+
/* This is the saved team state that applied to a master thread before
the current thread was created. */
struct gomp_team_state prev_ts;
@@ -243,11 +247,26 @@ struct gomp_team
of the threads in the team. */
gomp_sem_t **ordered_release;
- struct gomp_work_share *init_work_shares[4];
+ /* List of gomp_work_share structs chained through next_free fields.
+ This is populated and taken off only by the first thread in the
+ team encountering a new work sharing construct, in a critical
+ section. */
+ struct gomp_work_share *work_share_list_alloc;
+
+ /* List of gomp_work_share structs freed by free_work_share. New
+ entries are atomically added to the start of the list, and
+ alloc_work_share can safely only move all but the first entry
+ to work_share_list_alloc, as free_work_share can happen concurrently
+ with alloc_work_share. */
+ struct gomp_work_share *work_share_list_free;
/* This barrier is used for most synchronization of the team. */
gomp_barrier_t barrier;
+ /* Initial work shares, to avoid allocating any gomp_work_share
+ structs in the common case. */
+ struct gomp_work_share work_shares[8];
+
/* This array contains structures for implicit tasks. */
struct gomp_task implicit_task[];
};
@@ -368,17 +387,27 @@ extern void gomp_end_task (void);
/* team.c */
+extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
- struct gomp_work_share *);
+ struct gomp_team *);
extern void gomp_team_end (void);
/* work.c */
-extern struct gomp_work_share * gomp_new_work_share (bool, unsigned);
+extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
+extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern void gomp_work_share_end_nowait (void);
+static inline void
+gomp_work_share_init_done (void)
+{
+ struct gomp_thread *thr = gomp_thread ();
+ if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
+ gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
+}
+
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif
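
Two details of the new libgomp.h are worth spelling out.  First,
gomp_work_share_init_done is how the initializing thread publishes a fully
constructed work share: it stores thr->ts.work_share into the previous
share's next_ws ptrlock, waking anyone blocked there.  Second,
inline_ordered_team_ids[0] exploits the tail padding that the
64-byte-aligned lock member forces onto the struct, so small teams never
malloc the ordered queue (work.c below computes the capacity as
INLINE_ORDERED_TEAM_IDS_CNT).  A toy struct demonstrating the padding
trick, with made-up fields rather than the real layout:

#include <stddef.h>
#include <stdio.h>

/* Toy analogue of gomp_work_share: a zero-length trailing array
   claims the tail padding created by the over-aligned member.  */
struct ws_toy
{
  long sched_data[4];                       /* Stand-in for loop bounds.  */
  unsigned *ordered_team_ids;
  int lock __attribute__((aligned (64)));   /* Whole struct now 64-aligned.  */
  unsigned threads_completed;
  void *next_ws;
  unsigned inline_ordered_team_ids[0];      /* Lives in the tail padding.  */
};

#define INLINE_CNT \
  ((sizeof (struct ws_toy) \
    - offsetof (struct ws_toy, inline_ordered_team_ids)) \
   / sizeof (unsigned))

int
main (void)
{
  /* On a typical LP64 target the ids start at offset 80 and the struct
     rounds up to 128 bytes, so 12 ids fit without any allocation.  */
  printf ("sizeof = %zu, inline id slots = %zu\n",
          sizeof (struct ws_toy), (size_t) INLINE_CNT);
  return 0;
}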
diff --git a/libgomp/loop.c b/libgomp/loop.c
index 725e6bdf150..13ed6d44833 100644
--- a/libgomp/loop.c
+++ b/libgomp/loop.c
@@ -86,10 +86,13 @@ gomp_loop_static_start (long start, long end, long incr, long chunk_size,
{
struct gomp_thread *thr = gomp_thread ();
+ thr->ts.static_trip = 0;
if (gomp_work_share_start (false))
- gomp_loop_init (thr->ts.work_share, start, end, incr,
- GFS_STATIC, chunk_size);
- gomp_mutex_unlock (&thr->ts.work_share->lock);
+ {
+ gomp_loop_init (thr->ts.work_share, start, end, incr,
+ GFS_STATIC, chunk_size);
+ gomp_work_share_init_done ();
+ }
return !gomp_iter_static_next (istart, iend);
}
@@ -102,13 +105,16 @@ gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
bool ret;
if (gomp_work_share_start (false))
- gomp_loop_init (thr->ts.work_share, start, end, incr,
- GFS_DYNAMIC, chunk_size);
+ {
+ gomp_loop_init (thr->ts.work_share, start, end, incr,
+ GFS_DYNAMIC, chunk_size);
+ gomp_work_share_init_done ();
+ }
#ifdef HAVE_SYNC_BUILTINS
- gomp_mutex_unlock (&thr->ts.work_share->lock);
ret = gomp_iter_dynamic_next (istart, iend);
#else
+ gomp_mutex_lock (&thr->ts.work_share->lock);
ret = gomp_iter_dynamic_next_locked (istart, iend);
gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif
@@ -124,13 +130,16 @@ gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
bool ret;
if (gomp_work_share_start (false))
- gomp_loop_init (thr->ts.work_share, start, end, incr,
- GFS_GUIDED, chunk_size);
+ {
+ gomp_loop_init (thr->ts.work_share, start, end, incr,
+ GFS_GUIDED, chunk_size);
+ gomp_work_share_init_done ();
+ }
#ifdef HAVE_SYNC_BUILTINS
- gomp_mutex_unlock (&thr->ts.work_share->lock);
ret = gomp_iter_guided_next (istart, iend);
#else
+ gomp_mutex_lock (&thr->ts.work_share->lock);
ret = gomp_iter_guided_next_locked (istart, iend);
gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif
@@ -168,13 +177,14 @@ gomp_loop_ordered_static_start (long start, long end, long incr,
{
struct gomp_thread *thr = gomp_thread ();
+ thr->ts.static_trip = 0;
if (gomp_work_share_start (true))
{
gomp_loop_init (thr->ts.work_share, start, end, incr,
GFS_STATIC, chunk_size);
gomp_ordered_static_init ();
+ gomp_work_share_init_done ();
}
- gomp_mutex_unlock (&thr->ts.work_share->lock);
return !gomp_iter_static_next (istart, iend);
}
@@ -187,8 +197,14 @@ gomp_loop_ordered_dynamic_start (long start, long end, long incr,
bool ret;
if (gomp_work_share_start (true))
- gomp_loop_init (thr->ts.work_share, start, end, incr,
- GFS_DYNAMIC, chunk_size);
+ {
+ gomp_loop_init (thr->ts.work_share, start, end, incr,
+ GFS_DYNAMIC, chunk_size);
+ gomp_mutex_lock (&thr->ts.work_share->lock);
+ gomp_work_share_init_done ();
+ }
+ else
+ gomp_mutex_lock (&thr->ts.work_share->lock);
ret = gomp_iter_dynamic_next_locked (istart, iend);
if (ret)
@@ -206,8 +222,14 @@ gomp_loop_ordered_guided_start (long start, long end, long incr,
bool ret;
if (gomp_work_share_start (true))
- gomp_loop_init (thr->ts.work_share, start, end, incr,
- GFS_GUIDED, chunk_size);
+ {
+ gomp_loop_init (thr->ts.work_share, start, end, incr,
+ GFS_GUIDED, chunk_size);
+ gomp_mutex_lock (&thr->ts.work_share->lock);
+ gomp_work_share_init_done ();
+ }
+ else
+ gomp_mutex_lock (&thr->ts.work_share->lock);
ret = gomp_iter_guided_next_locked (istart, iend);
if (ret)
@@ -395,12 +417,12 @@ gomp_parallel_loop_start (void (*fn) (void *), void *data,
long incr, enum gomp_schedule_type sched,
long chunk_size)
{
- struct gomp_work_share *ws;
+ struct gomp_team *team;
num_threads = gomp_resolve_num_threads (num_threads, 0);
- ws = gomp_new_work_share (false, num_threads);
- gomp_loop_init (ws, start, end, incr, sched, chunk_size);
- gomp_team_start (fn, data, num_threads, ws);
+ team = gomp_new_team (num_threads);
+ gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
+ gomp_team_start (fn, data, num_threads, team);
}
void
diff --git a/libgomp/parallel.c b/libgomp/parallel.c
index 5537a49a463..0882179d165 100644
--- a/libgomp/parallel.c
+++ b/libgomp/parallel.c
@@ -94,10 +94,11 @@ gomp_resolve_num_threads (unsigned specified, unsigned count)
#else
gomp_mutex_lock (&gomp_remaining_threads_lock);
num_threads = max_num_threads;
- if (num_threads > gomp_remaining_threads_count)
- num_threads = gomp_remaining_threads_count + 1;
+ remaining = gomp_remaining_threads_count;
+ if (num_threads > remaining)
+ num_threads = remaining + 1;
gomp_remaining_threads_count -= num_threads - 1;
- gomp_mutex_unlock (&gomp_remaining_threads_unlock);
+ gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
return num_threads;
@@ -107,7 +108,7 @@ void
GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
{
num_threads = gomp_resolve_num_threads (num_threads, 0);
- gomp_team_start (fn, data, num_threads, NULL);
+ gomp_team_start (fn, data, num_threads, gomp_new_team (num_threads));
}
void
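
The parallel.c hunk fixes the non-HAVE_SYNC_BUILTINS branch, which unlocked
a misspelled lock name (gomp_remaining_threads_unlock) and re-read the
shared counter.  The fixed logic snapshots the pool into remaining and caps
the request at remaining + 1, because the calling thread does not consume a
slot itself.  A worked sketch of that accounting (resolve is an
illustrative stand-in for the locked region of gomp_resolve_num_threads):

#include <assert.h>

static unsigned
resolve (unsigned max_num_threads, unsigned *pool)
{
  unsigned num_threads = max_num_threads; /* Under the lock in libgomp.  */
  unsigned remaining = *pool;             /* Read the counter once.  */
  if (num_threads > remaining)
    num_threads = remaining + 1;          /* The caller itself is free.  */
  *pool -= num_threads - 1;
  return num_threads;
}

int
main (void)
{
  unsigned pool = 3;                 /* Three spare threads available.  */
  assert (resolve (8, &pool) == 4);  /* Caller plus 3 from the pool.  */
  assert (pool == 0);
  assert (resolve (8, &pool) == 1);  /* Pool exhausted: run alone.  */
  return 0;
}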
diff --git a/libgomp/sections.c b/libgomp/sections.c
index eeccbfd47f7..27625efec3e 100644
--- a/libgomp/sections.c
+++ b/libgomp/sections.c
@@ -59,14 +59,24 @@ GOMP_sections_start (unsigned count)
long s, e, ret;
if (gomp_work_share_start (false))
- gomp_sections_init (thr->ts.work_share, count);
+ {
+ gomp_sections_init (thr->ts.work_share, count);
+ gomp_work_share_init_done ();
+ }
+#ifdef HAVE_SYNC_BUILTINS
+ if (gomp_iter_dynamic_next (&s, &e))
+ ret = s;
+ else
+ ret = 0;
+#else
+ gomp_mutex_lock (&thr->ts.work_share->lock);
if (gomp_iter_dynamic_next_locked (&s, &e))
ret = s;
else
ret = 0;
-
gomp_mutex_unlock (&thr->ts.work_share->lock);
+#endif
return ret;
}
@@ -83,15 +93,23 @@ GOMP_sections_start (unsigned count)
unsigned
GOMP_sections_next (void)
{
- struct gomp_thread *thr = gomp_thread ();
long s, e, ret;
+#ifdef HAVE_SYNC_BUILTINS
+ if (gomp_iter_dynamic_next (&s, &e))
+ ret = s;
+ else
+ ret = 0;
+#else
+ struct gomp_thread *thr = gomp_thread ();
+
gomp_mutex_lock (&thr->ts.work_share->lock);
if (gomp_iter_dynamic_next_locked (&s, &e))
ret = s;
else
ret = 0;
gomp_mutex_unlock (&thr->ts.work_share->lock);
+#endif
return ret;
}
@@ -103,12 +121,12 @@ void
GOMP_parallel_sections_start (void (*fn) (void *), void *data,
unsigned num_threads, unsigned count)
{
- struct gomp_work_share *ws;
+ struct gomp_team *team;
num_threads = gomp_resolve_num_threads (num_threads, count);
- ws = gomp_new_work_share (false, num_threads);
- gomp_sections_init (ws, count);
- gomp_team_start (fn, data, num_threads, ws);
+ team = gomp_new_team (num_threads);
+ gomp_sections_init (&team->work_shares[0], count);
+ gomp_team_start (fn, data, num_threads, team);
}
/* The GOMP_section_end* routines are called after the thread is told
diff --git a/libgomp/single.c b/libgomp/single.c
index dde05d9ceb8..439d8e70f9d 100644
--- a/libgomp/single.c
+++ b/libgomp/single.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2005, 2008 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU OpenMP Library (libgomp).
@@ -38,7 +38,8 @@ bool
GOMP_single_start (void)
{
bool ret = gomp_work_share_start (false);
- gomp_mutex_unlock (&gomp_thread ()->ts.work_share->lock);
+ if (ret)
+ gomp_work_share_init_done ();
gomp_work_share_end_nowait ();
return ret;
}
@@ -57,10 +58,12 @@ GOMP_single_copy_start (void)
void *ret;
first = gomp_work_share_start (false);
- gomp_mutex_unlock (&thr->ts.work_share->lock);
if (first)
- ret = NULL;
+ {
+ gomp_work_share_init_done ();
+ ret = NULL;
+ }
else
{
gomp_barrier_wait (&thr->ts.team->barrier);
diff --git a/libgomp/team.c b/libgomp/team.c
index f54f08e59a0..743cd7fbef1 100644
--- a/libgomp/team.c
+++ b/libgomp/team.c
@@ -117,11 +117,10 @@ gomp_thread_start (void *xdata)
thr->data = NULL;
thr->ts.team = NULL;
thr->ts.work_share = NULL;
+ thr->ts.last_work_share = NULL;
thr->ts.team_id = 0;
thr->ts.level = 0;
thr->ts.active_level = 0;
- thr->ts.work_share_generation = 0;
- thr->ts.static_trip = 0;
gomp_barrier_wait_last (&team->barrier);
gomp_barrier_wait (&gomp_threads_dock);
@@ -138,22 +137,25 @@ gomp_thread_start (void *xdata)
/* Create a new team data structure. */
-static struct gomp_team *
-new_team (unsigned nthreads, struct gomp_work_share *work_share)
+struct gomp_team *
+gomp_new_team (unsigned nthreads)
{
struct gomp_team *team;
size_t size;
+ int i;
size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
+ sizeof (team->implicit_task[0]));
team = gomp_malloc (size);
- gomp_mutex_init (&team->work_share_lock);
- team->work_shares = team->init_work_shares;
- team->generation_mask = 3;
- team->oldest_live_gen = work_share == NULL;
- team->num_live_gen = work_share != NULL;
- team->init_work_shares[0] = work_share;
+ team->work_share_chunk = 8;
+ gomp_init_work_share (&team->work_shares[0], false, nthreads);
+ team->work_shares[0].next_alloc = NULL;
+ team->work_share_list_free = NULL;
+ team->work_share_list_alloc = &team->work_shares[1];
+ for (i = 1; i < 7; i++)
+ team->work_shares[i].next_free = &team->work_shares[i + 1];
+ team->work_shares[i].next_free = NULL;
team->nthreads = nthreads;
gomp_barrier_init (&team->barrier, nthreads);
@@ -171,9 +173,17 @@ new_team (unsigned nthreads, struct gomp_work_share *work_share)
static void
free_team (struct gomp_team *team)
{
- if (__builtin_expect (team->work_shares != team->init_work_shares, 0))
- free (team->work_shares);
- gomp_mutex_destroy (&team->work_share_lock);
+ if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
+ {
+ struct gomp_work_share *ws = team->work_shares[0].next_alloc;
+ do
+ {
+ struct gomp_work_share *next_ws = ws->next_alloc;
+ free (ws);
+ ws = next_ws;
+ }
+ while (ws != NULL);
+ }
gomp_barrier_destroy (&team->barrier);
gomp_sem_destroy (&team->master_release);
free (team);
@@ -184,11 +194,10 @@ free_team (struct gomp_team *team)
void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
- struct gomp_work_share *work_share)
+ struct gomp_team *team)
{
struct gomp_thread_start_data *start_data;
struct gomp_thread *thr, *nthr;
- struct gomp_team *team;
struct gomp_task *task;
struct gomp_task_icv *icv;
bool nested;
@@ -200,20 +209,18 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
task = thr->task;
icv = task ? &task->icv : &gomp_global_icv;
- team = new_team (nthreads, work_share);
-
/* Always save the previous state, even if this isn't a nested team.
In particular, we should save any work share state from an outer
orphaned work share construct. */
team->prev_ts = thr->ts;
thr->ts.team = team;
- thr->ts.work_share = work_share;
thr->ts.team_id = 0;
++thr->ts.level;
if (nthreads > 1)
++thr->ts.active_level;
- thr->ts.work_share_generation = 0;
+ thr->ts.work_share = &team->work_shares[0];
+ thr->ts.last_work_share = NULL;
thr->ts.static_trip = 0;
thr->task = &team->implicit_task[0];
gomp_init_task (thr->task, task, icv);
@@ -258,11 +265,11 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
{
nthr = gomp_threads[i];
nthr->ts.team = team;
- nthr->ts.work_share = work_share;
+ nthr->ts.work_share = &team->work_shares[0];
+ nthr->ts.last_work_share = NULL;
nthr->ts.team_id = i;
nthr->ts.level = team->prev_ts.level + 1;
nthr->ts.active_level = thr->ts.active_level;
- nthr->ts.work_share_generation = 0;
nthr->ts.static_trip = 0;
nthr->task = &team->implicit_task[i];
gomp_init_task (nthr->task, task, icv);
@@ -326,11 +333,11 @@ gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
start_data->fn = fn;
start_data->fn_data = data;
start_data->ts.team = team;
- start_data->ts.work_share = work_share;
+ start_data->ts.work_share = &team->work_shares[0];
+ start_data->ts.last_work_share = NULL;
start_data->ts.team_id = i;
start_data->ts.level = team->prev_ts.level + 1;
start_data->ts.active_level = thr->ts.active_level;
- start_data->ts.work_share_generation = 0;
start_data->ts.static_trip = 0;
start_data->task = &team->implicit_task[i];
gomp_init_task (start_data->task, task, icv);
@@ -382,6 +389,8 @@ gomp_team_end (void)
gomp_barrier_wait (&team->barrier);
+ gomp_fini_work_share (thr->ts.work_share);
+
gomp_end_task ();
thr->ts = team->prev_ts;
diff --git a/libgomp/testsuite/Makefile.in b/libgomp/testsuite/Makefile.in
index aaf23dcbc6d..b869900954a 100644
--- a/libgomp/testsuite/Makefile.in
+++ b/libgomp/testsuite/Makefile.in
@@ -110,9 +110,15 @@ MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
MAKEINFO = @MAKEINFO@
NM = @NM@
OBJEXT = @OBJEXT@
+OMP_LOCK_25_ALIGN = @OMP_LOCK_25_ALIGN@
+OMP_LOCK_25_KIND = @OMP_LOCK_25_KIND@
+OMP_LOCK_25_SIZE = @OMP_LOCK_25_SIZE@
OMP_LOCK_ALIGN = @OMP_LOCK_ALIGN@
OMP_LOCK_KIND = @OMP_LOCK_KIND@
OMP_LOCK_SIZE = @OMP_LOCK_SIZE@
+OMP_NEST_LOCK_25_ALIGN = @OMP_NEST_LOCK_25_ALIGN@
+OMP_NEST_LOCK_25_KIND = @OMP_NEST_LOCK_25_KIND@
+OMP_NEST_LOCK_25_SIZE = @OMP_NEST_LOCK_25_SIZE@
OMP_NEST_LOCK_ALIGN = @OMP_NEST_LOCK_ALIGN@
OMP_NEST_LOCK_KIND = @OMP_NEST_LOCK_KIND@
OMP_NEST_LOCK_SIZE = @OMP_NEST_LOCK_SIZE@
diff --git a/libgomp/testsuite/libgomp.c++/collapse-1.C b/libgomp/testsuite/libgomp.c++/collapse-1.C
index 6d30a65081b..132d35cf41d 100644
--- a/libgomp/testsuite/libgomp.c++/collapse-1.C
+++ b/libgomp/testsuite/libgomp.c++/collapse-1.C
@@ -17,7 +17,7 @@ main ()
a[i][j][k] = i + j * 4 + k * 16;
#pragma omp parallel
{
- #pragma omp for collapse(2) reduction(|:l)
+ #pragma omp for collapse(2) reduction(|:l) private (k)
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
diff --git a/libgomp/work.c b/libgomp/work.c
index 1829aeaff21..d9a515e511f 100644
--- a/libgomp/work.c
+++ b/libgomp/work.c
@@ -34,35 +34,112 @@
#include <string.h>
-/* Create a new work share structure. */
+/* Allocate a new work share structure, preferably from current team's
+ free gomp_work_share cache. */
-struct gomp_work_share *
-gomp_new_work_share (bool ordered, unsigned nthreads)
+static struct gomp_work_share *
+alloc_work_share (struct gomp_team *team)
{
struct gomp_work_share *ws;
- size_t size;
+ unsigned int i;
- size = offsetof (struct gomp_work_share, ordered_team_ids);
- if (ordered)
- size += nthreads * sizeof (ws->ordered_team_ids[0]);
+ /* This is called in a critical section. */
+ if (team->work_share_list_alloc != NULL)
+ {
+ ws = team->work_share_list_alloc;
+ team->work_share_list_alloc = ws->next_free;
+ return ws;
+ }
- ws = gomp_malloc_cleared (size);
- gomp_mutex_init (&ws->lock);
- ws->ordered_owner = -1;
+ ws = team->work_share_list_free;
+ /* We need an atomic read from work_share_list_free,
+ as free_work_share can be called concurrently. */
+ __asm ("" : "+r" (ws));
+ if (ws && ws->next_free)
+ {
+ struct gomp_work_share *next = ws->next_free;
+ ws->next_free = NULL;
+ team->work_share_list_alloc = next->next_free;
+ return next;
+ }
+
+ team->work_share_chunk *= 2;
+ ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
+ ws->next_alloc = team->work_shares[0].next_alloc;
+ team->work_shares[0].next_alloc = ws;
+ team->work_share_list_alloc = &ws[1];
+ for (i = 1; i < team->work_share_chunk - 1; i++)
+ ws[i].next_free = &ws[i + 1];
+ ws[i].next_free = NULL;
return ws;
}
+/* Initialize an already allocated struct gomp_work_share.
+ This shouldn't touch the next_alloc field. */
+
+void
+gomp_init_work_share (struct gomp_work_share *ws, bool ordered,
+ unsigned nthreads)
+{
+ gomp_mutex_init (&ws->lock);
+ if (__builtin_expect (ordered, 0))
+ {
+#define INLINE_ORDERED_TEAM_IDS_CNT \
+ ((sizeof (struct gomp_work_share) \
+ - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
+ / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))
+
+ if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
+ ws->ordered_team_ids
+ = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
+ else
+ ws->ordered_team_ids = ws->inline_ordered_team_ids;
+ memset (ws->ordered_team_ids, '\0',
+ nthreads * sizeof (*ws->ordered_team_ids));
+ ws->ordered_num_used = 0;
+ ws->ordered_owner = -1;
+ ws->ordered_cur = 0;
+ }
+ else
+ ws->ordered_team_ids = NULL;
+ gomp_ptrlock_init (&ws->next_ws, NULL);
+ ws->threads_completed = 0;
+}
-/* Free a work share structure. */
+/* Do any needed destruction of gomp_work_share fields before it
+ is put back into free gomp_work_share cache or freed. */
-static void
-free_work_share (struct gomp_work_share *ws)
+void
+gomp_fini_work_share (struct gomp_work_share *ws)
{
gomp_mutex_destroy (&ws->lock);
- free (ws);
+ if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
+ free (ws->ordered_team_ids);
+ gomp_ptrlock_destroy (&ws->next_ws);
}
+/* Free a work share struct; if not orphaned, put it into the current
+ team's free gomp_work_share cache. */
+
+static inline void
+free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
+{
+ gomp_fini_work_share (ws);
+ if (__builtin_expect (team == NULL, 0))
+ free (ws);
+ else
+ {
+ struct gomp_work_share *next_ws;
+ do
+ {
+ next_ws = team->work_share_list_free;
+ ws->next_free = next_ws;
+ }
+ while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
+ next_ws, ws));
+ }
+}
/* The current thread is ready to begin the next work sharing construct.
In all cases, thr->ts.work_share is updated to point to the new
@@ -75,79 +152,34 @@ gomp_work_share_start (bool ordered)
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_work_share *ws;
- unsigned ws_index, ws_gen;
/* Work sharing constructs can be orphaned. */
if (team == NULL)
{
- ws = gomp_new_work_share (ordered, 1);
+ ws = gomp_malloc (sizeof (*ws));
+ gomp_init_work_share (ws, ordered, 1);
thr->ts.work_share = ws;
- thr->ts.static_trip = 0;
- gomp_mutex_lock (&ws->lock);
- return true;
+ return ws;
}
- gomp_mutex_lock (&team->work_share_lock);
-
- /* This thread is beginning its next generation. */
- ws_gen = ++thr->ts.work_share_generation;
-
- /* If this next generation is not newer than any other generation in
- the team, then simply reference the existing construct. */
- if (ws_gen - team->oldest_live_gen < team->num_live_gen)
+ ws = thr->ts.work_share;
+ thr->ts.last_work_share = ws;
+ ws = gomp_ptrlock_get (&ws->next_ws);
+ if (ws == NULL)
{
- ws_index = ws_gen & team->generation_mask;
- ws = team->work_shares[ws_index];
+ /* This thread encountered a new ws first. */
+ struct gomp_work_share *ws = alloc_work_share (team);
+ gomp_init_work_share (ws, ordered, team->nthreads);
thr->ts.work_share = ws;
- thr->ts.static_trip = 0;
-
- gomp_mutex_lock (&ws->lock);
- gomp_mutex_unlock (&team->work_share_lock);
-
- return false;
+ return true;
}
-
- /* Resize the work shares queue if we've run out of space. */
- if (team->num_live_gen++ == team->generation_mask)
+ else
{
- if (team->work_shares == team->init_work_shares)
- {
- team->work_shares = gomp_malloc (2 * team->num_live_gen
- * sizeof (*team->work_shares));
- memcpy (team->work_shares, team->init_work_shares,
- sizeof (team->init_work_shares));
- }
- else
- team->work_shares = gomp_realloc (team->work_shares,
- 2 * team->num_live_gen
- * sizeof (*team->work_shares));
-
- /* Unless oldest_live_gen is zero, the sequence of live elements
- wraps around the end of the array. If we do nothing, we break
- lookup of the existing elements. Fix that by unwrapping the
- data from the front to the end. */
- if (team->oldest_live_gen > 0)
- memcpy (team->work_shares + team->num_live_gen,
- team->work_shares,
- (team->oldest_live_gen & team->generation_mask)
- * sizeof (*team->work_shares));
-
- team->generation_mask = team->generation_mask * 2 + 1;
+ thr->ts.work_share = ws;
+ return false;
}
-
- ws_index = ws_gen & team->generation_mask;
- ws = gomp_new_work_share (ordered, team->nthreads);
- thr->ts.work_share = ws;
- thr->ts.static_trip = 0;
- team->work_shares[ws_index] = ws;
-
- gomp_mutex_lock (&ws->lock);
- gomp_mutex_unlock (&team->work_share_lock);
-
- return true;
}
-
/* The current thread is done with its current work sharing construct.
This version does imply a barrier at the end of the work-share. */
@@ -156,15 +188,13 @@ gomp_work_share_end (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
- struct gomp_work_share *ws = thr->ts.work_share;
gomp_barrier_state_t bstate;
- thr->ts.work_share = NULL;
-
/* Work sharing constructs can be orphaned. */
if (team == NULL)
{
- free_work_share (ws);
+ free_work_share (NULL, thr->ts.work_share);
+ thr->ts.work_share = NULL;
return;
}
@@ -172,20 +202,14 @@ gomp_work_share_end (void)
if (gomp_barrier_last_thread (bstate))
{
- unsigned ws_index;
-
- ws_index = thr->ts.work_share_generation & team->generation_mask;
- team->work_shares[ws_index] = NULL;
- team->oldest_live_gen++;
- team->num_live_gen = 0;
-
- free_work_share (ws);
+ if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
+ free_work_share (team, thr->ts.last_work_share);
}
gomp_barrier_wait_end (&team->barrier, bstate);
+ thr->ts.last_work_share = NULL;
}
-
/* The current thread is done with its current work sharing construct.
This version does NOT imply a barrier at the end of the work-share. */
@@ -197,15 +221,17 @@ gomp_work_share_end_nowait (void)
struct gomp_work_share *ws = thr->ts.work_share;
unsigned completed;
- thr->ts.work_share = NULL;
-
/* Work sharing constructs can be orphaned. */
if (team == NULL)
{
- free_work_share (ws);
+ free_work_share (NULL, ws);
+ thr->ts.work_share = NULL;
return;
}
+ if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
+ return;
+
#ifdef HAVE_SYNC_BUILTINS
completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
@@ -215,18 +241,6 @@ gomp_work_share_end_nowait (void)
#endif
if (completed == team->nthreads)
- {
- unsigned ws_index;
-
- gomp_mutex_lock (&team->work_share_lock);
-
- ws_index = thr->ts.work_share_generation & team->generation_mask;
- team->work_shares[ws_index] = NULL;
- team->oldest_live_gen++;
- team->num_live_gen--;
-
- gomp_mutex_unlock (&team->work_share_lock);
-
- free_work_share (ws);
- }
+ free_work_share (team, thr->ts.last_work_share);
+ thr->ts.last_work_share = NULL;
}
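
Finally, the allocation scheme that work.c now implements: free_work_share
pushes onto team->work_share_list_free with a CAS loop, a classic lock-free
stack push, while alloc_work_share, which only runs in the team's critical
section, detaches everything except the current head.  Leaving the head in
place is what makes this safe, since a concurrent push only ever writes the
head pointer and its own node.  A condensed standalone sketch of that pair
under those assumptions (illustrative names, GCC __sync/__asm builtins as
used above):

#include <assert.h>
#include <stddef.h>

struct node { struct node *next_free; int id; };

static struct node *list_free;   /* Pushed to concurrently (free side).  */
static struct node *list_alloc;  /* Private to the allocating thread.  */

/* Mirrors free_work_share: lock-free push onto list_free.  */
static void
push_free (struct node *node)
{
  struct node *head;
  do
    {
      head = list_free;
      node->next_free = head;
    }
  while (!__sync_bool_compare_and_swap (&list_free, head, node));
}

/* Mirrors alloc_work_share: take all but the head off list_free.
   The head stays, because a racing push_free may touch it.  */
static struct node *
grab_all_but_head (void)
{
  struct node *head = list_free;
  __asm ("" : "+r" (head));      /* Force a single read, as in work.c.  */
  if (head && head->next_free)
    {
      struct node *next = head->next_free;
      head->next_free = NULL;
      list_alloc = next->next_free;
      return next;               /* Used now; the rest go to list_alloc.  */
    }
  return NULL;
}

int
main (void)
{
  struct node a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 3 };
  push_free (&a);
  push_free (&b);
  push_free (&c);                /* list_free: c -> b -> a.  */
  struct node *got = grab_all_but_head ();
  assert (got == &b && list_alloc == &a && list_free == &c);
  return 0;
}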