author     Bryce McKinlay <bryce@albatross.co.nz>    2000-04-19 02:29:16 +0000
committer  Bryce McKinlay <bryce@albatross.co.nz>    2000-04-19 02:29:16 +0000
commit     eeabde020bda68ced571a4ff3cbaac2a53e3a13a (patch)
tree       b994ff1648f56f954388f0f6acfe4414031cbe1b /boehm-gc
parent     0f553650c7f33dd86970ad8c2180a639faf56077 (diff)
This commit was generated by cvs2svn to compensate for changes in r33244,
which included commits to RCS files with non-trunk default branches.

git-svn-id: https://gcc.gnu.org/svn/gcc/trunk@33245 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'boehm-gc')
-rw-r--r--  boehm-gc/backptr.h                        9
-rw-r--r--  boehm-gc/cord/cordprnt.c                  2
-rw-r--r--  boehm-gc/cord/gc.h                        9
-rw-r--r--  boehm-gc/hpux_irix_threads.c              3
-rw-r--r--  boehm-gc/include/backptr.h                9
-rw-r--r--  boehm-gc/include/cord.h                   2
-rw-r--r--  boehm-gc/include/gc.h                    51
-rw-r--r--  boehm-gc/include/gc_cpp.h                21
-rw-r--r--  boehm-gc/include/gc_typed.h               2
-rw-r--r--  boehm-gc/include/private/gc_priv.h      182
-rw-r--r--  boehm-gc/include/private/gcconfig.h     329
11 files changed, 484 insertions, 135 deletions
diff --git a/boehm-gc/backptr.h b/boehm-gc/backptr.h
index d34224e42ce..f5b7c5aa2b9 100644
--- a/boehm-gc/backptr.h
+++ b/boehm-gc/backptr.h
@@ -29,9 +29,11 @@
/* source is heap object ==> *base_p != 0, *offset_p = offset */
/* Returns 1 on success, 0 if source couldn't be determined. */
/* Dest can be any address within a heap object. */
-typedef enum { GC_UNREFERENCED, /* No refence info available. */
+typedef enum { GC_UNREFERENCED, /* No reference info available. */
GC_NO_SPACE, /* Dest not allocated with debug alloc */
GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+ GC_REFD_FROM_REG, /* Referenced from a register, i.e. */
+ /* a root without an address. */
GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
GC_FINALIZER_REFD /* Finalizable and hence accessible. */
} GC_ref_kind;
@@ -53,4 +55,9 @@ void * GC_generate_random_valid_address(void);
/* source in dbg_mlc.c also serves as a sample client. */
void GC_generate_random_backtrace(void);
+/* Print a backtrace from a specific address. Used by the */
+/* above. The client should call GC_gcollect() immediately */
+/* before invocation. */
+void GC_print_backtrace(void *);
+
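A usage sketch for the declarations above, assuming a collector built with back-pointer tracking enabled (e.g. KEEP_BACK_PTRS) and objects allocated through the debug allocator; the helper name is illustrative:

    #include "gc.h"
    #include "backptr.h"

    /* Hypothetical helper: explain why a debug-allocated object is    */
    /* still reachable.                                                 */
    static void explain_liveness(void *obj)
    {
        GC_gcollect();               /* refresh reference info first, as required */
        GC_print_backtrace(obj);     /* print the chain of back references        */
    }

    /* GC_generate_random_backtrace() does the same for a randomly     */
    /* chosen live heap object, which is convenient for leak hunting.  */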
diff --git a/boehm-gc/cord/cordprnt.c b/boehm-gc/cord/cordprnt.c
index 9c8cc8736a9..8d57f0467fb 100644
--- a/boehm-gc/cord/cordprnt.c
+++ b/boehm-gc/cord/cordprnt.c
@@ -233,7 +233,7 @@ int CORD_vsprintf(CORD * out, CORD format, va_list args)
if (width == NONE && prec == NONE) {
register char c;
- c = va_arg(args, char);
+ c = va_arg(args, int);
CORD_ec_append(result, c);
goto done;
}
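The hunk above replaces va_arg(args, char) with va_arg(args, int): arguments of character type undergo the default argument promotions when passed through "...", so reading them back as char is undefined behavior. A minimal sketch of the correct idiom (the function name is illustrative):

    #include <stdarg.h>

    static char first_vararg_as_char(const char *fmt, ...)
    {
        va_list args;
        va_start(args, fmt);
        /* The caller's char arrived promoted to int; read an int and narrow. */
        char c = (char)va_arg(args, int);
        va_end(args);
        return c;
    }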
diff --git a/boehm-gc/cord/gc.h b/boehm-gc/cord/gc.h
index cc74765d098..3ac0d4403df 100644
--- a/boehm-gc/cord/gc.h
+++ b/boehm-gc/cord/gc.h
@@ -1,7 +1,8 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
- * Copyright 1996 by Silicon Graphics. All rights reserved.
+ * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -352,11 +353,11 @@ GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * descr_string,
- int descr_int
+# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s,
+ int i
#else
# define GC_EXTRAS __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_CONST char * descr_string, int descr_int
+# define GC_EXTRA_PARAMS GC_CONST char * s, int i
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
diff --git a/boehm-gc/hpux_irix_threads.c b/boehm-gc/hpux_irix_threads.c
index a5b2cce5f85..f6e74365216 100644
--- a/boehm-gc/hpux_irix_threads.c
+++ b/boehm-gc/hpux_irix_threads.c
@@ -387,7 +387,8 @@ int GC_is_thread_stack(ptr_t addr)
}
# endif
-/* We hold allocation lock. We assume the world is stopped. */
+/* We hold allocation lock. Should do exactly the right thing if the */
+/* world is stopped. Should not fail if it isn't. */
void GC_push_all_stacks()
{
register int i;
diff --git a/boehm-gc/include/backptr.h b/boehm-gc/include/backptr.h
index d34224e42ce..f5b7c5aa2b9 100644
--- a/boehm-gc/include/backptr.h
+++ b/boehm-gc/include/backptr.h
@@ -29,9 +29,11 @@
/* source is heap object ==> *base_p != 0, *offset_p = offset */
/* Returns 1 on success, 0 if source couldn't be determined. */
/* Dest can be any address within a heap object. */
-typedef enum { GC_UNREFERENCED, /* No refence info available. */
+typedef enum { GC_UNREFERENCED, /* No reference info available. */
GC_NO_SPACE, /* Dest not allocated with debug alloc */
GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+ GC_REFD_FROM_REG, /* Referenced from a register, i.e. */
+ /* a root without an address. */
GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
GC_FINALIZER_REFD /* Finalizable and hence accessible. */
} GC_ref_kind;
@@ -53,4 +55,9 @@ void * GC_generate_random_valid_address(void);
/* source in dbg_mlc.c also serves as a sample client. */
void GC_generate_random_backtrace(void);
+/* Print a backtrace from a specific address. Used by the */
+/* above. The client should call GC_gcollect() immediately */
+/* before invocation. */
+void GC_print_backtrace(void *);
+
diff --git a/boehm-gc/include/cord.h b/boehm-gc/include/cord.h
index 584112fd181..926089e86fb 100644
--- a/boehm-gc/include/cord.h
+++ b/boehm-gc/include/cord.h
@@ -41,7 +41,7 @@
* This interface is fairly big, largely for performance reasons.
* The most basic constants and functions:
*
- * CORD - the type fo a cord;
+ * CORD - the type of a cord;
* CORD_EMPTY - empty cord;
* CORD_len(cord) - length of a cord;
* CORD_cat(cord1,cord2) - concatenation of two cords;
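A minimal sketch of the basic operations listed above (a plain C string is itself a valid CORD, and concatenation is cheap even for long strings):

    #include <stdio.h>
    #include "cord.h"

    int main(void)
    {
        CORD greeting = CORD_cat("hello, ", "cords");   /* concatenation */
        size_t n = CORD_len(greeting);                  /* length        */
        printf("%lu characters\n", (unsigned long)n);
        return 0;
    }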
diff --git a/boehm-gc/include/gc.h b/boehm-gc/include/gc.h
index cc74765d098..e35f54f7d3f 100644
--- a/boehm-gc/include/gc.h
+++ b/boehm-gc/include/gc.h
@@ -1,7 +1,8 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
- * Copyright 1996 by Silicon Graphics. All rights reserved.
+ * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -35,6 +36,14 @@
#include "libgc_globals.h"
#endif
+#if defined(__MINGW32__) && defined(WIN32_THREADS)
+# ifdef GC_BUILD
+# define GC_API __declspec(dllexport)
+# else
+# define GC_API __declspec(dllimport)
+# endif
+#endif
+
#if defined(_MSC_VER) && defined(_DLL)
# ifdef GC_BUILD
# define GC_API __declspec(dllexport)
@@ -130,6 +139,17 @@ GC_API int GC_dont_expand;
/* Dont expand heap unless explicitly requested */
/* or forced to. */
+GC_API int GC_use_entire_heap;
+ /* Causes the nonincremental collector to use the */
+ /* entire heap before collecting. This was the only */
+ /* option for GC versions < 5.0. This sometimes */
+ /* results in more large block fragmentation, since */
+ /* very large blocks will tend to get broken up */
+ /* during each GC cycle. It is likely to result in a */
+ /* larger working set, but lower collection */
+ /* frequencies, and hence fewer instructions executed */
+ /* in the collector. */
+
GC_API int GC_full_freq; /* Number of partial collections between */
/* full collections. Matters only if */
/* GC_incremental is set. */
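GC_use_entire_heap is a plain global flag, so a client that prefers the pre-5.0 policy described above just sets it early; a sketch, assuming it is set before the first allocation:

    #include "gc.h"

    int main(void)
    {
        GC_use_entire_heap = 1;        /* fill the whole heap before collecting */
        void *p = GC_malloc(4096);     /* later allocations follow that policy  */
        (void)p;
        return 0;
    }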
@@ -352,11 +372,11 @@ GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * descr_string,
- int descr_int
+# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s,
+ int i
#else
# define GC_EXTRAS __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_CONST char * descr_string, int descr_int
+# define GC_EXTRA_PARAMS GC_CONST char * s, int i
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
@@ -387,6 +407,8 @@ GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
+ GC_debug_register_finalizer_no_order(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
@@ -403,6 +425,8 @@ GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
GC_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_register_finalizer_ignore_self(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
+ GC_register_finalizer_no_order(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
@@ -481,6 +505,16 @@ GC_API void GC_debug_register_finalizer_ignore_self
GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
GC_finalization_proc *ofn, GC_PTR *ocd));
+/* Another version of the above. It ignores all cycles. */
+/* It should probably only be used by Java implementations. */
+GC_API void GC_register_finalizer_no_order
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
+GC_API void GC_debug_register_finalizer_no_order
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
+
+
/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
/* objects to be finalized in the correct order. Standard */
@@ -537,6 +571,9 @@ GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+/* Returns !=0 if GC_invoke_finalizers has something to do. */
+GC_API int GC_should_invoke_finalizers GC_PROTO((void));
+
GC_API int GC_invoke_finalizers GC_PROTO((void));
/* Run finalizers for all objects that are ready to */
/* be finalized. Return the number of finalizers */
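Taken together, the additions above support the Java-style pattern the comments describe: register finalizers that ignore cycle ordering, then drain the ready queue from a point the client chooses. A hedged sketch (the finalizer body and names are illustrative):

    #include <stdio.h>
    #include "gc.h"

    static void report_freed(GC_PTR obj, GC_PTR client_data)
    {
        (void)obj; (void)client_data;
        printf("object finalized\n");
    }

    int main(void)
    {
        GC_PTR obj = GC_MALLOC(64);
        /* Ignores cycles entirely, as recommended above for Java-like  */
        /* semantics.                                                    */
        GC_REGISTER_FINALIZER_NO_ORDER(obj, report_freed, 0, 0, 0);

        obj = 0;                              /* drop the only reference    */
        GC_gcollect();                        /* may queue the finalizer    */
        if (GC_should_invoke_finalizers())    /* anything ready to run?     */
            GC_invoke_finalizers();           /* run it on this thread, now */
        return 0;
    }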
@@ -700,7 +737,8 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
-#if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
+#if !defined(USE_LD_WRAP) && \
+ (defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS))
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@@ -714,8 +752,9 @@ GC_API void (*GC_is_visible_print_proc)
# define pthread_create GC_pthread_create
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
+# define dlopen GC_dlopen
-#endif /* IRIX_THREADS || LINUX_THREADS */
+#endif /* xxxxx_THREADS */
# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
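Because of the macro redirection above (which USE_LD_WRAP now lets a client bypass in favor of linker-level wrapping), code that defines the matching threads macro before including gc.h gets GC-aware thread creation transparently. A sketch assuming the collector itself was built for LINUX_THREADS:

    #define LINUX_THREADS          /* normally passed as -DLINUX_THREADS */
    #include "gc.h"
    #include <pthread.h>

    static void *worker(void *arg)
    {
        (void)arg;
        return GC_MALLOC(128);     /* this thread's stack is known to the GC */
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, 0, worker, 0);   /* really GC_pthread_create */
        pthread_join(t, 0);                 /* really GC_pthread_join   */
        return 0;
    }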
diff --git a/boehm-gc/include/gc_cpp.h b/boehm-gc/include/gc_cpp.h
index ad7df5d71fa..36013e135b9 100644
--- a/boehm-gc/include/gc_cpp.h
+++ b/boehm-gc/include/gc_cpp.h
@@ -16,12 +16,11 @@ the code was modified is included with the above copyright notice.
C++ Interface to the Boehm Collector
John R. Ellis and Jesse Hull
- Last modified on Mon Jul 24 15:43:42 PDT 1995 by ellis
This interface provides access to the Boehm collector. It provides
basic facilities similar to those described in "Safe, Efficient
Garbage Collection for C++", by John R. Elis and David L. Detlefs
-(ftp.parc.xerox.com:/pub/ellis/gc).
+(ftp://ftp.parc.xerox.com/pub/ellis/gc).
All heap-allocated objects are either "collectable" or
"uncollectable". Programs must explicitly delete uncollectable
@@ -38,7 +37,7 @@ Objects derived from class "gc" are collectable. For example:
A* a = new A; // a is collectable.
Collectable instances of non-class types can be allocated using the GC
-placement:
+(or UseGC) placement:
typedef int A[ 10 ];
A* a = new (GC) A;
@@ -124,6 +123,12 @@ invoked using the ANSI-conforming syntax t->~T(). If you're using
cfront 3.0, you'll have to comment out the class gc_cleanup, which
uses explicit invocation.
+5. GC name conflicts:
+
+Many other systems seem to use the identifier "GC" as an abbreviation
+for "Graphics Context". Since version 5.0, GC placement has been replaced
+by UseGC. GC is an alias for UseGC, unless GC_NAME_CONFLICT is defined.
+
****************************************************************************/
#include "gc.h"
@@ -138,7 +143,11 @@ uses explicit invocation.
# define OPERATOR_NEW_ARRAY
#endif
-enum GCPlacement {GC, NoGC, PointerFreeGC};
+enum GCPlacement {UseGC,
+#ifndef GC_NAME_CONFLICT
+ GC=UseGC,
+#endif
+ NoGC, PointerFreeGC};
class gc {public:
inline void* operator new( size_t size );
@@ -211,7 +220,7 @@ inline void* gc::operator new( size_t size ) {
return GC_MALLOC( size );}
inline void* gc::operator new( size_t size, GCPlacement gcp ) {
- if (gcp == GC)
+ if (gcp == UseGC)
return GC_MALLOC( size );
else if (gcp == PointerFreeGC)
return GC_MALLOC_ATOMIC( size );
@@ -261,7 +270,7 @@ inline void* operator new(
{
void* obj;
- if (gcp == GC) {
+ if (gcp == UseGC) {
obj = GC_MALLOC( size );
if (cleanup != 0)
GC_REGISTER_FINALIZER_IGNORE_SELF(
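A short sketch of the renamed placement: UseGC is the spelling from 5.0 on, and GC remains an alias unless the client defines GC_NAME_CONFLICT (for instance to coexist with an X11-style "graphics context" type):

    #include "gc_cpp.h"

    struct Node: public gc {                  // gc-derived classes are collectable
        int value;
    };

    int main()
    {
        Node* n = new Node;                       // collectable, via class gc
        int* a = new (UseGC) int;                 // collectable non-class type
        double* d = new (PointerFreeGC) double;   // collectable, pointer-free
    #ifndef GC_NAME_CONFLICT
        int* b = new (GC) int;                    // pre-5.0 spelling, still accepted
        (void)b;
    #endif
        *a = 42;
        *d = 0.0;
        n->value = *a;
        return 0;
    }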
diff --git a/boehm-gc/include/gc_typed.h b/boehm-gc/include/gc_typed.h
index e4a6b94756e..2e0598f204c 100644
--- a/boehm-gc/include/gc_typed.h
+++ b/boehm-gc/include/gc_typed.h
@@ -61,6 +61,7 @@ GC_API GC_PTR GC_malloc_explicitly_typed
GC_PROTO((size_t size_in_bytes, GC_descr d));
/* Allocate an object whose layout is described by d. */
/* The resulting object MAY NOT BE PASSED TO REALLOC. */
+ /* The returned object is cleared. */
GC_API GC_PTR GC_malloc_explicitly_typed_ignore_off_page
GC_PROTO((size_t size_in_bytes, GC_descr d));
@@ -75,6 +76,7 @@ GC_API GC_PTR GC_calloc_explicitly_typed
/* alignment required for pointers. E.g. on a 32-bit */
/* machine with 16-bit aligned pointers, size_in_bytes */
/* must be a multiple of 2. */
+ /* Returned object is cleared. */
#ifdef GC_DEBUG
# define GC_MALLOC_EXPLICTLY_TYPED(bytes, d) GC_MALLOC(bytes)
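A sketch of typed allocation with the clarified clearing guarantee, modeled on the example in gc_typed.h itself; the struct is illustrative, and the descriptor marks only the pointer-valued words:

    #include <stddef.h>
    #include "gc_typed.h"

    typedef struct treenode {
        struct treenode *left;
        struct treenode *right;
        int datum;
    } treenode;

    int main(void)
    {
        GC_word bitmap[GC_BITMAP_SIZE(treenode)] = {0};
        GC_set_bit(bitmap, GC_WORD_OFFSET(treenode, left));
        GC_set_bit(bitmap, GC_WORD_OFFSET(treenode, right));
        GC_descr d = GC_make_descriptor(bitmap, GC_WORD_LEN(treenode));

        treenode *t =
            (treenode *)GC_malloc_explicitly_typed(sizeof(treenode), d);
        /* t->left, t->right and t->datum are all zero here, per the */
        /* "returned object is cleared" note above.                  */
        return t == 0;
    }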
diff --git a/boehm-gc/include/private/gc_priv.h b/boehm-gc/include/private/gc_priv.h
index ac4d63a0b26..eabb85f0c20 100644
--- a/boehm-gc/include/private/gc_priv.h
+++ b/boehm-gc/include/private/gc_priv.h
@@ -44,7 +44,7 @@
typedef GC_word word;
typedef GC_signed_word signed_word;
-# ifndef CONFIG_H
+# ifndef GCCONFIG_H
# include "gcconfig.h"
# endif
@@ -82,6 +82,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define GC_FAR
#endif
+
/*********************************/
/* */
/* Definitions for conservative */
@@ -173,15 +174,6 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* May save significant amounts of space for obj_map */
/* entries. */
-#ifndef OLD_BLOCK_ALLOC
- /* Macros controlling large block allocation strategy. */
-# define EXACT_FIRST /* Make a complete pass through the large object */
- /* free list before splitting a block */
-# define PRESERVE_LAST /* Do not divide last allocated heap segment */
- /* unless we would otherwise need to expand the */
- /* heap. */
-#endif
-
/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
# define MERGE_SIZES
@@ -281,6 +273,13 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ (double) (a.tv_usec - b.tv_usec) / 1000.0)
#else /* !BSD_TIME */
+# ifdef MSWIN32
+# include <windows.h>
+# include <winbase.h>
+# define CLOCK_TYPE DWORD
+# define GET_TIME(x) x = GetTickCount()
+# define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
+# else /* !MSWIN32, !BSD_TIME */
# include <time.h>
# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
clock_t clock(); /* Not in time.h, where it belongs */
@@ -306,6 +305,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define GET_TIME(x) x = clock()
# define MS_TIME_DIFF(a,b) ((unsigned long) \
(1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+# endif /* !MSWIN32 */
#endif /* !BSD_TIME */
/* We use bzero and bcopy internally. They may not be available. */
@@ -437,8 +437,11 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define LOCK() mutex_lock(&GC_allocate_ml);
# define UNLOCK() mutex_unlock(&GC_allocate_ml);
# endif
-# ifdef LINUX_THREADS
+# if defined(LINUX_THREADS)
+# if defined(I386)|| defined(POWERPC) || defined(ALPHA) || defined(IA64) \
+ || defined(M68K)
# include <pthread.h>
+# define USE_SPIN_LOCK
# if defined(I386)
inline static int GC_test_and_set(volatile unsigned int *addr) {
int oldval;
@@ -448,9 +451,38 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
: "0"(1), "m"(*(addr)));
return oldval;
}
-# else
-# if defined(POWERPC)
+# endif
+# if defined(IA64)
inline static int GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ __asm__ __volatile__("xchg4 %0=%1,%2"
+ : "=r"(oldval), "=m"(*addr)
+ : "r"(1), "1"(*addr));
+ return oldval;
+ }
+ inline static void GC_clear(volatile unsigned int *addr) {
+ __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr));
+ }
+# define GC_CLEAR_DEFINED
+# endif
+# ifdef M68K
+ /* Contributed by Tony Mantler. I'm not sure how well it was */
+ /* tested. */
+ inline static int GC_test_and_set(volatile unsigned int *addr) {
+ char oldval; /* this must be no longer than 8 bits */
+
+ /* The return value is semi-phony. */
+ /* 'tas' sets bit 7 while the return */
+ /* value pretends bit 0 was set */
+ __asm__ __volatile__(
+ "tas %1@; sne %0; negb %0"
+ : "=d" (oldval)
+ : "a" (addr));
+ return oldval;
+ }
+# endif
+# if defined(POWERPC)
+ inline static int GC_test_and_set(volatile unsigned int *addr) {
int oldval;
int temp = 1; // locked value
@@ -465,46 +497,61 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
: "r"(temp), "1"(addr)
: "memory");
return (int)oldval;
- }
-# else
-# ifdef ALPHA
- inline static int GC_test_and_set(volatile unsigned int *
-addr)
- {
- unsigned long oldvalue;
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " and %0,%3,%2\n"
- " bne %2,2f\n"
- " xor %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,3f\n"
- " mb\n"
- "2:\n"
- ".section .text2,\"ax\"\n"
- "3: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (*addr), "=&r"
-(oldvalue)
- :"Ir" (1), "m" (*addr));
-
- return oldvalue;
- }
-# else
- -- > Need implementation of GC_test_and_set()
-# endif
-# endif
+ }
+ inline static void GC_clear(volatile unsigned int *addr) {
+ __asm__ __volatile__("eieio");
+ *(addr) = 0;
+ }
+# define GC_CLEAR_DEFINED
# endif
- inline static void GC_clear(volatile unsigned int *addr) {
+# ifdef ALPHA
+ inline static int GC_test_and_set(volatile unsigned int * addr)
+ {
+ unsigned long oldvalue;
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1\n"
+ " and %0,%3,%2\n"
+ " bne %2,2f\n"
+ " xor %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ " mb\n"
+ "2:\n"
+ ".section .text2,\"ax\"\n"
+ "3: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
+ :"Ir" (1), "m" (*addr));
+
+ return oldvalue;
+ }
+ /* Should probably also define GC_clear, since it needs */
+ /* a memory barrier ?? */
+# endif /* ALPHA */
+# ifdef ARM32
+ inline static int GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ /* SWP on ARM is very similar to XCHG on x86. Doesn't lock the
+ * bus because there are no SMP ARM machines. If/when there are,
+ * this code will likely need to be updated. */
+ /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
+ __asm__ __volatile__("swp %0, %1, [%2]"
+ : "=r"(oldval)
+ : "r"(1), "r"(addr));
+ return oldval;
+ }
+# endif
+# ifndef GC_CLEAR_DEFINED
+ inline static void GC_clear(volatile unsigned int *addr) {
+ /* Try to discourage gcc from moving anything past this. */
+ __asm__ __volatile__(" ");
*(addr) = 0;
- }
+ }
+# endif
extern volatile unsigned int GC_allocate_lock;
- /* This is not a mutex because mutexes that obey the (optional) */
- /* POSIX scheduling rules are subject to convoys in high contention */
- /* applications. This is basically a spin lock. */
extern pthread_t GC_lock_holder;
extern void GC_lock(void);
/* Allocation lock holder. Only set if acquired by client through */
@@ -517,12 +564,19 @@ addr)
{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
# define UNLOCK() \
GC_clear(&GC_allocate_lock)
- extern GC_bool GC_collecting;
+ extern VOLATILE GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
}
# define EXIT_GC() GC_collecting = 0;
+# else /* LINUX_THREADS on hardware for which we don't know how */
+ /* to do test and set. */
+# include <pthread.h>
+ extern pthread_mutex_t GC_allocate_ml;
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# endif
# endif /* LINUX_THREADS */
# if defined(HPUX_THREADS)
# include <pthread.h>
@@ -581,7 +635,7 @@ addr)
*(volatile unsigned long *)(&GC_allocate_lock) = 0; }
# endif
# endif
- extern GC_bool GC_collecting;
+ extern VOLATILE GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
@@ -957,8 +1011,10 @@ struct hblk {
/* The type of mark procedures. This really belongs in gc_mark.h. */
/* But we put it here, so that we can avoid scanning the mark proc */
/* table. */
-typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr,
- mark_stack_limit, env */);
+typedef struct ms_entry * (*mark_proc)(/* word * addr,
+ struct ms_entry *mark_stack_ptr,
+ struct ms_entry *mark_stack_limit,
+ word env */);
# define LOG_MAX_MARK_PROCS 6
# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
@@ -1035,6 +1091,7 @@ struct roots {
struct _GC_arrays {
word _heapsize;
word _max_heapsize;
+ word _requested_heapsize; /* Heap size due to explicit expansion */
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
word _large_free_bytes;
@@ -1059,6 +1116,10 @@ struct _GC_arrays {
word _mem_freed;
/* Number of explicitly deallocated words of memory */
/* since last collection. */
+ ptr_t _scratch_end_ptr;
+ ptr_t _scratch_last_end_ptr;
+ /* Used by headers.c, and can easily appear to point to */
+ /* heap. */
mark_proc _mark_procs[MAX_MARK_PROCS];
/* Table of user-defined mark procedures. There is */
/* a small number of these, which can be referenced */
@@ -1223,9 +1284,12 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
+# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
+# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
+# define GC_requested_heapsize GC_arrays._requested_heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
@@ -1260,6 +1324,8 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
+#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
+
/* Object kinds: */
# define MAXOBJKINDS 16
@@ -1392,10 +1458,7 @@ extern ptr_t GC_greatest_plausible_heap_addr;
ptr_t GC_approx_sp();
GC_bool GC_should_collect();
-#ifdef PRESERVE_LAST
- GC_bool GC_in_last_heap_sect(/* ptr_t */);
- /* In last added heap section? If so, avoid breaking up. */
-#endif
+
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
@@ -1672,9 +1735,10 @@ ptr_t GC_allocobj(/* sz_inn_words, kind */);
/* head. */
void GC_init_headers();
-GC_bool GC_install_header(/*h*/);
+struct hblkhdr * GC_install_header(/*h*/);
/* Install a header for block h. */
- /* Return FALSE on failure. */
+ /* Return 0 on failure, or the header */
+ /* otherwise. */
GC_bool GC_install_counts(/*h, sz*/);
/* Set up forwarding counts for block */
/* h of size sz. */
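The per-architecture test-and-set routines rearranged above all feed one spin-lock pattern. Purely as an illustration of that pattern (this only compiles inside the collector, where gc_priv.h's inline definitions are visible and one of the configurations above applies; the lock word here is hypothetical, not GC_allocate_lock):

    #include "gc_priv.h"

    static volatile unsigned int demo_lock = 0;   /* hypothetical lock word */

    static void demo_acquire(void)
    {
        while (GC_test_and_set(&demo_lock)) {
            /* The real LOCK() macro calls GC_lock() here, which backs  */
            /* off and yields rather than spinning at full speed.       */
        }
    }

    static void demo_release(void)
    {
        GC_clear(&demo_lock);   /* store zero, with a release barrier where needed */
    }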
diff --git a/boehm-gc/include/private/gcconfig.h b/boehm-gc/include/private/gcconfig.h
index c9017d371a8..4c4bca31222 100644
--- a/boehm-gc/include/private/gcconfig.h
+++ b/boehm-gc/include/private/gcconfig.h
@@ -13,9 +13,9 @@
* modified is included with the above copyright notice.
*/
-#ifndef CONFIG_H
+#ifndef GCCONFIG_H
-# define CONFIG_H
+# define GCCONFIG_H
/* Machine dependent parameters. Some tuning parameters can be found */
/* near the top of gc_private.h. */
@@ -53,6 +53,11 @@
# define NETBSD
# define mach_type_known
# endif
+# if defined(__NetBSD__) && defined(arm32)
+# define ARM32
+# define NETBSD
+# define mach_type_known
+# endif
# if defined(vax)
# define VAX
# ifdef ultrix
@@ -64,15 +69,18 @@
# endif
# if defined(mips) || defined(__mips)
# define MIPS
-# if defined(ultrix) || defined(__ultrix) || defined(__NetBSD__)
-# define ULTRIX
-# else
-# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__)
-# define IRIX5 /* or IRIX 6.X */
-# else
-# define RISCOS /* or IRIX 4.X */
-# endif
-# endif
+# if !defined(LINUX)
+# if defined(ultrix) || defined(__ultrix) || defined(__NetBSD__)
+# define ULTRIX
+# else
+# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) \
+ || defined(__SYSTYPE_SVR4__)
+# define IRIX5 /* or IRIX 6.X */
+# else
+# define RISCOS /* or IRIX 4.X */
+# endif
+# endif
+# endif /* !LINUX */
# define mach_type_known
# endif
# if defined(sequent) && defined(i386)
@@ -130,15 +138,22 @@
# define SYSV
# define mach_type_known
# endif
-# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) \
+# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
|| defined(hppa) || defined(__hppa__)
# define HP_PA
+# ifndef LINUX
+# define HPUX
+# endif
# define mach_type_known
# endif
# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
+# if defined(LINUX) && (defined(__ia64__) || defined(__ia64))
+# define IA64
+# define mach_type_known
+# endif
# if defined(LINUX) && defined(powerpc)
# define POWERPC
# define mach_type_known
@@ -147,10 +162,14 @@
# define M68K
# define mach_type_known
# endif
-# if defined(LINUX) && defined(sparc)
+# if defined(LINUX) && (defined(sparc) || defined(__sparc__))
# define SPARC
# define mach_type_known
# endif
+# if defined(LINUX) && defined(arm)
+# define ARM32
+# define mach_type_known
+# endif
# if defined(__alpha) || defined(__alpha__)
# define ALPHA
# if !defined(LINUX)
@@ -243,6 +262,11 @@
# define CYGWIN32
# define mach_type_known
# endif
+# if defined(__MINGW32__)
+# define I386
+# define MSWIN32
+# define mach_type_known
+# endif
# if defined(__BORLANDC__)
# define I386
# define MSWIN32
@@ -253,6 +277,10 @@
# define UTS4
# define mach_type_known
# endif
+# if defined(__pj__)
+# define PJ
+# define mach_type_known
+# endif
/* Ivan Demakov */
# if defined(__WATCOMC__) && defined(__386__)
# define I386
@@ -307,6 +335,9 @@
/* (CX_UX and DGUX) */
/* S370 ==> 370-like machine */
/* running Amdahl UTS4 */
+ /* ARM32 ==> Intel StrongARM */
+ /* IA64 ==> Intel IA64 */
+ /* (e.g. Itanium) */
/*
@@ -392,6 +423,15 @@
*
* An architecture may define DYNAMIC_LOADING if dynamic_load.c
* defined GC_register_dynamic_libraries() for the architecture.
+ *
+ * An architecture may define PREFETCH(x) to preload the cache with *x.
+ * This defaults to a no-op.
+ *
+ * PREFETCH_FOR_WRITE(x) is used if *x is about to be written.
+ *
+ * An architecture may also define CLEAR_DOUBLE(x) to be a fast way to
+ * clear the two words at GC_malloc-aligned address x. By default,
+ * word stores of 0 are used instead.
*/
@@ -516,7 +556,7 @@
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
/* Stack usually starts at 0x80000000 */
-# define DATASTART GC_data_start
+# define LINUX_DATA_START
extern int _end;
# define DATAEND (&_end)
# endif
@@ -615,8 +655,8 @@
# ifdef LINUX
# define OS_TYPE "LINUX"
# ifdef __ELF__
-# define DATASTART GC_data_start
-# define DYNAMIC_LOADING
+# define LINUX_DATA_START
+# define DYNAMIC_LOADING
# else
Linux Sparc non elf ?
# endif
@@ -684,13 +724,16 @@
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
-# define HEURISTIC1
-# undef STACK_GRAN
-# define STACK_GRAN 0x10000000
- /* STACKBOTTOM is usually 0xc0000000, but this changes with */
- /* different kernel configurations. In particular, systems */
- /* with 2GB physical memory will usually move the user */
- /* address space limit, and hence initial SP to 0x80000000. */
+# define LINUX_STACKBOTTOM
+# if 0
+# define HEURISTIC1
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+ /* STACKBOTTOM is usually 0xc0000000, but this changes with */
+ /* different kernel configurations. In particular, systems */
+ /* with 2GB physical memory will usually move the user */
+ /* address space limit, and hence initial SP to 0x80000000. */
+# endif
# if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC)
# define MPROTECT_VDB
# else
@@ -706,8 +749,7 @@
# endif
# include <features.h>
# if defined(__GLIBC__) && __GLIBC__ >= 2
- extern int __data_start;
-# define DATASTART ((ptr_t)(&__data_start))
+# define LINUX_DATA_START
# else
extern char **__environ;
# define DATASTART ((ptr_t)(&__environ))
@@ -726,6 +768,26 @@
extern int etext;
# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
# endif
+# ifdef USE_I686_PREFETCH
+# define PREFETCH(x) \
+ __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(x)))
+ /* Empirically prefetcht0 is much more effective at reducing */
+ /* cache miss stalls for the targeted load instructions. But it */
+ /* seems to interfere enough with other cache traffic that the net */
+ /* result is worse than prefetchnta. */
+# if 0
+ /* Using prefetches for write seems to have a slight negative */
+ /* impact on performance, at least for a PIII/500. */
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ (" prefetcht0 %0": : "m"(*(char *)(x)))
+# endif
+# endif
+# ifdef USE_3DNOW_PREFETCH
+# define PREFETCH(x) \
+ __asm__ __volatile__ (" prefetch %0": : "m"(*(char *)(x)))
+# define PREFETCH_FOR_WRITE(x)
+ __asm__ __volatile__ (" prefetchw %0": : "m"(*(char *)(x)))
+# endif
# endif
# ifdef CYGWIN32
# define OS_TYPE "CYGWIN32"
@@ -836,35 +898,48 @@
# ifdef MIPS
# define MACH_TYPE "MIPS"
-# ifndef IRIX5
-# define DATASTART (ptr_t)0x10000000
- /* Could probably be slightly higher since */
- /* startup code allocates lots of stuff. */
-# else
- extern int _fdata;
-# define DATASTART ((ptr_t)(&_fdata))
-# ifdef USE_MMAP
-# define HEAP_START (ptr_t)0x30000000
-# else
-# define HEAP_START DATASTART
-# endif
- /* Lowest plausible heap address. */
- /* In the MMAP case, we map there. */
- /* In either case it is used to identify */
- /* heap sections so they're not */
- /* considered as roots. */
-# endif /* IRIX5 */
-# define HEURISTIC2
/* # define STACKBOTTOM ((ptr_t)0x7fff8000) sometimes also works. */
+# ifdef LINUX
+ /* This was developed for a linuxce style platform. Probably */
+ /* needs to be tweaked for workstation class machines. */
+# define OS_TYPE "LINUX"
+ extern int __data_start;
+# define DATASTART ((ptr_t)(&__data_start))
+# define ALIGNMENT 4
+# define USE_GENERIC_PUSH_REGS 1
+# define STACKBOTTOM 0x80000000
+ /* In many cases, this should probably use LINUX_STACKBOTTOM */
+ /* instead. But some kernel versions seem to give the wrong */
+ /* value from /proc. */
+# endif /* Linux */
# ifdef ULTRIX
+# define HEURISTIC2
+# define DATASTART (ptr_t)0x10000000
+ /* Could probably be slightly higher since */
+ /* startup code allocates lots of stuff. */
# define OS_TYPE "ULTRIX"
# define ALIGNMENT 4
# endif
# ifdef RISCOS
+# define HEURISTIC2
+# define DATASTART (ptr_t)0x10000000
# define OS_TYPE "RISCOS"
# define ALIGNMENT 4 /* Required by hardware */
# endif
# ifdef IRIX5
+# define HEURISTIC2
+ extern int _fdata;
+# define DATASTART ((ptr_t)(&_fdata))
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x30000000
+# else
+# define HEAP_START DATASTART
+# endif
+ /* Lowest plausible heap address. */
+ /* In the MMAP case, we map there. */
+ /* In either case it is used to identify */
+ /* heap sections so they're not */
+ /* considered as roots. */
# define OS_TYPE "IRIX5"
# define MPROTECT_VDB
# ifdef _MIPS_SZPTR
@@ -892,9 +967,17 @@
# endif
# ifdef HP_PA
+ /* OS is assumed to be HP/UX */
# define MACH_TYPE "HP_PA"
-# define ALIGNMENT 4
-# define ALIGN_DOUBLE
+# define OS_TYPE "HPUX"
+# ifdef __LP64__
+# define CPP_WORDSZ 64
+# define ALIGNMENT 8
+# else
+# define CPP_WORDSZ 32
+# define ALIGNMENT 4
+# define ALIGN_DOUBLE
+# endif
extern int __data_start;
# define DATASTART ((ptr_t)(&__data_start))
# if 0
@@ -911,14 +994,21 @@
# endif
# define STACK_GROWS_UP
# define DYNAMIC_LOADING
+# ifndef HPUX_THREADS
+# define MPROTECT_VDB
+# endif
# include <unistd.h>
# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
- /* They misspelled the Posix macro? */
# endif
# ifdef ALPHA
# define MACH_TYPE "ALPHA"
# define ALIGNMENT 8
+# define USE_GENERIC_PUSH_REGS
+ /* Gcc and probably the DEC/Compaq compiler spill pointers to preserved */
+ /* fp registers in some cases when the target is a 21264. The assembly */
+ /* code doesn't handle that yet, and version dependencies make that a */
+ /* bit tricky. Do the easy thing for now. */
# ifdef OSF1
# define OS_TYPE "OSF1"
# define DATASTART ((ptr_t) 0x140000000)
@@ -939,13 +1029,9 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
-# if 0
- /* __data_start apparently disappeared in some recent releases. */
- extern int __data_start;
-# define DATASTART &__data_start
-# endif
-# define DATASTART GC_data_start
+# define LINUX_DATA_START
# define DYNAMIC_LOADING
+ /* This doesn't work if the collector is in a dynamic library. */
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@@ -957,6 +1043,49 @@
# endif
# endif
+# ifdef IA64
+# define MACH_TYPE "IA64"
+# define ALIGN_DOUBLE
+ /* Requires 16 byte alignment for malloc */
+# define ALIGNMENT 8
+# define USE_GENERIC_PUSH_REGS
+ /* We need to get preserved registers in addition to register windows. */
+ /* That's easiest to do with setjmp. */
+# ifdef HPUX
+ --> needs work
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define CPP_WORDSZ 64
+ /* This should really be done through /proc, but that */
+ /* requires we run on an IA64 kernel. */
+# define STACKBOTTOM ((ptr_t) 0xa000000000000000l)
+ /* We also need the base address of the register stack */
+ /* backing store. There is probably a better way to */
+ /* get that, too ... */
+# define BACKING_STORE_BASE ((ptr_t) 0x9fffffff80000000l)
+# if 1
+# define SEARCH_FOR_DATA_START
+# define DATASTART GC_data_start
+# else
+ extern int data_start;
+# define DATASTART ((ptr_t)(&data_start))
+# endif
+# define DYNAMIC_LOADING
+# define MPROTECT_VDB
+ /* Requires Linux 2.3.47 or later. */
+ extern int _end;
+# define DATAEND (&_end)
+ /* PREFETCH appears to have a large performance impact. */
+# define PREFETCH(x) \
+ __asm__ (" lfetch [%0]": : "r"((void *)(x)))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ (" lfetch.excl [%0]": : "r"((void *)(x)))
+# define CLEAR_DOUBLE(x) \
+ __asm__ (" stf.spill [%0]=f0": : "r"((void *)(x)))
+# endif
+# endif
+
# ifdef M88K
# define MACH_TYPE "M88K"
# define ALIGNMENT 4
@@ -987,6 +1116,69 @@
# define HEURISTIC2
# endif
+# if defined(PJ)
+# define ALIGNMENT 4
+ extern int _etext;
+# define DATASTART ((ptr_t)(&_etext))
+# define HEURISTIC1
+# endif
+
+# ifdef ARM32
+# define CPP_WORDSZ 32
+# define MACH_TYPE "ARM32"
+# define ALIGNMENT 4
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# define USE_GENERIC_PUSH_REGS
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define HEURISTIC1
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+# define USE_GENERIC_PUSH_REGS
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# include <features.h>
+# if defined(__GLIBC__) && __GLIBC__ >= 2
+# define LINUX_DATA_START
+# else
+ extern char **__environ;
+# define DATASTART ((ptr_t)(&__environ))
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif
+ extern int _end;
+# define DATAEND (&_end)
+# else
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# endif
+# endif
+#endif
+
+#ifdef LINUX_DATA_START
+ /* Some Linux distributions arrange to define __data_start. Some */
+ /* define data_start as a weak symbol. The latter is technically */
+ /* broken, since the user program may define data_start, in which */
+ /* case we lose. Nonetheless, we try both, preferring __data_start. */
+ /* We assume gcc. */
+# pragma weak __data_start
+ extern int __data_start;
+# pragma weak data_start
+ extern int data_start;
+# define DATASTART ((ptr_t)(&__data_start != 0? &__data_start : &data_start))
+#endif
+
# ifndef STACK_GROWS_UP
# define STACK_GROWS_DOWN
# endif
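The LINUX_DATA_START block above relies on gcc weak symbols: prefer __data_start, and fall back to data_start when only the weak definition exists. The same technique in isolation (the function name is illustrative; a zero result means neither symbol was provided by the C library or linker script):

    /* gcc-specific: an undefined weak symbol resolves to address 0. */
    #pragma weak __data_start
    extern "C" int __data_start;
    #pragma weak data_start
    extern "C" int data_start;

    static char *guess_data_start(void)
    {
        if (&__data_start != 0) return (char *)&__data_start;
        if (&data_start != 0)   return (char *)&data_start;
        return 0;               /* neither symbol present */
    }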
@@ -1029,6 +1221,10 @@
# define SUNOS5SIGS
# endif
+# if defined(HPUX)
+# define SUNOS5SIGS
+# endif
+
# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
-> bad word size
# endif
@@ -1063,6 +1259,26 @@
# define DEFAULT_VDB
# endif
+# ifndef PREFETCH
+# define PREFETCH(x)
+# define NO_PREFETCH
+# endif
+
+# ifndef PREFETCH_FOR_WRITE
+# define PREFETCH_FOR_WRITE(x)
+# define NO_PREFETCH_FOR_WRITE
+# endif
+
+# ifndef CACHE_LINE_SIZE
+# define CACHE_LINE_SIZE 32 /* Wild guess */
+# endif
+
+# ifndef CLEAR_DOUBLE
+# define CLEAR_DOUBLE(x) \
+ ((word*)x)[0] = 0; \
+ ((word*)x)[1] = 0;
+# endif /* CLEAR_DOUBLE */
+
# if defined(_SOLARIS_PTHREADS) && !defined(SOLARIS_THREADS)
# define SOLARIS_THREADS
# endif
@@ -1078,10 +1294,13 @@
# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
--> inconsistent configuration
# endif
+# if defined(HPUX_THREADS) && !defined(HPUX)
+--> inconsistent configuration
+# endif
# if defined(PCR) || defined(SRC_M3) || \
defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
- defined(IRIX_JDK_THREADS)
+ defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
# define THREADS
# endif
@@ -1096,4 +1315,4 @@
/* include assembly code to do it well. */
# endif
-# endif
+# endif /* GCCONFIG_H */