/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * Note that this defines a large number of tuning hooks, which can
 * safely be ignored in nearly all cases.  For normal use it suffices
 * to call only GC_MALLOC and perhaps GC_REALLOC.
 * For better performance, also look at GC_MALLOC_ATOMIC and
 * GC_enable_incremental.  If you need an action to be performed
 * immediately before an object is collected, look at GC_register_finalizer.
 * If you are using Solaris threads, look at the end of this file.
 * Everything else is best ignored unless you encounter performance
 * problems.
 */
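
/*
 * Illustrative sketch (not part of this interface): a minimal client
 * that uses only the entry points mentioned above.  The struct and the
 * sizes are hypothetical, and error handling is omitted.
 *
 *   #include "gc.h"
 *
 *   struct node { struct node *next; int value; };
 *
 *   int main(void)
 *   {
 *     struct node *n;
 *     char *buf;
 *
 *     GC_enable_incremental();
 *     n = (struct node *)GC_MALLOC(sizeof(struct node));
 *     buf = (char *)GC_MALLOC_ATOMIC(1024);
 *     buf = (char *)GC_REALLOC(buf, 2048);
 *     n->value = buf[0];
 *     return 0;
 *   }
 *
 * No explicit deallocation is needed; unreachable objects are reclaimed
 * by the collector.
 */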

#ifndef _GC_H

# define _GC_H

# include "gc_config_macros.h"

# if defined(__STDC__) || defined(__cplusplus) || defined(_AIX)
#   define GC_PROTO(args) args
    typedef void * GC_PTR;
#   define GC_CONST const
# else
#   define GC_PROTO(args) ()
    typedef char * GC_PTR;
#   define GC_CONST
# endif

# ifdef __cplusplus
    extern "C" {
# endif


/* Define word and signed_word to be unsigned and signed types of the   */
/* same size as char * or void *.  There seems to be no way to do this  */
/* even semi-portably.  The following is probably no better/worse       */
/* than almost anything else.                                           */
/* The ANSI standard suggests that size_t and ptrdiff_t might be        */
/* better choices.  But those had incorrect definitions on some older   */
/* systems.  Notably "typedef int size_t" is WRONG.                     */
#ifndef _WIN64
  typedef unsigned long GC_word;
  typedef long GC_signed_word;
#else
  /* Win64 isn't really supported yet, but this is the first step.  And */
  /* it might cause error messages to show up in more plausible places. */
  /* This needs basetsd.h, which is included by windows.h.              */
  typedef ULONG_PTR GC_word;
  typedef LONG_PTR GC_signed_word;
#endif

/* Public read-only variables */

GC_API GC_word GC_gc_no;        /* Counter incremented per collection.  */
                                /* Includes empty GCs at startup.       */

GC_API int GC_parallel;         /* GC is parallelized for performance on */
                                /* multiprocessors.  Currently set only  */
                                /* implicitly if collector is built with */
                                /* -DPARALLEL_MARK and if either:        */
                                /*  Env variable GC_NPROC is set to > 1, */
                                /*  or GC_NPROC is not set and this is   */
                                /*  an MP.                               */
                                /* If GC_parallel is set, incremental    */
                                /* collection is only partially          */
                                /* functional, and may not be desirable. */


/* Public R/W variables */

GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
                        /* When there is insufficient memory to satisfy */
                        /* an allocation request, we return             */
                        /* (*GC_oom_fn)().  By default this just        */
                        /* returns 0.                                   */
                        /* If it returns, it must return 0 or a valid   */
                        /* pointer to a previously allocated heap       */
                        /* object.                                      */
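
/*
 * Illustrative sketch (not part of this interface): installing a custom
 * out-of-memory handler.  The handler name and its reporting policy are
 * hypothetical; a real handler must return 0 or a valid pointer to a
 * previously allocated heap object.
 *
 *   #include <stdio.h>
 *
 *   static GC_PTR my_oom_handler(size_t bytes_requested)
 *   {
 *     fprintf(stderr, "GC: failed to allocate %lu bytes\n",
 *             (unsigned long)bytes_requested);
 *     return 0;
 *   }
 *
 *   static void install_oom_handler(void)
 *   {
 *     GC_oom_fn = my_oom_handler;
 *   }
 */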

GC_API int GC_find_leak;
                        /* Do not actually garbage collect, but simply  */
                        /* report inaccessible memory that was not      */
                        /* deallocated with GC_free.  Initial value     */
                        /* is determined by FIND_LEAK macro.            */

GC_API int GC_all_interior_pointers;
                        /* Arrange for pointers to object interiors to  */
                        /* be recognized as valid.  May not be changed  */
                        /* after GC initialization.                     */
                        /* Initial value is determined by               */
                        /* -DALL_INTERIOR_POINTERS.                     */
                        /* Unless DONT_ADD_BYTE_AT_END is defined, this */
                        /* also affects whether sizes are increased by  */
                        /* at least a byte to allow "off the end"       */
                        /* pointer recognition.                         */
                        /* MUST BE 0 or 1.                              */

GC_API int GC_quiet;    /* Disable statistics output.  Only matters if  */
                        /* collector has been compiled with statistics  */
                        /* enabled.  This involves a performance cost,  */
                        /* and is thus not the default.                 */

GC_API int GC_finalize_on_demand;
                        /* If nonzero, finalizers will only be run in   */
                        /* response to an explicit GC_invoke_finalizers */
                        /* call.  The default is determined by whether  */
                        /* the FINALIZE_ON_DEMAND macro is defined      */
                        /* when the collector is built.                 */

GC_API int GC_java_finalization;
                        /* Mark objects reachable from finalizable      */
                        /* objects in a separate postpass.  This makes  */
                        /* it a bit safer to use non-topologically-     */
                        /* ordered finalization.  Default value is      */
                        /* determined by JAVA_FINALIZATION macro.       */

GC_API void (* GC_finalizer_notifier) GC_PROTO((void));
                        /* Invoked by the collector when there are      */
                        /* objects to be finalized.  Invoked at most    */
                        /* once per GC cycle.  Never invoked unless     */
                        /* GC_finalize_on_demand is set.                */
                        /* Typically this will notify a finalization    */
                        /* thread, which will call GC_invoke_finalizers */
                        /* in response.                                 */

GC_API int GC_dont_gc;  /* != 0 ==> Don't collect.  In versions 6.2a1+, */
                        /* this overrides explicit GC_gcollect() calls. */
                        /* Used as a counter, so that nested enabling   */
                        /* and disabling work correctly.  Should        */
                        /* normally be updated with GC_enable() and     */
                        /* GC_disable() calls.                          */
                        /* Direct assignment to GC_dont_gc is           */
                        /* deprecated.                                  */

GC_API int GC_dont_expand;
                        /* Don't expand the heap unless explicitly      */
                        /* requested or forced to.                      */

GC_API int GC_use_entire_heap;
                /* Causes the nonincremental collector to use the       */
                /* entire heap before collecting.  This was the only    */
                /* option for GC versions < 5.0.  This sometimes        */
                /* results in more large block fragmentation, since     */
                /* very large blocks will tend to get broken up         */
                /* during each GC cycle.  It is likely to result in a   */
                /* larger working set, but lower collection             */
                /* frequencies, and hence fewer instructions executed   */
                /* in the collector.                                    */

GC_API int GC_full_freq;    /* Number of partial collections between    */
                            /* full collections.  Matters only if       */
                            /* GC_incremental is set.                   */
                            /* Full collections are also triggered if   */
                            /* the collector detects a substantial      */
                            /* increase in the number of in-use heap    */
                            /* blocks.  Values in the tens are now      */
                            /* perfectly reasonable, unlike for         */
                            /* earlier GC versions.                     */

GC_API GC_word GC_non_gc_bytes;
                /* Bytes not considered candidates for collection.      */
                /* Used only to control scheduling of collections.      */
                /* Updated by GC_malloc_uncollectable and GC_free.      */
                /* Wizards only.                                        */

GC_API int GC_no_dls;
                /* Don't register dynamic library data segments.        */
                /* Wizards only.  Should be used only if the            */
                /* application explicitly registers all roots.          */
                /* In Microsoft Windows environments, this will         */
                /* usually also prevent registration of the             */
                /* main data segment as part of the root set.           */

GC_API GC_word GC_free_space_divisor;
                        /* We try to make sure that we allocate at      */
                        /* least N/GC_free_space_divisor bytes between  */
                        /* collections, where N is the heap size plus   */
                        /* a rough estimate of the root set size.       */
                        /* Initially, GC_free_space_divisor = 3.        */
                        /* Increasing its value will use less space     */
                        /* but more collection time.  Decreasing it     */
                        /* will appreciably decrease collection time    */
                        /* at the expense of space.                     */
                        /* GC_free_space_divisor = 1 will effectively   */
                        /* disable collections.                         */

GC_API GC_word GC_max_retries;
                        /* The maximum number of GCs attempted before   */
                        /* reporting out of memory after heap           */
                        /* expansion fails.  Initially 0.               */


GC_API char *GC_stackbottom;    /* Cool end of user stack.              */
                                /* May be set in the client prior to    */
                                /* calling any GC_ routines.  This      */
                                /* avoids some overhead, and            */
                                /* potentially some signals that can    */
                                /* confuse debuggers.  Otherwise the    */
                                /* collector attempts to set it         */
                                /* automatically.                       */
                                /* For multithreaded code, this is the  */
                                /* cold end of the stack for the        */
                                /* primordial thread.                   */

GC_API int GC_dont_precollect;  /* Don't collect as part of             */
                                /* initialization.  Should be set only  */
                                /* if the client wants a chance to      */
                                /* manually initialize the root set     */
                                /* before the first collection.         */
                                /* Interferes with blacklisting.        */
                                /* Wizards only.                        */

GC_API unsigned long GC_time_limit;
                                /* If incremental collection is enabled, */
                                /* we try to terminate collections       */
                                /* after this many milliseconds.  Not a  */
                                /* hard time bound.  Setting this to     */
                                /* GC_TIME_UNLIMITED will essentially    */
                                /* disable incremental collection while  */
                                /* leaving generational collection       */
                                /* enabled.                              */
#       define GC_TIME_UNLIMITED 999999
                                /* Setting GC_time_limit to this value   */
                                /* will disable the "pause time          */
                                /* exceeded" tests.                      */

/* Public procedures */

/* Initialize the collector.  This is only required when using thread-local
 * allocation, since unlike the regular allocation routines, GC_local_malloc
 * is not self-initializing.  If you use GC_local_malloc you should arrange
 * to call this somehow (e.g. from a constructor) before doing any allocation.
 * For win32 threads, it needs to be called explicitly.
 */
GC_API void GC_init GC_PROTO((void));

/*
 * General-purpose allocation routines, with roughly malloc calling conventions.
 * The atomic versions promise that no relevant pointers are contained
 * in the object.  The nonatomic versions guarantee that the new object
 * is cleared.  GC_malloc_stubborn promises that no changes to the object
 * will occur after GC_end_stubborn_change has been called on the
 * result of GC_malloc_stubborn.  GC_malloc_uncollectable allocates an object
 * that is scanned for pointers to collectable objects, but is not itself
 * collectable.  The object is scanned even if it does not appear to
 * be reachable.  GC_malloc_uncollectable and GC_free called on the resulting
 * object implicitly update GC_non_gc_bytes appropriately.
 *
 * Note that the GC_malloc_stubborn support is stubbed out by default
 * starting in 6.0.  GC_malloc_stubborn is an alias for GC_malloc unless
 * the collector is built with STUBBORN_ALLOC defined.
 */
GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
GC_API char *GC_strdup GC_PROTO((const char *str));
GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
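
/*
 * Illustrative sketch (not part of this interface): choosing among the
 * allocation entry points above.  The types and sizes are hypothetical.
 *
 *   typedef struct pair { GC_PTR car; GC_PTR cdr; } pair;
 *
 *   pair   *p   = (pair *)GC_malloc(sizeof(pair));
 *   double *vec = (double *)GC_malloc_atomic(100 * sizeof(double));
 *   pair   *rt  = (pair *)GC_malloc_uncollectable(sizeof(pair));
 *
 * p may contain pointers and is scanned and cleared; vec is promised to
 * be pointer-free, so the collector never scans it (and does not clear
 * it); rt is scanned for pointers but never reclaimed implicitly, so it
 * can be referenced solely from memory the collector does not scan, and
 * must eventually be released with GC_free.
 */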

/* The following is only defined if the library has been suitably       */
/* compiled:                                                            */
GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));

/* Explicitly deallocate an object.  Dangerous if used incorrectly.     */
/* Requires a pointer to the base of an object.                         */
/* If the argument is stubborn, it should not be changeable when freed. */
/* An object should not be enabled for finalization when it is          */
/* explicitly deallocated.                                              */
/* GC_free(0) is a no-op, as required by ANSI C for free.               */
GC_API void GC_free GC_PROTO((GC_PTR object_addr));

/*
 * Stubborn objects may be changed only if the collector is explicitly informed.
 * The collector is implicitly informed of coming change when such
 * an object is first allocated.  The following routines inform the
 * collector that an object will no longer be changed, or that it will
 * once again be changed.  Only non-NULL pointer stores into the object
 * are considered to be changes.  The argument to GC_end_stubborn_change
 * must be exactly the value returned by GC_malloc_stubborn or passed to
 * GC_change_stubborn.  (In the second case it may be an interior pointer
 * within 512 bytes of the beginning of the object.)
 * There is a performance penalty for allowing more than
 * one stubborn object to be changed at once, but it is acceptable to
 * do so.  The same applies to dropping stubborn objects that are still
 * changeable.
 */
GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
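
/*
 * Illustrative sketch (not part of this interface): the stubborn
 * allocation protocol.  The struct and the helper below are
 * hypothetical.  Remember that GC_malloc_stubborn is simply an alias
 * for GC_malloc unless the collector was built with STUBBORN_ALLOC.
 *
 *   struct entry { struct entry *next; };
 *
 *   static struct entry *prepend(struct entry *head)
 *   {
 *     struct entry *e = (struct entry *)GC_malloc_stubborn(sizeof *e);
 *     e->next = head;
 *     GC_end_stubborn_change(e);
 *     return e;
 *   }
 *
 * If e must later be updated after all, bracket the pointer stores with
 * GC_change_stubborn(e) and another GC_end_stubborn_change(e).
 */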

/* Return a pointer to the base (lowest address) of an object given     */
/* a pointer to a location within the object.                           */
/* I.e. map an interior pointer to the corresponding base pointer.      */
/* Note that with debugging allocation, this returns a pointer to the   */
/* actual base of the object, i.e. the debug information, not to        */
/* the base of the user object.                                         */
/* Return 0 if displaced_pointer doesn't point to within a valid        */
/* object.                                                              */
/* Note that a deallocated object in the garbage collected heap         */
/* may be considered valid, even if it has been deallocated with        */
/* GC_free.                                                             */
GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));

/* Given a pointer to the base of an object, return its size in bytes.  */
/* The returned size may be slightly larger than what was originally    */
/* requested.                                                           */
GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));

/* For compatibility with C library.  This is occasionally faster than  */
/* a malloc followed by a bcopy.  But if you rely on that, either here  */
/* or with the standard C library, your code is broken.  In my          */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB   */
/* The resulting object has the same kind as the original.              */
/* If the argument is stubborn, the result will have changes enabled.   */
/* It is an error to have changes enabled for the original object.      */
/* Follows ANSI conventions for NULL old_object.                        */
GC_API GC_PTR GC_realloc
        GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));

/* Explicitly increase the heap size.   */
/* Returns 0 on failure, 1 on success.  */
GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));

/* Limit the heap size to n bytes.  Useful when you're debugging,       */
/* especially on systems that don't handle running out of memory well.  */
/* n == 0 ==> unbounded.  This is the default.                          */
GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));

/* Inform the collector that a certain section of statically allocated  */
/* memory contains no pointers to garbage collected memory.  Thus it    */
/* need not be scanned.  This is sometimes important if the application */
/* maps large read/write files into the address space, which could be   */
/* mistaken for dynamic library data segments on some systems.          */
GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));

/* Clear the set of root segments.  Wizards only. */
GC_API void GC_clear_roots GC_PROTO((void));

/* Add a root segment.  Wizards only. */
GC_API void GC_add_roots GC_PROTO((char * low_address,
                                   char * high_address_plus_1));

/* Remove a root segment.  Wizards only. */
GC_API void GC_remove_roots GC_PROTO((char * low_address,
                                      char * high_address_plus_1));

/* Add a displacement to the set of those considered valid by the       */
/* collector.  GC_register_displacement(n) means that if p was returned */
/* by GC_malloc, then (char *)p + n will be considered to be a valid    */
/* pointer to p.  N must be small and less than the size of p.          */
/* (All pointers to the interior of objects from the stack are          */
/* considered valid in any case.  This applies to heap objects and      */
/* static data.)                                                        */
/* Preferably, this should be called before any other GC procedures.    */
/* Calling it later adds to the probability of excess memory            */
/* retention.                                                           */
/* This is a no-op if the collector has recognition of                  */
/* arbitrary interior pointers enabled, which is now the default.       */
GC_API void GC_register_displacement GC_PROTO((GC_word n));

/* The following version should be used if any debugging allocation is  */
/* being done.                                                          */
GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));

/* Explicitly trigger a full, world-stopped collection.        */
GC_API void GC_gcollect GC_PROTO((void));

/* Trigger a full world-stopped collection.  Abort the collection if    */
/* and when stop_func returns a nonzero value.  Stop_func will be       */
/* called frequently, and should be reasonably fast.  This works even   */
/* if virtual dirty bits, and hence incremental collection, are not     */
/* available for this architecture.  Collections can be aborted faster  */
/* than normal pause times for incremental collection.  However,        */
/* aborted collections do no useful work; the next collection needs     */
/* to start from the beginning.                                         */
/* Return 0 if the collection was aborted, 1 if it succeeded.           */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
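
/*
 * Illustrative sketch (not part of this interface): bounding the time
 * spent in an explicitly triggered collection.  The deadline logic and
 * the one-second budget are hypothetical; a real stop function just
 * needs to be fast and to return nonzero when the collection should be
 * abandoned.
 *
 *   #include <time.h>
 *
 *   static time_t gc_deadline;
 *
 *   static int past_deadline(void)
 *   {
 *     return time(NULL) > gc_deadline;
 *   }
 *
 *   void collect_with_budget(void)
 *   {
 *     gc_deadline = time(NULL) + 1;
 *     (void)GC_try_to_collect(past_deadline);
 *   }
 *
 * GC_try_to_collect returns 0 if the collection was aborted, in which
 * case no useful work was done.
 */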

/* Return the number of bytes in the heap.  Excludes collector private  */
/* data structures.  Includes empty blocks and fragmentation loss.      */
/* Includes some pages that were allocated but never written.           */
GC_API size_t GC_get_heap_size GC_PROTO((void));

/* Return a lower bound on the number of free bytes in the heap.        */
GC_API size_t GC_get_free_bytes GC_PROTO((void));

/* Return the number of bytes allocated since the last collection.      */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));

/* Return the total number of bytes allocated in this process.          */
/* Never decreases, except due to wrapping.                             */
GC_API size_t GC_get_total_bytes GC_PROTO((void));

/* Disable garbage collection.  Even GC_gcollect calls will be          */
/* ineffective.                                                         */
GC_API void GC_disable GC_PROTO((void));

/* Reenable garbage collection.  GC_disable() and GC_enable() calls     */
/* nest.  Garbage collection is enabled if the number of calls to       */
/* both functions is equal.                                             */
GC_API void GC_enable GC_PROTO((void));

/* Enable incremental/generational collection.  */
/* Not advisable unless dirty bits are          */
/* available or most heap objects are           */
/* pointer-free (atomic) or immutable.          */
/* Don't use in leak finding mode.              */
/* Ignored if GC_dont_gc is true.               */
/* Only the generational piece of this is       */
/* functional if GC_parallel is TRUE            */
/* or if GC_time_limit is GC_TIME_UNLIMITED.    */
/* Causes GC_local_gcj_malloc() to revert to    */
/* locked allocation.  Must be called           */
/* before any GC_local_gcj_malloc() calls.      */
GC_API void GC_enable_incremental GC_PROTO((void));
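
/*
 * Illustrative sketch (not part of this interface): temporarily
 * suppressing collection around a latency-sensitive region.  The
 * wrapper function is hypothetical.
 *
 *   void run_critical_section(void (*body)(void))
 *   {
 *     GC_disable();
 *     body();
 *     GC_enable();
 *   }
 *
 * GC_disable/GC_enable nest, so this is safe even if body() contains
 * another such region.  GC_enable_incremental, in contrast, is a
 * one-time mode switch that is normally called once, early, before
 * much allocation has been done.
 */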

/* Does incremental mode write-protect pages?  Returns zero or  */
/* more of the following, or'ed together:                       */
#define GC_PROTECTS_POINTER_HEAP  1 /* May protect non-atomic objs.     */
#define GC_PROTECTS_PTRFREE_HEAP  2
#define GC_PROTECTS_STATIC_DATA   4 /* Currently never.                 */
#define GC_PROTECTS_STACK         8 /* Probably impractical.            */

#define GC_PROTECTS_NONE 0
GC_API int GC_incremental_protection_needs GC_PROTO((void));

/* Perform some garbage collection work, if appropriate.        */
/* Return 0 if there is no more work to be done.                */
/* Typically performs an amount of work corresponding roughly   */
/* to marking from one page.  May do more work if further       */
/* progress requires it, e.g. if incremental collection is      */
/* disabled.  It is reasonable to call this in a wait loop      */
/* until it returns 0.                                          */
GC_API int GC_collect_a_little GC_PROTO((void));

/* Allocate an object of size lb bytes.  The client guarantees that     */
/* as long as the object is live, it will be referenced by a pointer    */
/* that points to somewhere within the first 256 bytes of the object.   */
/* (This should normally be declared volatile to prevent the compiler   */
/* from invalidating this assertion.)  This routine is only useful      */
/* if a large array is being allocated.  It reduces the chance of       */
/* accidentally retaining such an array as a result of scanning an      */
/* integer that happens to be an address inside the array.  (Actually,  */
/* it reduces the chance of the allocator not finding space for such    */
/* an array, since it will try hard to avoid introducing such a false   */
/* reference.)  On a SunOS 4.X or MS Windows system this is recommended */
/* for arrays likely to be larger than 100K or so.  For other systems,  */
/* or if the collector is not configured to recognize all interior      */
/* pointers, the threshold is normally much higher.                     */
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));

#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
#   define GC_ADD_CALLER
#   define GC_RETURN_ADDR (GC_word)__return_address
#endif

#if defined(__linux__) || defined(__GLIBC__)
# include <features.h>
# if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \
     && !defined(__ia64__)
#   ifndef GC_HAVE_BUILTIN_BACKTRACE
#     define GC_HAVE_BUILTIN_BACKTRACE
#   endif
# endif
# if defined(__i386__) || defined(__x86_64__)
#   define GC_CAN_SAVE_CALL_STACKS
# endif
#endif

#if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS)
# define GC_CAN_SAVE_CALL_STACKS
#endif

#if defined(__sparc__)
#   define GC_CAN_SAVE_CALL_STACKS
#endif

/* If we're on a platform on which we can't save call stacks, but       */
/* gcc is normally used, we go ahead and define GC_ADD_CALLER.          */
/* We make this decision independent of whether gcc is actually being   */
/* used, in order to keep the interface consistent, and allow mixing    */
/* of compilers.                                                        */
/* This may also be desirable if it is possible but expensive to        */
/* retrieve the call chain.                                             */
#if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \
     || defined(__FreeBSD__) || defined(__DragonFly__)) \
    && !defined(GC_CAN_SAVE_CALL_STACKS)
# define GC_ADD_CALLER
# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
    /* gcc knows how to retrieve return address, but we don't know */
    /* how to generate call stacks.                                */
#   define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
# else
    /* Just pass 0 for gcc compatibility. */
#   define GC_RETURN_ADDR 0
# endif
#endif

#ifdef GC_ADD_CALLER
#  define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
#  define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i
#else
#  define GC_EXTRAS __FILE__, __LINE__
#  define GC_EXTRA_PARAMS GC_CONST char * s, int i
#endif

/* Debugging (annotated) allocation.  GC_gcollect will check            */
/* objects allocated in this way for overwrites, etc.                   */
GC_API GC_PTR GC_debug_malloc
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API char *GC_debug_strdup
        GC_PROTO((const char *str, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_ignore_off_page
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic_ignore_off_page
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
        GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
                  GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));

/* Routines that allocate objects with debug information (like the      */
/* above), but just fill in dummy file and line number information.     */
/* Thus they can serve as drop-in malloc/realloc replacements.  This    */
/* can be useful for two reasons:                                       */
/* 1) It allows the collector to be built with DBG_HDRS_ALL defined     */
/*    even if some allocation calls come from 3rd party libraries       */
/*    that can't be recompiled.                                         */
/* 2) On some platforms, the file and line information is redundant,    */
/*    since it can be reconstructed from a stack trace.  On such        */
/*    platforms it may be more convenient not to recompile, e.g. for    */
/*    leak detection.  This can be accomplished by instructing the      */
/*    linker to replace malloc/realloc with these.                      */
GC_API GC_PTR GC_debug_malloc_replacement GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_debug_realloc_replacement
        GC_PROTO((GC_PTR object_addr, size_t size_in_bytes));

# ifdef GC_DEBUG
#   define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
#   define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
#   define GC_STRDUP(s) GC_debug_strdup((s), GC_EXTRAS)
#   define GC_MALLOC_UNCOLLECTABLE(sz) \
        GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
#   define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
        GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
#   define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
        GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
#   define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
#   define GC_FREE(p) GC_debug_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
        GC_debug_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
        GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
        GC_debug_register_finalizer_no_order(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
#   define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
        GC_general_register_disappearing_link(link, GC_base(obj))
#   define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
#   define GC_MALLOC(sz) GC_malloc(sz)
#   define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
#   define GC_STRDUP(s) GC_strdup(s)
#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
#   define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
        GC_malloc_ignore_off_page(sz)
#   define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
        GC_malloc_atomic_ignore_off_page(sz)
#   define GC_REALLOC(old, sz) GC_realloc(old, sz)
#   define GC_FREE(p) GC_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
        GC_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
        GC_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
        GC_register_finalizer_no_order(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
#   define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
        GC_general_register_disappearing_link(link, obj)
#   define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
/* The following are included because they are often convenient, and    */
/* reduce the chance for a misspecified size argument.  But calls may   */
/* expand to something syntactically incorrect if t is a complicated    */
/* type expression.                                                     */
# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
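
/*
 * Illustrative sketch (not part of this interface): allocating through
 * the uppercase macros so that compiling with -DGC_DEBUG transparently
 * switches to the checked, file/line-annotated allocators.  The struct
 * is hypothetical.
 *
 *   struct point { double x, y; };
 *
 *   struct point *p = GC_NEW_ATOMIC(struct point);
 *   struct point **v =
 *       (struct point **)GC_MALLOC(10 * sizeof(struct point *));
 *   v[0] = p;
 *
 * With GC_DEBUG defined before this header is included, the same source
 * expands to GC_debug_malloc_atomic / GC_debug_malloc calls, which
 * record __FILE__ and __LINE__ and are checked for overwrites by
 * GC_gcollect.
 */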

/* Finalization.  Some of these primitives are grossly unsafe.          */
/* The idea is to make them both cheap, and sufficient to build         */
/* a safer layer, closer to Modula-3, Java, or PCedar finalization.     */
/* The interface represents my conclusions from a long discussion       */
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes,              */
/* Christian Jacobi, and Russ Atkinson.  It's not perfect, and          */
/* probably nobody else agrees with it.     Hans-J. Boehm  3/13/92      */
typedef void (*GC_finalization_proc)
        GC_PROTO((GC_PTR obj, GC_PTR client_data));

GC_API void GC_register_finalizer
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
        /* When obj is no longer accessible, invoke             */
        /* (*fn)(obj, cd).  If a and b are inaccessible, and    */
        /* a points to b (after disappearing links have been    */
        /* made to disappear), then only a will be              */
        /* finalized.  (If this does not create any new         */
        /* pointers to b, then b will be finalized after the    */
        /* next collection.)  Any finalizable object that       */
        /* is reachable from itself by following one or more    */
        /* pointers will not be finalized (or collected).       */
        /* Thus cycles involving finalizable objects should     */
        /* be avoided, or broken by disappearing links.         */
        /* All but the last finalizer registered for an object  */
        /* are ignored.                                         */
        /* Finalization may be removed by passing 0 as fn.      */
        /* Finalizers are implicitly unregistered just before   */
        /* they are invoked.                                    */
        /* The old finalizer and client data are stored in      */
        /* *ofn and *ocd.                                       */
        /* Fn is never invoked on an accessible object,         */
        /* provided hidden pointers are converted to real       */
        /* pointers only if the allocation lock is held, and    */
        /* such conversions are not performed by finalization   */
        /* routines.                                            */
        /* If GC_register_finalizer is aborted as a result of   */
        /* a signal, the object may be left with no             */
        /* finalization, even if neither the old nor new        */
        /* finalizer were NULL.                                 */
        /* Obj should be the non-NULL starting address of an    */
        /* object allocated by GC_malloc or friends.            */
        /* Note that any garbage collectable object referenced  */
        /* by cd will be considered accessible until the        */
        /* finalizer is invoked.                                */
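
/*
 * Illustrative sketch (not part of this interface): attaching a
 * finalizer that releases a non-heap resource.  The wrapper type and
 * the helper functions are hypothetical.
 *
 *   #include <stdio.h>
 *
 *   struct file_handle { FILE *fp; };
 *
 *   static void close_file(GC_PTR obj, GC_PTR client_data)
 *   {
 *     struct file_handle *h = (struct file_handle *)obj;
 *     if (h->fp != NULL) fclose(h->fp);
 *   }
 *
 *   struct file_handle *open_handle(const char *name)
 *   {
 *     struct file_handle *h = GC_NEW(struct file_handle);
 *     h->fp = fopen(name, "r");
 *     GC_REGISTER_FINALIZER(h, close_file, 0, 0, 0);
 *     return h;
 *   }
 *
 * client_data is unused here; passing 0 for ofn and ocd discards any
 * previously registered finalizer and its client data.
 */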

/* Another version of the above follows.  It ignores            */
/* self-cycles, i.e. pointers from a finalizable object to      */
/* itself.  There is a stylistic argument that this is wrong,   */
/* but it's unavoidable for C++, since the compiler may         */
/* silently introduce these.  It's also benign in that specific */
/* case.  And it helps if finalizable objects are split to      */
/* avoid cycles.                                                */
/* Note that cd will still be viewed as accessible, even if it  */
/* refers to the object itself.                                 */
GC_API void GC_register_finalizer_ignore_self
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_ignore_self
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));

/* Another version of the above.  It ignores all cycles.        */
/* It should probably only be used by Java implementations.     */
/* Note that cd will still be viewed as accessible, even if it  */
/* refers to the object itself.                                 */
GC_API void GC_register_finalizer_no_order
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_no_order
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));


/* The following routine may be used to break cycles between    */
/* finalizable objects, thus causing cyclic finalizable         */
/* objects to be finalized in the correct order.  Standard      */
/* use involves calling GC_register_disappearing_link(&p),      */
/* where p is a pointer that is not followed by finalization    */
/* code, and should not be considered in determining            */
/* finalization order.                                          */
GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
        /* Link should point to a field of a heap allocated     */
        /* object obj.  *link will be cleared when obj is       */
        /* found to be inaccessible.  This happens BEFORE any   */
        /* finalization code is invoked, and BEFORE any         */
        /* decisions about finalization order are made.         */
        /* This is useful in telling the finalizer that         */
        /* some pointers are not essential for proper           */
        /* finalization.  This may avoid finalization cycles.   */
        /* Note that obj may be resurrected by another          */
        /* finalizer, and thus the clearing of *link may        */
        /* be visible to non-finalization code.                 */
        /* There's an argument that an arbitrary action should  */
        /* be allowed here, instead of just clearing a pointer. */
        /* But this causes problems if that action alters, or   */
        /* examines connectivity.                               */
        /* Returns 1 if link was already registered, 0          */
        /* otherwise.                                           */
        /* Only exists for backward compatibility.  See below:  */

GC_API int GC_general_register_disappearing_link
        GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
        /* A slight generalization of the above.  *link is      */
        /* cleared when obj first becomes inaccessible.  This   */
        /* can be used to implement weak pointers easily and    */
        /* safely.  Typically link will point to a location     */
        /* holding a disguised pointer to obj.  (A pointer      */
        /* inside an "atomic" object is effectively             */
        /* disguised.)  In this way soft                        */
        /* pointers are broken before any object                */
        /* reachable from them is finalized.  Each link         */
        /* may be registered only once, i.e. with one obj       */
        /* value.  This was added after a long email discussion */
        /* with John Ellis.                                     */
        /* Obj must be a pointer to the first word of an object */
        /* we allocated.  It is unsafe to explicitly deallocate */
        /* the object containing link.  Explicitly deallocating */
        /* obj may or may not cause link to eventually be       */
        /* cleared.                                             */
GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
        /* Returns 0 if link was not actually registered.       */
        /* Undoes a registration by either of the above two     */
        /* routines.                                            */
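
/*
 * Illustrative sketch (not part of this interface): a simple weak
 * reference built on GC_general_register_disappearing_link.  The
 * wrapper type and function name are hypothetical; storing the bitwise
 * complement of the pointer (the same disguise used by HIDE_POINTER
 * below) keeps the reference from retaining obj.
 *
 *   struct weak_ref { GC_word hidden; };
 *
 *   struct weak_ref *weak_ref_new(GC_PTR obj)
 *   {
 *     struct weak_ref *w = GC_NEW_ATOMIC(struct weak_ref);
 *     w->hidden = ~(GC_word)obj;
 *     GC_general_register_disappearing_link((GC_PTR *)&w->hidden, obj);
 *     return w;
 *   }
 *
 * The collector clears w->hidden when obj becomes inaccessible, so
 * hidden == 0 means the referent is gone.  Safely turning the hidden
 * value back into a usable pointer requires the allocation lock; see
 * GC_call_with_alloc_lock and the I_HIDE_POINTERS section below.
 */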

/* Returns != 0 if GC_invoke_finalizers has something to do.            */
GC_API int GC_should_invoke_finalizers GC_PROTO((void));

GC_API int GC_invoke_finalizers GC_PROTO((void));
        /* Run finalizers for all objects that are ready to     */
        /* be finalized.  Return the number of finalizers       */
        /* that were run.  Normally this is also called         */
        /* implicitly during some allocations.  If              */
        /* GC_finalize_on_demand is nonzero, it must be called  */
        /* explicitly.                                          */

/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer.                                         */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
        /* Returns old warning procedure.       */
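
/*
 * Illustrative sketch (not part of this interface): routing collector
 * warnings through the application's own logging.  The handler name is
 * hypothetical; msg is typically a printf-style format whose single
 * argument is arg.
 *
 *   #include <stdio.h>
 *
 *   static void my_warn_proc(char *msg, GC_word arg)
 *   {
 *     fprintf(stderr, "[gc] ");
 *     fprintf(stderr, msg, arg);
 *   }
 *
 *   static void install_warn_proc(void)
 *   {
 *     (void)GC_set_warn_proc(my_warn_proc);
 *   }
 */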

GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
        /* Set free_space_divisor.  See above for definition.   */
        /* Returns old value.                                    */

/* The following is intended to be used by a higher level       */
/* (e.g. Java-like) finalization facility.  It is expected      */
/* that finalization code will arrange for hidden pointers to   */
/* disappear.  Otherwise objects can be accessed after they     */
/* have been collected.                                         */
/* Note that putting pointers in atomic objects or in           */
/* nonpointer slots of "typed" objects is equivalent to         */
/* disguising them in this way, and may have other advantages.  */
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
typedef GC_word GC_hidden_pointer;
#   define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
#   define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
    /* Converting a hidden pointer to a real pointer requires verifying */
    /* that the object still exists.  This involves acquiring the       */
    /* allocator lock to avoid a race with the collector.               */
# endif /* I_HIDE_POINTERS */

typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
GC_API GC_PTR GC_call_with_alloc_lock
        GC_PROTO((GC_fn_type fn, GC_PTR client_data));
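
/*
 * Illustrative sketch (not part of this interface): dereferencing the
 * weak reference from the earlier sketch while holding the allocation
 * lock, as the comments above require.  It assumes this header was
 * included with GC_I_HIDE_POINTERS defined, so REVEAL_POINTER exists;
 * the wrapper type and function names are hypothetical.
 *
 *   struct weak_ref { GC_word hidden; };
 *
 *   static GC_PTR reveal_locked(GC_PTR client_data)
 *   {
 *     struct weak_ref *w = (struct weak_ref *)client_data;
 *     if (w->hidden == 0) return 0;
 *     return REVEAL_POINTER(w->hidden);
 *   }
 *
 *   GC_PTR weak_ref_get(struct weak_ref *w)
 *   {
 *     return GC_call_with_alloc_lock(reveal_locked, (GC_PTR)w);
 *   }
 *
 * While the lock is held the collector cannot clear w->hidden, so a
 * nonzero result refers to an object that was still reachable at that
 * moment, and the caller's new ordinary reference then keeps it alive.
 */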

/* The following routines are primarily intended for use with a         */
/* preprocessor which inserts calls to check C pointer arithmetic.      */
/* They indicate failure by invoking the corresponding _print_proc.     */

/* Check that p and q point to the same object.                 */
/* Fail conspicuously if they don't.                            */
/* Returns the first argument.                                  */
/* Succeeds if neither p nor q points to the heap.              */
/* May succeed if both p and q point to addresses between       */
/* heap objects.                                                */
GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));

/* Checked pointer pre- and post- increment operations.  Note that      */
/* the second argument is in units of bytes, not multiples of the       */
/* object size.  This should either be invoked from a macro, or the     */
/* call should be automatically generated.                              */
GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));

/* Check that p is visible to the collector as a location that          */
/* may possibly contain a pointer.  If it isn't, fail conspicuously.    */
/* Returns the argument in all cases.  May erroneously succeed          */
/* in hard cases.  (This is intended for debugging use with             */
/* untyped allocations.  The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.)            */
/* Currently useless for multithreaded worlds.                          */
GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));

/* Check that if p is a pointer to a heap page, then it points to       */
/* a valid displacement within a heap object.                           */
/* Fail conspicuously if this property does not hold.                   */
/* Uninteresting with GC_all_interior_pointers.                         */
/* Always returns its argument.                                         */
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));

/* Safer, but slow, pointer addition.  Probably useful mainly with      */
/* a preprocessor.  Useful only for heap pointers.                      */
#ifdef GC_DEBUG
#   define GC_PTR_ADD3(x, n, type_of_result) \
        ((type_of_result)GC_same_obj((x)+(n), (x)))
#   define GC_PRE_INCR3(x, n, type_of_result) \
        ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
#   define GC_POST_INCR2(x, type_of_result) \
        ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
#   ifdef __GNUC__
#       define GC_PTR_ADD(x, n) \
            GC_PTR_ADD3(x, n, typeof(x))
#       define GC_PRE_INCR(x, n) \
            GC_PRE_INCR3(x, n, typeof(x))
#       define GC_POST_INCR(x, n) \
            GC_POST_INCR2(x, typeof(x))
#   else
        /* We can't do this right without typeof, which ANSI    */
        /* decided was not sufficiently useful.  Repeatedly     */
        /* mentioning the arguments seems too dangerous to be   */
        /* useful.  So does not casting the result.             */
#       define GC_PTR_ADD(x, n) ((x)+(n))
#   endif
#else   /* !GC_DEBUG */
#   define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
#   define GC_PTR_ADD(x, n) ((x)+(n))
#   define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
#   define GC_PRE_INCR(x, n) ((x) += (n))
#   define GC_POST_INCR2(x, type_of_result) ((x)++)
#   define GC_POST_INCR(x, n) ((x)++)
#endif
|---|
| 853 |  | 
|---|
| 854 | /* Safer assignment of a pointer to a nonstack location.        */ | 
|---|
| 855 | #ifdef GC_DEBUG | 
|---|
| 856 | # if defined(__STDC__) || defined(_AIX) | 
|---|
| 857 | #   define GC_PTR_STORE(p, q) \ | 
|---|
| 858 | (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q)) | 
|---|
| 859 | # else | 
|---|
| 860 | #   define GC_PTR_STORE(p, q) \ | 
|---|
| 861 | (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q)) | 
|---|
| 862 | # endif | 
|---|
| 863 | #else /* !GC_DEBUG */ | 
|---|
| 864 | #   define GC_PTR_STORE(p, q) (*(p) = (q)) | 
|---|
| 865 | #endif | 
|---|
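|  | /* | 
|---|
|  | * Illustrative use (a minimal sketch; "obj" and "q" are placeholder | 
|---|
|  | * names): | 
|---|
|  | * | 
|---|
|  | *     void **obj = (void **)GC_MALLOC(2 * sizeof(void *)); | 
|---|
|  | *     void *q = GC_MALLOC(32); | 
|---|
|  | * | 
|---|
|  | *     GC_PTR_STORE(&obj[1], q); | 
|---|
|  | * | 
|---|
|  | * With GC_DEBUG this checks that &obj[1] is a location the collector | 
|---|
|  | * scans and that q is a valid object pointer before storing; without | 
|---|
|  | * GC_DEBUG it is simply obj[1] = q. | 
|---|
|  | */ | 
|---|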
| 866 |  | 
|---|
| 867 | /* Functions called to report pointer checking errors */ | 
|---|
| 868 | GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q)); | 
|---|
| 869 |  | 
|---|
| 870 | GC_API void (*GC_is_valid_displacement_print_proc) | 
|---|
| 871 | GC_PROTO((GC_PTR p)); | 
|---|
| 872 |  | 
|---|
| 873 | GC_API void (*GC_is_visible_print_proc) | 
|---|
| 874 | GC_PROTO((GC_PTR p)); | 
|---|
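|  | /* | 
|---|
|  | * These are ordinary function pointers, so a client may install its own | 
|---|
|  | * reporting routine (a minimal sketch; "my_visible_err" is a placeholder | 
|---|
|  | * name, and the snippet assumes <stdio.h> and <stdlib.h>): | 
|---|
|  | * | 
|---|
|  | *     void my_visible_err(GC_PTR p) | 
|---|
|  | *     { | 
|---|
|  | *         fprintf(stderr, "GC: %p is not a traced location\n", p); | 
|---|
|  | *         abort(); | 
|---|
|  | *     } | 
|---|
|  | * | 
|---|
|  | *     GC_is_visible_print_proc = my_visible_err; | 
|---|
|  | */ | 
|---|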
| 875 |  | 
|---|
| 876 |  | 
|---|
| 877 | /* For pthread support, we generally need to intercept a number of      */ | 
|---|
| 878 | /* thread library calls.  We do that here by redefining them as macros. */ | 
|---|
| 879 |  | 
|---|
| 880 | #if !defined(GC_USE_LD_WRAP) && \ | 
|---|
| 881 | (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS)) | 
|---|
| 882 | # include "gc_pthread_redirects.h" | 
|---|
| 883 | #endif | 
|---|
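|  | /* | 
|---|
|  | * Thus, if GC_PTHREADS (or GC_SOLARIS_THREADS) is defined before this | 
|---|
|  | * header is included, ordinary thread-creation calls are routed through | 
|---|
|  | * the collector's wrappers, so new threads are registered automatically | 
|---|
|  | * (a minimal sketch; "worker" is a placeholder function): | 
|---|
|  | * | 
|---|
|  | *     #define GC_PTHREADS | 
|---|
|  | *     #include "gc.h" | 
|---|
|  | * | 
|---|
|  | *     pthread_t t; | 
|---|
|  | *     pthread_create(&t, 0, worker, 0); | 
|---|
|  | * | 
|---|
|  | * Here pthread_create expands to GC_pthread_create, so the new thread's | 
|---|
|  | * stack is traced by the collector. | 
|---|
|  | */ | 
|---|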
| 884 |  | 
|---|
| 885 | # if defined(PCR) || defined(GC_SOLARIS_THREADS) || \ | 
|---|
| 886 | defined(GC_PTHREADS) || defined(GC_WIN32_THREADS) | 
|---|
| 887 | /* Any flavor of threads except SRC_M3. */ | 
|---|
| 888 | /* This returns a list of objects, linked through their first           */ | 
|---|
| 889 | /* word.  Its use can greatly reduce lock contention problems, since    */ | 
|---|
| 890 | /* the allocation lock can be acquired and released many fewer times.   */ | 
|---|
| 891 | /* lb must be large enough to hold the pointer field.                   */ | 
|---|
| 892 | /* It is used internally by gc_local_alloc.h, which provides a simpler  */ | 
|---|
| 893 | /* programming interface on Linux.                                      */ | 
|---|
| 894 | GC_PTR GC_malloc_many GC_PROTO((size_t lb)); | 
|---|
| 895 | #define GC_NEXT(p) (*(GC_PTR *)(p))     /* Retrieve the next element    */ | 
|---|
| 896 | /* in returned list.            */ | 
|---|
| 897 | extern void GC_thr_init GC_PROTO((void)); /* Needed for Solaris/X86    */ | 
|---|
| 898 |  | 
|---|
| 899 | #endif /* THREADS && !SRC_M3 */ | 
|---|
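|  | /* | 
|---|
|  | * Illustrative use of GC_malloc_many (a minimal sketch; "free_list", | 
|---|
|  | * "p" and "consume" are placeholder names).  Each object in the | 
|---|
|  | * returned list is at least lb bytes; its first word links to the next: | 
|---|
|  | * | 
|---|
|  | *     GC_PTR free_list = GC_malloc_many(32); | 
|---|
|  | *     GC_PTR p; | 
|---|
|  | * | 
|---|
|  | *     while (free_list != 0) { | 
|---|
|  | *         p = free_list; | 
|---|
|  | *         free_list = GC_NEXT(p); | 
|---|
|  | *         GC_NEXT(p) = 0; | 
|---|
|  | *         consume(p); | 
|---|
|  | *     } | 
|---|
|  | * | 
|---|
|  | * Clearing the link word before handing an object out avoids leaving a | 
|---|
|  | * stale pointer to the rest of the list in it. | 
|---|
|  | */ | 
|---|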
| 900 |  | 
|---|
| 901 | #if defined(GC_WIN32_THREADS) && !defined(__CYGWIN32__) && !defined(__CYGWIN__) | 
|---|
| 902 | # include <windows.h> | 
|---|
| 903 |  | 
|---|
| 904 | /* | 
|---|
| 905 | * All threads must be created using GC_CreateThread, so that they will be | 
|---|
| 906 | * recorded in the thread table.  For backwards compatibility, this is not | 
|---|
| 907 | * technically true if the GC is built as a dynamic library, since it can | 
|---|
| 908 | * and does then use DllMain to keep track of thread creations.  But new code | 
|---|
| 909 | * should call GC_CreateThread. | 
|---|
| 910 | */ | 
|---|
| 911 | GC_API HANDLE WINAPI GC_CreateThread( | 
|---|
| 912 | LPSECURITY_ATTRIBUTES lpThreadAttributes, | 
|---|
| 913 | DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress, | 
|---|
| 914 | LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId ); | 
|---|
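|  | /* | 
|---|
|  | * Illustrative use (a minimal sketch; "thread_main" and "arg" are | 
|---|
|  | * placeholder names).  The signature matches CreateThread, so this is | 
|---|
|  | * normally a drop-in replacement: | 
|---|
|  | * | 
|---|
|  | *     DWORD tid; | 
|---|
|  | *     HANDLE h = GC_CreateThread(NULL, 0, thread_main, arg, 0, &tid); | 
|---|
|  | * | 
|---|
|  | *     WaitForSingleObject(h, INFINITE); | 
|---|
|  | *     CloseHandle(h); | 
|---|
|  | */ | 
|---|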
| 915 |  | 
|---|
| 916 | # if defined(_WIN32_WCE) | 
|---|
| 917 | /* | 
|---|
| 918 | * win32_threads.c implements the real WinMain, which will start a new thread | 
|---|
| 919 | * to call GC_WinMain after initializing the garbage collector. | 
|---|
| 920 | */ | 
|---|
| 921 | int WINAPI GC_WinMain( | 
|---|
| 922 | HINSTANCE hInstance, | 
|---|
| 923 | HINSTANCE hPrevInstance, | 
|---|
| 924 | LPWSTR lpCmdLine, | 
|---|
| 925 | int nCmdShow ); | 
|---|
| 926 |  | 
|---|
| 927 | #  ifndef GC_BUILD | 
|---|
| 928 | #    define WinMain GC_WinMain | 
|---|
| 929 | #    define CreateThread GC_CreateThread | 
|---|
| 930 | #  endif | 
|---|
| 931 | # endif /* defined(_WIN32_WCE) */ | 
|---|
| 932 |  | 
|---|
| 933 | #endif /* defined(GC_WIN32_THREADS)  && !cygwin */ | 
|---|
| 934 |  | 
|---|
| 935 | /* | 
|---|
| 936 | * Fully portable code should call GC_INIT() from the main program | 
|---|
| 937 | * before making any other GC_ calls.  On most platforms this is a | 
|---|
| 938 | * no-op and the collector self-initializes.  But a number of platforms | 
|---|
| 939 | * make that too hard. | 
|---|
| 940 | */ | 
|---|
| 941 | #if (defined(sparc) || defined(__sparc)) && defined(sun) | 
|---|
| 942 | /* | 
|---|
| 943 | * If you are planning on putting | 
|---|
| 944 | * the collector in a SunOS 5 dynamic library, you need to call GC_INIT() | 
|---|
| 945 | * from the statically loaded program section. | 
|---|
| 946 | * This circumvents a Solaris 2.X (X<=4) linker bug. | 
|---|
| 947 | */ | 
|---|
| 948 | extern int _end[], _etext[]; | 
|---|
| 949 | #   ifdef __cplusplus | 
|---|
| 950 | extern "C" void GC_noop1(GC_word); | 
|---|
| 951 | #   else | 
|---|
| 952 | void GC_noop1(); | 
|---|
| 953 | #   endif /* !__cplusplus */ | 
|---|
| 954 | #   define GC_INIT() { GC_noop1((GC_word)_end); \ | 
|---|
| 955 | GC_noop1((GC_word)_etext); } | 
|---|
| 956 | #else | 
|---|
| 957 | # if defined(__CYGWIN32__) || defined (_AIX) | 
|---|
| 958 | /* | 
|---|
| 959 | * Similarly, gnu-win32 DLLs need explicit initialization from | 
|---|
| 960 | * the main program, as does AIX. | 
|---|
| 961 | */ | 
|---|
| 962 | #   ifdef __CYGWIN32__ | 
|---|
| 963 | extern int _data_start__[]; | 
|---|
| 964 | extern int _data_end__[]; | 
|---|
| 965 | extern int _bss_start__[]; | 
|---|
| 966 | extern int _bss_end__[]; | 
|---|
| 967 | #     define GC_MAX(x,y) ((x) > (y) ? (x) : (y)) | 
|---|
| 968 | #     define GC_MIN(x,y) ((x) < (y) ? (x) : (y)) | 
|---|
| 969 | #     define GC_DATASTART ((GC_PTR) GC_MIN(_data_start__, _bss_start__)) | 
|---|
| 970 | #     define GC_DATAEND  ((GC_PTR) GC_MAX(_data_end__, _bss_end__)) | 
|---|
| 971 | #     ifdef GC_DLL | 
|---|
| 972 | #       define GC_INIT() { GC_add_roots(GC_DATASTART, GC_DATAEND); } | 
|---|
| 973 | #     else | 
|---|
| 974 | #       define GC_INIT() | 
|---|
| 975 | #     endif | 
|---|
| 976 | #   endif | 
|---|
| 977 | #   if defined(_AIX) | 
|---|
| 978 | extern int _data[], _end[]; | 
|---|
| 979 | #     define GC_DATASTART ((GC_PTR)((ulong)_data)) | 
|---|
| 980 | #     define GC_DATAEND ((GC_PTR)((ulong)_end)) | 
|---|
| 981 | #     define GC_INIT() { GC_add_roots(GC_DATASTART, GC_DATAEND); } | 
|---|
| 982 | #   endif | 
|---|
| 983 | # else | 
|---|
| 984 | #  if (defined(__APPLE__) && defined(__MACH__)) || defined(GC_WIN32_THREADS) | 
|---|
| 985 | #   define GC_INIT() { GC_init(); } | 
|---|
| 986 | #  else | 
|---|
| 987 | #   define GC_INIT() | 
|---|
| 988 | #  endif /* !__MACH && !GC_WIN32_THREADS */ | 
|---|
| 989 | # endif /* !AIX && !cygwin */ | 
|---|
| 990 | #endif /* !sparc */ | 
|---|
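|  | /* | 
|---|
|  | * Typical use (a minimal sketch): call GC_INIT() first, before any | 
|---|
|  | * other GC_ entry point: | 
|---|
|  | * | 
|---|
|  | *     #include "gc.h" | 
|---|
|  | * | 
|---|
|  | *     int main(void) | 
|---|
|  | *     { | 
|---|
|  | *         char *h; | 
|---|
|  | * | 
|---|
|  | *         GC_INIT(); | 
|---|
|  | *         h = (char *)GC_MALLOC(10); | 
|---|
|  | *         return h == 0; | 
|---|
|  | *     } | 
|---|
|  | */ | 
|---|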
| 991 |  | 
|---|
| 992 | #if !defined(_WIN32_WCE) \ | 
|---|
| 993 | && (((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300)) \ | 
|---|
| 994 | || (defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))) | 
|---|
| 995 | /* Win32s may not free all resources on process exit.  */ | 
|---|
| 996 | /* This explicitly deallocates the heap.               */ | 
|---|
| 997 | GC_API void GC_win32_free_heap GC_PROTO((void)); | 
|---|
| 998 | #endif | 
|---|
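|  | /* | 
|---|
|  | * On such hosts a program may release the collector's heap explicitly, | 
|---|
|  | * typically just before process exit (a minimal sketch): | 
|---|
|  | * | 
|---|
|  | *     GC_win32_free_heap(); | 
|---|
|  | */ | 
|---|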
| 999 |  | 
|---|
| 1000 | #if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) ) | 
|---|
| 1001 | /* Allocation really goes through GC_amiga_allocwrapper_do */ | 
|---|
| 1002 | # include "gc_amiga_redirects.h" | 
|---|
| 1003 | #endif | 
|---|
| 1004 |  | 
|---|
| 1005 | #if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H) | 
|---|
| 1006 | #  include  "gc_local_alloc.h" | 
|---|
| 1007 | #endif | 
|---|
| 1008 |  | 
|---|
| 1009 | #ifdef __cplusplus | 
|---|
| 1010 | }  /* end of extern "C" */ | 
|---|
| 1011 | #endif | 
|---|
| 1012 |  | 
|---|
| 1013 | #endif /* _GC_H */ | 
|---|