/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();   /* in misc.c, behaves like identity */
void GC_extend_size_map();       /* in misc.c. */
GC_bool GC_alloc_reclaim_list(); /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access  */
/* to free lists from inlined allocators without including gc_priv.h   */
/* or introducing dependencies on internal data structure layouts.     */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
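
/* A minimal sketch (illustration only, not part of this file) of the  */
/* kind of inlined allocator these pointers enable.  gc_inline.h does  */
/* this more carefully, with locking and initialization checks; the    */
/* name quick_alloc_words is hypothetical.                             */
#if 0
ptr_t quick_alloc_words(word lw)        /* lw = small object size in words */
{
    ptr_t op = GC_objfreelist_ptr[lw];  /* head of the NORMAL free list */

    if (op == 0) {
        /* Free list empty: fall back to the slower allocator below.   */
        return(GC_generic_malloc_words_small(lw, NORMAL));
    }
    GC_objfreelist_ptr[lw] = *(ptr_t *)op; /* objects link through 1st word */
    *(ptr_t *)op = 0;
    GC_incr_words_allocd(lw);           /* keep the allocation count honest */
    return(op);
}
#endif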


GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
          case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
          case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}


/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    register struct hblk * h;
    register hdr * hhdr;
    register word sz;       /* Current size in bytes  */
    register word orig_sz;  /* Original sz in bytes   */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          register word descr;

          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          hhdr -> hb_sz = BYTES_TO_WORDS(sz);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
              GC_PTR result =
                        GC_generic_or_special_malloc((word)lb, obj_kind);

              if (result == 0) return(0);
                  /* Could also return original object.  But this      */
                  /* gives the client warning of imminent disaster.    */
              BCOPY(p, result, lb);
#             ifndef IGNORE_FREE
                GC_free(p);
#             endif
              return(result);
        }
    } else {
        /* grow */
          GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
#         ifndef IGNORE_FREE
            GC_free(p);
#         endif
          return(result);
    }
}
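
/* A hedged usage sketch of the contract above: a NULL argument behaves */
/* like GC_malloc (as ANSI requires), the object kind is preserved, and */
/* the object may move.                                                 */
#if 0
{
    int *a = (int *)GC_malloc(10 * sizeof(int));

    a = (int *)GC_realloc((GC_PTR)a, 100 * sizeof(int));
        /* May move; the old contents are copied, the old object freed. */
    {
        int *b = (int *)GC_realloc(0, sizeof(int));
            /* Equivalent to GC_malloc(sizeof(int)).                    */
    }
}
#endif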

# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
#   ifdef REDIRECT_REALLOC
      return(REDIRECT_REALLOC(p, lb));
#   else
      return(GC_realloc(p, lb));
#   endif
}
# endif /* REDIRECT_MALLOC || REDIRECT_REALLOC */


/* The same thing, except caller does not hold allocation lock.        */
/* We avoid holding allocation lock while we clear memory.             */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[lw-1] = 0;
              ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
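
/* A hedged sketch of the intended use, based on the contract described */
/* for these entry points in gc.h: the client promises to keep a live   */
/* pointer to (or near) the beginning of the object, so the collector   */
/* need not recognize interior pointers far past the start.             */
#if 0
{
    char *buf = (char *)GC_malloc_ignore_off_page(1 << 20);

    /* Retain buf itself; pointers into the middle of the buffer do not */
    /* need to be recognized by the collector.                          */
}
#endif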

/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */

/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
    register ptr_t op;
    register ptr_t *opp;
    register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}

/* Like GC_generic_malloc_words_small_inner, but invokes finalizers    */
/* and acquires the allocation lock itself.  Used by gc_inline.h.      */
#ifdef __STDC__
ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
ptr_t GC_generic_malloc_words_small(lw, k)
register word lw;
register int k;
#endif
{
    register ptr_t op;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock. */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */

/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked    */
/* through the first word in the object.  This has the advantage that  */
/* it acquires the allocation lock only once, and may greatly reduce   */
/* time wasted contending for the allocation lock.  Typical usage      */
/* would be in a thread that requires many items of the same size.     */
/* It would keep its own free list in thread-local storage, and call   */
/* GC_malloc_many or friends to replenish it.  (We do not round up     */
/* object sizes, since a call indicates the intention to consume many  */
/* objects of exactly this size.)                                      */
/* We return the free list by assigning it to *result, since it is     */
/* not safe to return, e.g. a linked list of pointer-free objects,     */
/* since the collector would not retain the entire list if it were     */
/* invoked just as we were returning.                                  */
/* Note that the client should usually clear the link field.           */
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
    ptr_t op;
    ptr_t p;
    ptr_t *opp;
    word lw;
    word my_words_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.     */
                /* Thus we can't accidentally adjust it down in more   */
                /* than one thread simultaneously.                     */
                if (my_words_allocd_tmp != 0) {
                    (void)GC_atomic_add(
                            (volatile GC_word *)(&GC_words_allocd_tmp),
                            (GC_word)(-my_words_allocd_tmp));
                    GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in */
                /* parallel.  Thus GC_reclaim_generic doesn't count    */
                /* for us.                                             */
                for (p = op; p != 0; p = obj_link(p)) {
                    my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust      */
                /* that count.                                         */
                /* This update should be atomic, but is not; hence     */
                /* the count may be slightly inaccurate.               */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                        (volatile GC_word *)(&GC_words_allocd_tmp),
                        (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_words_allocd += lw;
            if (my_words_allocd >= BODY_SZ) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
    }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                                 - BYTES_TO_WORDS(HBLKSIZE) % lw;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif

            op = GC_build_fl(h, lw, ok -> ok_init, 0);
#           ifdef PARALLEL_MARK
              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
#           else
              goto out;
#           endif
        }
    }

    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}

GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}
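
/* Typical use, per the comment above GC_generic_malloc_many: a thread */
/* keeps a private free list and refills it with a single lock         */
/* acquisition.  A hedged sketch; my_free_list and my_alloc_16 are     */
/* hypothetical names, and a real version would keep the list in       */
/* thread-local storage rather than a static variable.                 */
#if 0
static ptr_t my_free_list = 0;

void * my_alloc_16(void)            /* returns 16 byte objects */
{
    ptr_t op = my_free_list;

    if (op == 0) {
        op = (ptr_t)GC_malloc_many(16); /* refill: list linked through   */
                                        /* the first word of each object */
        if (op == 0) return(0);         /* out of memory                 */
    }
    my_free_list = *(ptr_t *)op;        /* unlink the first object       */
    *(ptr_t *)op = 0;                   /* client should clear the link  */
    return((void *)op);
}
#endif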

/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
# endif /* THREADS && !SRC_M3 */

/* Allocate lb bytes of pointerful, traced, but not collectable data   */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be     */
              /* collected anyway.                                     */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
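
/* A hedged example: an uncollectable object is traced (pointers stored */
/* in it keep collectable objects live) but is never reclaimed          */
/* automatically; the client must eventually call GC_free.              */
#if 0
{
    void **root = (void **)GC_malloc_uncollectable(sizeof(void *));

    *root = GC_malloc(100);     /* kept live by the uncollectable object */
    /* ... use *root ... */
    GC_free(root);              /* required; it is never collected       */
}
#endif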

# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
              /* We don't need the extra byte, since this won't be     */
              /* collected anyway.                                     */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */