source: trunk/src/gcc/boehm-gc/alloc.c@1036

Last change on this file since 1036 was 2, checked in by bird, 22 years ago

Initial revision

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */


# include "private/gc_priv.h"

# include <stdio.h>
# if !defined(MACOS) && !defined(MSWINCE)
#   include <signal.h>
#   include <sys/types.h>
# endif

/*
 * Separate free lists are maintained for different sized objects
 * up to MAXOBJSZ.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list. It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *            opp = &(GC_objfreelist[i]);
 *            if (*opp == 0) GC_allocobj(i, NORMAL);
 *            ptr = *opp;
 *            *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */

/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ.  Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
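
/*
 * Illustrative sketch (not part of the collector proper): given an
 * arbitrary pointer p, the enclosing chunk and its header can be
 * located with the HBLKPTR and HDR macros from private/gc_priv.h.
 * HBLKPTR rounds p down to a HBLKSZ boundary; HDR fetches its header:
 *
 *            struct hblk * h = HBLKPTR(p);
 *            hdr * hhdr = HDR(h);
 *
 * GC_set_fl_marks and GC_clear_fl_marks below use this pattern to map
 * free-list entries back to their mark bits.
 */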

word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

word GC_gc_no = 0;

#ifndef SMALL_CONFIG
  int GC_incremental = 0;    /* By default, stop the world. */
#endif

int GC_parallel = FALSE;     /* By default, parallel GC is off. */

int GC_full_freq = 19;       /* Every 20th collection is a full */
                             /* collection, whether we need it  */
                             /* or not.                         */

GC_bool GC_need_full_gc = FALSE;
                             /* Need full GC due to heap growth. */

word GC_used_heap_size_after_full = 0;

char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
"Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

# include "version.h"

/* some more variables */

extern signed_word GC_mem_found;  /* Number of reclaimed longwords */
                                  /* after garbage collection      */

GC_bool GC_dont_expand = 0;

word GC_free_space_divisor = 3;

extern GC_bool GC_collection_in_progress();
                /* Collection is in progress, or was abandoned. */

extern GC_bool GC_print_back_height;

int GC_never_stop_func GC_PROTO((void)) { return(0); }

unsigned long GC_time_limit = TIME_LIMIT;

CLOCK_TYPE GC_start_time;  /* Time at which we stopped world.    */
                           /* used only in GC_timeout_stop_func. */

int GC_n_attempts = 0;     /* Number of attempts at finishing    */
                           /* collection within GC_time_limit.   */

#if defined(SMALL_CONFIG) || defined(NO_CLOCK)
#   define GC_timeout_stop_func GC_never_stop_func
#else
  int GC_timeout_stop_func GC_PROTO((void))
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((count++ & 3) != 0) return(0);
#   ifndef NO_CLOCK
      GET_TIME(current_time);
      time_diff = MS_TIME_DIFF(current_time,GC_start_time);
      if (time_diff >= GC_time_limit) {
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Abandoning stopped marking after ");
            GC_printf1("%lu msecs ", (unsigned long)time_diff);
            GC_printf1("(attempt %lu)\n", (unsigned long) GC_n_attempts);
          }
#       endif
        return(1);
      }
#   endif
    return(0);
  }
#endif /* !SMALL_CONFIG */

/* Return the minimum number of words that must be allocated between */
/* collections to amortize the collection cost.                      */
static word min_words_allocd()
{
#   ifdef THREADS
        /* We punt, for now. */
        register signed_word stack_size = 10000;
#   else
        int dummy;
        register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   endif
    word total_root_size;  /* includes double stack size,  */
                           /* since the stack is expensive */
                           /* to scan.                     */
    word scan_size;        /* Estimate of memory to be scanned */
                           /* during normal GC.                */

    if (stack_size < 0) stack_size = -stack_size;
    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
                               + (GC_large_free_bytes >> 2)
                               /* use a bit more of large empty heap */
                               + total_root_size);
    if (GC_incremental) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
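
/*
 * Illustrative arithmetic (not code): with the default
 * GC_free_space_divisor of 3 and roughly 30 MB of scannable memory
 * (heap minus large free blocks, plus roots), a non-incremental
 * collection is amortized over about 10 MB worth of allocation; in
 * incremental mode the divisor is doubled, so over about 5 MB.
 */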

/* Return the number of words allocated, adjusted for explicit storage */
/* management, etc..  This number is used in deciding when to trigger  */
/* collections.                                                        */
word GC_adj_words_allocd()
{
    register signed_word result;
    register signed_word expl_managed =
                BYTES_TO_WORDS((long)GC_non_gc_bytes
                                - (long)GC_non_gc_bytes_at_gc);

    /* Don't count what was explicitly freed, or newly allocated for */
    /* explicit management.  Note that deallocating an explicitly    */
    /* managed object should not alter result, assuming the client   */
    /* is playing by the rules.                                      */
    result = (signed_word)GC_words_allocd
             - (signed_word)GC_mem_freed - expl_managed;
    if (result > (signed_word)GC_words_allocd) {
        result = GC_words_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_words_finalized;
        /* We count objects enqueued for finalization as though they  */
        /* had been reallocated this round. Finalization is user      */
        /* visible progress.  And if we don't count this, we have     */
        /* stability problems for programs that finalize all objects. */
    result += GC_words_wasted;
        /* This doesn't reflect useful work.  But if there is lots of */
        /* new fragmentation, the same is probably true of the heap,  */
        /* and the collection will be correspondingly cheaper.        */
    if (result < (signed_word)(GC_words_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations.  We don't want */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_words_allocd >> 3);
    } else {
        return(result);
    }
}
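
/*
 * Illustrative arithmetic (numbers invented): if 1000 words were
 * allocated, 200 words explicitly freed, nothing explicitly managed,
 * and 100 words enqueued for finalization, the adjusted count is
 * 1000 - 200 + 100 = 900 words, well above the 1/8 floor of 125 words.
 */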


/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
void GC_clear_a_few_frames()
{
#   define NWORDS 64
    word frames[NWORDS];
    register int i;

    for (i = 0; i < NWORDS; i++) frames[i] = 0;
}

/* Have we allocated enough to amortize a collection? */
GC_bool GC_should_collect()
{
    return(GC_adj_words_allocd() >= min_words_allocd());
}


void GC_notify_full_gc()
{
    if (GC_start_call_back != (void (*) GC_PROTO((void)))0) {
        (*GC_start_call_back)();
    }
}

GC_bool GC_is_full_gc = FALSE;

/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously
 * between partial, full, and stop-world collections.
 * Assumes lock held, signals disabled.
 */
void GC_maybe_gc()
{
    static int n_partial_gcs = 0;

    if (GC_should_collect()) {
        if (!GC_incremental) {
            GC_notify_full_gc();
            GC_gcollect_inner();
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
#           ifdef CONDPRINT
              if (GC_print_stats) {
                GC_printf2(
                  "***>Full mark for collection %lu after %ld allocd bytes\n",
                  (unsigned long) GC_gc_no+1,
                  (long)WORDS_TO_BYTES(GC_words_allocd));
              }
#           endif
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_notify_full_gc();
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped. */
        /* If we run out of time, this turns into */
        /* incremental marking.                   */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
                GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}

/*
 * Stop-the-world garbage collection.  Assumes lock held, signals disabled.
 * If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
 */
GC_bool GC_try_to_collect_inner(stop_func)
GC_stop_func stop_func;
{
    if (GC_incremental && GC_collection_in_progress()) {
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf0(
            "GC_try_to_collect_inner: finishing collection in progress\n");
        }
#     endif /* CONDPRINT */
      /* Just finish collection already in progress. */
        while(GC_collection_in_progress()) {
            if (stop_func()) return(FALSE);
            GC_collect_a_little_inner(1);
        }
    }
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf2(
          "Initiating full world-stop collection %lu after %ld allocd bytes\n",
          (unsigned long) GC_gc_no+1,
          (long)WORDS_TO_BYTES(GC_words_allocd));
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines */
    /* don't see cleared mark bits.                                */
    /* If we're guaranteed to finish, then this is unnecessary.    */
    /* In the find_leak case, we have to finish to guarantee that  */
    /* previously unmarked objects are not reported as leaks.      */
#       ifdef PARALLEL_MARK
            GC_wait_for_reclaim();
#       endif
        if ((GC_find_leak || stop_func != GC_never_stop_func)
            && !GC_reclaim_all(stop_func, FALSE)) {
            /* Aborted.  So far everything is still consistent. */
            return(FALSE);
        }
    GC_invalidate_mark_state();  /* Flush mark stack. */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use */
        /* current work.  Reestablish invariants as cheaply as     */
        /* possible.                                               */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is still consistent.  We'll */
        /* finish incrementally.                               */
      return(FALSE);
    }
    GC_finish_collection();
    return(TRUE);
}


/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_RATE pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
# define GC_RATE 10
# define MAX_PRIOR_ATTEMPTS 1
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes.  Doesn't count the initial root scan      */
        /* for a full GC.                                               */

int GC_deficit = 0;     /* The number of extra calls to GC_mark_some */
                        /* that we have made.                        */

void GC_collect_a_little_inner(n)
int n;
{
    register int i;

    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                    GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                    GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
                    GET_TIME(GC_start_time);
                    if (!GC_stopped_mark(GC_timeout_stop_func)) {
                        GC_n_attempts++;
                        break;
                    }
                } else {
                    (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
}

int GC_collect_a_little GC_PROTO(())
{
    int result;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    ENABLE_SIGNALS();
    return(result);
}
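
/*
 * Illustrative usage sketch (client code, not part of the collector):
 * GC_collect_a_little returns nonzero while a collection is still in
 * progress, so a client with spare time available can drive incremental
 * collection with a loop like
 *
 *            while (have_idle_time() && GC_collect_a_little()) { }
 *
 * where have_idle_time() stands for an application-specific test.
 */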

/*
 * Assumes lock is held, signals are disabled.
 * We stop the world.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
GC_bool GC_stopped_mark(stop_func)
GC_stop_func stop_func;
{
    register int i;
    int dummy;
#   if defined(PRINTTIMES) || defined(CONDPRINT)
        CLOCK_TYPE start_time, current_time;
#   endif

    STOP_WORLD();
#   ifdef PRINTTIMES
        GET_TIME(start_time);
#   endif
#   if defined(CONDPRINT) && !defined(PRINTTIMES)
        if (GC_print_stats) GET_TIME(start_time);
#   endif
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("--> Marking for collection %lu ",
                   (unsigned long) GC_gc_no + 1);
        GC_printf2("after %lu allocd bytes + %lu wasted bytes\n",
                   (unsigned long) WORDS_TO_BYTES(GC_words_allocd),
                   (unsigned long) WORDS_TO_BYTES(GC_words_wasted));
      }
#   endif
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots. */
        /* Minimize junk left in my registers and on the stack */
            GC_clear_a_few_frames();
            GC_noop(0,0,0,0,0,0);
        GC_initiate_gc();
        for(i = 0;;i++) {
            if ((*stop_func)()) {
#               ifdef CONDPRINT
                  if (GC_print_stats) {
                    GC_printf0("Abandoned stopped marking after ");
                    GC_printf1("%lu iterations\n",
                               (unsigned long)i);
                  }
#               endif
                GC_deficit = i;  /* Give the mutator a chance. */
                START_WORLD();
                return(FALSE);
            }
            if (GC_mark_some((ptr_t)(&dummy))) break;
        }

    GC_gc_no++;
#   ifdef PRINTSTATS
      GC_printf2("Collection %lu reclaimed %ld bytes",
                 (unsigned long) GC_gc_no - 1,
                 (long)WORDS_TO_BYTES(GC_mem_found));
#   else
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf1("Collection %lu finished", (unsigned long) GC_gc_no - 1);
        }
#     endif
#   endif /* !PRINTSTATS */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1(" ---> heapsize = %lu bytes\n",
                   (unsigned long) GC_heapsize);
        /* Printf arguments may be pushed in funny places.  Clear the */
        /* space.                                                     */
        GC_printf0("");
      }
#   endif /* CONDPRINT */

    /* Check all debugged objects for consistency */
        if (GC_debugging_started) {
            (*GC_check_heap)();
        }

#   ifdef PRINTTIMES
        GET_TIME(current_time);
        GC_printf1("World-stopped marking took %lu msecs\n",
                   MS_TIME_DIFF(current_time,start_time));
#   else
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GET_TIME(current_time);
          GC_printf1("World-stopped marking took %lu msecs\n",
                     MS_TIME_DIFF(current_time,start_time));
        }
#     endif
#   endif
    START_WORLD();
    return(TRUE);
}

/* Set all mark bits for the free list whose first entry is q */
#ifdef __STDC__
  void GC_set_fl_marks(ptr_t q)
#else
  void GC_set_fl_marks(q)
  ptr_t q;
#endif
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;
   int word_no;

   for (p = q; p != 0; p = obj_link(p)){
        h = HBLKPTR(p);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
        }
        word_no = (((word *)p) - ((word *)h));
        set_mark_bit_from_hdr(hhdr, word_no);
   }
}

/* Clear all mark bits for the free list whose first entry is q */
/* Decrement GC_mem_found by number of words on free list.      */
#ifdef __STDC__
  void GC_clear_fl_marks(ptr_t q)
#else
  void GC_clear_fl_marks(q)
  ptr_t q;
#endif
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;
   int word_no;

   for (p = q; p != 0; p = obj_link(p)){
        h = HBLKPTR(p);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
        }
        word_no = (((word *)p) - ((word *)h));
        clear_mark_bit_from_hdr(hhdr, word_no);
#       ifdef GATHERSTATS
            GC_mem_found -= hhdr -> hb_sz;
#       endif
   }
}

/* Finish up a collection.  Assumes lock is held, signals are disabled, */
/* but the world is otherwise running.                                  */
void GC_finish_collection()
{
#   ifdef PRINTTIMES
        CLOCK_TYPE start_time;
        CLOCK_TYPE finalize_time;
        CLOCK_TYPE done_time;

        GET_TIME(start_time);
        finalize_time = start_time;
#   endif

#   ifdef GATHERSTATS
        GC_mem_found = 0;
#   endif
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
        if (getenv("GC_PRINT_ADDRESS_MAP") != 0) {
           GC_print_address_map();
        }
#   endif
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be */
      /* marked when we're done.                                   */
        {
          register word size;  /* current object size */
          int kind;
          ptr_t q;

          for (kind = 0; kind < GC_n_kinds; kind++) {
            for (size = 1; size <= MAXOBJSZ; size++) {
              q = GC_obj_kinds[kind].ok_freelist[size];
              if (q != 0) GC_set_fl_marks(q);
            }
          }
        }
        GC_start_reclaim(TRUE);
          /* The above just checks; it doesn't really reclaim anything. */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifdef PRINTTIMES
      GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     else
#       ifndef SMALL_CONFIG
          GC_err_printf0("Back height not available: "
                         "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#       endif
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_mem_found count.           */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
      {
        register word size;  /* current object size       */
        register ptr_t q;    /* pointer to current object */
        int kind;

        for (kind = 0; kind < GC_n_kinds; kind++) {
          for (size = 1; size <= MAXOBJSZ; size++) {
            q = GC_obj_kinds[kind].ok_freelist[size];
            if (q != 0) GC_clear_fl_marks(q);
          }
        }
      }


#   ifdef PRINTSTATS
        GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
                   (long)WORDS_TO_BYTES(GC_mem_found));
#   endif
    /* Reconstruct free lists to contain everything not marked */
        GC_start_reclaim(FALSE);
        if (GC_is_full_gc)  {
            GC_used_heap_size_after_full = USED_HEAP_SIZE;
            GC_need_full_gc = FALSE;
        } else {
            GC_need_full_gc =
                 BYTES_TO_WORDS(USED_HEAP_SIZE - GC_used_heap_size_after_full)
                 > min_words_allocd();
        }

#   ifdef PRINTSTATS
        GC_printf2(
                  "Immediately reclaimed %ld bytes in heap of size %lu bytes",
                  (long)WORDS_TO_BYTES(GC_mem_found),
                  (unsigned long)GC_heapsize);
#       ifdef USE_MUNMAP
          GC_printf1("(%lu unmapped)", GC_unmapped_bytes);
#       endif
        GC_printf2(
                "\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
                (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
                (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
#   endif

      GC_n_attempts = 0;
      GC_is_full_gc = FALSE;
    /* Reset or increment counters for next cycle */
      GC_words_allocd_before_gc += GC_words_allocd;
      GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
      GC_words_allocd = 0;
      GC_words_wasted = 0;
      GC_mem_freed = 0;

#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif
#   ifdef PRINTTIMES
      GET_TIME(done_time);
      GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
                 MS_TIME_DIFF(finalize_time,start_time),
                 MS_TIME_DIFF(done_time,finalize_time));
#   endif
}

/* Externally callable routine to invoke full, stop-world collection */
# if defined(__STDC__) || defined(__cplusplus)
    int GC_try_to_collect(GC_stop_func stop_func)
# else
    int GC_try_to_collect(stop_func)
    GC_stop_func stop_func;
# endif
{
    int result;
    DCL_LOCK_STATE;

    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    ENTER_GC();
    if (!GC_is_initialized) GC_init_inner();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    EXIT_GC();
    UNLOCK();
    ENABLE_SIGNALS();
    if(result) GC_INVOKE_FINALIZERS();
    return(result);
}
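
/*
 * Illustrative usage sketch (client code, not part of the collector):
 * a caller can bound collection time by supplying its own stop
 * function, e.g.
 *
 *            static int my_stop_func(void)
 *            {
 *                return past_my_deadline();  (application-specific test)
 *            }
 *
 *            if (!GC_try_to_collect(my_stop_func)) {
 *                (collection was abandoned; the heap stays consistent
 *                 and the call may simply be retried later)
 *            }
 *
 * Passing GC_never_stop_func instead forces the collection to run to
 * completion, as GC_gcollect below does.
 */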

void GC_gcollect GC_PROTO(())
{
    GC_notify_full_gc();
    (void)GC_try_to_collect(GC_never_stop_func);
}

word GC_n_heap_sects = 0;  /* Number of sections currently in heap. */

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
void GC_add_to_heap(p, bytes)
struct hblk *p;
word bytes;
{
    word words;
    hdr * phdr;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely. Can't add it.  This will      */
        /* almost certainly result in a 0 return from the allocator, */
        /* which is entirely appropriate.                            */
        return;
    }
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    words = BYTES_TO_WORDS(bytes);
    phdr -> hb_sz = words;
    phdr -> hb_map = (unsigned char *)1;  /* A value != GC_invalid_map */
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (GC_PTR)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents */
                /* us from getting a false hit from the variable      */
                /* itself.  There's some unintentional reflection     */
                /* here.                                              */
    }
    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (GC_PTR)((ptr_t)p + bytes);
    }
}

# if !defined(NO_DEBUGGING)
void GC_print_heap_sects()
{
    register unsigned i;

    GC_printf1("Total heap size: %lu\n", (unsigned long) GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
        unsigned long start = (unsigned long) GC_heap_sects[i].hs_start;
        unsigned long len = (unsigned long) GC_heap_sects[i].hs_bytes;
        struct hblk *h;
        unsigned nbl = 0;

        GC_printf3("Section %ld from 0x%lx to 0x%lx ", (unsigned long)i,
                   start, (unsigned long)(start + len));
        for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
            if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
        }
        GC_printf2("%lu/%lu blacklisted\n", (unsigned long)nbl,
                   (unsigned long)(len/HBLKSIZE));
    }
}
# endif

GC_PTR GC_least_plausible_heap_addr = (GC_PTR)ONES;
GC_PTR GC_greatest_plausible_heap_addr = 0;

ptr_t GC_max(x,y)
ptr_t x, y;
{
    return(x > y? x : y);
}

ptr_t GC_min(x,y)
ptr_t x, y;
{
    return(x < y? x : y);
}

# if defined(__STDC__) || defined(__cplusplus)
    void GC_set_max_heap_size(GC_word n)
# else
    void GC_set_max_heap_size(n)
    GC_word n;
# endif
{
    GC_max_heapsize = n;
}

GC_word GC_max_retries = 0;

/*
 * This explicitly increases the size of the heap.  It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_bool GC_expand_hp_inner(n)
word n;
{
    word bytes;
    struct hblk * space;
    word expansion_slop;  /* Number of bytes by which we expect the */
                          /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
      {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
      }

    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    if( space == 0 ) {
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf1("Failed to expand heap by %lu bytes\n",
                       (unsigned long)bytes);
          }
#       endif
        return(FALSE);
    }
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n",
                   (unsigned long)bytes,
                   (unsigned long)WORDS_TO_BYTES(GC_words_allocd));
#       ifdef UNDEFINED
          GC_printf1("Root size = %lu\n", GC_root_size);
          GC_print_block_list(); GC_print_hblkfreelist();
          GC_printf0("\n");
#       endif
      }
#   endif
    expansion_slop = 8 * WORDS_TO_BYTES(min_words_allocd());
    if (5 * HBLKSIZE * MAXHINCR > expansion_slop) {
        expansion_slop = 5 * HBLKSIZE * MAXHINCR;
    }
    if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
        || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) {
        /* Assume the heap is growing up */
        GC_greatest_plausible_heap_addr =
            GC_max(GC_greatest_plausible_heap_addr,
                   (ptr_t)space + bytes + expansion_slop);
    } else {
        /* Heap is growing down */
        GC_least_plausible_heap_addr =
            GC_min(GC_least_plausible_heap_addr,
                   (ptr_t)space - expansion_slop);
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    return(TRUE);
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* The argument is in bytes.                                              */
# if defined(__STDC__) || defined(__cplusplus)
  int GC_expand_hp(size_t bytes)
# else
  int GC_expand_hp(bytes)
  size_t bytes;
# endif
{
    int result;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    ENABLE_SIGNALS();
    return(result);
}
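
/*
 * Illustrative usage sketch (client code, not part of the collector):
 * an application that knows it will need a large heap can presize it
 * once at startup to avoid repeated growth collections, e.g.
 *
 *            if (!GC_expand_hp(16 * 1024 * 1024)) {
 *                (expansion failed; the collector will still try to
 *                 grow the heap on demand)
 *            }
 *
 * The 16 MB figure is only an example; the request is converted to
 * units of HBLKSIZE and adjusted by GC_expand_hp_inner above.
 */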

unsigned GC_fail_count = 0;
    /* How many consecutive GC/expansion failures? */
    /* Reset by GC_allochblk.                      */

GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
    if (!GC_incremental && !GC_dont_gc &&
        (GC_dont_expand && GC_words_allocd > 0 || GC_should_collect())) {
      GC_notify_full_gc();
      GC_gcollect_inner();
    } else {
      word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                           + needed_blocks;

      if (blocks_to_get > MAXHINCR) {
          word slop;

          if (ignore_off_page) {
              slop = 4;
          } else {
              slop = 2*divHBLKSZ(BL_LIMIT);
              if (slop > needed_blocks) slop = needed_blocks;
          }
          if (needed_blocks + slop > MAXHINCR) {
              blocks_to_get = needed_blocks + slop;
          } else {
              blocks_to_get = MAXHINCR;
          }
      }
      if (!GC_expand_hp_inner(blocks_to_get)
          && !GC_expand_hp_inner(needed_blocks)) {
        if (GC_fail_count++ < GC_max_retries) {
            WARN("Out of Memory!  Trying to continue ...\n", 0);
            GC_notify_full_gc();
            GC_gcollect_inner();
        } else {
#           if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
              WARN("Out of Memory!  Returning NIL!\n", 0);
#           endif
            return(FALSE);
        }
      } else {
#         ifdef CONDPRINT
            if (GC_fail_count && GC_print_stats) {
              GC_printf0("Memory available again ...\n");
            }
#         endif
      }
    }
    return(TRUE);
}

/*
 * Make sure the object free list for sz is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock and signals are disabled.
 */
ptr_t GC_allocobj(sz, kind)
word sz;
int kind;
{
    register ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);

    if (sz == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
        GC_continue_reclaim(sz, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(sz, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (!GC_collect_or_expand((word)1,FALSE)) {
            EXIT_GC();
            return(0);
        }
        EXIT_GC();
      }
    }

    return(*flh);
}