/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block. Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */


# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black */
                /* listed block. Must be >= HBLKSIZE.                 */


# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a   */
        /* single free list.                                          */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single */
        /* bin.                                                       */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP
  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
  /* > GC_max_large_allocd_bytes?                                 */
  GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
      bytes += GC_free_bytes[i];
      if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
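/* E.g. with the default thresholds above, requests for 1..32 blocks each */
/* get their own list, a request for 40 blocks maps to bin                */
/* (40 - 32)/8 + 32 = 33, and any request of 256 or more blocks shares    */
/* the last bin, N_HBLK_FLS = (256 - 32)/8 + 32 = 60.                     */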
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;

}

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf2("***Section from 0x%lx to 0x%lx\n",
                   (unsigned long)start, (unsigned long)end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf0("Missing header!!\n");
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
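      /* For object kinds whose descriptor encodes a length             */
      /* (ok_relocate_descr), the descriptor must be adjusted by the    */
      /* object size in bytes.                                          */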
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#     ifdef USE_MUNMAP
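        /* With USE_MUNMAP the free byte counts are not maintained, so  */
        /* index is only needed here, to update the list head below.    */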
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
#     endif
      GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
      GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

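    /* The header of h-1 may be a forwarding address if h-1 lies in the */
    /* interior of a multi-block object; follow the chain back to the   */
    /* start of the block containing it.                                */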
    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                      */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
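        /* hb_last_reclaimed is only an unsigned short snapshot of      */
        /* GC_gc_no, so the test below is done modulo 2^16: unmap the   */
        /* block only if it has not been reclaimed within the last      */
        /* UNMAP_THRESHOLD collections, skipping cases where the        */
        /* counter recently wrapped.                                    */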
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || last_rec < threshold && threshold < GC_gc_no
                                       /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}

/* Merge all unmapped blocks that are adjacent to other free         */
/* blocks.  This may involve remapping, since all blocks are either  */
/* fully mapped or fully unmapped.                                   */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
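              /* Convert the smaller of the two pieces, to minimize the */
              /* amount of memory that has to be remapped or unmapped.  */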
              if (size > nextsize) {
                GC_remap((ptr_t)next, nextsize);
              } else {
                GC_unmap((ptr_t)h, size);
                hhdr -> hb_flags |= WAS_UNMAPPED;
              }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collectors, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap
                && size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !GC_incremental && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                      continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                  continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);


              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may */
                      /* be on the wrong free list.                 */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                      static unsigned count = 0;

                      /* The block is completely blacklisted.  We need      */
                      /* to drop some such blocks, since otherwise we spend */
                      /* all our time traversing them if pointerfree        */
                      /* blocks are unpopular.                              */
                      /* A dropped block will be reconsidered at next GC.   */
                      if ((++count & 3) == 0) {
                        /* Allocate and drop the block in small chunks, to */
                        /* maximize the chance that we will recover some   */
                        /* later.                                          */
                        word total_size = hhdr -> hb_sz;
                        struct hblk * limit = hbp + divHBLKSZ(total_size);
                        struct hblk * h;
                        struct hblk * prev = hhdr -> hb_prev;

                        GC_words_wasted += total_size;
                        GC_large_free_bytes -= total_size;
                        GC_remove_from_fl(hhdr, n);
                        for (h = hbp; h < limit; h++) {
                          if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                            (void) setup_header(
                                    hhdr,
                                    BYTES_TO_WORDS(HBLKSIZE),
                                    PTRFREE, 0); /* Can't fail */
                            if (GC_debugging_started) {
                              BZERO(h, HBLKSIZE);
                            }
                          }
                        }
                        /* Restore hbp to point at free block */
                        hbp = prev;
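                        /* If the dropped block was at the head of the  */
                        /* free list, prev is 0; in that case retry the */
                        /* search on the (now shorter) list.            */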
                        if (0 == hbp) {
                          return GC_allochblk_nth(sz, kind, flags, n);
                        }
                        hhdr = HDR(hbp);
                      }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n */
                /* is important.                                     */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;


    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
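    /* Mark the block as free by invalidating its object map; this is */
    /* what HBLK_IS_FREE tests for.                                   */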
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}