Changeset 388 for python/vendor/current/Modules/gcmodule.c
- Timestamp: Mar 19, 2014, 11:11:30 AM
- File: python/vendor/current/Modules/gcmodule.c (1 edited)
python/vendor/current/Modules/gcmodule.c
Changes from r2 to r388:

A comment is added to the frameobject.h include:

    #include "frameobject.h"        /* for PyFrame_ClearFreeList */

The members of struct gc_generation are re-indented, and the spacing in the generations[] initializer and its comment is normalized; the thresholds stay 700, 10, 10.

Two new module-level counters are added after delstr, together with an explanatory note:

    /* This is the number of objects who survived the last full collection. It
       approximates the number of long lived objects tracked by the GC.

       (by "full collection", we mean a collection of the oldest generation).
    */
    static Py_ssize_t long_lived_total = 0;

    /* This is the number of objects who survived all "non-full" collections,
       and are awaiting to undergo a full collection for the first time.

    */
    static Py_ssize_t long_lived_pending = 0;

    /*
    NOTE: about the counting of long-lived objects.

    To limit the cost of garbage collection, there are two strategies;
      - make each collection faster, e.g. by scanning fewer objects
      - do less collections
    This heuristic is about the latter strategy.

    In addition to the various configurable thresholds, we only trigger a
    full collection if the ratio
        long_lived_pending / long_lived_total
    is above a given value (hardwired to 25%).

    The reason is that, while "non-full" collections (i.e., collections of
    the young and middle generations) will always examine roughly the same
    number of objects -- determined by the aforementioned thresholds --,
    the cost of a full collection is proportional to the total number of
    long-lived objects, which is virtually unbounded.

    Indeed, it has been remarked that doing a full collection every
    <constant number> of object creations entails a dramatic performance
    degradation in workloads which consist in creating and storing lots of
    long-lived objects (e.g. building a large list of GC-tracked objects would
    show quadratic performance, instead of linear as expected: see issue #4074).

    Using the above ratio, instead, yields amortized linear performance in
    the total number of objects (the effect of which can be summarized
    thusly: "each full garbage collection is more and more costly as the
    number of objects grows, but we do fewer and fewer of them").

    This heuristic was suggested by Martin von Löwis on python-dev in
    June 2008. His original analysis and proposal can be found at:
    http://mail.python.org/pipermail/python-dev/2008-June/080579.html
    */
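To make the 25% rule concrete, here is a minimal standalone C sketch of the trigger condition that the collector applies before running a full collection. The function name and the sample numbers are illustrative only, not part of gcmodule.c.

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative: mirrors the "long_lived_pending >= long_lived_total / 4"
     * test used when deciding whether a full (oldest-generation) collection
     * is worth its cost. */
    static int
    full_collection_worthwhile(size_t long_lived_pending, size_t long_lived_total)
    {
        return long_lived_pending >= long_lived_total / 4;
    }

    int
    main(void)
    {
        /* With 1,000,000 long-lived objects, a full collection is deferred
         * until roughly 250,000 newly promoted objects are pending. */
        printf("%d\n", full_collection_worthwhile(100000, 1000000));  /* 0: skip */
        printf("%d\n", full_collection_worthwhile(250000, 1000000));  /* 1: collect */
        return 0;
    }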
A second note, about untracking of mutable containers, is also added:

    /*
    NOTE: about untracking of mutable objects.

    Certain types of container cannot participate in a reference cycle, and
    so do not need to be tracked by the garbage collector. Untracking these
    objects reduces the cost of garbage collections. However, determining
    which objects may be untracked is not free, and the costs must be
    weighed against the benefits for garbage collection.

    There are two possible strategies for when to untrack a container:

    i) When the container is created.
    ii) When the container is examined by the garbage collector.

    Tuples containing only immutable objects (integers, strings etc, and
    recursively, tuples of immutable objects) do not need to be tracked.
    The interpreter creates a large number of tuples, many of which will
    not survive until garbage collection. It is therefore not worthwhile
    to untrack eligible tuples at creation time.

    Instead, all tuples except the empty tuple are tracked when created.
    During garbage collection it is determined whether any surviving tuples
    can be untracked. A tuple can be untracked if all of its contents are
    already not tracked. Tuples are examined for untracking in all garbage
    collection cycles. It may take more than one cycle to untrack a tuple.

    Dictionaries containing only immutable objects also do not need to be
    tracked. Dictionaries are untracked when created. If a tracked item is
    inserted into a dictionary (either as a key or value), the dictionary
    becomes tracked. During a full garbage collection (all generations),
    the collector will untrack any dictionaries whose contents are not
    tracked.

    The module provides the python function is_tracked(obj), which returns
    the CURRENT tracking status of the object. Subsequent garbage
    collections may change the tracking status of the object.

    Untracking of certain containers was introduced in issue #4688, and
    the algorithm was refined in response to issue #14775.
    */

The debug flag macros are re-aligned; in r388 they read:

    #define DEBUG_STATS             (1<<0) /* print collection statistics */
    #define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
    #define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
    #define DEBUG_INSTANCES         (1<<3) /* print instances */
    #define DEBUG_OBJECTS           (1<<4) /* print other objects */
    #define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
    #define DEBUG_LEAK              DEBUG_COLLECTABLE | \
                                    DEBUG_UNCOLLECTABLE | \
                                    DEBUG_INSTANCES | \
                                    DEBUG_OBJECTS | \
                                    DEBUG_SAVEALL

The gc_refs state markers are likewise re-aligned; in r388 they read:

    #define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
    #define GC_REACHABLE                    _PyGC_REFS_REACHABLE
    #define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

The IS_TRACKED, IS_REACHABLE and IS_TENTATIVELY_UNREACHABLE macros are unchanged.
The list helpers gc_list_init, gc_list_is_empty, gc_list_append, gc_list_remove, gc_list_move, gc_list_merge and gc_list_size, along with append_objects and update_refs, are re-indented to the new four-space style with no change in logic. update_refs still asserts that every object entering a collection has gc_refs == GC_REACHABLE, copies the real reference count into gc_refs, and asserts that no tracked object has a reference count of zero, since a zero there would indicate a tp_dealloc routine that left a dying object tracked and re-entered the interpreter, which formerly led to double deallocation.
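The gc_list_* helpers operate on a circular doubly-linked list whose head doubles as a sentinel. The following standalone C sketch (hypothetical names, no CPython headers) shows the same idiom: init makes the head point at itself, append links a node in before the head, and remove unlinks a node and NULLs its next pointer the way gc_list_remove marks an object as untracked.

    #include <assert.h>
    #include <stddef.h>

    struct node {                     /* stand-in for PyGC_Head */
        struct node *prev, *next;
    };

    static void list_init(struct node *head)            /* cf. gc_list_init */
    {
        head->prev = head;
        head->next = head;
    }

    static int list_is_empty(const struct node *head)   /* cf. gc_list_is_empty */
    {
        return head->next == head;
    }

    static void list_append(struct node *n, struct node *head)  /* cf. gc_list_append */
    {
        n->next = head;
        n->prev = head->prev;
        n->prev->next = n;
        head->prev = n;
    }

    static void list_remove(struct node *n)              /* cf. gc_list_remove */
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = NULL;               /* "not currently tracked" marker */
    }

    int main(void)
    {
        struct node head, a, b;
        list_init(&head);
        assert(list_is_empty(&head));
        list_append(&a, &head);
        list_append(&b, &head);
        assert(head.next == &a && head.prev == &b);
        list_remove(&a);
        assert(head.next == &b && a.next == NULL);
        return 0;
    }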
visit_decref, subtract_refs and visit_reachable are re-indented with no visible change in behaviour: subtract_refs traverses every container in the generation and decrements gc_refs once for each reference that originates inside the set, so that afterwards only objects reachable from outside the set keep a positive gc_refs; visit_reachable restores objects that turn out to be reachable, moving tentatively-unreachable ones back onto the young list so move_unreachable will revisit them.
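The update_refs/subtract_refs pass can be illustrated outside the interpreter. In this standalone C sketch (invented data, not CPython code), each object starts with gc_refs equal to its total reference count; subtracting one for every reference that comes from inside the candidate set leaves a positive count only on objects that something external still points to.

    #include <stdio.h>

    #define NOBJ 3

    int main(void)
    {
        /* Invented example: three container objects.
         * refcount[i]      = total reference count of object i
         * edges[i][j] != 0 = object i holds a reference to object j */
        int refcount[NOBJ] = { 2, 1, 1 };   /* object 0 also has one external ref */
        int edges[NOBJ][NOBJ] = {
            { 0, 1, 0 },    /* 0 -> 1 */
            { 0, 0, 1 },    /* 1 -> 2 */
            { 1, 0, 0 },    /* 2 -> 0: a cycle 0 -> 1 -> 2 -> 0 */
        };
        int gc_refs[NOBJ];
        int i, j;

        for (i = 0; i < NOBJ; i++)          /* cf. update_refs */
            gc_refs[i] = refcount[i];
        for (i = 0; i < NOBJ; i++)          /* cf. subtract_refs */
            for (j = 0; j < NOBJ; j++)
                if (edges[i][j])
                    gc_refs[j]--;

        for (i = 0; i < NOBJ; i++)
            printf("object %d: gc_refs=%d (%s)\n", i, gc_refs[i],
                   gc_refs[i] > 0 ? "externally reachable root"
                                  : "only reachable from inside the set");
        return 0;
    }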
move_unreachable keeps its algorithm and comments (scan the young list; anything with gc_refs still positive is marked GC_REACHABLE and traversed, anything at zero is tentatively moved to the unreachable list, from which visit_reachable may later rescue it), but the reachable branch gains one addition: after a reachable object has been traversed, surviving tuples are now considered for untracking.

    next = gc->gc.gc_next;
    if (PyTuple_CheckExact(op)) {
        _PyTuple_MaybeUntrack(op);
    }
has_finalizer is re-indented and still reports __del__ methods on old-style instances, tp_del on heap types, and generators that need finalizing. A new helper is added right after it:

    /* Try to untrack all currently tracked dictionaries */
    static void
    untrack_dicts(PyGC_Head *head)
    {
        PyGC_Head *next, *gc = head->gc.gc_next;
        while (gc != head) {
            PyObject *op = FROM_GC(gc);
            next = gc->gc.gc_next;
            if (PyDict_CheckExact(op))
                _PyDict_MaybeUntrack(op);
            gc = next;
        }
    }

move_finalizers and visit_move are re-indented only: unreachable objects with finalizers are still moved to the finalizers list and marked GC_REACHABLE.
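untrack_dicts delegates the actual decision to _PyDict_MaybeUntrack. As a rough standalone sketch of that kind of decision (toy object model, hypothetical names, not the CPython implementation): a container may drop out of the collector's lists only if none of its items could still participate in a reference cycle.

    #include <stdio.h>

    struct obj {
        int tracked;                /* currently on a GC list? */
        int may_be_tracked;         /* could ever hold references (mutable/container)? */
    };

    /* Untrack `container` if no element could still form a reference cycle. */
    static void maybe_untrack(struct obj *container,
                              struct obj **items, int nitems)
    {
        int i;
        if (!container->tracked)
            return;
        for (i = 0; i < nitems; i++)
            if (items[i]->may_be_tracked)
                return;             /* keep tracking: an item may join a cycle */
        container->tracked = 0;     /* safe: contents are all atomic/immutable */
    }

    int main(void)
    {
        struct obj an_int = { 0, 0 }, a_list = { 1, 1 };
        struct obj dict1 = { 1, 1 }, dict2 = { 1, 1 };
        struct obj *only_atoms[] = { &an_int };
        struct obj *holds_list[] = { &an_int, &a_list };

        maybe_untrack(&dict1, only_atoms, 1);
        maybe_untrack(&dict2, holds_list, 2);
        printf("dict1 tracked: %d\ndict2 tracked: %d\n",
               dict1.tracked, dict2.tracked);   /* prints 0 and 1 */
        return 0;
    }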
move_finalizer_reachable is re-indented: it still traverses the finalizers list with visit_move so that everything reachable from a finalizer is also treated as uncollectable.

handle_weakrefs keeps its two-pass structure unchanged apart from indentation. The first pass clears every weakref to an unreachable object with _PyWeakref_ClearRef; weakrefs that carry a callback and are themselves still reachable are moved, with an extra reference held, onto a temporary wrcb_to_call list. Callbacks belonging to weakrefs that are part of the cyclic trash are deliberately never invoked, since such a callback could resurrect unreachable ("insane") objects. The second pass invokes the honoured callbacks, reports any callback exception as unraisable, drops the temporary reference, and counts weakrefs that were freed as a result, moving survivors to the older generation.
debug_instance, debug_cycle and handle_finalizers are re-indented; handle_finalizers still appends uncollectable objects (or, under DEBUG_SAVEALL, everything) to gc.garbage and merges the finalizers list into the older generation. The comment above delete_garbage is rewrapped ("Break reference cycles by clearing the containers involved. This is tricky business as the lists can be changing and we don't know which objects may be freed. It is possible I screwed something up here.").
delete_garbage is re-indented and unchanged: it repeatedly calls tp_clear on objects in the unreachable set (or appends them to gc.garbage under DEBUG_SAVEALL), moving anything that survives back to the older generation and marking it GC_REACHABLE.

clear_freelists now compiles the Unicode free-list call conditionally:

    (void)PyTuple_ClearFreeList();
    #ifdef Py_USING_UNICODE
    (void)PyUnicode_ClearFreeList();
    #endif
    (void)PyInt_ClearFreeList();
    (void)PyFloat_ClearFreeList();

get_time is re-indented only.

collect() keeps its overall structure (update_refs/subtract_refs, move_unreachable, move_finalizers, handle_weakrefs, delete_garbage, handle_finalizers, and freelist clearing when the oldest generation is collected) and picks up three changes:

- Under DEBUG_STATS, t1 = get_time() is now taken after the per-generation sizes have been printed rather than before the first message.
- The promotion step maintains the new long-lived counters and untracks dictionaries during full collections:

      /* Move reachable objects to next generation. */
      if (young != old) {
          if (generation == NUM_GENERATIONS - 2) {
              long_lived_pending += gc_list_size(young);
          }
          gc_list_merge(young, old);
      }
      else {
          /* We only untrack dicts in full collections, to avoid quadratic
             dict build-up. See issue #14775. */
          untrack_dicts(young);
          long_lived_pending = 0;
          long_lived_total = gc_list_size(young);
      }

- The comment about clearing free lists fixes a typo ("higest" becomes "highest").
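collect_generations(), described next, decides which generation collect() is run on. The counter bookkeeping it relies on (reset the collected generations, bump the next older one, trigger at allocation time) can be simulated standalone; the numbers and function names below are illustrative only, and the long-lived ratio test is deliberately left out here since it is shown separately above.

    #include <stdio.h>

    #define NUM_GENERATIONS 3

    static int count[NUM_GENERATIONS]     = { 0, 0, 0 };
    static int threshold[NUM_GENERATIONS] = { 700, 10, 10 };

    /* Mirror of the counter updates in collect(): generations 0..gen are
     * emptied and the next older generation records one more collection. */
    static void simulate_collect(int gen)
    {
        int i;
        if (gen + 1 < NUM_GENERATIONS)
            count[gen + 1] += 1;
        for (i = 0; i <= gen; i++)
            count[i] = 0;
    }

    /* Mirror of collect_generations(): collect the oldest generation whose
     * count exceeds its threshold (ignoring the long-lived ratio test). */
    static int maybe_collect(void)
    {
        int i;
        for (i = NUM_GENERATIONS - 1; i >= 0; i--) {
            if (count[i] > threshold[i]) {
                simulate_collect(i);
                return i;
            }
        }
        return -1;
    }

    int main(void)
    {
        long allocations;
        int collections[NUM_GENERATIONS] = { 0, 0, 0 };

        for (allocations = 0; allocations < 100000; allocations++) {
            count[0]++;                        /* _PyObject_GC_Malloc bumps gen 0 */
            if (count[0] > threshold[0]) {     /* the allocation-time trigger */
                int gen = maybe_collect();
                if (gen >= 0)
                    collections[gen]++;
            }
        }
        printf("collections after %ld allocations: gen0=%d gen1=%d gen2=%d\n",
               allocations, collections[0], collections[1], collections[2]);
        return 0;
    }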
collect_generations() gains the long-lived ratio test in front of a full collection (and fixes "higest" to "highest" in its comment):

    for (i = NUM_GENERATIONS-1; i >= 0; i--) {
        if (generations[i].count > generations[i].threshold) {
            /* Avoid quadratic performance degradation in number
               of tracked objects. See comments at the beginning
               of this file, and issue #4074.
            */
            if (i == NUM_GENERATIONS - 1
                && long_lived_pending < long_lived_total / 4)
                continue;
            n = collect(i);
            break;
        }
    }

The module-level functions gc_enable, gc_disable, gc_isenabled, gc_collect, gc_set_debug, gc_get_debug, gc_set_thresh, gc_get_thresh, gc_get_count, referrersvisit, gc_referrers_for, gc_get_referrers, referentsvisit, gc_get_referents and gc_get_objects are re-indented with no change in behaviour.

A new gc.is_tracked() is added:

    PyDoc_STRVAR(gc_is_tracked__doc__,
    "is_tracked(obj) -> bool\n"
    "\n"
    "Returns true if the object is tracked by the garbage collector.\n"
    "Simple atomic objects will return false.\n"
    );

    static PyObject *
    gc_is_tracked(PyObject *self, PyObject *obj)
    {
        PyObject *result;

        if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
            result = Py_True;
        else
            result = Py_False;
        Py_INCREF(result);
        return result;
    }

The module docstring gains the matching line "is_tracked() -- Returns true if a given object is tracked.\n".
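The new function can be exercised from an embedded interpreter. This small C program (illustrative only, not part of the changeset) drives gc.is_tracked(); the expected values in the comments follow the rules stated in the untracking note above for a CPython 2.7 build, though an automatic collection could in principle untrack the tuple earlier.

    #include <Python.h>

    int
    main(void)
    {
        Py_Initialize();
        PyRun_SimpleString(
            "import gc\n"
            "print gc.is_tracked([])   # expected True: lists are always tracked\n"
            "print gc.is_tracked({})   # expected False: dicts start untracked\n"
            "print gc.is_tracked(())   # expected False: the empty tuple\n"
            "t = (1, 2, 3)\n"
            "print gc.is_tracked(t)    # expected True: tracked when created\n"
            "gc.collect()\n"
            "print gc.is_tracked(t)    # expected False: untracked by the collector\n"
        );
        Py_Finalize();
        return 0;
    }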
GcMethods is re-aligned and registers the new function:

    {"is_tracked",     gc_is_tracked,  METH_O,       gc_is_tracked__doc__},

initgc, PyGC_Collect, _PyGC_Dump, PyObject_GC_Track and PyObject_GC_UnTrack are re-indented; their behaviour, including the note that the Py_TRASHCAN mechanism requires PyObject_GC_UnTrack to tolerate being called twice on an object, is unchanged.
_PyObject_GC_Malloc, _PyObject_GC_New, _PyObject_GC_NewVar, _PyObject_GC_Resize and PyObject_GC_Del are re-indented only: allocation still prepends a PyGC_Head, bumps generations[0].count and may trigger collect_generations(), while PyObject_GC_Del unlinks tracked objects and decrements the generation-0 count before freeing.
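These allocation and tracking entry points are what GC-aware extension types build on. As a generic illustration of that C-API pattern (a minimal sketch, not code from this changeset; the Box type and its fields are invented), the usual pairing is PyObject_GC_New plus PyObject_GC_Track at creation, and PyObject_GC_UnTrack plus PyObject_GC_Del in tp_dealloc, with tp_traverse and tp_clear reporting and breaking owned references.

    #include <Python.h>

    /* Invented example type: a container holding one owned reference. */
    typedef struct {
        PyObject_HEAD
        PyObject *item;            /* the one reference we own (may be NULL) */
    } BoxObject;

    static int
    Box_traverse(BoxObject *self, visitproc visit, void *arg)
    {
        Py_VISIT(self->item);      /* report owned references to the collector */
        return 0;
    }

    static int
    Box_clear(BoxObject *self)
    {
        Py_CLEAR(self->item);      /* tp_clear breaks reference cycles */
        return 0;
    }

    static void
    Box_dealloc(BoxObject *self)
    {
        PyObject_GC_UnTrack(self); /* stop tracking before tearing down */
        Box_clear(self);
        PyObject_GC_Del(self);
    }

    static PyTypeObject BoxType = {
        PyVarObject_HEAD_INIT(NULL, 0)
        "example.Box",             /* tp_name */
        sizeof(BoxObject),         /* tp_basicsize */
        0,                         /* tp_itemsize */
        (destructor)Box_dealloc,   /* tp_dealloc */
    };

    int
    main(void)
    {
        BoxObject *box;

        Py_Initialize();

        BoxType.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC;
        BoxType.tp_traverse = (traverseproc)Box_traverse;
        BoxType.tp_clear = (inquiry)Box_clear;
        if (PyType_Ready(&BoxType) < 0)
            return 1;

        box = PyObject_GC_New(BoxObject, &BoxType);
        if (box == NULL)
            return 1;
        box->item = NULL;
        PyObject_GC_Track(box);    /* now visible to the collector */

        box->item = PyInt_FromLong(42);
        Py_DECREF(box);            /* Box_dealloc untracks, clears and frees it */

        Py_Finalize();
        return 0;
    }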