source: vendor/python/2.5/Objects/obmalloc.c

Last change on this file was 3225, checked in by bird, 18 years ago

Python 2.5

File size: 56.7 KB
1#include "Python.h"
2
3#ifdef WITH_PYMALLOC
4
5/* An object allocator for Python.
6
7 Here is an introduction to the layers of the Python memory architecture,
8   showing where the object allocator is actually used (layer +2). It is
9 called for every object allocation and deallocation (PyObject_New/Del),
10 unless the object-specific allocators implement a proprietary allocation
11 scheme (ex.: ints use a simple free list). This is also the place where
12 the cyclic garbage collector operates selectively on container objects.
13
14
15 Object-specific allocators
16 _____ ______ ______ ________
17 [ int ] [ dict ] [ list ] ... [ string ] Python core |
18+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
19 _______________________________ | |
20 [ Python's object allocator ] | |
21+2 | ####### Object memory ####### | <------ Internal buffers ------> |
22 ______________________________________________________________ |
23 [ Python's raw memory allocator (PyMem_ API) ] |
24+1 | <----- Python memory (under PyMem manager's control) ------> | |
25 __________________________________________________________________
26 [ Underlying general-purpose allocator (ex: C library malloc) ]
27 0 | <------ Virtual memory allocated for the python process -------> |
28
29 =========================================================================
30 _______________________________________________________________________
31 [ OS-specific Virtual Memory Manager (VMM) ]
32-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
33 __________________________________ __________________________________
34 [ ] [ ]
35-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
36
37*/
38/*==========================================================================*/
39
40/* A fast, special-purpose memory allocator for small blocks, to be used
41 on top of a general-purpose malloc -- heavily based on previous art. */
42
43/* Vladimir Marangozov -- August 2000 */
44
45/*
46 * "Memory management is where the rubber meets the road -- if we do the wrong
47 * thing at any level, the results will not be good. And if we don't make the
48 * levels work well together, we are in serious trouble." (1)
49 *
50 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
51 * "Dynamic Storage Allocation: A Survey and Critical Review",
52 * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
53 */
54
55/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
56
57/*==========================================================================*/
58
59/*
60 * Allocation strategy abstract:
61 *
62 * For small requests, the allocator sub-allocates <Big> blocks of memory.
63 * Requests greater than 256 bytes are routed to the system's allocator.
64 *
65 * Small requests are grouped in size classes spaced 8 bytes apart, due
66 * to the required valid alignment of the returned address. Requests of
67 * a particular size are serviced from memory pools of 4K (one VMM page).
68 * Pools are fragmented on demand and contain free lists of blocks of one
69 * particular size class. In other words, there is a fixed-size allocator
70 * for each size class. Free pools are shared by the different allocators
71 * thus minimizing the space reserved for a particular size class.
72 *
73 * This allocation strategy is a variant of what is known as "simple
74 * segregated storage based on array of free lists". The main drawback of
75 * simple segregated storage is that we might end up with a lot of reserved
76 * memory for the different free lists, which degenerate over time. To avoid
77 * this, we partition each free list into pools and dynamically share the
78 * reserved space between all free lists. This technique is quite efficient
79 * for memory-intensive programs which allocate mainly small-sized blocks.
80 *
81 * For small requests we have the following table:
82 *
83 * Request in bytes Size of allocated block Size class idx
84 * ----------------------------------------------------------------
85 * 1-8 8 0
86 * 9-16 16 1
87 * 17-24 24 2
88 * 25-32 32 3
89 * 33-40 40 4
90 * 41-48 48 5
91 * 49-56 56 6
92 * 57-64 64 7
93 * 65-72 72 8
94 * ... ... ...
95 * 241-248 248 30
96 * 249-256 256 31
97 *
98 * 0, 257 and up: routed to the underlying allocator.
99 */
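/* Illustrative sketch (not part of the allocator; excluded from compilation):
 * how a request size maps to a size class index and a rounded block size
 * under the table above.  The literals 8 and 3 mirror ALIGNMENT and
 * ALIGNMENT_SHIFT, which are defined below; the helper names are hypothetical.
 */
#if 0
static unsigned int
example_size_class_index(size_t nbytes)         /* valid for 1 <= nbytes <= 256 */
{
    return (unsigned int)((nbytes - 1) >> 3);   /* classes spaced 8 bytes apart */
}

static size_t
example_rounded_block_size(size_t nbytes)
{
    /* e.g. 1..8 -> 8 (idx 0), 9..16 -> 16 (idx 1), ..., 249..256 -> 256 (idx 31) */
    return (size_t)(example_size_class_index(nbytes) + 1) << 3;
}
#endif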
100
101/*==========================================================================*/
102
103/*
104 * -- Main tunable settings section --
105 */
106
107/*
108 * Alignment of addresses returned to the user. 8-byte alignment works
109 * on most current architectures (with 32-bit or 64-bit address busses).
110 * The alignment value is also used for grouping small requests in size
111 * classes spaced ALIGNMENT bytes apart.
112 *
113 * You shouldn't change this unless you know what you are doing.
114 */
115#define ALIGNMENT 8 /* must be 2^N */
116#define ALIGNMENT_SHIFT 3
117#define ALIGNMENT_MASK (ALIGNMENT - 1)
118
119/* Return the number of bytes in size class I, as a uint. */
120#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
121
122/*
123 * Max size threshold below which malloc requests are considered small
124 * enough to be served from preallocated memory pools. You can tune
125 * this value according to your application behaviour and memory needs.
126 *
127 * The following invariants must hold:
128 * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
129 * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
130 *
131 * Although not required, for better performance and space efficiency,
132 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
133 */
134#define SMALL_REQUEST_THRESHOLD 256
135#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
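/* Illustrative sketch (hypothetical, excluded from compilation): the two
 * invariants above expressed as compile-time checks, using the classic
 * negative-array-size trick since C89 has no static_assert.
 */
#if 0
typedef char example_threshold_in_range[
    (ALIGNMENT <= SMALL_REQUEST_THRESHOLD && SMALL_REQUEST_THRESHOLD <= 256) ? 1 : -1];
typedef char example_threshold_is_multiple_of_alignment[
    (SMALL_REQUEST_THRESHOLD % ALIGNMENT == 0) ? 1 : -1];
#endif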
136
137/*
138 * The system's VMM page size can be obtained on most unices with a
139 * getpagesize() call or deduced from various header files. To make
140 * things simpler, we assume that it is 4K, which is OK for most systems.
141 * It is probably better if this is the native page size, but it doesn't
142 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
143 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
144 * violation fault. 4K is apparently OK for all the platforms that python
145 * currently targets.
146 */
147#define SYSTEM_PAGE_SIZE (4 * 1024)
148#define SYSTEM_PAGE_SIZE_MASK (SYSTEM_PAGE_SIZE - 1)
149
150/*
151 * Maximum amount of memory managed by the allocator for small requests.
152 */
153#ifdef WITH_MEMORY_LIMITS
154#ifndef SMALL_MEMORY_LIMIT
155#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
156#endif
157#endif
158
159/*
160 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
161 * on a page boundary. This is a reserved virtual address space for the
162 * current process (obtained through a malloc call). In no way does this mean
163 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
164 * an address range reservation for <Big> bytes, unless all pages within this
165 * space are referenced subsequently. So malloc'ing big blocks and not using
166 * them does not mean "wasting memory"; it wastes address range, not memory.
167 *
168 * Therefore, allocating arenas with malloc is not optimal, because there is
169 * some address space wastage, but this is the most portable way to request
170 * memory from the system across various platforms.
171 */
172#define ARENA_SIZE (256 << 10) /* 256KB */
173
174#ifdef WITH_MEMORY_LIMITS
175#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
176#endif
177
178/*
179 * Size of the pools used for small blocks. Should be a power of 2,
180 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
181 */
182#define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */
183#define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK
184
185/*
186 * -- End of tunable settings section --
187 */
188
189/*==========================================================================*/
190
191/*
192 * Locking
193 *
194 * To reduce lock contention, it would probably be better to refine the
195 * crude function locking with per-size-class locking. I'm not positive,
196 * however, whether it's worth switching to such a locking policy because
197 * of the performance penalty it might introduce.
198 *
199 * The following macros describe the simplest (should also be the fastest)
200 * lock object on a particular platform and the init/fini/lock/unlock
201 * operations on it. The locks defined here are not expected to be recursive
202 * because it is assumed that they will always be called in the order:
203 * INIT, [LOCK, UNLOCK]*, FINI.
204 */
205
206/*
207 * Python's threads are serialized, so object malloc locking is disabled.
208 */
209#define SIMPLELOCK_DECL(lock) /* simple lock declaration */
210#define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */
211#define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */
212#define SIMPLELOCK_LOCK(lock) /* acquire released lock */
213#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */
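/* Illustrative sketch (assumption, excluded from compilation): what a real
 * mapping of the SIMPLELOCK_* macros onto a pthreads mutex could look like
 * if object malloc ever needed its own lock.  CPython does not do this --
 * the stubs above stay empty because the GIL already serializes callers --
 * and the empty definitions above would have to be replaced, not shadowed.
 */
#if 0
#include <pthread.h>
#define SIMPLELOCK_DECL(lock)   static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
#define SIMPLELOCK_INIT(lock)   /* the static initializer above does the work */
#define SIMPLELOCK_FINI(lock)   pthread_mutex_destroy(&(lock))
#define SIMPLELOCK_LOCK(lock)   pthread_mutex_lock(&(lock))
#define SIMPLELOCK_UNLOCK(lock) pthread_mutex_unlock(&(lock))
#endif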
214
215/*
216 * Basic types
217 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
218 */
219#undef uchar
220#define uchar unsigned char /* assuming == 8 bits */
221
222#undef uint
223#define uint unsigned int /* assuming >= 16 bits */
224
225#undef ulong
226#define ulong unsigned long /* assuming >= 32 bits */
227
228#undef uptr
229#define uptr Py_uintptr_t
230
231/* When you say memory, my mind reasons in terms of (pointers to) blocks */
232typedef uchar block;
233
234/* Pool for small blocks. */
235struct pool_header {
236 union { block *_padding;
237 uint count; } ref; /* number of allocated blocks */
238 block *freeblock; /* pool's free list head */
239 struct pool_header *nextpool; /* next pool of this size class */
240 struct pool_header *prevpool; /* previous pool "" */
241 uint arenaindex; /* index into arenas of base adr */
242 uint szidx; /* block size class index */
243 uint nextoffset; /* bytes to virgin block */
244 uint maxnextoffset; /* largest valid nextoffset */
245};
246
247typedef struct pool_header *poolp;
248
249/* Record keeping for arenas. */
250struct arena_object {
251 /* The address of the arena, as returned by malloc. Note that 0
252 * will never be returned by a successful malloc, and is used
253 * here to mark an arena_object that doesn't correspond to an
254 * allocated arena.
255 */
256 uptr address;
257
258 /* Pool-aligned pointer to the next pool to be carved off. */
259 block* pool_address;
260
261 /* The number of available pools in the arena: free pools + never-
262 * allocated pools.
263 */
264 uint nfreepools;
265
266 /* The total number of pools in the arena, whether or not available. */
267 uint ntotalpools;
268
269 /* Singly-linked list of available pools. */
270 struct pool_header* freepools;
271
272 /* Whenever this arena_object is not associated with an allocated
273 * arena, the nextarena member is used to link all unassociated
274 * arena_objects in the singly-linked `unused_arena_objects` list.
275 * The prevarena member is unused in this case.
276 *
277 * When this arena_object is associated with an allocated arena
278 * with at least one available pool, both members are used in the
279 * doubly-linked `usable_arenas` list, which is maintained in
280 * increasing order of `nfreepools` values.
281 *
282 * Else this arena_object is associated with an allocated arena
283 * all of whose pools are in use. `nextarena` and `prevarena`
284 * are both meaningless in this case.
285 */
286 struct arena_object* nextarena;
287 struct arena_object* prevarena;
288};
289
290#undef ROUNDUP
291#define ROUNDUP(x) (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
292#define POOL_OVERHEAD ROUNDUP(sizeof(struct pool_header))
293
294#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
295
296/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
297#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))
298
299/* Return total number of blocks in pool of size index I, as a uint. */
300#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
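/* Illustrative sketch (hypothetical helper, excluded from compilation):
 * how the macros above describe a pool's geometry.  POOL_ADDR masks the
 * low bits of any block address to recover the owning pool header, and
 * NUMBLOCKS gives the pool's block capacity.  For example, assuming a
 * 48-byte pool header on a typical 64-bit build, size class 0 (8-byte
 * blocks) holds (4096 - 48) / 8 == 506 blocks per pool.
 */
#if 0
static void
example_pool_geometry(void *p, uint *capacity, uint *leftover)
{
    poolp pool = POOL_ADDR(p);          /* round p down to its pool boundary */
    uint blocksize = INDEX2SIZE(pool->szidx);

    *capacity = NUMBLOCKS(pool->szidx); /* whole blocks that fit after the header */
    *leftover = (uint)(POOL_SIZE - POOL_OVERHEAD) % blocksize; /* quantization waste */
}
#endif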
301
302/*==========================================================================*/
303
304/*
305 * This malloc lock
306 */
307SIMPLELOCK_DECL(_malloc_lock)
308#define LOCK() SIMPLELOCK_LOCK(_malloc_lock)
309#define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)
310#define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)
311#define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)
312
313/*
314 * Pool table -- headed, circular, doubly-linked lists of partially used pools.
315
316This is involved. For an index i, usedpools[i+i] is the header for a list of
317all partially used pools holding small blocks with "size class idx" i. So
318usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
31916, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
320
321Pools are carved off an arena's highwater mark (an arena_object's pool_address
322member) as needed. Once carved off, a pool is in one of three states forever
323after:
324
325used == partially used, neither empty nor full
326 At least one block in the pool is currently allocated, and at least one
327 block in the pool is not currently allocated (note this implies a pool
328 has room for at least two blocks).
329 This is a pool's initial state, as a pool is created only when malloc
330 needs space.
331 The pool holds blocks of a fixed size, and is in the circular list headed
332 at usedpools[i] (see above). It's linked to the other used pools of the
333 same size class via the pool_header's nextpool and prevpool members.
334 If all but one block is currently allocated, a malloc can cause a
335 transition to the full state. If all but one block is not currently
336 allocated, a free can cause a transition to the empty state.
337
338full == all the pool's blocks are currently allocated
339 On transition to full, a pool is unlinked from its usedpools[] list.
340 It's not linked to from anything then anymore, and its nextpool and
341 prevpool members are meaningless until it transitions back to used.
342 A free of a block in a full pool puts the pool back in the used state.
343 Then it's linked in at the front of the appropriate usedpools[] list, so
344 that the next allocation for its size class will reuse the freed block.
345
346empty == all the pool's blocks are currently available for allocation
347 On transition to empty, a pool is unlinked from its usedpools[] list,
348 and linked to the front of its arena_object's singly-linked freepools list,
349 via its nextpool member. The prevpool member has no meaning in this case.
350 Empty pools have no inherent size class: the next time a malloc finds
351 an empty list in usedpools[], it takes the first pool off of freepools.
352 If the size class needed happens to be the same as the size class the pool
353 last had, some pool initialization can be skipped.
354
355
356Block Management
357
358Blocks within pools are again carved out as needed. pool->freeblock points to
359the start of a singly-linked list of free blocks within the pool. When a
360block is freed, it's inserted at the front of its pool's freeblock list. Note
361that the available blocks in a pool are *not* linked all together when a pool
362is initialized. Instead only "the first two" (lowest addresses) blocks are
363set up, returning the first such block, and setting pool->freeblock to a
364one-block list holding the second such block. This is consistent with that
365one-block list holding the second such block. This is consistent with
366pymalloc striving, at all levels (arena, pool, and block), never to touch
367a piece of memory until it's actually needed.
368So long as a pool is in the used state, we're certain there *is* a block
369available for allocating, and pool->freeblock is not NULL. If pool->freeblock
370points to the end of the free list before we've carved the entire pool into
371blocks, that means we simply haven't yet gotten to one of the higher-address
372blocks. The offset from the pool_header to the start of "the next" virgin
373block is stored in the pool_header nextoffset member, and the largest value
374of nextoffset that makes sense is stored in the maxnextoffset member when a
375pool is initialized. All the blocks in a pool have been passed out at least
376once when and only when nextoffset > maxnextoffset.
377
378
379Major obscurity: While the usedpools vector is declared to have poolp
380entries, it doesn't really. It really contains two pointers per (conceptual)
381poolp entry, the nextpool and prevpool members of a pool_header. The
382excruciating initialization code below fools C so that
383
384 usedpool[i+i]
385
386"acts like" a genuine poolp, but only so long as you only reference its
387nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
388compensating for the fact that a pool_header's nextpool and prevpool members
389immediately follow a pool_header's first two members:
390
391 union { block *_padding;
392 uint count; } ref;
393 block *freeblock;
394
395each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
396contains is a fudged-up pointer p such that *if* C believes it's a poolp
397pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
398circular list is empty).
399
400It's unclear why the usedpools setup is so convoluted. It could be to
401minimize the amount of cache required to hold this heavily-referenced table
402(which only *needs* the two interpool pointer members of a pool_header). OTOH,
403referencing code has to remember to "double the index" and doing so isn't
404free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
405on the fact that C doesn't insert any padding anywhere in a pool_header at or before
406the prevpool member.
407**************************************************************************** */
408
409#define PTA(x) ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
410#define PT(x) PTA(x), PTA(x)
411
412static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
413 PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
414#if NB_SMALL_SIZE_CLASSES > 8
415 , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
416#if NB_SMALL_SIZE_CLASSES > 16
417 , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
418#if NB_SMALL_SIZE_CLASSES > 24
419 , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
420#if NB_SMALL_SIZE_CLASSES > 32
421 , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
422#if NB_SMALL_SIZE_CLASSES > 40
423 , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
424#if NB_SMALL_SIZE_CLASSES > 48
425 , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
426#if NB_SMALL_SIZE_CLASSES > 56
427 , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
428#endif /* NB_SMALL_SIZE_CLASSES > 56 */
429#endif /* NB_SMALL_SIZE_CLASSES > 48 */
430#endif /* NB_SMALL_SIZE_CLASSES > 40 */
431#endif /* NB_SMALL_SIZE_CLASSES > 32 */
432#endif /* NB_SMALL_SIZE_CLASSES > 24 */
433#endif /* NB_SMALL_SIZE_CLASSES > 16 */
434#endif /* NB_SMALL_SIZE_CLASSES > 8 */
435};
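/* Illustrative sketches (hypothetical helpers, excluded from compilation)
 * of the two tricks documented in the big comment above: the fudged
 * usedpools headers make an empty list detectable with one pointer
 * comparison, and a pool's free list is threaded through the first
 * sizeof(block *) bytes of each free block.
 */
#if 0
static int
example_size_class_has_used_pool(uint size)
{
    poolp head = usedpools[size + size];
    return head != head->nextpool;  /* nonzero iff a partially used pool exists */
}

static uint
example_count_free_list(poolp pool)
{
    uint n = 0;
    block *bp = pool->freeblock;

    while (bp != NULL) {
        ++n;
        bp = *(block **)bp;         /* the next pointer lives in the free block itself */
    }
    return n;   /* counts linked free blocks only, not yet-virgin blocks */
}
#endif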
436
437/*==========================================================================
438Arena management.
439
440`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
441which may not be currently used (== they're arena_objects that aren't
442currently associated with an allocated arena). Note that arenas proper are
443separately malloc'ed.
444
445Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
446we do try to free() arenas, and use some mild heuristic strategies to increase
447the likelihood that arenas eventually can be freed.
448
449unused_arena_objects
450
451 This is a singly-linked list of the arena_objects that are currently not
452 being used (no arena is associated with them). Objects are taken off the
453 head of the list in new_arena(), and are pushed on the head of the list in
454 PyObject_Free() when the arena is empty. Key invariant: an arena_object
455 is on this list if and only if its .address member is 0.
456
457usable_arenas
458
459 This is a doubly-linked list of the arena_objects associated with arenas
460 that have pools available. These pools are either waiting to be reused,
461 or have not been used before. The list is sorted to have the most-
462 allocated arenas first (ascending order based on the nfreepools member).
463 This means that the next allocation will come from a heavily used arena,
464 which gives the nearly empty arenas a chance to be returned to the system.
465 In my unscientific tests this dramatically improved the number of arenas
466 that could be freed.
467
468Note that an arena_object associated with an arena all of whose pools are
469currently in use isn't on either list.
470*/
471
472/* Array of objects used to track chunks of memory (arenas). */
473static struct arena_object* arenas = NULL;
474/* Number of slots currently allocated in the `arenas` vector. */
475static uint maxarenas = 0;
476
477/* The head of the singly-linked, NULL-terminated list of available
478 * arena_objects.
479 */
480static struct arena_object* unused_arena_objects = NULL;
481
482/* The head of the doubly-linked, NULL-terminated at each end, list of
483 * arena_objects associated with arenas that have pools available.
484 */
485static struct arena_object* usable_arenas = NULL;
486
487/* How many arena_objects do we initially allocate?
488 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
489 * `arenas` vector.
490 */
491#define INITIAL_ARENA_OBJECTS 16
492
493/* Number of arenas allocated that haven't been free()'d. */
494static size_t narenas_currently_allocated = 0;
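/* Illustrative sketch (hypothetical debug helper, excluded from
 * compilation): the structural invariants described in the arena
 * management comment above, spelled out as checks -- unused arena_objects
 * have address == 0, and `usable_arenas` is kept in ascending order of
 * nfreepools so the most heavily used arenas are tried first.
 */
#if 0
static void
example_check_arena_lists(void)
{
    struct arena_object *ao;

    for (ao = unused_arena_objects; ao != NULL; ao = ao->nextarena)
        assert(ao->address == 0);           /* not backed by an allocated arena */

    for (ao = usable_arenas; ao != NULL; ao = ao->nextarena) {
        assert(ao->address != 0);           /* backed by an allocated arena */
        assert(ao->nfreepools > 0);         /* otherwise it wouldn't be usable */
        if (ao->nextarena != NULL)
            assert(ao->nfreepools <= ao->nextarena->nfreepools);
    }
}
#endif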
495
496#ifdef PYMALLOC_DEBUG
497/* Total number of times malloc() called to allocate an arena. */
498static size_t ntimes_arena_allocated = 0;
499/* High water mark (max value ever seen) for narenas_currently_allocated. */
500static size_t narenas_highwater = 0;
501#endif
502
503/* Allocate a new arena. Return NULL if we run out of memory; otherwise
504 * return the address of an arena_object describing the newly
505 * allocated arena. It's expected that the caller will set
506 * `usable_arenas` to the return value.
507 */
508static struct arena_object*
509new_arena(void)
510{
511 struct arena_object* arenaobj;
512 uint excess; /* number of bytes above pool alignment */
513
514#ifdef PYMALLOC_DEBUG
515 if (Py_GETENV("PYTHONMALLOCSTATS"))
516 _PyObject_DebugMallocStats();
517#endif
518 if (unused_arena_objects == NULL) {
519 uint i;
520 uint numarenas;
521 size_t nbytes;
522
523 /* Double the number of arena objects on each allocation.
524 * Note that it's possible for `numarenas` to overflow.
525 */
526 numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
527 if (numarenas <= maxarenas)
528 return NULL; /* overflow */
529 nbytes = numarenas * sizeof(*arenas);
530 if (nbytes / sizeof(*arenas) != numarenas)
531 return NULL; /* overflow */
532 arenaobj = (struct arena_object *)realloc(arenas, nbytes);
533 if (arenaobj == NULL)
534 return NULL;
535 arenas = arenaobj;
536
537 /* We might need to fix pointers that were copied. However,
538 * new_arena only gets called when all the pages in the
539 * previous arenas are full. Thus, there are *no* pointers
540 * into the old array. Thus, we don't have to worry about
541 * invalid pointers. Just to be sure, some asserts:
542 */
543 assert(usable_arenas == NULL);
544 assert(unused_arena_objects == NULL);
545
546 /* Put the new arenas on the unused_arena_objects list. */
547 for (i = maxarenas; i < numarenas; ++i) {
548 arenas[i].address = 0; /* mark as unassociated */
549 arenas[i].nextarena = i < numarenas - 1 ?
550 &arenas[i+1] : NULL;
551 }
552
553 /* Update globals. */
554 unused_arena_objects = &arenas[maxarenas];
555 maxarenas = numarenas;
556 }
557
558 /* Take the next available arena object off the head of the list. */
559 assert(unused_arena_objects != NULL);
560 arenaobj = unused_arena_objects;
561 unused_arena_objects = arenaobj->nextarena;
562 assert(arenaobj->address == 0);
563 arenaobj->address = (uptr)malloc(ARENA_SIZE);
564 if (arenaobj->address == 0) {
565 /* The allocation failed: return NULL after putting the
566 * arenaobj back.
567 */
568 arenaobj->nextarena = unused_arena_objects;
569 unused_arena_objects = arenaobj;
570 return NULL;
571 }
572
573 ++narenas_currently_allocated;
574#ifdef PYMALLOC_DEBUG
575 ++ntimes_arena_allocated;
576 if (narenas_currently_allocated > narenas_highwater)
577 narenas_highwater = narenas_currently_allocated;
578#endif
579 arenaobj->freepools = NULL;
580 /* pool_address <- first pool-aligned address in the arena
581 nfreepools <- number of whole pools that fit after alignment */
582 arenaobj->pool_address = (block*)arenaobj->address;
583 arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
584 assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
585 excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
586 if (excess != 0) {
587 --arenaobj->nfreepools;
588 arenaobj->pool_address += POOL_SIZE - excess;
589 }
590 arenaobj->ntotalpools = arenaobj->nfreepools;
591
592 return arenaobj;
593}
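/* Illustrative sketch (hypothetical helper, excluded from compilation) of
 * the alignment fixup at the end of new_arena() above: with ARENA_SIZE ==
 * 256 KB and POOL_SIZE == 4 KB an arena nominally holds 64 pools, but if
 * malloc() returns an address that isn't pool aligned, the space before
 * the first 4 KB boundary is unusable and the count drops to 63.
 */
#if 0
static uint
example_usable_pools(uptr arena_base)
{
    uint excess = (uint)(arena_base & POOL_SIZE_MASK);
    return (uint)(ARENA_SIZE / POOL_SIZE) - (excess != 0 ? 1 : 0);
}
#endif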
594
595/*
596Py_ADDRESS_IN_RANGE(P, POOL)
597
598Return true if and only if P is an address that was allocated by pymalloc.
599POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
600(the caller is asked to compute this because the macro expands POOL more than
601once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
602variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
603called on every alloc/realloc/free, micro-efficiency is important here).
604
605Tricky: Let B be the arena base address associated with the pool, B =
606arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
607
608 B <= P < B + ARENA_SIZE
609
610Subtracting B throughout, this is true iff
611
612 0 <= P-B < ARENA_SIZE
613
614By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
615
616Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
617before the first arena has been allocated. `arenas` is still NULL in that
618case. We're relying on the fact that maxarenas is also 0 in that case, so that
619(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
620into a NULL arenas.
621
622Details: given P and POOL, the arena_object corresponding to P is AO =
623arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
624stores, etc), POOL is the correct address of P's pool, AO.address is the
625correct base address of the pool's arena, and P must be within ARENA_SIZE of
626AO.address. In addition, AO.address is not 0 (no arena can start at address 0
627(NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
628controls P.
629
630Now suppose obmalloc does not control P (e.g., P was obtained via a direct
631call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
632in this case -- it may even be uninitialized trash. If the trash arenaindex
633is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
634control P.
635
636Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
637allocated arena, obmalloc controls all the memory in slice AO.address :
638AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
639so P doesn't lie in that slice, so the macro correctly reports that P is not
640controlled by obmalloc.
641
642Finally, if P is not controlled by obmalloc and AO corresponds to an unused
643arena_object (one not currently associated with an allocated arena),
644AO.address is 0, and the second test in the macro reduces to:
645
646 P < ARENA_SIZE
647
648If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
649that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
650of the test still passes, and the third clause (AO.address != 0) is necessary
651to get the correct result: AO.address is 0 in this case, so the macro
652correctly reports that P is not controlled by obmalloc (despite that P lies in
653slice AO.address : AO.address + ARENA_SIZE).
654
655Note: The third (AO.address != 0) clause was added in Python 2.5. Before
656 2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
657corresponded to a currently-allocated arena, so the "P is not controlled by
658obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
659was impossible.
660
661Note that the logic is excruciating, and reading up possibly uninitialized
662memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
663creates problems for some memory debuggers. The overwhelming advantage is
664that this test determines whether an arbitrary address is controlled by
665obmalloc in a small constant time, independent of the number of arenas
666obmalloc controls. Since this test is needed at every entry point, it's
667extremely desirable that it be this fast.
668*/
669#define Py_ADDRESS_IN_RANGE(P, POOL) \
670 ((POOL)->arenaindex < maxarenas && \
671 (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
672 arenas[(POOL)->arenaindex].address != 0)
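/* Illustrative sketch (hypothetical helper, excluded from compilation) of
 * the unsigned-arithmetic trick used by the middle clause above: for
 * unsigned values, (p - base) < size is equivalent to
 * (base <= p && p < base + size), because a "negative" difference wraps
 * around to a huge unsigned value and fails the comparison.
 */
#if 0
static int
example_in_arena(uptr p, uptr arena_base)
{
    return (p - arena_base) < (uptr)ARENA_SIZE;
}
#endif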
673
674
675/* This is only useful when running memory debuggers such as
676 * Purify or Valgrind. Uncomment to use.
677 *
678#define Py_USING_MEMORY_DEBUGGER
679 */
680
681#ifdef Py_USING_MEMORY_DEBUGGER
682
683/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
684 * This leads to thousands of spurious warnings when using
685 * Purify or Valgrind. By making this a function, we can easily
686 * suppress the uninitialized memory reads in this one function.
687 * So we won't ignore real errors elsewhere.
688 *
689 * Disable the macro and use a function.
690 */
691
692#undef Py_ADDRESS_IN_RANGE
693
694#if defined(__GNUC__) && (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)
695#define Py_NO_INLINE __attribute__((__noinline__))
696#else
697#define Py_NO_INLINE
698#endif
699
700/* Don't make static, to try to ensure this isn't inlined. */
701int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
702#undef Py_NO_INLINE
703#endif
704
705/*==========================================================================*/
706
707/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
708 * from all other currently live pointers. This may not be possible.
709 */
710
711/*
712 * The basic blocks are ordered by decreasing execution frequency,
713 * which minimizes the number of jumps in the most common cases,
714 * improves branch prediction and instruction scheduling (small
715 * block allocations typically result in a couple of instructions).
716 * Unless the optimizer reorders everything, being too smart...
717 */
718
719#undef PyObject_Malloc
720void *
721PyObject_Malloc(size_t nbytes)
722{
723 block *bp;
724 poolp pool;
725 poolp next;
726 uint size;
727
728	/*
729	 * This implicitly redirects malloc(0): (nbytes - 1) wraps around for
730	 * nbytes == 0, so such a request falls through to the redirect label below. */
731 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
732 LOCK();
733 /*
734 * Most frequent paths first
735 */
736 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
737 pool = usedpools[size + size];
738 if (pool != pool->nextpool) {
739 /*
740 * There is a used pool for this size class.
741 * Pick up the head block of its free list.
742 */
743 ++pool->ref.count;
744 bp = pool->freeblock;
745 assert(bp != NULL);
746 if ((pool->freeblock = *(block **)bp) != NULL) {
747 UNLOCK();
748 return (void *)bp;
749 }
750 /*
751 * Reached the end of the free list, try to extend it.
752 */
753 if (pool->nextoffset <= pool->maxnextoffset) {
754 /* There is room for another block. */
755 pool->freeblock = (block*)pool +
756 pool->nextoffset;
757 pool->nextoffset += INDEX2SIZE(size);
758 *(block **)(pool->freeblock) = NULL;
759 UNLOCK();
760 return (void *)bp;
761 }
762 /* Pool is full, unlink from used pools. */
763 next = pool->nextpool;
764 pool = pool->prevpool;
765 next->prevpool = pool;
766 pool->nextpool = next;
767 UNLOCK();
768 return (void *)bp;
769 }
770
771 /* There isn't a pool of the right size class immediately
772 * available: use a free pool.
773 */
774 if (usable_arenas == NULL) {
775 /* No arena has a free pool: allocate a new arena. */
776#ifdef WITH_MEMORY_LIMITS
777 if (narenas_currently_allocated >= MAX_ARENAS) {
778 UNLOCK();
779 goto redirect;
780 }
781#endif
782 usable_arenas = new_arena();
783 if (usable_arenas == NULL) {
784 UNLOCK();
785 goto redirect;
786 }
787 usable_arenas->nextarena =
788 usable_arenas->prevarena = NULL;
789 }
790 assert(usable_arenas->address != 0);
791
792 /* Try to get a cached free pool. */
793 pool = usable_arenas->freepools;
794 if (pool != NULL) {
795 /* Unlink from cached pools. */
796 usable_arenas->freepools = pool->nextpool;
797
798 /* This arena already had the smallest nfreepools
799 * value, so decreasing nfreepools doesn't change
800 * that, and we don't need to rearrange the
801 * usable_arenas list. However, if the arena has
802 * become wholly allocated, we need to remove its
803 * arena_object from usable_arenas.
804 */
805 --usable_arenas->nfreepools;
806 if (usable_arenas->nfreepools == 0) {
807 /* Wholly allocated: remove. */
808 assert(usable_arenas->freepools == NULL);
809 assert(usable_arenas->nextarena == NULL ||
810 usable_arenas->nextarena->prevarena ==
811 usable_arenas);
812
813 usable_arenas = usable_arenas->nextarena;
814 if (usable_arenas != NULL) {
815 usable_arenas->prevarena = NULL;
816 assert(usable_arenas->address != 0);
817 }
818 }
819 else {
820 /* nfreepools > 0: it must be that freepools
821 * isn't NULL, or that we haven't yet carved
822 * off all the arena's pools for the first
823 * time.
824 */
825 assert(usable_arenas->freepools != NULL ||
826 usable_arenas->pool_address <=
827 (block*)usable_arenas->address +
828 ARENA_SIZE - POOL_SIZE);
829 }
830 init_pool:
831 /* Frontlink to used pools. */
832 next = usedpools[size + size]; /* == prev */
833 pool->nextpool = next;
834 pool->prevpool = next;
835 next->nextpool = pool;
836 next->prevpool = pool;
837 pool->ref.count = 1;
838 if (pool->szidx == size) {
839 /* Luckily, this pool last contained blocks
840 * of the same size class, so its header
841 * and free list are already initialized.
842 */
843 bp = pool->freeblock;
844 pool->freeblock = *(block **)bp;
845 UNLOCK();
846 return (void *)bp;
847 }
848 /*
849 * Initialize the pool header, set up the free list to
850 * contain just the second block, and return the first
851 * block.
852 */
853 pool->szidx = size;
854 size = INDEX2SIZE(size);
855 bp = (block *)pool + POOL_OVERHEAD;
856 pool->nextoffset = POOL_OVERHEAD + (size << 1);
857 pool->maxnextoffset = POOL_SIZE - size;
858 pool->freeblock = bp + size;
859 *(block **)(pool->freeblock) = NULL;
860 UNLOCK();
861 return (void *)bp;
862 }
863
864 /* Carve off a new pool. */
865 assert(usable_arenas->nfreepools > 0);
866 assert(usable_arenas->freepools == NULL);
867 pool = (poolp)usable_arenas->pool_address;
868 assert((block*)pool <= (block*)usable_arenas->address +
869 ARENA_SIZE - POOL_SIZE);
870 pool->arenaindex = usable_arenas - arenas;
871 assert(&arenas[pool->arenaindex] == usable_arenas);
872 pool->szidx = DUMMY_SIZE_IDX;
873 usable_arenas->pool_address += POOL_SIZE;
874 --usable_arenas->nfreepools;
875
876 if (usable_arenas->nfreepools == 0) {
877 assert(usable_arenas->nextarena == NULL ||
878 usable_arenas->nextarena->prevarena ==
879 usable_arenas);
880 /* Unlink the arena: it is completely allocated. */
881 usable_arenas = usable_arenas->nextarena;
882 if (usable_arenas != NULL) {
883 usable_arenas->prevarena = NULL;
884 assert(usable_arenas->address != 0);
885 }
886 }
887
888 goto init_pool;
889 }
890
891 /* The small block allocator ends here. */
892
893redirect:
894 /* Redirect the original request to the underlying (libc) allocator.
895 * We jump here on bigger requests, on error in the code above (as a
896 * last chance to serve the request) or when the max memory limit
897 * has been reached.
898 */
899 if (nbytes == 0)
900 nbytes = 1;
901 return (void *)malloc(nbytes);
902}
903
904/* free */
905
906#undef PyObject_Free
907void
908PyObject_Free(void *p)
909{
910 poolp pool;
911 block *lastfree;
912 poolp next, prev;
913 uint size;
914
915 if (p == NULL) /* free(NULL) has no effect */
916 return;
917
918 pool = POOL_ADDR(p);
919 if (Py_ADDRESS_IN_RANGE(p, pool)) {
920 /* We allocated this address. */
921 LOCK();
922 /* Link p to the start of the pool's freeblock list. Since
923 * the pool had at least the p block outstanding, the pool
924 * wasn't empty (so it's already in a usedpools[] list, or
925 * was full and is in no list -- it's not in the freeblocks
926 * list in any case).
927 */
928 assert(pool->ref.count > 0); /* else it was empty */
929 *(block **)p = lastfree = pool->freeblock;
930 pool->freeblock = (block *)p;
931 if (lastfree) {
932 struct arena_object* ao;
933 uint nf; /* ao->nfreepools */
934
935 /* freeblock wasn't NULL, so the pool wasn't full,
936 * and the pool is in a usedpools[] list.
937 */
938 if (--pool->ref.count != 0) {
939 /* pool isn't empty: leave it in usedpools */
940 UNLOCK();
941 return;
942 }
943 /* Pool is now empty: unlink from usedpools, and
944 * link to the front of freepools. This ensures that
945 * previously freed pools will be allocated later
946 * (being not referenced, they are perhaps paged out).
947 */
948 next = pool->nextpool;
949 prev = pool->prevpool;
950 next->prevpool = prev;
951 prev->nextpool = next;
952
953 /* Link the pool to freepools. This is a singly-linked
954 * list, and pool->prevpool isn't used there.
955 */
956 ao = &arenas[pool->arenaindex];
957 pool->nextpool = ao->freepools;
958 ao->freepools = pool;
959 nf = ++ao->nfreepools;
960
961 /* All the rest is arena management. We just freed
962 * a pool, and there are 4 cases for arena mgmt:
963 * 1. If all the pools are free, return the arena to
964 * the system free().
965 * 2. If this is the only free pool in the arena,
966 * add the arena back to the `usable_arenas` list.
967 * 3. If the "next" arena has a smaller count of free
968 * pools, we have to "slide this arena right" to
969			 * restore the invariant that usable_arenas is sorted in order of
970 * nfreepools.
971 * 4. Else there's nothing more to do.
972 */
973 if (nf == ao->ntotalpools) {
974 /* Case 1. First unlink ao from usable_arenas.
975 */
976 assert(ao->prevarena == NULL ||
977 ao->prevarena->address != 0);
978 assert(ao ->nextarena == NULL ||
979 ao->nextarena->address != 0);
980
981 /* Fix the pointer in the prevarena, or the
982 * usable_arenas pointer.
983 */
984 if (ao->prevarena == NULL) {
985 usable_arenas = ao->nextarena;
986 assert(usable_arenas == NULL ||
987 usable_arenas->address != 0);
988 }
989 else {
990 assert(ao->prevarena->nextarena == ao);
991 ao->prevarena->nextarena =
992 ao->nextarena;
993 }
994 /* Fix the pointer in the nextarena. */
995 if (ao->nextarena != NULL) {
996 assert(ao->nextarena->prevarena == ao);
997 ao->nextarena->prevarena =
998 ao->prevarena;
999 }
1000 /* Record that this arena_object slot is
1001 * available to be reused.
1002 */
1003 ao->nextarena = unused_arena_objects;
1004 unused_arena_objects = ao;
1005
1006 /* Free the entire arena. */
1007 free((void *)ao->address);
1008 ao->address = 0; /* mark unassociated */
1009 --narenas_currently_allocated;
1010
1011 UNLOCK();
1012 return;
1013 }
1014 if (nf == 1) {
1015 /* Case 2. Put ao at the head of
1016 * usable_arenas. Note that because
1017 * ao->nfreepools was 0 before, ao isn't
1018 * currently on the usable_arenas list.
1019 */
1020 ao->nextarena = usable_arenas;
1021 ao->prevarena = NULL;
1022 if (usable_arenas)
1023 usable_arenas->prevarena = ao;
1024 usable_arenas = ao;
1025 assert(usable_arenas->address != 0);
1026
1027 UNLOCK();
1028 return;
1029 }
1030 /* If this arena is now out of order, we need to keep
1031 * the list sorted. The list is kept sorted so that
1032 * the "most full" arenas are used first, which allows
1033 * the nearly empty arenas to be completely freed. In
1034 * a few un-scientific tests, it seems like this
1035 * approach allowed a lot more memory to be freed.
1036 */
1037 if (ao->nextarena == NULL ||
1038 nf <= ao->nextarena->nfreepools) {
1039 /* Case 4. Nothing to do. */
1040 UNLOCK();
1041 return;
1042 }
1043 /* Case 3: We have to move the arena towards the end
1044 * of the list, because it has more free pools than
1045 * the arena to its right.
1046 * First unlink ao from usable_arenas.
1047 */
1048 if (ao->prevarena != NULL) {
1049 /* ao isn't at the head of the list */
1050 assert(ao->prevarena->nextarena == ao);
1051 ao->prevarena->nextarena = ao->nextarena;
1052 }
1053 else {
1054 /* ao is at the head of the list */
1055 assert(usable_arenas == ao);
1056 usable_arenas = ao->nextarena;
1057 }
1058 ao->nextarena->prevarena = ao->prevarena;
1059
1060 /* Locate the new insertion point by iterating over
1061 * the list, using our nextarena pointer.
1062 */
1063 while (ao->nextarena != NULL &&
1064 nf > ao->nextarena->nfreepools) {
1065 ao->prevarena = ao->nextarena;
1066 ao->nextarena = ao->nextarena->nextarena;
1067 }
1068
1069 /* Insert ao at this point. */
1070 assert(ao->nextarena == NULL ||
1071 ao->prevarena == ao->nextarena->prevarena);
1072 assert(ao->prevarena->nextarena == ao->nextarena);
1073
1074 ao->prevarena->nextarena = ao;
1075 if (ao->nextarena != NULL)
1076 ao->nextarena->prevarena = ao;
1077
1078 /* Verify that the swaps worked. */
1079 assert(ao->nextarena == NULL ||
1080 nf <= ao->nextarena->nfreepools);
1081 assert(ao->prevarena == NULL ||
1082 nf > ao->prevarena->nfreepools);
1083 assert(ao->nextarena == NULL ||
1084 ao->nextarena->prevarena == ao);
1085 assert((usable_arenas == ao &&
1086 ao->prevarena == NULL) ||
1087 ao->prevarena->nextarena == ao);
1088
1089 UNLOCK();
1090 return;
1091 }
1092 /* Pool was full, so doesn't currently live in any list:
1093 * link it to the front of the appropriate usedpools[] list.
1094 * This mimics LRU pool usage for new allocations and
1095 * targets optimal filling when several pools contain
1096 * blocks of the same size class.
1097 */
1098 --pool->ref.count;
1099 assert(pool->ref.count > 0); /* else the pool is empty */
1100 size = pool->szidx;
1101 next = usedpools[size + size];
1102 prev = next->prevpool;
1103 /* insert pool before next: prev <-> pool <-> next */
1104 pool->nextpool = next;
1105 pool->prevpool = prev;
1106 next->prevpool = pool;
1107 prev->nextpool = pool;
1108 UNLOCK();
1109 return;
1110 }
1111
1112 /* We didn't allocate this address. */
1113 free(p);
1114}
1115
1116/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1117 * then as the Python docs promise, we do not treat this like free(p), and
1118 * return a non-NULL result.
1119 */
1120
1121#undef PyObject_Realloc
1122void *
1123PyObject_Realloc(void *p, size_t nbytes)
1124{
1125 void *bp;
1126 poolp pool;
1127 size_t size;
1128
1129 if (p == NULL)
1130 return PyObject_Malloc(nbytes);
1131
1132 pool = POOL_ADDR(p);
1133 if (Py_ADDRESS_IN_RANGE(p, pool)) {
1134 /* We're in charge of this block */
1135 size = INDEX2SIZE(pool->szidx);
1136 if (nbytes <= size) {
1137 /* The block is staying the same or shrinking. If
1138 * it's shrinking, there's a tradeoff: it costs
1139 * cycles to copy the block to a smaller size class,
1140 * but it wastes memory not to copy it. The
1141 * compromise here is to copy on shrink only if at
1142 * least 25% of size can be shaved off.
1143 */
1144 if (4 * nbytes > 3 * size) {
1145 /* It's the same,
1146 * or shrinking and new/old > 3/4.
1147 */
1148 return p;
1149 }
1150 size = nbytes;
1151 }
1152 bp = PyObject_Malloc(nbytes);
1153 if (bp != NULL) {
1154 memcpy(bp, p, size);
1155 PyObject_Free(p);
1156 }
1157 return bp;
1158 }
1159 /* We're not managing this block. If nbytes <=
1160 * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1161 * block. However, if we do, we need to copy the valid data from
1162 * the C-managed block to one of our blocks, and there's no portable
1163 * way to know how much of the memory space starting at p is valid.
1164 * As bug 1185883 pointed out the hard way, it's possible that the
1165 * C-managed block is "at the end" of allocated VM space, so that
1166 * a memory fault can occur if we try to copy nbytes bytes starting
1167 * at p. Instead we punt: let C continue to manage this block.
1168 */
1169 if (nbytes)
1170 return realloc(p, nbytes);
1171 /* C doesn't define the result of realloc(p, 0) (it may or may not
1172 * return NULL then), but Python's docs promise that nbytes==0 never
1173 * returns NULL. We don't pass 0 to realloc(), to avoid that endcase
1174 * to begin with. Even then, we can't be sure that realloc() won't
1175 * return NULL.
1176 */
1177 bp = realloc(p, 1);
1178 return bp ? bp : p;
1179}
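/* Illustrative sketch (hypothetical helper, excluded from compilation) of
 * the shrink rule in PyObject_Realloc above: a pymalloc-owned block is
 * copied to a smaller size class only when at least 25% of its current
 * size would be saved.  For example, a 64-byte block stays in place for
 * requests of 49..64 bytes and is copied for requests of 48 bytes or less.
 */
#if 0
static int
example_shrink_copies(size_t nbytes, size_t blocksize)
{
    return nbytes <= blocksize && 4 * nbytes <= 3 * blocksize;
}
#endif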
1180
1181#else /* ! WITH_PYMALLOC */
1182
1183/*==========================================================================*/
1184/* pymalloc not enabled: Redirect the entry points to malloc. These will
1185 * only be used by extensions that are compiled with pymalloc enabled. */
1186
1187void *
1188PyObject_Malloc(size_t n)
1189{
1190 return PyMem_MALLOC(n);
1191}
1192
1193void *
1194PyObject_Realloc(void *p, size_t n)
1195{
1196 return PyMem_REALLOC(p, n);
1197}
1198
1199void
1200PyObject_Free(void *p)
1201{
1202 PyMem_FREE(p);
1203}
1204#endif /* WITH_PYMALLOC */
1205
1206#ifdef PYMALLOC_DEBUG
1207/*==========================================================================*/
1208/* A cross-platform debugging allocator. This doesn't manage memory directly;
1209 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1210 */
1211
1212/* Special bytes broadcast into debug memory blocks at appropriate times.
1213 * Strings of these are unlikely to be valid addresses, floats, ints or
1214 * 7-bit ASCII.
1215 */
1216#undef CLEANBYTE
1217#undef DEADBYTE
1218#undef FORBIDDENBYTE
1219#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
1220#define DEADBYTE 0xDB /* dead (newly freed) memory */
1221#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
1222
1223static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
1224
1225/* serialno is always incremented via calling this routine. The point is
1226 * to supply a single place to set a breakpoint.
1227 */
1228static void
1229bumpserialno(void)
1230{
1231 ++serialno;
1232}
1233
1234#define SST SIZEOF_SIZE_T
1235
1236/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1237static size_t
1238read_size_t(const void *p)
1239{
1240 const uchar *q = (const uchar *)p;
1241 size_t result = *q++;
1242 int i;
1243
1244 for (i = SST; --i > 0; ++q)
1245 result = (result << 8) | *q;
1246 return result;
1247}
1248
1249/* Write n as a big-endian size_t, MSB at address p, LSB at
1250 * p + sizeof(size_t) - 1.
1251 */
1252static void
1253write_size_t(void *p, size_t n)
1254{
1255 uchar *q = (uchar *)p + SST - 1;
1256 int i;
1257
1258 for (i = SST; --i >= 0; --q) {
1259 *q = (uchar)(n & 0xff);
1260 n >>= 8;
1261 }
1262}
1263
1264#ifdef Py_DEBUG
1265/* Is target in the list? The list is traversed via the nextpool pointers.
1266 * The list may be NULL-terminated, or circular. Return 1 if target is in
1267 * list, else 0.
1268 */
1269static int
1270pool_is_in_list(const poolp target, poolp list)
1271{
1272 poolp origlist = list;
1273 assert(target != NULL);
1274 if (list == NULL)
1275 return 0;
1276 do {
1277 if (target == list)
1278 return 1;
1279 list = list->nextpool;
1280 } while (list != NULL && list != origlist);
1281 return 0;
1282}
1283
1284#else
1285#define pool_is_in_list(X, Y) 1
1286
1287#endif /* Py_DEBUG */
1288
1289/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1290 fills them with useful stuff, here calling the underlying malloc's result p:
1291
1292p[0: S]
1293 Number of bytes originally asked for. This is a size_t, big-endian (easier
1294 to read in a memory dump).
1295p[S: 2*S]
1296    Copies of FORBIDDENBYTE. Used to catch under-writes and reads.
1297p[2*S: 2*S+n]
1298 The requested memory, filled with copies of CLEANBYTE.
1299 Used to catch reference to uninitialized memory.
1300 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
1301 handled the request itself.
1302p[2*S+n: 2*S+n+S]
1303    Copies of FORBIDDENBYTE. Used to catch over-writes and reads.
1304p[2*S+n+S: 2*S+n+2*S]
1305 A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
1306 and _PyObject_DebugRealloc.
1307 This is a big-endian size_t.
1308 If "bad memory" is detected later, the serial number gives an
1309 excellent way to set a breakpoint on the next run, to capture the
1310 instant at which this block was passed out.
1311*/
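/* Illustrative sketch (hypothetical helper, excluded from compilation):
 * given a pointer returned by the debug allocator, the layout described
 * above lets us recover the original request size and the allocation
 * serial number by reading the big-endian size_t fields around the user
 * data.
 */
#if 0
static void
example_inspect_debug_block(const void *p, size_t *nbytes, size_t *serial)
{
    const uchar *q = (const uchar *)p;

    *nbytes = read_size_t(q - 2*SST);           /* the p[0: S] field of the raw block */
    *serial = read_size_t(q + *nbytes + SST);   /* the p[2*S+n+S: 2*S+n+2*S] field    */
}
#endif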
1312
1313void *
1314_PyObject_DebugMalloc(size_t nbytes)
1315{
1316 uchar *p; /* base address of malloc'ed block */
1317 uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */
1318 size_t total; /* nbytes + 4*SST */
1319
1320 bumpserialno();
1321 total = nbytes + 4*SST;
1322 if (total < nbytes)
1323 /* overflow: can't represent total as a size_t */
1324 return NULL;
1325
1326 p = (uchar *)PyObject_Malloc(total);
1327 if (p == NULL)
1328 return NULL;
1329
1330 write_size_t(p, nbytes);
1331 memset(p + SST, FORBIDDENBYTE, SST);
1332
1333 if (nbytes > 0)
1334 memset(p + 2*SST, CLEANBYTE, nbytes);
1335
1336 tail = p + 2*SST + nbytes;
1337 memset(tail, FORBIDDENBYTE, SST);
1338 write_size_t(tail + SST, serialno);
1339
1340 return p + 2*SST;
1341}
1342
1343/* The debug free first checks the 2*SST bytes on each end for sanity (in
1344 particular, that the FORBIDDENBYTEs are still intact).
1345 Then fills the original bytes with DEADBYTE.
1346 Then calls the underlying free.
1347*/
1348void
1349_PyObject_DebugFree(void *p)
1350{
1351 uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */
1352 size_t nbytes;
1353
1354 if (p == NULL)
1355 return;
1356 _PyObject_DebugCheckAddress(p);
1357 nbytes = read_size_t(q);
1358 if (nbytes > 0)
1359 memset(q, DEADBYTE, nbytes);
1360 PyObject_Free(q);
1361}
1362
1363void *
1364_PyObject_DebugRealloc(void *p, size_t nbytes)
1365{
1366 uchar *q = (uchar *)p;
1367 uchar *tail;
1368 size_t total; /* nbytes + 4*SST */
1369 size_t original_nbytes;
1370 int i;
1371
1372 if (p == NULL)
1373 return _PyObject_DebugMalloc(nbytes);
1374
1375 _PyObject_DebugCheckAddress(p);
1376 bumpserialno();
1377 original_nbytes = read_size_t(q - 2*SST);
1378 total = nbytes + 4*SST;
1379 if (total < nbytes)
1380 /* overflow: can't represent total as a size_t */
1381 return NULL;
1382
1383 if (nbytes < original_nbytes) {
1384 /* shrinking: mark old extra memory dead */
1385 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
1386 }
1387
1388 /* Resize and add decorations. */
1389 q = (uchar *)PyObject_Realloc(q - 2*SST, total);
1390 if (q == NULL)
1391 return NULL;
1392
1393 write_size_t(q, nbytes);
1394 for (i = 0; i < SST; ++i)
1395 assert(q[SST + i] == FORBIDDENBYTE);
1396 q += 2*SST;
1397 tail = q + nbytes;
1398 memset(tail, FORBIDDENBYTE, SST);
1399 write_size_t(tail + SST, serialno);
1400
1401 if (nbytes > original_nbytes) {
1402 /* growing: mark new extra memory clean */
1403 memset(q + original_nbytes, CLEANBYTE,
1404 nbytes - original_nbytes);
1405 }
1406
1407 return q;
1408}
1409
1410/* Check the forbidden bytes on both ends of the memory allocated for p.
1411 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
1412 * and call Py_FatalError to kill the program.
1413 */
1414 void
1415_PyObject_DebugCheckAddress(const void *p)
1416{
1417 const uchar *q = (const uchar *)p;
1418 char *msg;
1419 size_t nbytes;
1420 const uchar *tail;
1421 int i;
1422
1423 if (p == NULL) {
1424 msg = "didn't expect a NULL pointer";
1425 goto error;
1426 }
1427
1428 /* Check the stuff at the start of p first: if there's underwrite
1429 * corruption, the number-of-bytes field may be nuts, and checking
1430 * the tail could lead to a segfault then.
1431 */
1432 for (i = SST; i >= 1; --i) {
1433 if (*(q-i) != FORBIDDENBYTE) {
1434 msg = "bad leading pad byte";
1435 goto error;
1436 }
1437 }
1438
1439 nbytes = read_size_t(q - 2*SST);
1440 tail = q + nbytes;
1441 for (i = 0; i < SST; ++i) {
1442 if (tail[i] != FORBIDDENBYTE) {
1443 msg = "bad trailing pad byte";
1444 goto error;
1445 }
1446 }
1447
1448 return;
1449
1450error:
1451 _PyObject_DebugDumpAddress(p);
1452 Py_FatalError(msg);
1453}
1454
1455/* Display info to stderr about the memory block at p. */
1456void
1457_PyObject_DebugDumpAddress(const void *p)
1458{
1459 const uchar *q = (const uchar *)p;
1460 const uchar *tail;
1461 size_t nbytes, serial;
1462 int i;
1463 int ok;
1464
1465 fprintf(stderr, "Debug memory block at address p=%p:\n", p);
1466 if (p == NULL)
1467 return;
1468
1469 nbytes = read_size_t(q - 2*SST);
1470 fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
1471 "requested\n", nbytes);
1472
1473 /* In case this is nuts, check the leading pad bytes first. */
1474 fprintf(stderr, " The %d pad bytes at p-%d are ", SST, SST);
1475 ok = 1;
1476 for (i = 1; i <= SST; ++i) {
1477 if (*(q-i) != FORBIDDENBYTE) {
1478 ok = 0;
1479 break;
1480 }
1481 }
1482 if (ok)
1483 fputs("FORBIDDENBYTE, as expected.\n", stderr);
1484 else {
1485 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1486 FORBIDDENBYTE);
1487 for (i = SST; i >= 1; --i) {
1488 const uchar byte = *(q-i);
1489 fprintf(stderr, " at p-%d: 0x%02x", i, byte);
1490 if (byte != FORBIDDENBYTE)
1491 fputs(" *** OUCH", stderr);
1492 fputc('\n', stderr);
1493 }
1494
1495 fputs(" Because memory is corrupted at the start, the "
1496 "count of bytes requested\n"
1497 " may be bogus, and checking the trailing pad "
1498 "bytes may segfault.\n", stderr);
1499 }
1500
1501 tail = q + nbytes;
1502 fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
1503 ok = 1;
1504 for (i = 0; i < SST; ++i) {
1505 if (tail[i] != FORBIDDENBYTE) {
1506 ok = 0;
1507 break;
1508 }
1509 }
1510 if (ok)
1511 fputs("FORBIDDENBYTE, as expected.\n", stderr);
1512 else {
1513 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1514 FORBIDDENBYTE);
1515 for (i = 0; i < SST; ++i) {
1516 const uchar byte = tail[i];
1517 fprintf(stderr, " at tail+%d: 0x%02x",
1518 i, byte);
1519 if (byte != FORBIDDENBYTE)
1520 fputs(" *** OUCH", stderr);
1521 fputc('\n', stderr);
1522 }
1523 }
1524
1525 serial = read_size_t(tail + SST);
1526 fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
1527 "u to debug malloc/realloc.\n", serial);
1528
1529 if (nbytes > 0) {
1530 i = 0;
1531 fputs(" Data at p:", stderr);
1532 /* print up to 8 bytes at the start */
1533 while (q < tail && i < 8) {
1534 fprintf(stderr, " %02x", *q);
1535 ++i;
1536 ++q;
1537 }
1538 /* and up to 8 at the end */
1539 if (q < tail) {
1540 if (tail - q > 8) {
1541 fputs(" ...", stderr);
1542 q = tail - 8;
1543 }
1544 while (q < tail) {
1545 fprintf(stderr, " %02x", *q);
1546 ++q;
1547 }
1548 }
1549 fputc('\n', stderr);
1550 }
1551}
1552
1553static size_t
1554printone(const char* msg, size_t value)
1555{
1556 int i, k;
1557 char buf[100];
1558 size_t origvalue = value;
1559
1560 fputs(msg, stderr);
1561 for (i = (int)strlen(msg); i < 35; ++i)
1562 fputc(' ', stderr);
1563 fputc('=', stderr);
1564
1565 /* Write the value with commas. */
1566 i = 22;
1567 buf[i--] = '\0';
1568 buf[i--] = '\n';
1569 k = 3;
1570 do {
1571 size_t nextvalue = value / 10;
1572 uint digit = (uint)(value - nextvalue * 10);
1573 value = nextvalue;
1574 buf[i--] = (char)(digit + '0');
1575 --k;
1576 if (k == 0 && value && i >= 0) {
1577 k = 3;
1578 buf[i--] = ',';
1579 }
1580 } while (value && i >= 0);
1581
1582 while (i >= 0)
1583 buf[i--] = ' ';
1584 fputs(buf, stderr);
1585
1586 return origvalue;
1587}
1588
1589/* Print summary info to stderr about the state of pymalloc's structures.
1590 * In Py_DEBUG mode, also perform some expensive internal consistency
1591 * checks.
1592 */
1593void
1594_PyObject_DebugMallocStats(void)
1595{
1596 uint i;
1597 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
1598 /* # of pools, allocated blocks, and free blocks per class index */
1599 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1600 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1601 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1602 /* total # of allocated bytes in used and full pools */
1603 size_t allocated_bytes = 0;
1604 /* total # of available bytes in used pools */
1605 size_t available_bytes = 0;
1606 /* # of free pools + pools not yet carved out of current arena */
1607 uint numfreepools = 0;
1608 /* # of bytes for arena alignment padding */
1609 size_t arena_alignment = 0;
1610 /* # of bytes in used and full pools used for pool_headers */
1611 size_t pool_header_bytes = 0;
1612 /* # of bytes in used and full pools wasted due to quantization,
1613 * i.e. the necessarily leftover space at the ends of used and
1614 * full pools.
1615 */
1616 size_t quantization = 0;
1617 /* # of arenas actually allocated. */
1618 size_t narenas = 0;
1619 /* running total -- should equal narenas * ARENA_SIZE */
1620 size_t total;
1621 char buf[128];
1622
1623 fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
1624 SMALL_REQUEST_THRESHOLD, numclasses);
1625
1626 for (i = 0; i < numclasses; ++i)
1627 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
1628
1629 /* Because full pools aren't linked to from anything, it's easiest
1630 * to march over all the arenas. If we're lucky, most of the memory
1631 * will be living in full pools -- would be a shame to miss them.
1632 */
1633 for (i = 0; i < maxarenas; ++i) {
1634 uint poolsinarena;
1635 uint j;
1636 uptr base = arenas[i].address;
1637
1638 /* Skip arenas which are not allocated. */
1639 if (arenas[i].address == (uptr)NULL)
1640 continue;
1641 narenas += 1;
1642
1643 poolsinarena = arenas[i].ntotalpools;
1644 numfreepools += arenas[i].nfreepools;
1645
1646 /* round up to pool alignment */
1647 if (base & (uptr)POOL_SIZE_MASK) {
1648 arena_alignment += POOL_SIZE;
1649 base &= ~(uptr)POOL_SIZE_MASK;
1650 base += POOL_SIZE;
1651 }
1652
1653 /* visit every pool in the arena */
1654 assert(base <= (uptr) arenas[i].pool_address);
1655 for (j = 0;
1656 base < (uptr) arenas[i].pool_address;
1657 ++j, base += POOL_SIZE) {
1658 poolp p = (poolp)base;
1659 const uint sz = p->szidx;
1660 uint freeblocks;
1661
1662 if (p->ref.count == 0) {
1663 /* currently unused */
1664 assert(pool_is_in_list(p, arenas[i].freepools));
1665 continue;
1666 }
1667 ++numpools[sz];
1668 numblocks[sz] += p->ref.count;
1669 freeblocks = NUMBLOCKS(sz) - p->ref.count;
1670 numfreeblocks[sz] += freeblocks;
1671#ifdef Py_DEBUG
1672 if (freeblocks > 0)
1673 assert(pool_is_in_list(p, usedpools[sz + sz]));
1674#endif
1675 }
1676 }
1677 assert(narenas == narenas_currently_allocated);
1678
1679 fputc('\n', stderr);
1680 fputs("class size num pools blocks in use avail blocks\n"
1681 "----- ---- --------- ------------- ------------\n",
1682 stderr);
1683
1684 for (i = 0; i < numclasses; ++i) {
1685 size_t p = numpools[i];
1686 size_t b = numblocks[i];
1687 size_t f = numfreeblocks[i];
1688 uint size = INDEX2SIZE(i);
1689 if (p == 0) {
1690 assert(b == 0 && f == 0);
1691 continue;
1692 }
1693 fprintf(stderr, "%5u %6u "
1694 "%11" PY_FORMAT_SIZE_T "u "
1695 "%15" PY_FORMAT_SIZE_T "u "
1696 "%13" PY_FORMAT_SIZE_T "u\n",
1697 i, size, p, b, f);
1698 allocated_bytes += b * size;
1699 available_bytes += f * size;
1700 pool_header_bytes += p * POOL_OVERHEAD;
1701 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
1702 }
1703 fputc('\n', stderr);
1704 (void)printone("# times object malloc called", serialno);
1705
1706 (void)printone("# arenas allocated total", ntimes_arena_allocated);
1707 (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);
1708 (void)printone("# arenas highwater mark", narenas_highwater);
1709 (void)printone("# arenas allocated current", narenas);
1710
1711 PyOS_snprintf(buf, sizeof(buf),
1712 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
1713 narenas, ARENA_SIZE);
1714 (void)printone(buf, narenas * ARENA_SIZE);
1715
1716 fputc('\n', stderr);
1717
1718 total = printone("# bytes in allocated blocks", allocated_bytes);
1719 total += printone("# bytes in available blocks", available_bytes);
1720
1721 PyOS_snprintf(buf, sizeof(buf),
1722 "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
1723 total += printone(buf, (size_t)numfreepools * POOL_SIZE);
1724
1725 total += printone("# bytes lost to pool headers", pool_header_bytes);
1726 total += printone("# bytes lost to quantization", quantization);
1727 total += printone("# bytes lost to arena alignment", arena_alignment);
1728 (void)printone("Total", total);
1729}
1730
1731#endif /* PYMALLOC_DEBUG */
1732
1733#ifdef Py_USING_MEMORY_DEBUGGER
1734/* Make this function last so gcc won't inline it since the definition is
1735 * after the reference.
1736 */
1737int
1738Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1739{
1740 return pool->arenaindex < maxarenas &&
1741 (uptr)P - arenas[pool->arenaindex].address < (uptr)ARENA_SIZE &&
1742 arenas[pool->arenaindex].address != 0;
1743}
1744#endif