#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *    "Dynamic Storage Allocation: A Survey and Critical Review",
 *    in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */		/* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list into pools and dynamically share
 * the reserved space among all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes	Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *	  9-16                   16                       1
 *	 17-24                   24                       2
 *	 25-32                   32                       3
 *	 33-40                   40                       4
 *	 41-48                   48                       5
 *	 49-56                   56                       6
 *	 57-64                   64                       7
 *	 65-72                   72                       8
 *	  ...                   ...                     ...
 *	241-248                 248                      30
 *	249-256                 256                      31
 *
 *	0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT		8		/* must be 2^N */
#define ALIGNMENT_SHIFT		3
#define ALIGNMENT_MASK		(ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *	1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *	2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD	256
#define NB_SMALL_SIZE_CLASSES	(SMALL_REQUEST_THRESHOLD / ALIGNMENT)
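
/* An illustrative sketch (hypothetical helper, never called):  the
 * request-size -> size-class-index mapping described above, spelled out.
 * PyObject_Malloc below computes the same index inline as
 * (nbytes - 1) >> ALIGNMENT_SHIFT; e.g. a 20-byte request lands in size
 * class 2 and is served by a 24-byte block.
 */
static unsigned int
size_class_example(size_t nbytes)
{
	unsigned int index;
	size_t blocksize;

	assert(nbytes > 0 && nbytes <= SMALL_REQUEST_THRESHOLD);
	index = (unsigned int)((nbytes - 1) >> ALIGNMENT_SHIFT);
	blocksize = (size_t)(index + 1) << ALIGNMENT_SHIFT;
	assert(blocksize >= nbytes);		/* the block is big enough */
	assert(blocksize - nbytes < ALIGNMENT);	/* and wastes < ALIGNMENT  */
	return index;
}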

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault.  4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE	(4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK	(SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT	(64 * 1024 * 1024)	/* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory"; it wastes address space only.
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE		(256 << 10)	/* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS		(SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE		SYSTEM_PAGE_SIZE	/* must be 2^N */
#define POOL_SIZE_MASK		SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per-size-class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)	/* simple lock declaration		*/
#define SIMPLELOCK_INIT(lock)	/* allocate (if needed) and initialize	*/
#define SIMPLELOCK_FINI(lock)	/* free/destroy an existing lock 	*/
#define SIMPLELOCK_LOCK(lock)	/* acquire released lock */
#define SIMPLELOCK_UNLOCK(lock)	/* release acquired lock */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar	unsigned char	/* assuming == 8 bits  */

#undef  uint
#define uint	unsigned int	/* assuming >= 16 bits */

#undef  ulong
#define ulong	unsigned long	/* assuming >= 32 bits */

#undef uptr
#define uptr	Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
	union { block *_padding;
		uint count; } ref;	/* number of allocated blocks    */
	block *freeblock;		/* pool's free list head         */
	struct pool_header *nextpool;	/* next pool of this size class  */
	struct pool_header *prevpool;	/* previous pool       ""        */
	uint arenaindex;		/* index into arenas of base adr */
	uint szidx;			/* block size class index	 */
	uint nextoffset;		/* bytes to virgin block	 */
	uint maxnextoffset;		/* largest valid nextoffset	 */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
	/* The address of the arena, as returned by malloc.  Note that 0
	 * will never be returned by a successful malloc, and is used
	 * here to mark an arena_object that doesn't correspond to an
	 * allocated arena.
	 */
	uptr address;

	/* Pool-aligned pointer to the next pool to be carved off. */
	block* pool_address;

	/* The number of available pools in the arena:  free pools + never-
	 * allocated pools.
	 */
	uint nfreepools;

	/* The total number of pools in the arena, whether or not available. */
	uint ntotalpools;

	/* Singly-linked list of available pools. */
	struct pool_header* freepools;

	/* Whenever this arena_object is not associated with an allocated
	 * arena, the nextarena member is used to link all unassociated
	 * arena_objects in the singly-linked `unused_arena_objects` list.
	 * The prevarena member is unused in this case.
	 *
	 * When this arena_object is associated with an allocated arena
	 * with at least one available pool, both members are used in the
	 * doubly-linked `usable_arenas` list, which is maintained in
	 * increasing order of `nfreepools` values.
	 *
	 * Else this arena_object is associated with an allocated arena
	 * all of whose pools are in use.  `nextarena` and `prevarena`
	 * are both meaningless in this case.
	 */
	struct arena_object* nextarena;
	struct arena_object* prevarena;
};

#undef  ROUNDUP
#define ROUNDUP(x)		(((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD		ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX		0xffff	/* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
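
/* An illustrative sketch (hypothetical, never called):  every size class
 * must leave room for at least two blocks per pool, since a pool in the
 * "used" state (see the pool table comments below) always has at least one
 * allocated and one available block.
 */
static void
pool_capacity_example(void)
{
	uint i;
	for (i = 0; i < NB_SMALL_SIZE_CLASSES; ++i)
		assert(NUMBLOCKS(i) >= 2);
}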

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()		SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()	SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()	SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()	SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything then anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's policy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish
compensates for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

	union { block *_padding;
		uint count; } ref;
	block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)	((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)	PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
	PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
	, PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
	, PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
	, PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
	, PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
	, PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
	, PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
	, PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
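
/* An illustrative sketch (hypothetical, never called):  the two properties
 * the fudged table above depends on.  nextpool must sit exactly
 * 2*sizeof(block *) bytes into a pool_header (no padding before it), and
 * each fake header must point at itself while its list is empty -- which
 * is how the code below tests a list for emptiness.  (offsetof is assumed
 * to be available via Python.h's standard includes.)
 */
static void
usedpools_invariants_example(void)
{
	uint i;
	assert(offsetof(struct pool_header, nextpool) == 2 * sizeof(block *));
	for (i = 0; i < NB_SMALL_SIZE_CLASSES; ++i) {
		poolp head = usedpools[i + i];
		/* holds at startup, before any pool has been linked in */
		assert(head->nextpool == head && head->prevpool == head);
	}
}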

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant:  an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;
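
/* An illustrative sketch (hypothetical, never called):  the ordering
 * invariant of `usable_arenas` -- ascending nfreepools -- which
 * PyObject_Free works to maintain (see "case 3" there).
 */
static void
usable_arenas_sorted_example(void)
{
	struct arena_object *ao;
	for (ao = usable_arenas; ao != NULL && ao->nextarena != NULL;
	     ao = ao->nextarena)
		assert(ao->nfreepools <= ao->nextarena->nfreepools);
}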

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

#ifdef PYMALLOC_DEBUG
/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;
#endif

/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
	struct arena_object* arenaobj;
	uint excess;	/* number of bytes above pool alignment */

#ifdef PYMALLOC_DEBUG
	if (Py_GETENV("PYTHONMALLOCSTATS"))
		_PyObject_DebugMallocStats();
#endif
	if (unused_arena_objects == NULL) {
		uint i;
		uint numarenas;
		size_t nbytes;

		/* Double the number of arena objects on each allocation.
		 * Note that it's possible for `numarenas` to overflow.
		 */
		numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
		if (numarenas <= maxarenas)
			return NULL;	/* overflow */
		nbytes = numarenas * sizeof(*arenas);
		if (nbytes / sizeof(*arenas) != numarenas)
			return NULL;	/* overflow */
		arenaobj = (struct arena_object *)realloc(arenas, nbytes);
		if (arenaobj == NULL)
			return NULL;
		arenas = arenaobj;

		/* We might need to fix pointers that were copied.  However,
		 * new_arena only gets called when all the pages in the
		 * previous arenas are full.  Thus, there are *no* pointers
		 * into the old array. Thus, we don't have to worry about
		 * invalid pointers.  Just to be sure, some asserts:
		 */
		assert(usable_arenas == NULL);
		assert(unused_arena_objects == NULL);

		/* Put the new arenas on the unused_arena_objects list. */
		for (i = maxarenas; i < numarenas; ++i) {
			arenas[i].address = 0;	/* mark as unassociated */
			arenas[i].nextarena = i < numarenas - 1 ?
					       &arenas[i+1] : NULL;
		}

		/* Update globals. */
		unused_arena_objects = &arenas[maxarenas];
		maxarenas = numarenas;
	}

	/* Take the next available arena object off the head of the list. */
	assert(unused_arena_objects != NULL);
	arenaobj = unused_arena_objects;
	unused_arena_objects = arenaobj->nextarena;
	assert(arenaobj->address == 0);
	arenaobj->address = (uptr)malloc(ARENA_SIZE);
	if (arenaobj->address == 0) {
		/* The allocation failed: return NULL after putting the
		 * arenaobj back.
		 */
		arenaobj->nextarena = unused_arena_objects;
		unused_arena_objects = arenaobj;
		return NULL;
	}

	++narenas_currently_allocated;
#ifdef PYMALLOC_DEBUG
	++ntimes_arena_allocated;
	if (narenas_currently_allocated > narenas_highwater)
		narenas_highwater = narenas_currently_allocated;
#endif
	arenaobj->freepools = NULL;
	/* pool_address <- first pool-aligned address in the arena
	   nfreepools <- number of whole pools that fit after alignment */
	arenaobj->pool_address = (block*)arenaobj->address;
	arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
	assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
	excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
	if (excess != 0) {
		--arenaobj->nfreepools;
		arenaobj->pool_address += POOL_SIZE - excess;
	}
	arenaobj->ntotalpools = arenaobj->nfreepools;

	return arenaobj;
}

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky:  Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

	B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

	0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure:  A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on that maxarenas is also 0 in that case, so that
(POOL)->arenaindex < maxarenas  must be false, saving us from trying to index
into a NULL arenas.

Details:  given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

	P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result:  AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note:  The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)			\
	((POOL)->arenaindex < maxarenas &&		\
	 (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
	 arenas[(POOL)->arenaindex].address != 0)
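
/* An illustrative sketch (hypothetical, never called):  the unsigned-
 * arithmetic trick the macro above exploits.  If P is below the arena base,
 * the subtraction wraps around to a huge unsigned value, so the single
 * comparison also rejects addresses under the base (assuming, as for real
 * arenas, that base + ARENA_SIZE itself doesn't wrap).
 */
static int
address_in_arena_example(uptr p, uptr base)
{
	/* equivalent to:  base <= p && p < base + ARENA_SIZE */
	return p - base < (uptr)ARENA_SIZE;
}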


/* This is only useful when running memory debuggers such as
 * Purify or Valgrind.  Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind.  By making a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
			  (__GNUC__ >= 4))
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branch prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
	block *bp;
	poolp pool;
	poolp next;
	uint size;

	/*
	 * This implicitly redirects malloc(0).
	 */
	if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
		LOCK();
		/*
		 * Most frequent paths first
		 */
		size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
		pool = usedpools[size + size];
		if (pool != pool->nextpool) {
			/*
			 * There is a used pool for this size class.
			 * Pick up the head block of its free list.
			 */
			++pool->ref.count;
			bp = pool->freeblock;
			assert(bp != NULL);
			if ((pool->freeblock = *(block **)bp) != NULL) {
				UNLOCK();
				return (void *)bp;
			}
			/*
			 * Reached the end of the free list, try to extend it.
			 */
			if (pool->nextoffset <= pool->maxnextoffset) {
				/* There is room for another block. */
				pool->freeblock = (block*)pool +
						  pool->nextoffset;
				pool->nextoffset += INDEX2SIZE(size);
				*(block **)(pool->freeblock) = NULL;
				UNLOCK();
				return (void *)bp;
			}
			/* Pool is full, unlink from used pools. */
			next = pool->nextpool;
			pool = pool->prevpool;
			next->prevpool = pool;
			pool->nextpool = next;
			UNLOCK();
			return (void *)bp;
		}

		/* There isn't a pool of the right size class immediately
		 * available:  use a free pool.
		 */
		if (usable_arenas == NULL) {
			/* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
			if (narenas_currently_allocated >= MAX_ARENAS) {
				UNLOCK();
				goto redirect;
			}
#endif
			usable_arenas = new_arena();
			if (usable_arenas == NULL) {
				UNLOCK();
				goto redirect;
			}
			usable_arenas->nextarena =
				usable_arenas->prevarena = NULL;
		}
		assert(usable_arenas->address != 0);

		/* Try to get a cached free pool. */
		pool = usable_arenas->freepools;
		if (pool != NULL) {
			/* Unlink from cached pools. */
			usable_arenas->freepools = pool->nextpool;

			/* This arena already had the smallest nfreepools
			 * value, so decreasing nfreepools doesn't change
			 * that, and we don't need to rearrange the
			 * usable_arenas list.  However, if the arena has
			 * become wholly allocated, we need to remove its
			 * arena_object from usable_arenas.
			 */
			--usable_arenas->nfreepools;
			if (usable_arenas->nfreepools == 0) {
				/* Wholly allocated:  remove. */
				assert(usable_arenas->freepools == NULL);
				assert(usable_arenas->nextarena == NULL ||
				       usable_arenas->nextarena->prevarena ==
					   usable_arenas);

				usable_arenas = usable_arenas->nextarena;
				if (usable_arenas != NULL) {
					usable_arenas->prevarena = NULL;
					assert(usable_arenas->address != 0);
				}
			}
			else {
				/* nfreepools > 0:  it must be that freepools
				 * isn't NULL, or that we haven't yet carved
				 * off all the arena's pools for the first
				 * time.
				 */
				assert(usable_arenas->freepools != NULL ||
				       usable_arenas->pool_address <=
				           (block*)usable_arenas->address +
				               ARENA_SIZE - POOL_SIZE);
			}
		init_pool:
			/* Frontlink to used pools. */
			next = usedpools[size + size]; /* == prev */
			pool->nextpool = next;
			pool->prevpool = next;
			next->nextpool = pool;
			next->prevpool = pool;
			pool->ref.count = 1;
			if (pool->szidx == size) {
				/* Luckily, this pool last contained blocks
				 * of the same size class, so its header
				 * and free list are already initialized.
				 */
				bp = pool->freeblock;
				pool->freeblock = *(block **)bp;
				UNLOCK();
				return (void *)bp;
			}
			/*
850 851 852
			 * Initialize the pool header, set up the free list to
			 * contain just the second block, and return the first
			 * block.
			 */
			pool->szidx = size;
			size = INDEX2SIZE(size);
			bp = (block *)pool + POOL_OVERHEAD;
			pool->nextoffset = POOL_OVERHEAD + (size << 1);
			pool->maxnextoffset = POOL_SIZE - size;
			pool->freeblock = bp + size;
			*(block **)(pool->freeblock) = NULL;
			UNLOCK();
			return (void *)bp;
		}

		/* Carve off a new pool. */
		assert(usable_arenas->nfreepools > 0);
		assert(usable_arenas->freepools == NULL);
		pool = (poolp)usable_arenas->pool_address;
		assert((block*)pool <= (block*)usable_arenas->address +
		                       ARENA_SIZE - POOL_SIZE);
		pool->arenaindex = usable_arenas - arenas;
		assert(&arenas[pool->arenaindex] == usable_arenas);
		pool->szidx = DUMMY_SIZE_IDX;
		usable_arenas->pool_address += POOL_SIZE;
		--usable_arenas->nfreepools;

		if (usable_arenas->nfreepools == 0) {
			assert(usable_arenas->nextarena == NULL ||
			       usable_arenas->nextarena->prevarena ==
			       	   usable_arenas);
			/* Unlink the arena:  it is completely allocated. */
			usable_arenas = usable_arenas->nextarena;
			if (usable_arenas != NULL) {
				usable_arenas->prevarena = NULL;
				assert(usable_arenas->address != 0);
			}
		}

		goto init_pool;
	}

	/* The small block allocator ends here. */

redirect:
	/* Redirect the original request to the underlying (libc) allocator.
	 * We jump here on bigger requests, on error in the code above (as a
	 * last chance to serve the request) or when the max memory limit
	 * has been reached.
	 */
	if (nbytes == 0)
		nbytes = 1;
	return (void *)malloc(nbytes);
}

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
	poolp pool;
	block *lastfree;
	poolp next, prev;
	uint size;

	if (p == NULL)	/* free(NULL) has no effect */
		return;

	pool = POOL_ADDR(p);
	if (Py_ADDRESS_IN_RANGE(p, pool)) {
		/* We allocated this address. */
		LOCK();
		/* Link p to the start of the pool's freeblock list.  Since
		 * the pool had at least the p block outstanding, the pool
		 * wasn't empty (so it's already in a usedpools[] list, or
		 * was full and is in no list -- it's not in the freeblocks
		 * list in any case).
		 */
		assert(pool->ref.count > 0);	/* else it was empty */
		*(block **)p = lastfree = pool->freeblock;
		pool->freeblock = (block *)p;
		if (lastfree) {
			struct arena_object* ao;
			uint nf;  /* ao->nfreepools */

			/* freeblock wasn't NULL, so the pool wasn't full,
			 * and the pool is in a usedpools[] list.
			 */
			if (--pool->ref.count != 0) {
				/* pool isn't empty:  leave it in usedpools */
				UNLOCK();
				return;
			}
			/* Pool is now empty:  unlink from usedpools, and
			 * link to the front of freepools.  This ensures that
			 * previously freed pools will be allocated later
			 * (being not referenced, they are perhaps paged out).
			 */
			next = pool->nextpool;
			prev = pool->prevpool;
			next->prevpool = prev;
			prev->nextpool = next;

			/* Link the pool to freepools.  This is a singly-linked
			 * list, and pool->prevpool isn't used there.
			 */
			ao = &arenas[pool->arenaindex];
			pool->nextpool = ao->freepools;
			ao->freepools = pool;
			nf = ++ao->nfreepools;

			/* All the rest is arena management.  We just freed
			 * a pool, and there are 4 cases for arena mgmt:
			 * 1. If all the pools are free, return the arena to
			 *    the system free().
			 * 2. If this is the only free pool in the arena,
			 *    add the arena back to the `usable_arenas` list.
			 * 3. If the "next" arena has a smaller count of free
			 *    pools, we have to "slide this arena right" to
			 *    restore that usable_arenas is sorted in order of
			 *    nfreepools.
			 * 4. Else there's nothing more to do.
			 */
			if (nf == ao->ntotalpools) {
				/* Case 1.  First unlink ao from usable_arenas.
				 */
				assert(ao->prevarena == NULL ||
				       ao->prevarena->address != 0);
				assert(ao ->nextarena == NULL ||
				       ao->nextarena->address != 0);

				/* Fix the pointer in the prevarena, or the
				 * usable_arenas pointer.
				 */
				if (ao->prevarena == NULL) {
					usable_arenas = ao->nextarena;
					assert(usable_arenas == NULL ||
					       usable_arenas->address != 0);
				}
				else {
					assert(ao->prevarena->nextarena == ao);
					ao->prevarena->nextarena =
						ao->nextarena;
				}
				/* Fix the pointer in the nextarena. */
				if (ao->nextarena != NULL) {
					assert(ao->nextarena->prevarena == ao);
					ao->nextarena->prevarena =
						ao->prevarena;
				}
				/* Record that this arena_object slot is
				 * available to be reused.
				 */
				ao->nextarena = unused_arena_objects;
				unused_arena_objects = ao;

				/* Free the entire arena. */
				free((void *)ao->address);
				ao->address = 0;	/* mark unassociated */
				--narenas_currently_allocated;

				UNLOCK();
				return;
			}
			if (nf == 1) {
				/* Case 2.  Put ao at the head of
				 * usable_arenas.  Note that because
				 * ao->nfreepools was 0 before, ao isn't
				 * currently on the usable_arenas list.
				 */
				ao->nextarena = usable_arenas;
				ao->prevarena = NULL;
				if (usable_arenas)
					usable_arenas->prevarena = ao;
				usable_arenas = ao;
				assert(usable_arenas->address != 0);

				UNLOCK();
				return;
			}
			/* If this arena is now out of order, we need to keep
			 * the list sorted.  The list is kept sorted so that
			 * the "most full" arenas are used first, which allows
			 * the nearly empty arenas to be completely freed.  In
			 * a few un-scientific tests, it seems like this
			 * approach allowed a lot more memory to be freed.
			 */
			if (ao->nextarena == NULL ||
				     nf <= ao->nextarena->nfreepools) {
				/* Case 4.  Nothing to do. */
				UNLOCK();
				return;
			}
			/* Case 3:  We have to move the arena towards the end
			 * of the list, because it has more free pools than
			 * the arena to its right.
			 * First unlink ao from usable_arenas.
			 */
			if (ao->prevarena != NULL) {
				/* ao isn't at the head of the list */
				assert(ao->prevarena->nextarena == ao);
				ao->prevarena->nextarena = ao->nextarena;
			}
			else {
				/* ao is at the head of the list */
				assert(usable_arenas == ao);
				usable_arenas = ao->nextarena;
			}
			ao->nextarena->prevarena = ao->prevarena;

			/* Locate the new insertion point by iterating over
			 * the list, using our nextarena pointer.
			 */
			while (ao->nextarena != NULL &&
					nf > ao->nextarena->nfreepools) {
				ao->prevarena = ao->nextarena;
				ao->nextarena = ao->nextarena->nextarena;
			}

			/* Insert ao at this point. */
			assert(ao->nextarena == NULL ||
				ao->prevarena == ao->nextarena->prevarena);
			assert(ao->prevarena->nextarena == ao->nextarena);

			ao->prevarena->nextarena = ao;
			if (ao->nextarena != NULL)
				ao->nextarena->prevarena = ao;

			/* Verify that the swaps worked. */
			assert(ao->nextarena == NULL ||
				  nf <= ao->nextarena->nfreepools);
			assert(ao->prevarena == NULL ||
				  nf > ao->prevarena->nfreepools);
			assert(ao->nextarena == NULL ||
				ao->nextarena->prevarena == ao);
			assert((usable_arenas == ao &&
				ao->prevarena == NULL) ||
				ao->prevarena->nextarena == ao);

			UNLOCK();
			return;
		}
		/* Pool was full, so doesn't currently live in any list:
		 * link it to the front of the appropriate usedpools[] list.
		 * This mimics LRU pool usage for new allocations and
		 * targets optimal filling when several pools contain
		 * blocks of the same size class.
		 */
		--pool->ref.count;
		assert(pool->ref.count > 0);	/* else the pool is empty */
		size = pool->szidx;
		next = usedpools[size + size];
		prev = next->prevpool;
		/* insert pool before next:   prev <-> pool <-> next */
		pool->nextpool = next;
		pool->prevpool = prev;
		next->prevpool = pool;
		prev->nextpool = pool;
		UNLOCK();
		return;
	}

	/* We didn't allocate this address. */
	free(p);
}

/* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
	void *bp;
	poolp pool;
	size_t size;

	if (p == NULL)
		return PyObject_Malloc(nbytes);

	pool = POOL_ADDR(p);
	if (Py_ADDRESS_IN_RANGE(p, pool)) {
		/* We're in charge of this block */
		size = INDEX2SIZE(pool->szidx);
		if (nbytes <= size) {
			/* The block is staying the same or shrinking.  If
			 * it's shrinking, there's a tradeoff:  it costs
			 * cycles to copy the block to a smaller size class,
			 * but it wastes memory not to copy it.  The
			 * compromise here is to copy on shrink only if at
			 * least 25% of size can be shaved off.
			 */
			if (4 * nbytes > 3 * size) {
				/* It's the same,
				 * or shrinking and new/old > 3/4.
				 */
				return p;
			}
			size = nbytes;
		}
		bp = PyObject_Malloc(nbytes);
		if (bp != NULL) {
			memcpy(bp, p, size);
			PyObject_Free(p);
		}
		return bp;
	}
	/* We're not managing this block.  If nbytes <=
	 * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
	 * block.  However, if we do, we need to copy the valid data from
	 * the C-managed block to one of our blocks, and there's no portable
	 * way to know how much of the memory space starting at p is valid.
	 * As bug 1185883 pointed out the hard way, it's possible that the
	 * C-managed block is "at the end" of allocated VM space, so that
	 * a memory fault can occur if we try to copy nbytes bytes starting
	 * at p.  Instead we punt:  let C continue to manage this block.
         */
	if (nbytes)
		return realloc(p, nbytes);
	/* C doesn't define the result of realloc(p, 0) (it may or may not
	 * return NULL then), but Python's docs promise that nbytes==0 never
	 * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
	 * to begin with.  Even then, we can't be sure that realloc() won't
	 * return NULL.
	 */
	bp = realloc(p, 1);
	return bp ? bp : p;
}
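
/* An illustrative sketch (hypothetical, never called):  the copy-on-shrink
 * rule applied above.  Returns nonzero if a pymalloc-managed block of
 * `size` bytes would be left in place on a shrink to `nbytes` bytes,
 * i.e. when less than 25% of the block would be saved by moving it.
 */
static int
realloc_keeps_block_example(size_t nbytes, size_t size)
{
	return nbytes <= size && 4 * nbytes > 3 * size;
}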

#else	/* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
	return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
	return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
	PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly;
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static size_t serialno = 0;	/* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
	++serialno;
}

#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
	const uchar *q = (const uchar *)p;
	size_t result = *q++;
	int i;

	for (i = SST; --i > 0; ++q)
		result = (result << 8) | *q;
	return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
	uchar *q = (uchar *)p + SST - 1;
	int i;

	for (i = SST; --i >= 0; --q) {
		*q = (uchar)(n & 0xff);
		n >>= 8;
	}
}
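
/* An illustrative sketch (hypothetical, never called):  read_size_t and
 * write_size_t are inverses of each other, whatever the platform's native
 * byte order, and the encoding really is big-endian.
 */
static void
size_t_roundtrip_example(void)
{
	uchar buf[SST];
	write_size_t(buf, (size_t)0x0102);
	assert(read_size_t(buf) == (size_t)0x0102);
	assert(buf[SST - 1] == 0x02);	/* LSB is stored last:  big-endian */
	assert(buf[SST - 2] == 0x01);
}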

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
	poolp origlist = list;
	assert(target != NULL);
	if (list == NULL)
		return 0;
	do {
		if (target == list)
			return 1;
		list = list->nextpool;
	} while (list != NULL && list != origlist);
	return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif	/* Py_DEBUG */

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian (easier
    to read in a memory dump).
p[S: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/

void *
_PyObject_DebugMalloc(size_t nbytes)
{
	uchar *p;	/* base address of malloc'ed block */
	uchar *tail;	/* p + 2*SST + nbytes == pointer to tail pad bytes */
	size_t total;	/* nbytes + 4*SST */

	bumpserialno();
	total = nbytes + 4*SST;
	if (total < nbytes)
		/* overflow:  can't represent total as a size_t */
		return NULL;

	p = (uchar *)PyObject_Malloc(total);
	if (p == NULL)
		return NULL;

	write_size_t(p, nbytes);
	memset(p + SST, FORBIDDENBYTE, SST);

	if (nbytes > 0)
		memset(p + 2*SST, CLEANBYTE, nbytes);

	tail = p + 2*SST + nbytes;
	memset(tail, FORBIDDENBYTE, SST);
	write_size_t(tail + SST, serialno);

	return p + 2*SST;
}
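
/* An illustrative sketch (hypothetical, never called):  where the
 * decorations described above live, relative to a pointer p handed out by
 * the debug malloc.  This mirrors what _PyObject_DebugCheckAddress verifies.
 */
static void
debug_block_layout_example(const uchar *p)
{
	size_t nbytes = read_size_t(p - 2*SST);	/* original request size */
	const uchar *tail = p + nbytes;		/* just past the user data */
	int i;

	for (i = 1; i <= SST; ++i)
		assert(p[-i] == FORBIDDENBYTE);		/* leading pad */
	for (i = 0; i < SST; ++i)
		assert(tail[i] == FORBIDDENBYTE);	/* trailing pad */
	/* the serial number of the allocation follows the trailing pad */
	assert(read_size_t(tail + SST) <= serialno);
}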

/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
void
_PyObject_DebugFree(void *p)
{
	uchar *q = (uchar *)p - 2*SST;  /* address returned from malloc */
	size_t nbytes;

	if (p == NULL)
		return;
	_PyObject_DebugCheckAddress(p);
	nbytes = read_size_t(q);
	if (nbytes > 0)
		memset(q, DEADBYTE, nbytes);
	PyObject_Free(q);
}

void *
_PyObject_DebugRealloc(void *p, size_t nbytes)
{
	uchar *q = (uchar *)p;
	uchar *tail;
	size_t total;	/* nbytes + 4*SST */
	size_t original_nbytes;
	int i;

	if (p == NULL)
		return _PyObject_DebugMalloc(nbytes);

	_PyObject_DebugCheckAddress(p);
	bumpserialno();
	original_nbytes = read_size_t(q - 2*SST);
	total = nbytes + 4*SST;
	if (total < nbytes)
		/* overflow:  can't represent total as a size_t */
		return NULL;

	if (nbytes < original_nbytes) {
		/* shrinking:  mark old extra memory dead */
		memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
	}

	/* Resize and add decorations. */
	q = (uchar *)PyObject_Realloc(q - 2*SST, total);
	if (q == NULL)
		return NULL;

	write_size_t(q, nbytes);
	for (i = 0; i < SST; ++i)
		assert(q[SST + i] == FORBIDDENBYTE);
	q += 2*SST;
	tail = q + nbytes;
	memset(tail, FORBIDDENBYTE, SST);
	write_size_t(tail + SST, serialno);

	if (nbytes > original_nbytes) {
		/* growing:  mark new extra memory clean */
		memset(q + original_nbytes, CLEANBYTE,
			nbytes - original_nbytes);
	}

	return q;
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 */
void
_PyObject_DebugCheckAddress(const void *p)
1417 1418
{
	const uchar *q = (const uchar *)p;
1419
	char *msg;
1420
	size_t nbytes;
1421
	const uchar *tail;
1422
	int i;
1423

1424
	if (p == NULL) {
1425
		msg = "didn't expect a NULL pointer";
1426 1427
		goto error;
	}
1428

1429 1430 1431 1432
	/* Check the stuff at the start of p first:  if there's underwrite
	 * corruption, the number-of-bytes field may be nuts, and checking
	 * the tail could lead to a segfault then.
	 */
1433
	for (i = SST; i >= 1; --i) {
1434
		if (*(q-i) != FORBIDDENBYTE) {
1435 1436 1437 1438
			msg = "bad leading pad byte";
			goto error;
		}
	}
1439

1440
	nbytes = read_size_t(q - 2*SST);
1441
	tail = q + nbytes;
1442
	for (i = 0; i < SST; ++i) {
1443 1444 1445
		if (tail[i] != FORBIDDENBYTE) {
			msg = "bad trailing pad byte";
			goto error;
1446 1447 1448
		}
	}

1449 1450 1451
	return;

error:
1452
	_PyObject_DebugDumpAddress(p);
1453
	Py_FatalError(msg);
1454 1455
}

/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
	const uchar *q = (const uchar *)p;
	const uchar *tail;
	size_t nbytes, serial;
	int i;
	int ok;

	fprintf(stderr, "Debug memory block at address p=%p:\n", p);
	if (p == NULL)
		return;

	nbytes = read_size_t(q - 2*SST);
	fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
	                "requested\n", nbytes);

	/* In case this is nuts, check the leading pad bytes first. */
	fprintf(stderr, "    The %d pad bytes at p-%d are ", SST, SST);
	ok = 1;
	for (i = 1; i <= SST; ++i) {
		if (*(q-i) != FORBIDDENBYTE) {
			ok = 0;
			break;
		}
	}
	if (ok)
		fputs("FORBIDDENBYTE, as expected.\n", stderr);
	else {
		fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
			FORBIDDENBYTE);
		for (i = SST; i >= 1; --i) {
			const uchar byte = *(q-i);
			fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
			if (byte != FORBIDDENBYTE)
				fputs(" *** OUCH", stderr);
			fputc('\n', stderr);
		}

		fputs("    Because memory is corrupted at the start, the "
		      "count of bytes requested\n"
		      "       may be bogus, and checking the trailing pad "
		      "bytes may segfault.\n", stderr);
	}

	tail = q + nbytes;
	fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
	ok = 1;
	for (i = 0; i < SST; ++i) {
		if (tail[i] != FORBIDDENBYTE) {
			ok = 0;
			break;
		}
	}
	if (ok)
		fputs("FORBIDDENBYTE, as expected.\n", stderr);
	else {
		fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
			FORBIDDENBYTE);
		for (i = 0; i < SST; ++i) {
			const uchar byte = tail[i];
			fprintf(stderr, "        at tail+%d: 0x%02x",
				i, byte);
			if (byte != FORBIDDENBYTE)
				fputs(" *** OUCH", stderr);
			fputc('\n', stderr);
		}
	}

	serial = read_size_t(tail + SST);
	fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
			"u to debug malloc/realloc.\n", serial);

	if (nbytes > 0) {
		i = 0;
		fputs("    Data at p:", stderr);
		/* print up to 8 bytes at the start */
		while (q < tail && i < 8) {
			fprintf(stderr, " %02x", *q);
			++i;
			++q;
		}
		/* and up to 8 at the end */
		if (q < tail) {
			if (tail - q > 8) {
				fputs(" ...", stderr);
				q = tail - 8;
			}
			while (q < tail) {
				fprintf(stderr, " %02x", *q);
				++q;
			}
		}
		fputc('\n', stderr);
	}
}

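/* Illustrative output for a healthy 32-byte block (the address and serial
 * number are made up; SST assumed to be 8):
 *
 *	Debug memory block at address p=0x95eef8:
 *	    32 bytes originally requested
 *	    The 8 pad bytes at p-8 are FORBIDDENBYTE, as expected.
 *	    The 8 pad bytes at tail=0x95ef18 are FORBIDDENBYTE, as expected.
 *	    The block was made by call #42 to debug malloc/realloc.
 *	    Data at p: cb cb cb cb cb cb cb cb ... cb cb cb cb cb cb cb cb
 */
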
static size_t
printone(const char* msg, size_t value)
{
	int i, k;
	char buf[100];
	size_t origvalue = value;

	fputs(msg, stderr);
	for (i = (int)strlen(msg); i < 35; ++i)
		fputc(' ', stderr);
	fputc('=', stderr);

	/* Write the value with commas. */
	i = 22;
	buf[i--] = '\0';
	buf[i--] = '\n';
	k = 3;
	do {
		size_t nextvalue = value / 10;
		uint digit = (uint)(value - nextvalue * 10);
		value = nextvalue;
		buf[i--] = (char)(digit + '0');
		--k;
		if (k == 0 && value && i >= 0) {
			k = 3;
			buf[i--] = ',';
		}
	} while (value && i >= 0);

	while (i >= 0)
		buf[i--] = ' ';
	fputs(buf, stderr);

	return origvalue;
}

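/* For instance (illustrative), printone("# arenas allocated total", 1234567)
 * pads the label out to column 35, then prints the value right-aligned in
 * a 21-character field with thousands separators, roughly:
 *
 *	# arenas allocated total           =            1,234,567
 */
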
/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(void)
{
	uint i;
	const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
	/* # of pools, allocated blocks, and free blocks per class index */
	size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
	size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
	size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
	/* total # of allocated bytes in used and full pools */
	size_t allocated_bytes = 0;
	/* total # of available bytes in used pools */
	size_t available_bytes = 0;
	/* # of free pools + pools not yet carved out of current arena */
	uint numfreepools = 0;
	/* # of bytes for arena alignment padding */
	size_t arena_alignment = 0;
	/* # of bytes in used and full pools used for pool_headers */
	size_t pool_header_bytes = 0;
	/* # of bytes in used and full pools wasted due to quantization,
	 * i.e. the necessarily leftover space at the ends of used and
	 * full pools:  (POOL_SIZE - POOL_OVERHEAD) % size bytes for each
	 * pool of a given size class.
	 */
	size_t quantization = 0;
	/* # of arenas actually allocated. */
	size_t narenas = 0;
	/* running total -- should equal narenas * ARENA_SIZE */
	size_t total;
	char buf[128];

	fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
		SMALL_REQUEST_THRESHOLD, numclasses);

	for (i = 0; i < numclasses; ++i)
		numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

	/* Because full pools aren't linked to from anything, it's easiest
	 * to march over all the arenas.  If we're lucky, most of the memory
	 * will be living in full pools -- would be a shame to miss them.
	 */
	for (i = 0; i < maxarenas; ++i) {
		uint poolsinarena;
		uint j;
		uptr base = arenas[i].address;

		/* Skip arenas which are not allocated. */
		if (arenas[i].address == (uptr)NULL)
			continue;
		narenas += 1;

		poolsinarena = arenas[i].ntotalpools;
		numfreepools += arenas[i].nfreepools;

		/* round up to pool alignment */
		if (base & (uptr)POOL_SIZE_MASK) {
			arena_alignment += POOL_SIZE;
			base &= ~(uptr)POOL_SIZE_MASK;
			base += POOL_SIZE;
		}

		/* visit every pool in the arena */
		assert(base <= (uptr) arenas[i].pool_address);
		for (j = 0;
			    base < (uptr) arenas[i].pool_address;
			    ++j, base += POOL_SIZE) {
			poolp p = (poolp)base;
			const uint sz = p->szidx;
			uint freeblocks;

			if (p->ref.count == 0) {
				/* currently unused */
				assert(pool_is_in_list(p, arenas[i].freepools));
				continue;
			}
			++numpools[sz];
			numblocks[sz] += p->ref.count;
			freeblocks = NUMBLOCKS(sz) - p->ref.count;
			numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
			if (freeblocks > 0)
				assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
		}
	}
	assert(narenas == narenas_currently_allocated);

	fputc('\n', stderr);
	fputs("class   size   num pools   blocks in use  avail blocks\n"
	      "-----   ----   ---------   -------------  ------------\n",
		stderr);

	for (i = 0; i < numclasses; ++i) {
		size_t p = numpools[i];
		size_t b = numblocks[i];
		size_t f = numfreeblocks[i];
		uint size = INDEX2SIZE(i);
		if (p == 0) {
			assert(b == 0 && f == 0);
			continue;
		}
		fprintf(stderr, "%5u %6u "
				"%11" PY_FORMAT_SIZE_T "u "
				"%15" PY_FORMAT_SIZE_T "u "
				"%13" PY_FORMAT_SIZE_T "u\n",
			i, size, p, b, f);
		allocated_bytes += b * size;
		available_bytes += f * size;
		pool_header_bytes += p * POOL_OVERHEAD;
		quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
	}
	fputc('\n', stderr);
	(void)printone("# times object malloc called", serialno);

	(void)printone("# arenas allocated total", ntimes_arena_allocated);
	(void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);
	(void)printone("# arenas highwater mark", narenas_highwater);
	(void)printone("# arenas allocated current", narenas);

	PyOS_snprintf(buf, sizeof(buf),
		"%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
		narenas, ARENA_SIZE);
	(void)printone(buf, narenas * ARENA_SIZE);

	fputc('\n', stderr);

	total = printone("# bytes in allocated blocks", allocated_bytes);
	total += printone("# bytes in available blocks", available_bytes);

	PyOS_snprintf(buf, sizeof(buf),
		"%u unused pools * %d bytes", numfreepools, POOL_SIZE);
	total += printone(buf, (size_t)numfreepools * POOL_SIZE);

	total += printone("# bytes lost to pool headers", pool_header_bytes);
	total += printone("# bytes lost to quantization", quantization);
	total += printone("# bytes lost to arena alignment", arena_alignment);
	(void)printone("Total", total);
}

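/* The report this prints looks roughly like the sketch below (all numbers
 * illustrative; the threshold and class count come from the build-time
 * constants above):
 *
 *	Small block threshold = 256, in 32 size classes.
 *
 *	class   size   num pools   blocks in use  avail blocks
 *	-----   ----   ---------   -------------  ------------
 *	    2     24           1              10           158
 *	  ...
 *	# times object malloc called        =                  1,042
 */
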
#endif	/* PYMALLOC_DEBUG */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it since the definition is
 * after the reference.
 */
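/* A sketch of why the arithmetic below is safe even for wild pointers:
 * the subtraction is done in unsigned (uptr) arithmetic, so when P sits
 * below the arena base the difference wraps around to a huge value and
 * fails the "< ARENA_SIZE" test.  For example, with a 32-bit uptr:
 *
 *	(uptr)0x1000 - (uptr)0x2000 == 0xfffff000, which is >= ARENA_SIZE
 *
 * Since pool->arenaindex may be garbage when P was not allocated by
 * pymalloc, it is range-checked against maxarenas before arenas[] is
 * indexed; && evaluates the checks strictly left to right.
 */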
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
	return pool->arenaindex < maxarenas &&
	       (uptr)P - arenas[pool->arenaindex].address < (uptr)ARENA_SIZE &&
	       arenas[pool->arenaindex].address != 0;
}
#endif