/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (e.g. ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

#include "Python.h"		/* size_t, NULL and the PyCore_ allocator macros */
#include <string.h>		/* memcpy */

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on prior art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *    "Dynamic Storage Allocation: A Survey and Critical Review",
 *    in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */		/* disable mem limit checks  */
#define WITH_MALLOC_HOOKS		/* for profiling & debugging */

/*==========================================================================*/

/*
 * Public functions exported by this allocator.
 *
 * -- Define and use these names in your code to obtain or release memory --
 */
#define _THIS_MALLOC		PyCore_OBJECT_MALLOC_FUNC
#define _THIS_CALLOC		/* unused */
#define _THIS_REALLOC		PyCore_OBJECT_REALLOC_FUNC
#define _THIS_FREE		PyCore_OBJECT_FREE_FUNC

/*
 * Underlying allocator's functions called by this allocator.
 * The underlying allocator is usually the one which comes with libc.
 *
 * -- Don't use these functions in your code (to avoid mixing allocators) --
 *
 * Redefine these __only__ if you are using a 3rd party general purpose
 * allocator which exports functions with names _other_ than the standard
 * malloc, calloc, realloc, free.
 */
#define _SYSTEM_MALLOC		PyCore_MALLOC_FUNC
#define _SYSTEM_CALLOC		/* unused */
#define _SYSTEM_REALLOC		PyCore_REALLOC_FUNC
#define _SYSTEM_FREE		PyCore_FREE_FUNC

/*
 * If malloc hooks are needed, names of the hooks' set & fetch
 * functions exported by this allocator.
 */
#ifdef WITH_MALLOC_HOOKS
#define _SET_HOOKS		_PyCore_ObjectMalloc_SetHooks
#define _FETCH_HOOKS		_PyCore_ObjectMalloc_FetchHooks
#endif

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *    
 * Small requests are grouped in size classes spaced 8 bytes apart, to
 * satisfy the required alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list into pools and dynamically share
 * the reserved space between all free lists. This technique is quite
 * efficient for memory-intensive programs that mainly allocate small blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *       9-16                    16                       1
 *      17-24                    24                       2
 *      25-32                    32                       3
 *      33-40                    40                       4
 *      41-48                    48                       5
 *      49-56                    56                       6
 *      57-64                    64                       7
 *      65-72                    72                       8
 *        ...                   ...                     ...
 *    241-248                   248                      30
 *    249-256                   256                      31
 *
 *      0, 257 and up: routed to the underlying allocator.
 */
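
/* -- example, not used --
   A sketch of the size class mapping tabulated above, mirroring the shift
   arithmetic used by _THIS_MALLOC and _THIS_REALLOC further below
   (ALIGNMENT_SHIFT is defined in the tunable settings section):

static uint
size_class_of(size_t nbytes)
{
	return (uint )(nbytes - 1) >> ALIGNMENT_SHIFT;
}

static uint
block_size_of(uint szidx)
{
	return (szidx + 1) << ALIGNMENT_SHIFT;
}
*/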

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */

#define ALIGNMENT		8		/* must be 2^N */
#define ALIGNMENT_SHIFT		3
#define ALIGNMENT_MASK		(ALIGNMENT - 1)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to be served from the preallocated memory pools. You can
 * tune this value according to your application's behaviour and memory needs.
 *
 * The following invariants must hold:
 *	1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *	2) SMALL_REQUEST_THRESHOLD == N * ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */

/*
 * For Python compiled on systems with 32 bit pointers and integers,
 * a value of 64 (= 8 * 8) is a reasonable speed/space tradeoff for
 * the object allocator. To adjust this threshold automatically for
 * systems with 64 bit pointers, we make this setting depend on a
 * Python-specific slot size unit = sizeof(long) + sizeof(void *),
 * which is expected to be 8, 12 or 16 bytes.
 */

#define _PYOBJECT_THRESHOLD	((SIZEOF_LONG + SIZEOF_VOID_P) * ALIGNMENT)

#define SMALL_REQUEST_THRESHOLD	_PYOBJECT_THRESHOLD /* must be N * ALIGNMENT */

#define NB_SMALL_SIZE_CLASSES	(SMALL_REQUEST_THRESHOLD / ALIGNMENT)
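
/*
 * Worked example of the above: on a system with 32 bit longs and
 * pointers, the slot size unit is 4 + 4 = 8, so
 * SMALL_REQUEST_THRESHOLD == 8 * 8 == 64 and NB_SMALL_SIZE_CLASSES == 8.
 * On a typical LP64 system the unit is 8 + 8 = 16, giving a 128 byte
 * threshold and 16 size classes.
 */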

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.
 */

#define SYSTEM_PAGE_SIZE	(4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK	(SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */

#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT	(64 * 1024 * 1024)	/* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this
 * mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually just an address range reservation for <Big> bytes; pages within
 * this space consume physical memory only once they are actually
 * referenced. So malloc'ing big blocks and not using them does not mean
 * "wasting memory". It's addressable range wastage...
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */

#define ARENA_SIZE		(256 * 1024 - SYSTEM_PAGE_SIZE)	/* 256k - 1p */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS		(SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k, possibly 8k.
 */

#define POOL_SIZE		SYSTEM_PAGE_SIZE	/* must be 2^N */
#define POOL_SIZE_MASK		SYSTEM_PAGE_SIZE_MASK
#define POOL_MAGIC		0x74D3A651		/* authentication id */

#define ARENA_NB_POOLS		(ARENA_SIZE / POOL_SIZE)
#define ARENA_NB_PAGES		(ARENA_SIZE / SYSTEM_PAGE_SIZE)
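
/*
 * With the defaults above, ARENA_SIZE is 256K less one 4K page, so each
 * arena provides ARENA_NB_POOLS == 63 pools of 4K each.
 */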

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized by the interpreter's global lock, so
 * object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)	/* simple lock declaration		*/
#define SIMPLELOCK_INIT(lock)	/* allocate (if needed) and initialize	*/
#define SIMPLELOCK_FINI(lock)	/* free/destroy an existing lock 	*/
#define SIMPLELOCK_LOCK(lock)	/* acquire released lock */
#define SIMPLELOCK_UNLOCK(lock)	/* release acquired lock */
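
/*
 * If real locking were ever needed, a pthreads instantiation of the
 * macros above might look like this (a sketch, not used here):
 *
 * #include <pthread.h>
 * #define SIMPLELOCK_DECL(lock)	static pthread_mutex_t lock
 * #define SIMPLELOCK_INIT(lock)	pthread_mutex_init(&(lock), NULL)
 * #define SIMPLELOCK_FINI(lock)	pthread_mutex_destroy(&(lock))
 * #define SIMPLELOCK_LOCK(lock)	pthread_mutex_lock(&(lock))
 * #define SIMPLELOCK_UNLOCK(lock)	pthread_mutex_unlock(&(lock))
 */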

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */

#undef  uchar
#define uchar			unsigned char	/* assuming == 8 bits  */

#undef  ushort
#define ushort			unsigned short	/* assuming >= 16 bits */

#undef  uint
#define uint			unsigned int	/* assuming >= 16 bits */

#undef  ulong
#define ulong			unsigned long	/* assuming >= 32 bits */

#undef  off_t
#define off_t 			uint	/* 16 bits <= off_t <= 64 bits */

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks */
struct pool_header {
	union { block *_padding;
		uint count; } ref;	/* number of allocated blocks    */
	block *freeblock;		/* pool's free list head         */
	struct pool_header *nextpool;	/* next pool of this size class  */
	struct pool_header *prevpool;	/* previous pool       ""        */
	struct pool_header *pooladdr;	/* pool address (always aligned) */
	uint magic;			/* pool magic number		 */
	uint szidx;			/* block size class index	 */
	uint capacity;			/* pool capacity in # of blocks  */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)		(((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD		ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX		0xffff	/* size class of newly cached pools */
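
/* -- example, not used --
   A sketch of how _THIS_FREE and _THIS_REALLOC below recover a block's
   pool header: pools are POOL_SIZE-aligned, so masking off the low-order
   bits of a block's address yields the pool's base address. The magic
   number and pooladdr field then authenticate it as one of our pools.

static poolp
pool_of(void *p)
{
	return (poolp )((block *)p - ((off_t )p & POOL_SIZE_MASK));
}
*/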

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock);
#define LOCK()		SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()	SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()	SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()	SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- doubly linked lists of partially used pools
 */
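/*
 * PTA(x) plays a dirty trick here: it addresses usedpools[2*x] as if it
 * were the nextpool field of a struct pool_header, by backing up over
 * the two pointer-sized fields (ref and freeblock) that precede nextpool
 * in the header. Each pair of array slots thus serves as the header of a
 * circular list for one size class, without paying for full pool_header
 * sentinels; an empty class is recognized by pool == pool->nextpool.
 */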
#define PTA(x)	((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)	PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
	PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
	, PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
	, PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
	, PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
	, PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
	, PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
	, PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
	, PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};

/*
 * Free (cached) pools
 */
static poolp freepools = NULL;		/* free list for cached pools */

/*
 * Arenas
 */
static uint arenacnt = 0;		/* number of allocated arenas */
static uint watermark = ARENA_NB_POOLS;	/* number of pools carved off the
					   current arena; starts at the max
					   to force allocating the 1st arena */
static block *arenalist = NULL;		/* list of allocated arenas */
static block *arenabase = NULL;		/* free space start address in
					   current arena */

/*
 * Hooks
 */
#ifdef WITH_MALLOC_HOOKS
static void *(*malloc_hook)(size_t) = NULL;
static void *(*calloc_hook)(size_t, size_t) = NULL;
static void *(*realloc_hook)(void *, size_t) = NULL;
static void (*free_hook)(void *) = NULL;
#endif /* WITH_MALLOC_HOOKS */

/*==========================================================================*/

/* malloc */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases and
 * improves branch prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

void *
_THIS_MALLOC(size_t nbytes)
{
	block *bp;
	poolp pool;
	poolp next;
	uint size;

#ifdef WITH_MALLOC_HOOKS	
	if (malloc_hook != NULL)
		return (*malloc_hook)(nbytes);
#endif

	/*
	 * This implicitly redirects malloc(0): nbytes is unsigned, so
	 * (nbytes - 1) wraps around to a huge value and fails the test.
	 */
	if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
		LOCK();
		/*
		 * Most frequent paths first
		 */
		size = (uint )(nbytes - 1) >> ALIGNMENT_SHIFT;
		pool = usedpools[size + size];
		if (pool != pool->nextpool) {
			/*
			 * There is a used pool for this size class.
			 * Pick up the head block of its free list.
			 */
			++pool->ref.count;
			bp = pool->freeblock;
			if ((pool->freeblock = *(block **)bp) != NULL) {
				UNLOCK();
				return (void *)bp;
			}
			/*
			 * Reached the end of the free list, try to extend it
			 */
			if (pool->ref.count < pool->capacity) {
				/*
				 * There is room for another block
				 */
				size++;
				size <<= ALIGNMENT_SHIFT; /* block size */
				pool->freeblock = (block *)pool +
						  POOL_OVERHEAD +
						  pool->ref.count * size;
				*(block **)(pool->freeblock) = NULL;
				UNLOCK();
				return (void *)bp;
			}
			/*
			 * Pool is full, unlink from used pools
			 */
			next = pool->nextpool;
			pool = pool->prevpool;
			next->prevpool = pool;
			pool->nextpool = next;
			UNLOCK();
			return (void *)bp;
		}
		/*
		 * Try to get a cached free pool
		 */
		pool = freepools;
		if (pool != NULL) {
			/*
			 * Unlink from cached pools
			 */
			freepools = pool->nextpool;
		init_pool:
			/*
			 * Frontlink to used pools
			 */
			next = usedpools[size + size]; /* == prev */
			pool->nextpool = next;
			pool->prevpool = next;
			next->nextpool = pool;
			next->prevpool = pool;
			pool->ref.count = 1;
			if (pool->szidx == size) {
				/*
				 * Luckily, this pool last contained blocks
				 * of the same size class, so its header
				 * and free list are already initialized.
				 */
				bp = pool->freeblock;
				pool->freeblock = *(block **)bp;
				UNLOCK();
				return (void *)bp;
			}
			/*
			 * Initialize the pool header and free list
			 * then return the first block.
			 */
			pool->szidx = size;
			size++;
			size <<= ALIGNMENT_SHIFT; /* block size */
			bp = (block *)pool + POOL_OVERHEAD;
			pool->freeblock = bp + size;
			*(block **)(pool->freeblock) = NULL;
			pool->capacity = (POOL_SIZE - POOL_OVERHEAD) / size;
			UNLOCK();
			return (void *)bp;
		}
		/*
		 * Allocate new pool
		 */
		if (watermark < ARENA_NB_POOLS) {
			/* commit malloc(POOL_SIZE) from the current arena */
		commit_pool:
			watermark++;
			pool = (poolp )arenabase;
			arenabase += POOL_SIZE;
			pool->pooladdr = pool;
			pool->magic = (uint )POOL_MAGIC;
			pool->szidx = DUMMY_SIZE_IDX;
			goto init_pool;
		}
		/*
		 * Allocate new arena
		 */
#ifdef WITH_MEMORY_LIMITS
		if (arenacnt >= MAX_ARENAS) {
			UNLOCK();
			goto redirect;
		}
#endif
		/*
		 * With malloc, we can't avoid losing one page of address
		 * space per arena, due to the required alignment on page
		 * boundaries.
		 */
		bp = (block *)_SYSTEM_MALLOC(ARENA_SIZE + SYSTEM_PAGE_SIZE);
		if (bp == NULL) {
			UNLOCK();
			goto redirect;
		}
		/* 
		 * Keep a reference in the list of allocated arenas. We might
		 * want to release (some of) them in the future. The first
		 * word is never used, no matter whether the returned address
		 * is page-aligned or not, so we safely store a pointer in it.
		 */
		*(block **)bp = arenalist;
		arenalist = bp;
		arenacnt++;
		watermark = 0;
		/* Round up to the next page boundary */
		arenabase = bp + (SYSTEM_PAGE_SIZE -
				  ((off_t )bp & SYSTEM_PAGE_SIZE_MASK));
		goto commit_pool;
	}

	/* The small block allocator ends here. */

	redirect:
	
	/*
	 * Redirect the original request to the underlying (libc) allocator.
	 * We jump here on bigger requests, on error in the code above (as a
	 * last chance to serve the request) or when the max memory limit
	 * has been reached.
	 */
	return (void *)_SYSTEM_MALLOC(nbytes);
}

/* free */

void
_THIS_FREE(void *p)
{
	poolp pool;
	poolp next, prev;
	uint size;
	off_t offset;

#ifdef WITH_MALLOC_HOOKS
	if (free_hook != NULL) {
		(*free_hook)(p);
		return;
	}
#endif

	if (p == NULL)	/* free(NULL) has no effect */
		return;

	offset = (off_t )p & POOL_SIZE_MASK;
	pool = (poolp )((block *)p - offset);
	if (pool->pooladdr != pool || pool->magic != (uint )POOL_MAGIC) {
		_SYSTEM_FREE(p);
		return;
	}

	LOCK();
	/*
	 * At this point, the pool is not empty: it holds at least the
	 * block being freed, so it is either full or in usedpools.
	 */
	if ((*(block **)p = pool->freeblock) == NULL) {
		/*
		 * Pool was full
		 */
		pool->freeblock = (block *)p;
		--pool->ref.count;
		/*
		 * Frontlink to used pools
		 * This mimics LRU pool usage for new allocations and
		 * targets optimal filling when several pools contain
		 * blocks of the same size class.
		 */
		size = pool->szidx;
		next = usedpools[size + size];
		prev = next->prevpool;
		pool->nextpool = next;
		pool->prevpool = prev;
		next->prevpool = pool;
		prev->nextpool = pool;
		UNLOCK();
		return;
	}
	/*
	 * Pool was not full
	 */
	pool->freeblock = (block *)p;
	if (--pool->ref.count != 0) {
		UNLOCK();
		return;
	}
	/*
	 * Pool is now empty, unlink from used pools
	 */
	next = pool->nextpool;
	prev = pool->prevpool;
	next->prevpool = prev;
	prev->nextpool = next;
	/*
	 * Frontlink to free pools
	 * The most recently freed pool is reused first, so pools freed
	 * long ago are reused last (being unreferenced, they have
	 * perhaps been paged out in the meantime).
	 */
	pool->nextpool = freepools;
	freepools = pool;
	UNLOCK();
	return;
}

/* realloc */

void *
_THIS_REALLOC(void *p, size_t nbytes)
{
	block *bp;
	poolp pool;
	uint size;

#ifdef WITH_MALLOC_HOOKS
	if (realloc_hook != NULL)
		return (*realloc_hook)(p, nbytes);
#endif

	if (p == NULL)
		return _THIS_MALLOC(nbytes);

	/* realloc(p, 0) on big blocks is redirected. */
	pool = (poolp )((block *)p - ((off_t )p & POOL_SIZE_MASK));
	if (pool->pooladdr != pool || pool->magic != (uint )POOL_MAGIC) {
		/* We haven't allocated this block */
		if (nbytes != 0 && nbytes <= SMALL_REQUEST_THRESHOLD) {
			/* Small request: migrate the data into a pool.
			   Note that we may copy more bytes than the
			   original block holds, since its true size is
			   unknown here. */
			size = nbytes;
			goto malloc_copy_free;
		}
		bp = (block *)_SYSTEM_REALLOC(p, nbytes);
	}
	else {
		/* We're in charge of this block */
		size = (pool->szidx + 1) << ALIGNMENT_SHIFT; /* block size */
		if (size >= nbytes) {
			/* Don't bother shrinking when a smaller size is
			   requested, except for realloc(p, 0), which is
			   treated as free(p) and returns NULL */
			if (nbytes == 0) {
				_THIS_FREE(p);
				bp = NULL;
			}
			else
				bp = (block *)p;
		}
		else {

		malloc_copy_free:

			bp = (block *)_THIS_MALLOC(nbytes);
			if (bp != NULL) {
				memcpy(bp, p, size);
				_THIS_FREE(p);
			}
		}
	}
	return (void *)bp;
}

/* calloc */

/* -- unused --
void *
_THIS_CALLOC(size_t nbel, size_t elsz)
{
        void *p;
	size_t nbytes;

#ifdef WITH_MALLOC_HOOKS
	if (calloc_hook != NULL)
		return (*calloc_hook)(nbel, elsz);
#endif

	nbytes = nbel * elsz;
	p = _THIS_MALLOC(nbytes);
	if (p != NULL)
		memset(p, 0, nbytes);
	return p;
}
*/

/*==========================================================================*/

/*
 * Hooks
 */

#ifdef WITH_MALLOC_HOOKS

void
_SET_HOOKS( void *(*malloc_func)(size_t),
	    void *(*calloc_func)(size_t, size_t),
	    void *(*realloc_func)(void *, size_t),
	    void (*free_func)(void *) )
{
	LOCK();
	malloc_hook = malloc_func;
	calloc_hook = calloc_func;
	realloc_hook = realloc_func;
	free_hook = free_func;
	UNLOCK();
}

void
_FETCH_HOOKS( void *(**malloc_funcp)(size_t),
	      void *(**calloc_funcp)(size_t, size_t),
              void *(**realloc_funcp)(void *, size_t),
              void (**free_funcp)(void *) )
{
	LOCK();
	*malloc_funcp = malloc_hook;
	*calloc_funcp = calloc_hook;
	*realloc_funcp = realloc_hook;
	*free_funcp = free_hook;
	UNLOCK();
}
#endif /* WITH_MALLOC_HOOKS */
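
/* -- example, not used --
   A sketch of a profiling client for the hooks above. Each wrapper
   uninstalls the hooks, calls the real allocator, then reinstalls them,
   which avoids endless recursion through the hook; realloc and free
   wrappers would be analogous.

static ulong malloc_count;

static void *counting_malloc(size_t);

static void
install_counting_hooks(void)
{
	_SET_HOOKS(counting_malloc, NULL, NULL, NULL);
}

static void *
counting_malloc(size_t nbytes)
{
	void *p;

	malloc_count++;
	_SET_HOOKS(NULL, NULL, NULL, NULL);
	p = _THIS_MALLOC(nbytes);
	install_counting_hooks();
	return p;
}
*/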