author      mig <mig>    2013-01-03 04:14:21 (GMT)
committer   mig <mig>    2013-01-03 04:14:21 (GMT)
commit      3d5e70d5dc85e8fd518ed7acc4f2b3060020ca6f (patch)
tree        9d8b2f63dffe12a21a9ff57dad59251a30109690
parent      a891f1e0e8c912d7336d290cba7edf364b0af03a (diff)
*BROKEN* First steps towards choosing allocators at load time
-rw-r--r--  generic/tclAlloc.c        1015
-rw-r--r--  generic/tclAlloc.h          64
-rw-r--r--  generic/tclAllocNative.c    52
-rw-r--r--  generic/tclAllocZippy.c    852
-rw-r--r--  generic/tclBasic.c           1
-rw-r--r--  generic/tclExecute.c         1
-rw-r--r--  generic/tclInt.h            72
-rw-r--r--  generic/tclListObj.c         8
-rw-r--r--  generic/tclObj.c             2
-rw-r--r--  unix/Makefile.in             9
10 files changed, 1059 insertions(+), 1017 deletions(-)
diff --git a/generic/tclAlloc.c b/generic/tclAlloc.c
index 0ce34a5..6249953 100644
--- a/generic/tclAlloc.c
+++ b/generic/tclAlloc.c
@@ -1,207 +1,30 @@
/*
* tclAlloc.c --
*
- * This is a very flexible storage allocator for Tcl, for use with or
- * without threads. Depending on the value of TCL_ALLOCATOR it compiles
- * as:
+ * This is the generic part of the Tcl allocator. It handles the
+ * freeObjLists and defines which main allocator will be used.
*
- * (1) aPURIFY
- * A purify build, use the native malloc for all allocs including
- * Tcl_Objs
- *
- * (2) aNATIVE
- * Use the native malloc and a per-thread Tcl_Obj pool, with
- * inter-thread recycling of objects.
- * TODO: in this case build ZIPPY as a preloadable malloc-replacement?
- *
- * (3) aZIPPY
- * use the ex-tclThreadAlloc, essentially aolserver's fast threaded
- * allocator. Mods with respect to the original:
- * - split blocks in the shared pool before mallocing again for
- * improved cache usage
- * - stats and Tcl_GetMemoryInfo disabled per default, enable with
- * -DZIPPY_STATS
- * - adapt for unthreaded usage as replacement of the ex tclAlloc
- * - -DHAVE_FAST_TSD: use fast TSD via __thread where available
- * - (TODO!) build zippy as a pre-loadable library to use with a
- * native build as a malloc replacement. Difficulty is to make it
- * portable (easy enough on modern elf/unix, to be researched on
- * win and mac . This would be the best option, instead of
- * MULTI. It could be built in two versions (perf, debug/stats)
- *
- * (4) aMULTI
- * all of the above, selectable at startup with an env var. This
- * build will be slightly slower than the specific builds above.
- *
- * All variants can be built for both threaded and unthreaded Tcl.
- *
- * The Initial Developer of the Original Code is America Online, Inc.
- * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
- *
- * Copyright (c) 2008-2011 by Miguel Sofer. All rights reserved.
+ * Copyright (c) 2013 by Miguel Sofer. All rights reserved.
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
*/
#include "tclInt.h"
+#include "tclAlloc.h"
-#undef TclpAlloc
-#undef TclpRealloc
-#undef TclpFree
-#undef TclSmallAlloc
-#undef TclSmallFree
-
-#if !USE_ZIPPY
/*
- * Not much of this file is needed, most things are dealt with in the
- * macros. Just shunt the allocators for use by the library, the core
- * never calls this.
- *
- * This is all that is needed for a TCL_ALLOC_PURIFY build, a native build
- * needs the Tcl_Obj pools too.
+ * Parameters for the per-thread Tcl_Obj cache:
+ * - if >NOBJHIGH free objects, move some to the shared cache
+ * - if no objects are available, create NOBJALLOC of them
*/
-
-char *
-TclpAlloc(
- unsigned int reqSize)
-{
- return malloc(reqSize);
-}
-
-char *
-TclpRealloc(
- char *ptr,
- unsigned int reqSize)
-{
- return realloc(ptr, reqSize);
-}
-void
-TclpFree(
- char *ptr)
-{
- free(ptr);
-}
-
-#endif /* !USE_ZIPPY, this is end of code for aPURIFY */
-
-
-#if USE_OBJQ
-
-/*
- * Parameters for the per-thread Tcl_Obj cache
- * Actual definition of NOBJHIGH moved to tclInt.h to be used in macros
- */
-
-#define NOBJHIGH ALLOC_NOBJHIGH
+#define NOBJHIGH 1200
#define NOBJALLOC ((NOBJHIGH*2)/3)
/*
- * Advance some defs that are relevant for ZIPPY an MULTI, in order to use
- * them in macro and struct definitions
- */
-
-#if USE_ZIPPY
-/*
- * This macro is used to properly align the memory allocated by Tcl, giving
- * the same alignment as the native malloc.
- */
-
-#if defined(__APPLE__)
-#define TCL_ALLOCALIGN 16
-#else
-#define TCL_ALLOCALIGN (2*sizeof(void *))
-#endif
-
-#define ALIGN(x) (((x) + TCL_ALLOCALIGN - 1) & ~(TCL_ALLOCALIGN - 1))
-
-/*
- * The following struct stores accounting information for each block including
- * two small magic numbers and a bucket number when in use or a next pointer
- * when free. The original requested size (not including the Block overhead)
- * is also maintained.
- */
-
-typedef struct Block {
- union {
- struct Block *next; /* Next in free list. */
- struct {
- unsigned char magic1; /* First magic number. */
- unsigned char bucket; /* Bucket block allocated from. */
- unsigned char inUse; /* Block memory currently in use, see
- * details in TclpAlloc/Realloc. */
- unsigned char magic2; /* Second magic number. */
- } s;
- } u;
-} Block;
-
-#define OFFSET ALIGN(sizeof(Block))
-
-#define nextBlock u.next
-#define sourceBucket u.s.bucket
-#define magicNum1 u.s.magic1
-#define magicNum2 u.s.magic2
-#define used u.s.inUse
-#define MAGIC 0xEF
-
-/*
- * The following defines the minimum and maximum block sizes and the number
- * of buckets in the bucket cache.
- * 32b 64b Apple-32b(?)
- * TCL_ALLOCALIGN 8 16 16
- * sizeof(Block) 8 16 16
- * OFFSET 8 16 16
- * MINALLOC 16 32 32
- * NBUCKETS 11 10 10
- * MAXALLOC 16384 16384 16384
- * small allocs 1024 512 1024
- * at a time
- */
-
-#define MINALLOC ALIGN(OFFSET+8)
-#define NBUCKETS (11 - (MINALLOC >> 5))
-#define MAXALLOC (MINALLOC << (NBUCKETS - 1))
-
-/*
- * The following structure defines a bucket of blocks, optionally with various
- * accounting and statistics information.
- */
-
-typedef struct Bucket {
- Block *firstPtr; /* First block available */
- long numFree; /* Number of blocks available */
-#ifdef ZIPPY_STATS
- /* All fields below for accounting only */
-
- long numRemoves; /* Number of removes from bucket */
- long numInserts; /* Number of inserts into bucket */
- long numWaits; /* Number of waits to acquire a lock */
- long numLocks; /* Number of locks acquired */
-#endif
-} Bucket;
-
-/*
- * The following array specifies various per-bucket limits and locks. The
- * values are statically initialized to avoid calculating them repeatedly.
- */
-
-static struct {
- size_t blockSize; /* Bucket blocksize. */
- int shift;
-#if defined(TCL_THREADS)
- int maxBlocks; /* Max blocks before move to share. */
- int numMove; /* Num blocks to move to share. */
- Tcl_Mutex *lockPtr; /* Share bucket lock. */
-#endif
-} bucketInfo[NBUCKETS];
-
-#endif /* Advanced USE_ZIPPY definitions, back to common code */
-
-
-/*
- * The Tcl_Obj per-thread cache, used by aNATIVE, aZIPPY and aMULTI.
+ * The Tcl_Obj per-thread cache.
*/
typedef struct Cache {
@@ -210,12 +33,7 @@ typedef struct Cache {
#if defined(TCL_THREADS)
struct Cache *nextPtr; /* Linked list of cache entries */
#endif
-#if USE_ZIPPY
-#if defined(TCL_THREADS)
- Tcl_ThreadId owner; /* Which thread's cache is this? */
-#endif
- Bucket buckets[NBUCKETS]; /* The buckets for this thread */
-#endif /* USE_ZIPPY, ie TCL_ALLOCATOR != aNATIVE */
+ void *allocCachePtr;
} Cache;
static Cache sharedCache;
@@ -255,28 +73,6 @@ static __thread Cache *tcachePtr;
(cachePtr) = (&sharedCache)
#endif /* THREADS */
-#if USE_ZIPPY
-static void InitBucketInfo(void);
-static inline char * Block2Ptr(Block *blockPtr,
- int bucket, unsigned int reqSize);
-static inline Block * Ptr2Block(char *ptr);
-
-static int GetBlocks(Cache *cachePtr, int bucket);
-
-#if (TCL_THREADS)
-static void PutBlocks(Cache *cachePtr, int bucket, int numMove);
-static void LockBucket(Cache *cachePtr, int bucket);
-static void UnlockBucket(Cache *cachePtr, int bucket);
-#else
-#define PutBlocks(cachePtr, bucket, numMove)
-#endif
-
-#if TCL_ALLOCATOR == aMULTI
-static int allocator;
-static void ChooseAllocator();
-#endif
-
-#endif /* USE_ZIPPY */
/*
*----------------------------------------------------------------------
@@ -310,13 +106,11 @@ GetCache(void)
if (cachePtr == NULL) {
Tcl_Panic("alloc: could not allocate new cache");
}
+ cachePtr->allocCachePtr = NULL;
Tcl_MutexLock(listLockPtr);
cachePtr->nextPtr = firstCachePtr;
firstCachePtr = cachePtr;
Tcl_MutexUnlock(listLockPtr);
-#if USE_ZIPPY && defined(ZIPPY_STATS)
- cachePtr->owner = Tcl_GetCurrentThread();
-#endif
TclpSetAllocCache(cachePtr);
}
return cachePtr;
@@ -324,6 +118,39 @@ GetCache(void)
#endif
/*
+ * TclSetSharedAllocCache, TclSetAllocCache, TclGetAllocCache
+ *
+ * These are utility functions for the loadable allocator.
+ */
+
+void
+TclSetSharedAllocCache(
+ void *allocCachePtr)
+{
+ sharedPtr->allocCachePtr = allocCachePtr;
+}
+
+void
+TclSetAllocCache(
+ void *allocCachePtr)
+{
+ Cache *cachePtr;
+
+ GETCACHE(cachePtr);
+ cachePtr->allocCachePtr = allocCachePtr;
+}
+
+void *
+TclGetAllocCache(void)
+{
+ Cache *cachePtr;
+
+ GETCACHE(cachePtr);
+ return cachePtr->allocCachePtr;
+}
+
+
+/*
*-------------------------------------------------------------------------
*
* TclInitAlloc --
@@ -334,43 +161,28 @@ GetCache(void)
* None.
*
* Side effects:
- * Initialize the mutex used to serialize allocations.
+ * Initialize the mutex used to serialize obj allocations.
+ * Call the allocator-specific initialization.
*
*-------------------------------------------------------------------------
*/
-#if USE_ZIPPY
-static void
-InitBucketInfo ()
-{
- int i;
- int shift = 0;
-
- for (i = 0; i < NBUCKETS; ++i) {
- bucketInfo[i].blockSize = MINALLOC << i;
- while (((bucketInfo[i].blockSize -OFFSET) >> shift) > 255) {
- ++shift;
- }
- bucketInfo[i].shift = shift;
-#if defined(TCL_THREADS)
- /* TODO: clearer logic? Change move to keep? */
- bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
- bucketInfo[i].numMove = i < NBUCKETS - 1 ?
- 1 << (NBUCKETS - 2 - i) : 1;
- bucketInfo[i].lockPtr = TclpNewAllocMutex();
-#endif
- }
-}
-#endif
-
void
TclInitAlloc(void)
{
+#ifdef PURIFY
+ TCL_PURIFY = 1;
+#else
+ TCL_PURIFY = (getenv("TCL_PURIFY") != NULL);
+#endif
+ TCL_THREADED = 0;
+
/*
* Set the params for the correct allocator
*/
#if defined(TCL_THREADS)
+ TCL_THREADED = 1;
if (listLockPtr == NULL) {
Tcl_Mutex *initLockPtr;
initLockPtr = Tcl_GetAllocMutex();
@@ -378,19 +190,13 @@ TclInitAlloc(void)
if (listLockPtr == NULL) {
listLockPtr = TclpNewAllocMutex();
objLockPtr = TclpNewAllocMutex();
-#if USE_ZIPPY
- InitBucketInfo();
-#endif
+ TclXpInitAlloc();
}
Tcl_MutexUnlock(initLockPtr);
}
-#elif USE_ZIPPY
- InitBucketInfo();
+#else
+ TclXpInitAlloc();
#endif /* THREADS */
-
-#if TCL_ALLOCATOR == aMULTI
- ChooseAllocator();
-#endif
}
/*
@@ -405,7 +211,7 @@ TclInitAlloc(void)
* None.
*
* Side effects:
- * None.
+ * Call the allocator-specific finalization.
*
*----------------------------------------------------------------------
*/
@@ -415,15 +221,6 @@ TclFinalizeAlloc(void)
{
#if defined(TCL_THREADS)
-#if USE_ZIPPY
- unsigned int i;
-
- for (i = 0; i < NBUCKETS; ++i) {
- TclpFreeAllocMutex(bucketInfo[i].lockPtr);
- bucketInfo[i].lockPtr = NULL;
- }
-#endif
-
TclpFreeAllocMutex(objLockPtr);
objLockPtr = NULL;
@@ -432,6 +229,7 @@ TclFinalizeAlloc(void)
TclpFreeAllocCache(NULL);
#endif
+ TclXpFinalizeAlloc();
}
/*
@@ -457,19 +255,6 @@ TclFreeAllocCache(
{
Cache *cachePtr = arg;
Cache **nextPtrPtr;
-#if USE_ZIPPY
- register unsigned int bucket;
-
- /*
- * Flush blocks.
- */
-
- for (bucket = 0; bucket < NBUCKETS; ++bucket) {
- if (cachePtr->buckets[bucket].numFree > 0) {
- PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree);
- }
- }
-#endif
/*
* Flush objs.
@@ -482,6 +267,12 @@ TclFreeAllocCache(
}
/*
+ * Flush the external allocator cache
+ */
+
+ TclXpFreeAllocCache(cachePtr->allocCachePtr);
+
+ /*
* Remove from pool list.
*/
@@ -536,16 +327,18 @@ TclSmallAlloc(void)
return objPtr;
}
-#if TCL_ALLOCATOR == aMULTI
/*
* Do it AFTER looking at the queue, so that it doesn't slow down
* non-purify small allocs.
*/
- if (allocator == aPURIFY) {
- return (Tcl_Obj *) malloc(sizeof(Tcl_Obj));
+ if (TCL_PURIFY) {
+ Tcl_Obj *objPtr = (Tcl_Obj *) TclpAlloc(sizeof(Tcl_Obj));
+ if (objPtr == NULL) {
+ Tcl_Panic("alloc: could not allocate a new object");
+ }
+ return objPtr;
}
-#endif
/*
* Get this thread's obj list structure and move or allocate new objs if
@@ -603,12 +396,10 @@ TclSmallFree(
Cache *cachePtr;
Tcl_Obj *objPtr = ptr;
-#if TCL_ALLOCATOR == aMULTI
- if (allocator == aPURIFY) {
- free((char *) ptr);
+ if (TCL_PURIFY) {
+ TclpFree((char *) ptr);
return;
}
-#endif
GETCACHE(cachePtr);
@@ -682,666 +473,6 @@ MoveObjs(
toPtr->firstObjPtr = fromFirstObjPtr;
}
#endif
-#endif /* end of code for aNATIVE */
-
-#if USE_ZIPPY
-/*
- * The rest of this file deals with aZIPPY and aMULTI builds
- */
-
-/*
- * If range checking is enabled, an additional byte will be allocated to store
- * the magic number at the end of the requested memory.
- */
-
-
-/*
- *----------------------------------------------------------------------
- *
- * Block2Ptr, Ptr2Block --
- *
- * Convert between internal blocks and user pointers.
- *
- * Results:
- * User pointer or internal block.
- *
- * Side effects:
- * Invalid blocks will abort the server.
- *
- *----------------------------------------------------------------------
- */
-
-static inline char *
-Block2Ptr(
- Block *blockPtr,
- int bucket,
- unsigned int reqSize)
-{
- register void *ptr;
-
- blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
- if (bucket == NBUCKETS) {
- blockPtr->used = 255;
- } else {
- blockPtr->used = (reqSize >> bucketInfo[bucket].shift);
- }
- blockPtr->sourceBucket = bucket;
- ptr = (void *) (((char *)blockPtr) + OFFSET);
- return (char *) ptr;
-}
-
-static inline Block *
-Ptr2Block(
- char *ptr)
-{
- register Block *blockPtr;
-
- blockPtr = (Block *) (((char *) ptr) - OFFSET);
- if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x",
- blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
- }
- return blockPtr;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclpAlloc --
- *
- * Allocate memory.
- *
- * Results:
- * Pointer to memory just beyond Block pointer.
- *
- * Side effects:
- * May allocate more blocks for a bucket.
- *
- *----------------------------------------------------------------------
- */
-
-char *
-TclpAlloc(
- unsigned int reqSize)
-{
- Cache *cachePtr;
- Block *blockPtr;
- register int bucket;
- size_t size;
-
-#if TCL_ALLOCATOR == aMULTI
- if (allocator < aNONE) {
- return (void *) malloc(reqSize);
- }
-#endif
-
- GETCACHE(cachePtr);
-
-#ifndef __LP64__
- if (sizeof(int) >= sizeof(size_t)) {
- /* An unsigned int overflow can also be a size_t overflow */
- const size_t zero = 0;
- const size_t max = ~zero;
-
- if (((size_t) reqSize) > max - OFFSET) {
- /* Requested allocation exceeds memory */
- return NULL;
- }
- }
-#endif
-
- /*
- * Increment the requested size to include room for the Block structure.
- * Call malloc() directly if the required amount is greater than the
- * largest block, otherwise pop the smallest block large enough,
- * allocating more blocks if necessary.
- */
-
- size = reqSize + OFFSET;
- if (size > MAXALLOC) {
- bucket = NBUCKETS;
- blockPtr = malloc(size);
- } else {
- blockPtr = NULL;
- bucket = 0;
- while (bucketInfo[bucket].blockSize < size) {
- bucket++;
- }
- if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
- blockPtr = cachePtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
- cachePtr->buckets[bucket].numFree--;
-#ifdef ZIPPY_STATS
- cachePtr->buckets[bucket].numRemoves++;
-#endif
- }
- if (blockPtr == NULL) {
- return NULL;
- }
- }
- return Block2Ptr(blockPtr, bucket, reqSize);
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclpFree --
- *
- * Return blocks to the thread block cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * May move blocks to shared cache.
- *
- *----------------------------------------------------------------------
- */
-
-void
-TclpFree(
- char *ptr)
-{
- Cache *cachePtr;
- Block *blockPtr;
- int bucket;
-
-#if TCL_ALLOCATOR == aMULTI
- if (allocator < aNONE) {
- return free((char *) ptr);
- }
-#endif
-
- if (ptr == NULL) {
- return;
- }
-
- blockPtr = Ptr2Block(ptr);
-
- /*
- * Get the block back from the user pointer and call system free directly
- * for large blocks. Otherwise, push the block back on the bucket and move
- * blocks to the shared cache if there are now too many free.
- */
-
- bucket = blockPtr->sourceBucket;
- if (bucket == NBUCKETS) {
- free(blockPtr);
- return;
- }
-
- GETCACHE(cachePtr);
-
- blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].firstPtr = blockPtr;
- cachePtr->buckets[bucket].numFree++;
-#ifdef ZIPPY_STATS
- cachePtr->buckets[bucket].numInserts++;
-#endif
-#if defined(TCL_THREADS)
- if (cachePtr != sharedPtr &&
- cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
- PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove);
- }
-#endif
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclpRealloc --
- *
- * Re-allocate memory to a larger or smaller size.
- *
- * Results:
- * Pointer to memory just beyond Block pointer.
- *
- * Side effects:
- * Previous memory, if any, may be freed.
- *
- *----------------------------------------------------------------------
- */
-
-char *
-TclpRealloc(
- char *ptr,
- unsigned int reqSize)
-{
- Block *blockPtr;
- void *newPtr;
- size_t size, min;
- int bucket;
-
-#if TCL_ALLOCATOR == aMULTI
- if (allocator < aNONE) {
- return (void *) realloc((char *) ptr, reqSize);
- }
-#endif
-
- if (ptr == NULL) {
- return TclpAlloc(reqSize);
- }
-
-#ifndef __LP64__
- if (sizeof(int) >= sizeof(size_t)) {
- /* An unsigned int overflow can also be a size_t overflow */
- const size_t zero = 0;
- const size_t max = ~zero;
-
- if (((size_t) reqSize) > max - OFFSET) {
- /* Requested allocation exceeds memory */
- return NULL;
- }
- }
-#endif
-
- /*
- * If the block is not a system block and belongs in the same block,
- * simply return the existing pointer. Otherwise, if the block is a system
- * block and the new size would also require a system block, call
- * realloc() directly.
- */
-
- blockPtr = Ptr2Block(ptr);
-
- size = reqSize + OFFSET;
- bucket = blockPtr->sourceBucket;
- if (bucket != NBUCKETS) {
- if (bucket > 0) {
- min = bucketInfo[bucket-1].blockSize;
- } else {
- min = 0;
- }
- if (size > min && size <= bucketInfo[bucket].blockSize) {
- return Block2Ptr(blockPtr, bucket, reqSize);
- }
- } else if (size > MAXALLOC) {
- blockPtr = realloc(blockPtr, size);
- if (blockPtr == NULL) {
- return NULL;
- }
- return Block2Ptr(blockPtr, NBUCKETS, reqSize);
- }
-
- /*
- * Finally, perform an expensive malloc/copy/free.
- */
-
- newPtr = TclpAlloc(reqSize);
- if (newPtr != NULL) {
- size_t maxSize = bucketInfo[bucket].blockSize - OFFSET;
- size_t toCopy = ((blockPtr->used + 1) << bucketInfo[bucket].shift);
-
- if (toCopy > maxSize) {
- toCopy = maxSize;
- }
- if (toCopy > reqSize) {
- toCopy = reqSize;
- }
-
- memcpy(newPtr, ptr, toCopy);
- TclpFree(ptr);
- }
- return newPtr;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclAllocMaximize --
- *
- * Given a TclpAlloc'ed pointer, it returns the maximal size that can be used
- * by the allocated memory. This is almost always larger than the requested
- * size, as it corresponds to the bucket's size.
- *
- * Results:
- * New size.
- *
- *----------------------------------------------------------------------
- */
- unsigned int
- TclAllocMaximize(
- void *ptr)
-{
- Block *blockPtr;
- int bucket;
- size_t size;
-
- return UINT_MAX;
-#if TCL_ALLOCATOR == aMULTI
- if (allocator < aNONE) {
- /*
- * No info, return UINT_MAX as a signal.
- */
-
- return UINT_MAX;
- }
-#endif
-
- blockPtr = Ptr2Block(ptr);
- bucket = blockPtr->sourceBucket;
-
- if (bucket == NBUCKETS) {
- /*
- * System malloc'ed: no info
- */
-
- return UINT_MAX;
- }
-
- size = bucketInfo[bucket].blockSize - OFFSET;
- blockPtr->used = 255;
- return size;
-}
-
-#ifdef ZIPPY_STATS
-
-/*
- *----------------------------------------------------------------------
- *
- * Tcl_GetMemoryInfo --
- *
- * Return a list-of-lists of memory stats.
- *
- * Results:
- * None.
- *
- * Side effects:
- * List appended to given dstring.
- *
- *----------------------------------------------------------------------
- */
-
-void
-Tcl_GetMemoryInfo(
- Tcl_DString *dsPtr)
-{
- Cache *cachePtr;
- char buf[200];
- unsigned int n;
-
- Tcl_MutexLock(listLockPtr);
- cachePtr = firstCachePtr;
- while (cachePtr != NULL) {
- Tcl_DStringStartSublist(dsPtr);
-#if defined(TCL_THREADS)
- if (cachePtr == sharedPtr) {
- Tcl_DStringAppendElement(dsPtr, "shared");
- } else {
- sprintf(buf, "thread%p", cachePtr->owner);
- Tcl_DStringAppendElement(dsPtr, buf);
- }
-#else
- Tcl_DStringAppendElement(dsPtr, "unthreaded");
-#endif
- for (n = 0; n < NBUCKETS; ++n) {
- sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
- (unsigned long) bucketInfo[n].blockSize,
- cachePtr->buckets[n].numFree,
- cachePtr->buckets[n].numRemoves,
- cachePtr->buckets[n].numInserts,
- cachePtr->buckets[n].numLocks,
- cachePtr->buckets[n].numWaits);
- Tcl_DStringAppendElement(dsPtr, buf);
- }
- Tcl_DStringEndSublist(dsPtr);
-#if defined(TCL_THREADS)
- cachePtr = cachePtr->nextPtr;
-#else
- cachePtr = NULL;
-#endif
- }
- Tcl_MutexUnlock(listLockPtr);
-}
-#endif /* ZIPPY_STATS */
-
-#if defined(TCL_THREADS)
-/*
- *----------------------------------------------------------------------
- *
- * LockBucket, UnlockBucket --
- *
- * Set/unset the lock to access a bucket in the shared cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * Lock activity and contention are monitored globally and on a per-cache
- * basis.
- *
- *----------------------------------------------------------------------
- */
-
-static void
-LockBucket(
- Cache *cachePtr,
- int bucket)
-{
- Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-#ifdef ZIPPY_STATS
- cachePtr->buckets[bucket].numLocks++;
- sharedPtr->buckets[bucket].numLocks++;
-#endif
-}
-
-static void
-UnlockBucket(
- Cache *cachePtr,
- int bucket)
-{
- Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * PutBlocks --
- *
- * Return unused blocks to the shared cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static void
-PutBlocks(
- Cache *cachePtr,
- int bucket,
- int numMove)
-{
- register Block *lastPtr, *firstPtr;
- register int n = numMove;
-
- /*
- * Before acquiring the lock, walk the block list to find the last block
- * to be moved.
- */
-
- firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
- while (--n > 0) {
- lastPtr = lastPtr->nextBlock;
- }
- cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
- cachePtr->buckets[bucket].numFree -= numMove;
-
- /*
- * Aquire the lock and place the list of blocks at the front of the shared
- * cache bucket.
- */
-
- LockBucket(cachePtr, bucket);
- lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
- sharedPtr->buckets[bucket].firstPtr = firstPtr;
- sharedPtr->buckets[bucket].numFree += numMove;
- UnlockBucket(cachePtr, bucket);
-}
-#endif
-
-/*
- *----------------------------------------------------------------------
- *
- * GetBlocks --
- *
- * Get more blocks for a bucket.
- *
- * Results:
- * 1 if blocks where allocated, 0 otherwise.
- *
- * Side effects:
- * Cache may be filled with available blocks.
- *
- *----------------------------------------------------------------------
- */
-
-static int
-GetBlocks(
- Cache *cachePtr,
- int bucket)
-{
- register Block *blockPtr = NULL;
- register int n;
-
-#if defined(TCL_THREADS)
- /*
- * First, atttempt to move blocks from the shared cache. Note the
- * potentially dirty read of numFree before acquiring the lock which is a
- * slight performance enhancement. The value is verified after the lock is
- * actually acquired.
- */
-
- if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
- LockBucket(cachePtr, bucket);
- if (sharedPtr->buckets[bucket].numFree > 0) {
-
- /*
- * Either move the entire list or walk the list to find the last
- * block to move.
- */
-
- n = bucketInfo[bucket].numMove;
- if (n >= sharedPtr->buckets[bucket].numFree) {
- cachePtr->buckets[bucket].firstPtr =
- sharedPtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].numFree =
- sharedPtr->buckets[bucket].numFree;
- sharedPtr->buckets[bucket].firstPtr = NULL;
- sharedPtr->buckets[bucket].numFree = 0;
- } else {
- blockPtr = sharedPtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].firstPtr = blockPtr;
- sharedPtr->buckets[bucket].numFree -= n;
- cachePtr->buckets[bucket].numFree = n;
- while (--n > 0) {
- blockPtr = blockPtr->nextBlock;
- }
- sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
- blockPtr->nextBlock = NULL;
- }
- }
- UnlockBucket(cachePtr, bucket);
- }
-#endif
-
- if (cachePtr->buckets[bucket].numFree == 0) {
- register size_t size;
-
- /*
- * If no blocks could be moved from shared, first look for a larger
- * block in this cache OR the shared cache to split up.
- */
-
- n = NBUCKETS;
- size = 0; /* lint */
- while (--n > bucket) {
- if (cachePtr->buckets[n].numFree > 0) {
- size = bucketInfo[n].blockSize;
- blockPtr = cachePtr->buckets[n].firstPtr;
- cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
- cachePtr->buckets[n].numFree--;
- break;
- }
- }
-#if defined(TCL_THREADS)
- if (blockPtr == NULL) {
- n = NBUCKETS;
- size = 0; /* lint */
- while (--n > bucket) {
- if (sharedPtr->buckets[n].numFree > 0) {
- size = bucketInfo[n].blockSize;
- LockBucket(cachePtr, n);
- if (sharedPtr->buckets[n].numFree > 0) {
- blockPtr = sharedPtr->buckets[n].firstPtr;
- sharedPtr->buckets[n].firstPtr = blockPtr->nextBlock;
- sharedPtr->buckets[n].numFree--;
- UnlockBucket(cachePtr, n);
- break;
- }
- UnlockBucket(cachePtr, n);
- }
- }
- }
-#endif
- /*
- * Otherwise, allocate a big new block directly.
- */
-
- if (blockPtr == NULL) {
- size = MAXALLOC;
- blockPtr = malloc(size);
- if (blockPtr == NULL) {
- return 0;
- }
- }
-
- /*
- * Split the larger block into smaller blocks for this bucket.
- */
-
- n = size / bucketInfo[bucket].blockSize;
- cachePtr->buckets[bucket].numFree = n;
- cachePtr->buckets[bucket].firstPtr = blockPtr;
- while (--n > 0) {
- blockPtr->nextBlock = (Block *)
- ((char *) blockPtr + bucketInfo[bucket].blockSize);
- blockPtr = blockPtr->nextBlock;
- }
- blockPtr->nextBlock = NULL;
- }
- return 1;
-}
-
-#if TCL_ALLOCATOR == aMULTI
-static void
-ChooseAllocator()
-{
- char *choice = getenv("TCL_ALLOCATOR");
-
- /*
- * This is only called when compiled with aMULTI
- */
-
- allocator = aZIPPY;
-
- if (choice) {
- /*
- * Only override the base when requesting native or purify
- */
-
- if (!strcmp(choice, "aNATIVE")) {
- allocator = aNATIVE;
- } else if (!strcmp(choice, "aPURIFY")) {
- allocator = aPURIFY;
- }
- }
-}
-#endif
-#endif /* USE_ZIPPY */
/*
 * Local Variables:
 */
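
For reference, the Tcl_Obj free-list policy that tclAlloc.c now owns can be
condensed to a few lines. The sketch below is illustrative only and is not
taken from the patch: the ObjCache/FreeNode names and the malloc-based refill
are invented here, and the real code also migrates objects to and from the
shared cache under objLockPtr. It shows the two thresholds at work: refill
NOBJALLOC objects at a time when the list runs dry, spill once more than
NOBJHIGH are free.

    /* Illustrative sketch of the per-thread free-list policy. */
    #include <stdlib.h>

    #define NOBJHIGH  1200               /* spill threshold, as in the patch */
    #define NOBJALLOC ((NOBJHIGH*2)/3)   /* batch size when refilling */

    typedef struct FreeNode {            /* stands in for a free Tcl_Obj */
        struct FreeNode *nextPtr;
    } FreeNode;

    typedef struct ObjCache {
        FreeNode *firstPtr;              /* head of this thread's free list */
        int numFree;                     /* current length of that list */
    } ObjCache;

    static void *
    SketchSmallAlloc(ObjCache *c, size_t objSize)
    {
        FreeNode *nodePtr;
        int n;

        if (c->firstPtr == NULL) {
            /*
             * Empty list: grab NOBJALLOC objects in one batch (the real code
             * first tries to move objects over from the shared cache).
             */
            for (n = 0; n < NOBJALLOC; n++) {
                nodePtr = malloc(objSize);
                if (nodePtr == NULL) {
                    break;
                }
                nodePtr->nextPtr = c->firstPtr;
                c->firstPtr = nodePtr;
                c->numFree++;
            }
        }
        nodePtr = c->firstPtr;
        if (nodePtr != NULL) {
            c->firstPtr = nodePtr->nextPtr;
            c->numFree--;
        }
        return nodePtr;
    }

    static void
    SketchSmallFree(ObjCache *c, void *ptr)
    {
        FreeNode *nodePtr = ptr;

        nodePtr->nextPtr = c->firstPtr;
        c->firstPtr = nodePtr;
        if (++c->numFree > NOBJHIGH) {
            /* Too many free objects: the real code moves a batch of them
             * to the shared cache here, under objLockPtr. */
        }
    }

The asymmetry between the two constants is deliberate hysteresis: moving two
thirds of the high-water mark at a time keeps a thread from bouncing between
empty and over-full on every alloc/free pair.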
diff --git a/generic/tclAlloc.h b/generic/tclAlloc.h
new file mode 100644
index 0000000..1aef34e
--- /dev/null
+++ b/generic/tclAlloc.h
@@ -0,0 +1,64 @@
+/*
+ * tclAlloc.h --
+ *
+ * This defines the interface for pluggable memory allocators for Tcl.
+ *
+ * Copyright (c) 2013 by Miguel Sofer. All rights reserved.
+ *
+ * See the file "license.terms" for information on usage and redistribution of
+ * this file, and for a DISCLAIMER OF ALL WARRANTIES.
+ */
+
+#ifndef _TCLALLOC
+#define _TCLALLOC
+
+/*
+ * These six functions MUST be defined by the allocator.
+ */
+
+char * TclpAlloc(unsigned int reqSize);
+char * TclpRealloc(char *ptr, unsigned int reqSize);
+void TclpFree(char *ptr);
+void TclXpInitAlloc(void);
+void TclXpFinalizeAlloc(void);
+void TclXpFreeAllocCache(void *ptr);
+
+
+/*
+ * The allocator should allow for "purify mode" by checking the TCL_PURIFY
+ * variable below: if it is nonzero, the allocator should just shunt to plain
+ * malloc. This is used for debugging; the value can be treated as a
+ * constant, as it does not change in a running process.
+ */
+
+int TCL_PURIFY;
+int TCL_THREADED;
+
+/*
+ * This macro is used to properly align the memory allocated by Tcl, giving
+ * the same alignment as the native malloc.
+ */
+
+#if defined(__APPLE__)
+#define ALLOCALIGN 16
+#else
+#define ALLOCALIGN (2*sizeof(void *))
+#endif
+
+#define ALIGN(x) (((x) + ALLOCALIGN - 1) & ~(ALLOCALIGN - 1))
+
+/*
+ * These are utility functions (defined in tclAlloc.c) to give access to
+ * either per-thread or per-interp caches. They will return a pointer to which
+ * the allocator should attach the proper structure that it wishes to
+ * maintain.
+ *
+ * If the returned pointer is NULL, the value has not yet been initialized
+ * for this interp or thread, and the corresponding Set function should be
+ * called.
+ */
+
+void TclSetSharedAllocCache(void *allocCachePtr);
+void TclSetAllocCache(void *allocCachePtr);
+void *TclGetAllocCache(void);
+
+#endif
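
Taken together, tclAlloc.h is the whole contract a pluggable allocator has to
meet: the six entry points above plus, optionally, per-thread state parked
through the cache hooks. (For the alignment macro, with ALLOCALIGN at 16:
ALIGN(13) and ALIGN(16) both evaluate to 16, ALIGN(17) to 32.) Below is a
minimal sketch of such an allocator, assuming only what the header declares;
the ExampleCache struct and its bookkeeping are invented for the example, and
error handling is kept to a minimum.

    /* Sketch of a minimal pluggable allocator; not part of the patch. */
    #include <stdlib.h>
    #include "tclAlloc.h"

    typedef struct ExampleCache {
        long numAllocs;             /* purely illustrative bookkeeping */
    } ExampleCache;

    static ExampleCache *
    GetCache(void)
    {
        ExampleCache *cachePtr = TclGetAllocCache();

        if (cachePtr == NULL) {     /* first use in this thread */
            cachePtr = calloc(1, sizeof(ExampleCache));
            TclSetAllocCache(cachePtr);
        }
        return cachePtr;
    }

    char *
    TclpAlloc(unsigned int reqSize)
    {
        ExampleCache *cachePtr = GetCache();

        if (cachePtr != NULL) {
            cachePtr->numAllocs++;
        }
        return malloc(reqSize);
    }

    char *
    TclpRealloc(char *ptr, unsigned int reqSize)
    {
        return realloc(ptr, reqSize);
    }

    void
    TclpFree(char *ptr)
    {
        free(ptr);
    }

    void
    TclXpInitAlloc(void)
    {
        if (TCL_THREADED) {
            TclSetSharedAllocCache(calloc(1, sizeof(ExampleCache)));
        }
    }

    void
    TclXpFinalizeAlloc(void)
    {
    }

    void
    TclXpFreeAllocCache(void *ptr)
    {
        free(ptr);                  /* drop this thread's private struct */
    }

Note the shape of the per-thread hook: TclGetAllocCache() returns NULL the
first time a thread asks, the allocator creates whatever structure it wants
and registers it with TclSetAllocCache(), and TclXpFreeAllocCache() gets that
same pointer back when the thread dies.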
diff --git a/generic/tclAllocNative.c b/generic/tclAllocNative.c
new file mode 100644
index 0000000..6fb354a
--- /dev/null
+++ b/generic/tclAllocNative.c
@@ -0,0 +1,52 @@
+/*
+ * tclAllocNative.c --
+ *
+ * This is the basic native allocator for Tcl.
+ *
+ * Copyright (c) 2013 by Miguel Sofer. All rights reserved.
+ *
+ * See the file "license.terms" for information on usage and redistribution of
+ * this file, and for a DISCLAIMER OF ALL WARRANTIES.
+ */
+
+#include <stdlib.h>
+#include "tclAlloc.h"
+
+char *
+TclpAlloc(
+ unsigned int reqSize)
+{
+ return malloc(reqSize);
+}
+
+char *
+TclpRealloc(
+ char *ptr,
+ unsigned int reqSize)
+{
+ return realloc(ptr, reqSize);
+}
+
+void
+TclpFree(
+ char *ptr)
+{
+ free(ptr);
+}
+
+void
+TclXpInitAlloc(void)
+{
+}
+
+void
+TclXpFinalizeAlloc(void)
+{
+}
+
+void
+TclXpFreeAllocCache(
+ void *ptr)
+{
+}
+
diff --git a/generic/tclAllocZippy.c b/generic/tclAllocZippy.c
new file mode 100644
index 0000000..cc08bbf
--- /dev/null
+++ b/generic/tclAllocZippy.c
@@ -0,0 +1,852 @@
+/*
+ * tclAllocZippy.c --
+ *
+ * This is a very flexible storage allocator for Tcl, for use with or
+ * without threads.
+ *
+ * It is essentially the ex-tclThreadAlloc, AOLserver's fast threaded
+ * allocator. Mods with respect to the original:
+ * - split blocks in the shared pool before mallocing again for
+ * improved cache usage
+ * - stats and Tcl_GetMemoryInfo disabled per default, enable with
+ * -DZIPPY_STATS
+ * - adapt for unthreaded usage as replacement of the ex tclAlloc
+ * - (TODO!) build zippy as a pre-loadable library to use with a
+ * native build as a malloc replacement. Difficulty is to make it
+ * portable (easy enough on modern elf/unix, to be researched on
+ * win and mac). This would be the best option, instead of
+ * MULTI. It could be built in two versions (perf, debug/stats)
+ *
+ * The Initial Developer of the Original Code is America Online, Inc.
+ * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
+ *
+ * Copyright (c) 2008-2013 by Miguel Sofer. All rights reserved.
+ *
+ * See the file "license.terms" for information on usage and redistribution of
+ * this file, and for a DISCLAIMER OF ALL WARRANTIES.
+ */
+
+#include "tclInt.h"
+#include "tclAlloc.h"
+
+/*
+ * The following struct stores accounting information for each block including
+ * two small magic numbers and a bucket number when in use or a next pointer
+ * when free. The original requested size (not including the Block overhead)
+ * is also maintained.
+ */
+
+typedef struct Block {
+ union {
+ struct Block *next; /* Next in free list. */
+ struct {
+ unsigned char magic1; /* First magic number. */
+ unsigned char bucket; /* Bucket block allocated from. */
+ unsigned char inUse; /* Block memory currently in use, see
+ * details in TclpAlloc/Realloc. */
+ unsigned char magic2; /* Second magic number. */
+ } s;
+ } u;
+} Block;
+
+#define OFFSET ALIGN(sizeof(Block))
+
+#define nextBlock u.next
+#define sourceBucket u.s.bucket
+#define magicNum1 u.s.magic1
+#define magicNum2 u.s.magic2
+#define used u.s.inUse
+#define MAGIC 0xEF
+
+/*
+ * The following defines the minimum and maximum block sizes and the number
+ * of buckets in the bucket cache.
+ * 32b 64b Apple-32b(?)
+ * ALLOCALIGN 8 16 16
+ * sizeof(Block) 8 16 16
+ * OFFSET 8 16 16
+ * MINALLOC 16 32 32
+ * NBUCKETS 11 10 10
+ * MAXALLOC 16384 16384 16384
+ * small allocs 1024 512 1024
+ * at a time
+ */
+
+#define MINALLOC ALIGN(OFFSET+8)
+#define NBUCKETS (11 - (MINALLOC >> 5))
+#define MAXALLOC (MINALLOC << (NBUCKETS - 1))
+
+/*
+ * The following structure defines a bucket of blocks, optionally with various
+ * accounting and statistics information.
+ */
+
+typedef struct Bucket {
+ Block *firstPtr; /* First block available */
+ long numFree; /* Number of blocks available */
+#ifdef ZIPPY_STATS
+ /* All fields below for accounting only */
+
+ long numRemoves; /* Number of removes from bucket */
+ long numInserts; /* Number of inserts into bucket */
+ long numWaits; /* Number of waits to acquire a lock */
+ long numLocks; /* Number of locks acquired */
+#endif
+} Bucket;
+
+/*
+ * The following array specifies various per-bucket limits and locks. The
+ * values are statically initialized to avoid calculating them repeatedly.
+ */
+
+static struct {
+ size_t blockSize; /* Bucket blocksize. */
+ int shift;
+ int maxBlocks; /* Max blocks before move to share. */
+ int numMove; /* Num blocks to move to share. */
+ Tcl_Mutex *lockPtr; /* Share bucket lock. */
+} bucketInfo[NBUCKETS];
+
+/*
+ * The per-thread block cache, and the list linking all such caches.
+ */
+
+typedef struct Cache {
+ Bucket buckets[NBUCKETS]; /* The buckets for this thread */
+ struct Cache *nextPtr; /* Linked list of cache entries */
+} Cache;
+
+static Cache sharedCache;
+#define sharedPtr (&sharedCache)
+
+static Tcl_Mutex *listLockPtr;
+static Cache *firstCachePtr = &sharedCache;
+
+static void InitBucketInfo(void);
+static inline char * Block2Ptr(Block *blockPtr,
+ int bucket, unsigned int reqSize);
+static inline Block * Ptr2Block(char *ptr);
+
+static int GetBlocks(Cache *cachePtr, int bucket);
+
+static void PutBlocks(Cache *cachePtr, int bucket, int numMove);
+static void LockBucket(Cache *cachePtr, int bucket);
+static void UnlockBucket(Cache *cachePtr, int bucket);
+
+
+/*
+ *-------------------------------------------------------------------------
+ *
+ * TclXpInitAlloc --
+ *
+ * Initialize the memory system.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Initialize the mutex used to serialize allocations.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+InitBucketInfo(void)
+{
+ int i;
+ int shift = 0;
+
+ for (i = 0; i < NBUCKETS; ++i) {
+ bucketInfo[i].blockSize = MINALLOC << i;
+ while (((bucketInfo[i].blockSize - OFFSET) >> shift) > 255) {
+ ++shift;
+ }
+ bucketInfo[i].shift = shift;
+
+ if (TCL_THREADED) {
+ /* TODO: clearer logic? Change move to keep? */
+ bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
+ bucketInfo[i].numMove = i < NBUCKETS - 1 ?
+ 1 << (NBUCKETS - 2 - i) : 1;
+ bucketInfo[i].lockPtr = TclpNewAllocMutex();
+ }
+ }
+}
+
+void
+TclXpInitAlloc(void)
+{
+ /*
+ * Set the params for the correct allocator
+ */
+
+ if (TCL_THREADED) {
+ InitBucketInfo();
+ listLockPtr = TclpNewAllocMutex(); /* guards the firstCachePtr list */
+ TclSetSharedAllocCache(sharedPtr);
+ }
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclXpFinalizeAlloc --
+ *
+ * This procedure is used to destroy all private resources used in this
+ * file.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclXpFinalizeAlloc(void)
+{
+
+ if (TCL_THREADED) {
+ unsigned int i;
+
+ for (i = 0; i < NBUCKETS; ++i) {
+ TclpFreeAllocMutex(bucketInfo[i].lockPtr);
+ bucketInfo[i].lockPtr = NULL;
+ }
+
+ TclpFreeAllocMutex(listLockPtr);
+ listLockPtr = NULL;
+ }
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclXpFreeAllocCache --
+ *
+ * Flush and delete a cache, removing from list of caches.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclXpFreeAllocCache(
+ void *arg)
+{
+ Cache *cachePtr = arg;
+ Cache **nextPtrPtr;
+ register unsigned int bucket;
+
+ if (cachePtr == NULL) {
+ /* This thread never allocated through zippy. */
+ return;
+ }
+
+ /*
+ * Flush blocks.
+ */
+
+ for (bucket = 0; bucket < NBUCKETS; ++bucket) {
+ if (cachePtr->buckets[bucket].numFree > 0) {
+ PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree);
+ }
+ }
+
+ /*
+ * Remove from pool list.
+ */
+
+ Tcl_MutexLock(listLockPtr);
+ nextPtrPtr = &firstCachePtr;
+ while (*nextPtrPtr != cachePtr) {
+ nextPtrPtr = &(*nextPtrPtr)->nextPtr;
+ }
+ *nextPtrPtr = cachePtr->nextPtr;
+ cachePtr->nextPtr = NULL;
+ Tcl_MutexUnlock(listLockPtr);
+ free(cachePtr);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * Block2Ptr, Ptr2Block --
+ *
+ * Convert between internal blocks and user pointers.
+ *
+ * Results:
+ * User pointer or internal block.
+ *
+ * Side effects:
+ * Invalid blocks will abort the server.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline char *
+Block2Ptr(
+ Block *blockPtr,
+ int bucket,
+ unsigned int reqSize)
+{
+ register void *ptr;
+
+ blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
+ if (bucket == NBUCKETS) {
+ blockPtr->used = 255;
+ } else {
+ blockPtr->used = (reqSize >> bucketInfo[bucket].shift);
+ }
+ blockPtr->sourceBucket = bucket;
+ ptr = (void *) (((char *)blockPtr) + OFFSET);
+ return (char *) ptr;
+}
+
+static inline Block *
+Ptr2Block(
+ char *ptr)
+{
+ register Block *blockPtr;
+
+ blockPtr = (Block *) (((char *) ptr) - OFFSET);
+ if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
+ Tcl_Panic("alloc: invalid block: %p: %x %x",
+ blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
+ }
+ return blockPtr;
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclpAlloc --
+ *
+ * Allocate memory.
+ *
+ * Results:
+ * Pointer to memory just beyond Block pointer.
+ *
+ * Side effects:
+ * May allocate more blocks for a bucket.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline Cache *
+GetAllocCache(void)
+{
+ Cache *cachePtr = TclGetAllocCache();
+
+ if (cachePtr == NULL) {
+ cachePtr = calloc(1, sizeof(Cache));
+ if (cachePtr == NULL) {
+ Tcl_Panic("alloc: could not allocate new cache");
+ }
+ if (TCL_THREADED) {
+ /* Link into the cache list so TclXpFreeAllocCache can find it. */
+ Tcl_MutexLock(listLockPtr);
+ cachePtr->nextPtr = firstCachePtr;
+ firstCachePtr = cachePtr;
+ Tcl_MutexUnlock(listLockPtr);
+ }
+ TclSetAllocCache(cachePtr);
+ }
+ return cachePtr;
+}
+
+
+char *
+TclpAlloc(
+ unsigned int reqSize)
+{
+ Cache *cachePtr;
+ Block *blockPtr;
+ register int bucket;
+ size_t size;
+
+ if (TCL_PURIFY) {
+ return (char *) malloc(reqSize);
+ }
+
+ cachePtr = GetAllocCache();
+
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - OFFSET) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
+ }
+#endif
+
+ /*
+ * Increment the requested size to include room for the Block structure.
+ * Call malloc() directly if the required amount is greater than the
+ * largest block, otherwise pop the smallest block large enough,
+ * allocating more blocks if necessary.
+ */
+
+ size = reqSize + OFFSET;
+ if (size > MAXALLOC) {
+ bucket = NBUCKETS;
+ blockPtr = malloc(size);
+ } else {
+ blockPtr = NULL;
+ bucket = 0;
+ while (bucketInfo[bucket].blockSize < size) {
+ bucket++;
+ }
+ if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
+ blockPtr = cachePtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
+ cachePtr->buckets[bucket].numFree--;
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].numRemoves++;
+#endif
+ }
+ if (blockPtr == NULL) {
+ return NULL;
+ }
+ }
+ return Block2Ptr(blockPtr, bucket, reqSize);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclpFree --
+ *
+ * Return blocks to the thread block cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * May move blocks to shared cache.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclpFree(
+ char *ptr)
+{
+ Cache *cachePtr;
+ Block *blockPtr;
+ int bucket;
+
+ if (TCL_PURIFY) {
+ free(ptr);
+ return;
+ }
+
+ if (ptr == NULL) {
+ return;
+ }
+
+ blockPtr = Ptr2Block(ptr);
+
+ /*
+ * Get the block back from the user pointer and call system free directly
+ * for large blocks. Otherwise, push the block back on the bucket and move
+ * blocks to the shared cache if there are now too many free.
+ */
+
+ bucket = blockPtr->sourceBucket;
+ if (bucket == NBUCKETS) {
+ free(blockPtr);
+ return;
+ }
+
+ cachePtr = GetAllocCache();
+
+ blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ cachePtr->buckets[bucket].numFree++;
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].numInserts++;
+#endif
+ if (cachePtr != sharedPtr &&
+ cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
+ PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove);
+ }
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclpRealloc --
+ *
+ * Re-allocate memory to a larger or smaller size.
+ *
+ * Results:
+ * Pointer to memory just beyond Block pointer.
+ *
+ * Side effects:
+ * Previous memory, if any, may be freed.
+ *
+ *----------------------------------------------------------------------
+ */
+
+char *
+TclpRealloc(
+ char *ptr,
+ unsigned int reqSize)
+{
+ Block *blockPtr;
+ void *newPtr;
+ size_t size, min;
+ int bucket;
+
+ if (TCL_PURIFY) {
+ return (char *) realloc(ptr, reqSize);
+ }
+
+ if (ptr == NULL) {
+ return TclpAlloc(reqSize);
+ }
+
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - OFFSET) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
+ }
+#endif
+
+ /*
+ * If the block is not a system block and belongs in the same block,
+ * simply return the existing pointer. Otherwise, if the block is a system
+ * block and the new size would also require a system block, call
+ * realloc() directly.
+ */
+
+ blockPtr = Ptr2Block(ptr);
+
+ size = reqSize + OFFSET;
+ bucket = blockPtr->sourceBucket;
+ if (bucket != NBUCKETS) {
+ if (bucket > 0) {
+ min = bucketInfo[bucket-1].blockSize;
+ } else {
+ min = 0;
+ }
+ if (size > min && size <= bucketInfo[bucket].blockSize) {
+ return Block2Ptr(blockPtr, bucket, reqSize);
+ }
+ } else if (size > MAXALLOC) {
+ blockPtr = realloc(blockPtr, size);
+ if (blockPtr == NULL) {
+ return NULL;
+ }
+ return Block2Ptr(blockPtr, NBUCKETS, reqSize);
+ }
+
+ /*
+ * Finally, perform an expensive malloc/copy/free.
+ */
+
+ newPtr = TclpAlloc(reqSize);
+ if (newPtr != NULL) {
+ size_t maxSize = bucketInfo[bucket].blockSize - OFFSET;
+ size_t toCopy = ((blockPtr->used + 1) << bucketInfo[bucket].shift);
+
+ if (toCopy > maxSize) {
+ toCopy = maxSize;
+ }
+ if (toCopy > reqSize) {
+ toCopy = reqSize;
+ }
+
+ memcpy(newPtr, ptr, toCopy);
+ TclpFree(ptr);
+ }
+ return newPtr;
+}
+#ifdef ZIPPY_STATS
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * Tcl_GetMemoryInfo --
+ *
+ * Return a list-of-lists of memory stats.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * List appended to given dstring.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+Tcl_GetMemoryInfo(
+ Tcl_DString *dsPtr)
+{
+ Cache *cachePtr;
+ char buf[200];
+ unsigned int n;
+
+ Tcl_MutexLock(listLockPtr);
+ cachePtr = firstCachePtr;
+ while (cachePtr != NULL) {
+ Tcl_DStringStartSublist(dsPtr);
+ if (cachePtr == sharedPtr) {
+ Tcl_DStringAppendElement(dsPtr, "shared");
+ } else {
+ /* This Cache struct has no owner field; identify it by address. */
+ sprintf(buf, "cache%p", (void *) cachePtr);
+ Tcl_DStringAppendElement(dsPtr, buf);
+ }
+ for (n = 0; n < NBUCKETS; ++n) {
+ sprintf(buf, "%lu %ld %ld %ld %ld %ld",
+ (unsigned long) bucketInfo[n].blockSize,
+ cachePtr->buckets[n].numFree,
+ cachePtr->buckets[n].numRemoves,
+ cachePtr->buckets[n].numInserts,
+ cachePtr->buckets[n].numLocks,
+ cachePtr->buckets[n].numWaits);
+ Tcl_DStringAppendElement(dsPtr, buf);
+ }
+ Tcl_DStringEndSublist(dsPtr);
+ cachePtr = cachePtr->nextPtr;
+ }
+ Tcl_MutexUnlock(listLockPtr);
+}
+#endif /* ZIPPY_STATS */
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * LockBucket, UnlockBucket --
+ *
+ * Set/unset the lock to access a bucket in the shared cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Lock activity and contention are monitored globally and on a per-cache
+ * basis.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+LockBucket(
+ Cache *cachePtr,
+ int bucket)
+{
+ Tcl_MutexLock(bucketInfo[bucket].lockPtr);
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].numLocks++;
+ sharedPtr->buckets[bucket].numLocks++;
+#endif
+}
+
+static void
+UnlockBucket(
+ Cache *cachePtr,
+ int bucket)
+{
+ Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * PutBlocks --
+ *
+ * Return unused blocks to the shared cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+PutBlocks(
+ Cache *cachePtr,
+ int bucket,
+ int numMove)
+{
+ register Block *lastPtr, *firstPtr;
+ register int n = numMove;
+
+ /*
+ * Before acquiring the lock, walk the block list to find the last block
+ * to be moved.
+ */
+
+ firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
+ while (--n > 0) {
+ lastPtr = lastPtr->nextBlock;
+ }
+ cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
+ cachePtr->buckets[bucket].numFree -= numMove;
+
+ /*
+ * Acquire the lock and place the list of blocks at the front of the shared
+ * cache bucket.
+ */
+
+ LockBucket(cachePtr, bucket);
+ lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
+ sharedPtr->buckets[bucket].firstPtr = firstPtr;
+ sharedPtr->buckets[bucket].numFree += numMove;
+ UnlockBucket(cachePtr, bucket);
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * GetBlocks --
+ *
+ * Get more blocks for a bucket.
+ *
+ * Results:
+ * 1 if blocks were allocated, 0 otherwise.
+ *
+ * Side effects:
+ * Cache may be filled with available blocks.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static int
+GetBlocks(
+ Cache *cachePtr,
+ int bucket)
+{
+ register Block *blockPtr = NULL;
+ register int n;
+
+ /*
+ * First, attempt to move blocks from the shared cache. Note the
+ * potentially dirty read of numFree before acquiring the lock which is a
+ * slight performance enhancement. The value is verified after the lock is
+ * actually acquired.
+ */
+
+ if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
+ LockBucket(cachePtr, bucket);
+ if (sharedPtr->buckets[bucket].numFree > 0) {
+
+ /*
+ * Either move the entire list or walk the list to find the last
+ * block to move.
+ */
+
+ n = bucketInfo[bucket].numMove;
+ if (n >= sharedPtr->buckets[bucket].numFree) {
+ cachePtr->buckets[bucket].firstPtr =
+ sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].numFree =
+ sharedPtr->buckets[bucket].numFree;
+ sharedPtr->buckets[bucket].firstPtr = NULL;
+ sharedPtr->buckets[bucket].numFree = 0;
+ } else {
+ blockPtr = sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ sharedPtr->buckets[bucket].numFree -= n;
+ cachePtr->buckets[bucket].numFree = n;
+ while (--n > 0) {
+ blockPtr = blockPtr->nextBlock;
+ }
+ sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
+ blockPtr->nextBlock = NULL;
+ }
+ }
+ UnlockBucket(cachePtr, bucket);
+ }
+
+ if (cachePtr->buckets[bucket].numFree == 0) {
+ register size_t size;
+
+ /*
+ * If no blocks could be moved from shared, first look for a larger
+ * block in this cache OR the shared cache to split up.
+ */
+
+ n = NBUCKETS;
+ size = 0; /* lint */
+ while (--n > bucket) {
+ if (cachePtr->buckets[n].numFree > 0) {
+ size = bucketInfo[n].blockSize;
+ blockPtr = cachePtr->buckets[n].firstPtr;
+ cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
+ cachePtr->buckets[n].numFree--;
+ break;
+ }
+ }
+ if (blockPtr == NULL) {
+ n = NBUCKETS;
+ size = 0; /* lint */
+ while (--n > bucket) {
+ if (sharedPtr->buckets[n].numFree > 0) {
+ size = bucketInfo[n].blockSize;
+ LockBucket(cachePtr, n);
+ if (sharedPtr->buckets[n].numFree > 0) {
+ blockPtr = sharedPtr->buckets[n].firstPtr;
+ sharedPtr->buckets[n].firstPtr = blockPtr->nextBlock;
+ sharedPtr->buckets[n].numFree--;
+ UnlockBucket(cachePtr, n);
+ break;
+ }
+ UnlockBucket(cachePtr, n);
+ }
+ }
+ }
+ /*
+ * Otherwise, allocate a big new block directly.
+ */
+
+ if (blockPtr == NULL) {
+ size = MAXALLOC;
+ blockPtr = malloc(size);
+ if (blockPtr == NULL) {
+ return 0;
+ }
+ }
+
+ /*
+ * Split the larger block into smaller blocks for this bucket.
+ */
+
+ n = size / bucketInfo[bucket].blockSize;
+ cachePtr->buckets[bucket].numFree = n;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ while (--n > 0) {
+ blockPtr->nextBlock = (Block *)
+ ((char *) blockPtr + bucketInfo[bucket].blockSize);
+ blockPtr = blockPtr->nextBlock;
+ }
+ blockPtr->nextBlock = NULL;
+ }
+ return 1;
+}
+
+
+/*
+ * Local Variables:
+ * mode: c
+ * c-basic-offset: 4
+ * fill-column: 78
+ * End:
+ */
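
As a sanity check on the size table near the top of tclAllocZippy.c, the
stand-alone program below reproduces the derived constants and the
bucket-search loop that TclpAlloc uses. It assumes a typical LP64 platform,
where OFFSET works out to 16; on such a build MINALLOC is 32, NBUCKETS is 10
and MAXALLOC is 16384, matching the 64b column of the table.

    /* Worked example of the zippy bucket geometry; assumes OFFSET == 16. */
    #include <stdio.h>

    #define ALLOCALIGN 16
    #define ALIGN(x)   (((x) + ALLOCALIGN - 1) & ~(ALLOCALIGN - 1))
    #define OFFSET     16                           /* ALIGN(sizeof(Block)) */
    #define MINALLOC   ALIGN(OFFSET + 8)            /* 32 */
    #define NBUCKETS   (11 - (MINALLOC >> 5))       /* 10 */
    #define MAXALLOC   (MINALLOC << (NBUCKETS - 1)) /* 16384 */

    int
    main(void)
    {
        unsigned int reqSize = 100;            /* example request */
        unsigned int size = reqSize + OFFSET;  /* room for the Block header */
        int bucket = 0;

        /* Same search TclpAlloc performs: smallest bucket that fits. */
        while ((unsigned int) (MINALLOC << bucket) < size) {
            bucket++;
        }
        printf("MINALLOC=%d NBUCKETS=%d MAXALLOC=%d\n",
                MINALLOC, NBUCKETS, MAXALLOC);
        printf("a %u-byte request lands in bucket %d (%d-byte blocks)\n",
                reqSize, bucket, MINALLOC << bucket);
        return 0;
    }

So a 100-byte request becomes a 116-byte block after the header and is served
from bucket 2, the 128-byte bucket; anything over MAXALLOC - OFFSET bytes
bypasses the buckets and goes straight to malloc.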
diff --git a/generic/tclBasic.c b/generic/tclBasic.c
index 974c089..75959b9 100644
--- a/generic/tclBasic.c
+++ b/generic/tclBasic.c
@@ -728,7 +728,6 @@ Tcl_CreateInterp(void)
* cache was already initialised by the call to alloc the interp struct.
*/
- iPtr->pendingObjDataPtr = NULL;
iPtr->asyncReadyPtr = TclGetAsyncReadyPtr();
iPtr->deferredCallbacks = NULL;
diff --git a/generic/tclExecute.c b/generic/tclExecute.c
index 059bc79..58a3826 100644
--- a/generic/tclExecute.c
+++ b/generic/tclExecute.c
@@ -2384,7 +2384,6 @@ TEBCresume(
depth = tosPtr - initTosPtr;
TD = ckrealloc(TD, size);
- size = TclAllocMaximize(TD);
if (size == UINT_MAX) {
TD->capacity = reqWords;
} else {
diff --git a/generic/tclInt.h b/generic/tclInt.h
index 16c0fab..26b9b57 100644
--- a/generic/tclInt.h
+++ b/generic/tclInt.h
@@ -2082,9 +2082,6 @@ typedef struct Interp {
* They are used by the macros defined below.
*/
- void *pendingObjDataPtr; /* Pointer to the Cache and PendingObjData
- * structs for this interp's thread; see
- * tclObj.c and tclThreadAlloc.c */
int *asyncReadyPtr; /* Pointer to the asyncReady indicator for
* this interp's thread; see tclAsync.c */
/*
@@ -3975,67 +3972,14 @@ MODULE_SCOPE void TclpFreeAllocMutex(Tcl_Mutex *mutex);
MODULE_SCOPE Tcl_Mutex *TclpNewAllocMutex(void);
#endif
-/*
- * List of valid allocators. Have to respect the following convention:
- * - allocators that shunt TclpAlloc to malloc are below aNONE
- * - allocators that use zippy are above aNONE
- */
-
-#define aNATIVE 0
-#define aPURIFY 1
-#define aNONE 2
-#define aZIPPY 3
-#define aMULTI 4
-
-#if defined(TCL_ALLOCATOR) && (TCL_ALLOCATOR == aNONE)
-#undef TCL_ALLOCATOR
-#endif
+MODULE_SCOPE char * TclpAlloc(unsigned int size);
+MODULE_SCOPE char * TclpRealloc(char * ptr, unsigned int size);
+MODULE_SCOPE void TclpFree(char * ptr);
-#if defined(TCL_ALLOCATOR) && ((TCL_ALLOCATOR < 0) || (TCL_ALLOCATOR > aMULTI))
-#undef TCL_ALLOCATOR
-#endif
-
-#ifdef PURIFY
-# undef TCL_ALLOCATOR
-# define TCL_ALLOCATOR aPURIFY
-#endif
-
-#if !defined(TCL_ALLOCATOR)
-# if defined(USE_THREAD_ALLOC) || defined(USE_TCLALLOC)
-# define TCL_ALLOCATOR aZIPPY
-# else
-# define TCL_ALLOCATOR aNATIVE
-# endif
-#endif
-
-#define USE_ZIPPY ((TCL_ALLOCATOR != aNATIVE) && (TCL_ALLOCATOR != aPURIFY))
-#define USE_OBJQ (TCL_ALLOCATOR != aPURIFY)
-
-#if !USE_ZIPPY /* native or purify */
-# define TclpAlloc(size) malloc(size)
-# define TclpRealloc(ptr, size) realloc((ptr),(size))
-# define TclpFree(size) free(size)
-# define TclAllocMaximize(ptr) UINT_MAX
-#else
- MODULE_SCOPE char * TclpAlloc(unsigned int size);
- MODULE_SCOPE char * TclpRealloc(char * ptr, unsigned int size);
- MODULE_SCOPE void TclpFree(char * ptr);
- MODULE_SCOPE unsigned int TclAllocMaximize(void *ptr);
-#endif
-
-#if !USE_OBJQ
-# define TclSmallAlloc() ckalloc(sizeof(Tcl_Obj))
-# define TclSmallFree(ptr) ckfree(ptr)
-# define TclInitAlloc()
-# define TclFinalizeAlloc()
-# define TclFreeAllocCache(ptr)
-#else
-#define ALLOC_NOBJHIGH 1200
- MODULE_SCOPE void * TclSmallAlloc();
- MODULE_SCOPE void TclSmallFree(void *ptr);
- MODULE_SCOPE void TclInitAlloc(void);
- MODULE_SCOPE void TclFinalizeAlloc(void);
-#endif
+MODULE_SCOPE void * TclSmallAlloc(void);
+MODULE_SCOPE void TclSmallFree(void *ptr);
+MODULE_SCOPE void TclInitAlloc(void);
+MODULE_SCOPE void TclFinalizeAlloc(void);
#define TclCkSmallAlloc(nbytes, memPtr) \
do { \
@@ -4047,7 +3991,7 @@ MODULE_SCOPE Tcl_Mutex *TclpNewAllocMutex(void);
* Support for Clang Static Analyzer <http://clang-analyzer.llvm.org>
*/
-#if (TCL_ALLOCATOR == aPURIFY) && defined(__clang__)
+#if defined(PURIFY) && defined(__clang__)
#if __has_feature(attribute_analyzer_noreturn) && \
!defined(Tcl_Panic) && defined(Tcl_Panic_TCL_DECLARED)
void Tcl_Panic(const char *, ...) __attribute__((analyzer_noreturn));
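
With the compile-time allocator maze gone, the core sees one unconditional
interface. A minimal usage sketch for the small-object pair (illustrative
only; the initialization shown is the usual shape of fresh-object setup, not
code from this patch):

    Tcl_Obj *objPtr = (Tcl_Obj *) TclSmallAlloc();

    objPtr->refCount = 0;           /* fresh objects start unshared */
    objPtr->typePtr = NULL;         /* no internal rep yet */
    /* ... set up the string rep and use the object ... */
    TclSmallFree(objPtr);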
diff --git a/generic/tclListObj.c b/generic/tclListObj.c
index 41aea32..07d2644 100644
--- a/generic/tclListObj.c
+++ b/generic/tclListObj.c
@@ -916,14 +916,6 @@ Tcl_ListObjReplace(
isShared = (listRepPtr->refCount > 1);
numRequired = numElems - count + objc;
- if (numRequired > listRepPtr->maxElemCount){
- unsigned int allocSize = TclAllocMaximize(listRepPtr);
- if (allocSize != UINT_MAX) {
- listRepPtr->maxElemCount = Size2Elems(allocSize);
- }
- }
-
-
for (i = 0; i < objc; i++) {
Tcl_IncrRefCount(objv[i]);
}
diff --git a/generic/tclObj.c b/generic/tclObj.c
index 17dea83..5bf20b0 100644
--- a/generic/tclObj.c
+++ b/generic/tclObj.c
@@ -26,7 +26,7 @@ static Tcl_HashTable typeTable;
static int typeTableInitialized = 0; /* 0 means not yet initialized. */
TCL_DECLARE_MUTEX(tableMutex)
-#if defined(TCL_THREADS)
+#if (defined(TCL_THREADS) && TCL_MEM_DEBUG)
static Tcl_Mutex tclObjMutex;
#endif
diff --git a/unix/Makefile.in b/unix/Makefile.in
index 6734df9..fd72cd6 100644
--- a/unix/Makefile.in
+++ b/unix/Makefile.in
@@ -291,6 +291,7 @@ XTTEST_OBJS = xtTestInit.o tclTest.o tclTestObj.o tclTestProcBodyObj.o \
tclThreadTest.o tclUnixTest.o tclXtNotify.o tclXtTest.o
GENERIC_OBJS = regcomp.o regexec.o regfree.o regerror.o tclAlloc.o \
+ tclAllocZippy.o \
tclAsync.o tclBasic.o tclBinary.o tclCkalloc.o tclClock.o \
tclCmdAH.o tclCmdIL.o tclCmdMZ.o tclCompCmds.o tclCompCmdsSZ.o \
tclCompExpr.o tclCompile.o tclConfig.o tclDate.o tclDictObj.o \
@@ -385,6 +386,8 @@ GENERIC_SRCS = \
$(GENERIC_DIR)/regfree.c \
$(GENERIC_DIR)/regerror.c \
$(GENERIC_DIR)/tclAlloc.c \
+ $(GENERIC_DIR)/tclAllocNative.c \
+ $(GENERIC_DIR)/tclAllocZippy.c \
$(GENERIC_DIR)/tclAssembly.c \
$(GENERIC_DIR)/tclAsync.c \
$(GENERIC_DIR)/tclBasic.c \
@@ -1022,6 +1025,12 @@ tclAppInit.o: $(UNIX_DIR)/tclAppInit.c
tclAlloc.o: $(GENERIC_DIR)/tclAlloc.c
$(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclAlloc.c
+tclAllocNative.o: $(GENERIC_DIR)/tclAllocNative.c
+ $(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclAllocNative.c
+
+tclAllocZippy.o: $(GENERIC_DIR)/tclAllocZippy.c
+ $(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclAllocZippy.c
+
tclAssembly.o: $(GENERIC_DIR)/tclAssembly.c $(COMPILEHDR)
$(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclAssembly.c