Diffstat (limited to 'generic/tclThreadAlloc.c')
 generic/tclThreadAlloc.c | 474 (mode changed from -rwxr-xr-x to -rw-r--r--)
 1 file changed, 331 insertions, 143 deletions
diff --git a/generic/tclThreadAlloc.c b/generic/tclThreadAlloc.c
index 9c77910..8077de4 100755..100644
--- a/generic/tclThreadAlloc.c
+++ b/generic/tclThreadAlloc.c
@@ -10,13 +10,11 @@
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
- *
- * RCS: @(#) $Id: tclThreadAlloc.c,v 1.20 2005/12/20 22:16:48 dkf Exp $
*/
#include "tclInt.h"
#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
-
+
/*
* If range checking is enabled, an additional byte will be allocated to store
* the magic number at the end of the requested memory.
@@ -37,15 +35,9 @@
*/
#define NOBJALLOC 800
-#define NOBJHIGH 1200
-
-/*
- * The following defines the number of buckets in the bucket cache and those
- * block sizes from (1<<4) to (1<<(3+NBUCKETS))
- */
-#define NBUCKETS 11
-#define MAXALLOC 16284
+/* Actual definition moved to tclInt.h */
+#define NOBJHIGH ALLOC_NOBJHIGH
/*
* The following union stores accounting information for each block including
@@ -54,23 +46,36 @@
* is also maintained.
*/
-typedef struct Block {
- union {
- struct Block *next; /* Next in free list. */
- struct {
- unsigned char magic1; /* First magic number. */
- unsigned char bucket; /* Bucket block allocated from. */
- unsigned char unused; /* Padding. */
- unsigned char magic2; /* Second magic number. */
- } s;
- } u;
- size_t reqSize; /* Requested allocation size. */
+typedef union Block {
+ struct {
+ union {
+ union Block *next; /* Next in free list. */
+ struct {
+ unsigned char magic1; /* First magic number. */
+ unsigned char bucket; /* Bucket block allocated from. */
+ unsigned char unused; /* Padding. */
+ unsigned char magic2; /* Second magic number. */
+ } s;
+ } u;
+ size_t reqSize; /* Requested allocation size. */
+ } b;
+ unsigned char padding[TCL_ALLOCALIGN];
} Block;
-#define nextBlock u.next
-#define sourceBucket u.s.bucket
-#define magicNum1 u.s.magic1
-#define magicNum2 u.s.magic2
+#define nextBlock b.u.next
+#define sourceBucket b.u.s.bucket
+#define magicNum1 b.u.s.magic1
+#define magicNum2 b.u.s.magic2
#define MAGIC 0xEF
+#define blockReqSize b.reqSize
+
+/*
+ * The following defines the minimum and maximum block sizes and the number
+ * of buckets in the bucket cache.
+ */
+
+#define MINALLOC ((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
+#define NBUCKETS (11 - (MINALLOC >> 5))
+#define MAXALLOC (MINALLOC << (NBUCKETS - 1))
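
To make the derived constants concrete, here is a minimal standalone sketch, assuming TCL_ALLOCALIGN is 16 and sizeof(Block) is 16 (a 64-bit build); the real values are fixed at compile time by tclInt.h and the Block union above, so SIZEOF_BLOCK below is only a stand-in.

#include <stdio.h>

/* Assumed stand-ins for illustration only. */
#define TCL_ALLOCALIGN 16
#define SIZEOF_BLOCK   16	/* stand-in for sizeof(Block) on LP64 */

#define MINALLOC ((SIZEOF_BLOCK + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
#define NBUCKETS (11 - (MINALLOC >> 5))
#define MAXALLOC (MINALLOC << (NBUCKETS - 1))

int
main(void)
{
    /* (16+8+15) & ~15 == 32;  11 - (32>>5) == 10;  32 << 9 == 16384 */
    printf("MINALLOC=%d NBUCKETS=%d MAXALLOC=%d\n",
	    MINALLOC, NBUCKETS, MAXALLOC);
    return 0;
}
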
/*
* The following structure defines a bucket of blocks with various accounting
@@ -79,6 +84,7 @@ typedef struct Block {
typedef struct Bucket {
Block *firstPtr; /* First block available */
+ Block *lastPtr; /* End of block list */
long numFree; /* Number of blocks available */
/* All fields below for accounting only */
@@ -92,7 +98,9 @@ typedef struct Bucket {
/*
* The following structure defines a cache of buckets and objs, of which there
- * will be (at most) one per thread.
+ * will be (at most) one per thread. Any changes need to be reflected in the
+ * struct AllocCache defined in tclInt.h, possibly also in the initialisation
+ * code in Tcl_CreateInterp().
*/
typedef struct Cache {
@@ -100,6 +108,7 @@ typedef struct Cache {
Tcl_ThreadId owner; /* Which thread's cache is this? */
Tcl_Obj *firstObjPtr; /* List of free objects for thread */
int numObjects; /* Number of objects for thread */
+ Tcl_Obj *lastPtr; /* Last object in this cache */
int totalAssigned; /* Total space assigned to thread */
Bucket buckets[NBUCKETS]; /* The buckets for this thread */
} Cache;
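
The new lastPtr field here (and the one added to struct Bucket above) is what lets a whole free list be handed from one cache to another in constant time instead of being walked under a lock. A minimal sketch of that idea, using a hypothetical Node/List pair rather than Tcl_Obj or Block:

#include <stdio.h>
#include <stddef.h>

typedef struct Node {
    struct Node *next;
} Node;

typedef struct List {
    Node *first;		/* Head of the free list. */
    Node *last;			/* Tail; kept so splicing is O(1). */
    int count;
} List;

/* Prepend everything on src to dst without walking either list. */
static void
SpliceAll(List *dst, List *src)
{
    if (src->first == NULL) {
	return;
    }
    src->last->next = dst->first;
    if (dst->count == 0) {
	dst->last = src->last;
    }
    dst->first = src->first;
    dst->count += src->count;
    src->first = src->last = NULL;
    src->count = 0;
}

int
main(void)
{
    Node a = {NULL}, b = {&a};
    List src = {&b, &a, 2}, dst = {NULL, NULL, 0};

    SpliceAll(&dst, &src);
    printf("%d\n", dst.count);	/* 2 */
    return 0;
}
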
@@ -114,19 +123,7 @@ static struct {
int maxBlocks; /* Max blocks before move to share. */
int numMove; /* Num blocks to move to share. */
Tcl_Mutex *lockPtr; /* Share bucket lock. */
-} bucketInfo[NBUCKETS] = {
- { 16, 1024, 512, NULL},
- { 32, 512, 256, NULL},
- { 64, 256, 128, NULL},
- { 128, 128, 64, NULL},
- { 256, 64, 32, NULL},
- { 512, 32, 16, NULL},
- { 1024, 16, 8, NULL},
- { 2048, 8, 4, NULL},
- { 4096, 4, 2, NULL},
- { 8192, 2, 1, NULL},
- {16284, 1, 1, NULL},
-};
+} bucketInfo[NBUCKETS];
/*
* Static functions defined in this file.
@@ -140,6 +137,7 @@ static int GetBlocks(Cache *cachePtr, int bucket);
static Block * Ptr2Block(char *ptr);
static char * Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize);
static void MoveObjs(Cache *fromPtr, Cache *toPtr, int numMove);
+static void PutObjs(Cache *fromPtr, int numMove);
/*
* Local variables defined in this file and initialized at startup.
@@ -150,6 +148,26 @@ static Tcl_Mutex *objLockPtr;
static Cache sharedCache;
static Cache *sharedPtr = &sharedCache;
static Cache *firstCachePtr = &sharedCache;
+
+#if defined(HAVE_FAST_TSD)
+static __thread Cache *tcachePtr;
+
+# define GETCACHE(cachePtr) \
+ do { \
+ if (!tcachePtr) { \
+ tcachePtr = GetCache(); \
+ } \
+ (cachePtr) = tcachePtr; \
+ } while (0)
+#else
+# define GETCACHE(cachePtr) \
+ do { \
+ (cachePtr) = TclpGetAllocCache(); \
+ if ((cachePtr) == NULL) { \
+ (cachePtr) = GetCache(); \
+ } \
+ } while (0)
+#endif
/*
*----------------------------------------------------------------------
@@ -178,16 +196,11 @@ GetCache(void)
if (listLockPtr == NULL) {
Tcl_Mutex *initLockPtr;
- int i;
initLockPtr = Tcl_GetAllocMutex();
Tcl_MutexLock(initLockPtr);
if (listLockPtr == NULL) {
- listLockPtr = TclpNewAllocMutex();
- objLockPtr = TclpNewAllocMutex();
- for (i = 0; i < NBUCKETS; ++i) {
- bucketInfo[i].lockPtr = TclpNewAllocMutex();
- }
+ TclInitThreadAlloc();
}
Tcl_MutexUnlock(initLockPtr);
}
@@ -198,10 +211,11 @@ GetCache(void)
cachePtr = TclpGetAllocCache();
if (cachePtr == NULL) {
- cachePtr = calloc(1, sizeof(Cache));
+ cachePtr = TclpSysAlloc(sizeof(Cache), 0);
if (cachePtr == NULL) {
Tcl_Panic("alloc: could not allocate new cache");
}
+ memset(cachePtr, 0, sizeof(Cache));
Tcl_MutexLock(listLockPtr);
cachePtr->nextPtr = firstCachePtr;
firstCachePtr = cachePtr;
@@ -234,7 +248,7 @@ TclFreeAllocCache(
{
Cache *cachePtr = arg;
Cache **nextPtrPtr;
- register int bucket;
+ register unsigned int bucket;
/*
* Flush blocks.
@@ -251,9 +265,7 @@ TclFreeAllocCache(
*/
if (cachePtr->numObjects > 0) {
- Tcl_MutexLock(objLockPtr);
- MoveObjs(cachePtr, sharedPtr, cachePtr->numObjects);
- Tcl_MutexUnlock(objLockPtr);
+ PutObjs(cachePtr, cachePtr->numObjects);
}
/*
@@ -268,7 +280,7 @@ TclFreeAllocCache(
*nextPtrPtr = cachePtr->nextPtr;
cachePtr->nextPtr = NULL;
Tcl_MutexUnlock(listLockPtr);
- free(cachePtr);
+ TclpSysFree(cachePtr);
}
/*
@@ -291,18 +303,29 @@ char *
TclpAlloc(
unsigned int reqSize)
{
- Cache *cachePtr = TclpGetAllocCache();
+ Cache *cachePtr;
Block *blockPtr;
register int bucket;
size_t size;
- if (cachePtr == NULL) {
- cachePtr = GetCache();
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
}
+#endif
+
+ GETCACHE(cachePtr);
/*
* Increment the requested size to include room for the Block structure.
- * Call malloc() directly if the required amount is greater than the
+ * Call TclpSysAlloc() directly if the required amount is greater than the
* largest block, otherwise pop the smallest block large enough,
* allocating more blocks if necessary.
*/
@@ -310,24 +333,24 @@ TclpAlloc(
blockPtr = NULL;
size = reqSize + sizeof(Block);
#if RCHECK
- ++size;
+ size++;
#endif
if (size > MAXALLOC) {
bucket = NBUCKETS;
- blockPtr = malloc(size);
+ blockPtr = TclpSysAlloc(size, 0);
if (blockPtr != NULL) {
cachePtr->totalAssigned += reqSize;
}
} else {
bucket = 0;
while (bucketInfo[bucket].blockSize < size) {
- ++bucket;
+ bucket++;
}
if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
blockPtr = cachePtr->buckets[bucket].firstPtr;
cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
- --cachePtr->buckets[bucket].numFree;
- ++cachePtr->buckets[bucket].numRemoves;
+ cachePtr->buckets[bucket].numFree--;
+ cachePtr->buckets[bucket].numRemoves++;
cachePtr->buckets[bucket].totalAssigned += reqSize;
}
}
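
The #ifndef __LP64__ guard added to TclpAlloc above protects the later "reqSize + sizeof(Block)" arithmetic on builds where unsigned int is as wide as size_t, so the sum could wrap. A standalone sketch of the same check, with assumed sizes (SIZEOF_BLOCK stands in for sizeof(Block); RCHECK is assumed to be 1):

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

#define SIZEOF_BLOCK 16u	/* assumed header size */
#define RCHECK	     1u		/* assumed guard byte */

/* Return 0 if adding the block header to reqSize would overflow size_t. */
static int
RequestFits(unsigned int reqSize)
{
    if (sizeof(int) >= sizeof(size_t)) {
	const size_t max = (size_t) -1;

	if ((size_t) reqSize > max - SIZEOF_BLOCK - RCHECK) {
	    return 0;		/* Refuse: the addition would wrap. */
	}
    }
    return 1;
}

int
main(void)
{
    printf("%d\n", RequestFits(100));		/* 1 */
    printf("%d\n", RequestFits(UINT_MAX));	/* 0 when size_t is 32 bits */
    return 0;
}
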
@@ -365,10 +388,7 @@ TclpFree(
return;
}
- cachePtr = TclpGetAllocCache();
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
+ GETCACHE(cachePtr);
/*
* Get the block back from the user pointer and call system free directly
@@ -379,16 +399,19 @@ TclpFree(
blockPtr = Ptr2Block(ptr);
bucket = blockPtr->sourceBucket;
if (bucket == NBUCKETS) {
- cachePtr->totalAssigned -= blockPtr->reqSize;
- free(blockPtr);
+ cachePtr->totalAssigned -= blockPtr->blockReqSize;
+ TclpSysFree(blockPtr);
return;
}
- cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize;
+ cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
cachePtr->buckets[bucket].firstPtr = blockPtr;
- ++cachePtr->buckets[bucket].numFree;
- ++cachePtr->buckets[bucket].numInserts;
+ if (cachePtr->buckets[bucket].numFree == 0) {
+ cachePtr->buckets[bucket].lastPtr = blockPtr;
+ }
+ cachePtr->buckets[bucket].numFree++;
+ cachePtr->buckets[bucket].numInserts++;
if (cachePtr != sharedPtr &&
cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
@@ -417,9 +440,9 @@ TclpRealloc(
char *ptr,
unsigned int reqSize)
{
- Cache *cachePtr = TclpGetAllocCache();
+ Cache *cachePtr;
Block *blockPtr;
- void *new;
+ void *newPtr;
size_t size, min;
int bucket;
@@ -427,20 +450,31 @@ TclpRealloc(
return TclpAlloc(reqSize);
}
- if (cachePtr == NULL) {
- cachePtr = GetCache();
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
}
+#endif
+
+ GETCACHE(cachePtr);
/*
* If the block is not a system block and fits in place, simply return the
* existing pointer. Otherwise, if the block is a system block and the new
- * size would also require a system block, call realloc() directly.
+ * size would also require a system block, call TclpSysRealloc() directly.
*/
blockPtr = Ptr2Block(ptr);
size = reqSize + sizeof(Block);
#if RCHECK
- ++size;
+ size++;
#endif
bucket = blockPtr->sourceBucket;
if (bucket != NBUCKETS) {
@@ -450,14 +484,14 @@ TclpRealloc(
min = 0;
}
if (size > min && size <= bucketInfo[bucket].blockSize) {
- cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize;
+ cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
cachePtr->buckets[bucket].totalAssigned += reqSize;
return Block2Ptr(blockPtr, bucket, reqSize);
}
} else if (size > MAXALLOC) {
- cachePtr->totalAssigned -= blockPtr->reqSize;
+ cachePtr->totalAssigned -= blockPtr->blockReqSize;
cachePtr->totalAssigned += reqSize;
- blockPtr = realloc(blockPtr, size);
+ blockPtr = TclpSysRealloc(blockPtr, size);
if (blockPtr == NULL) {
return NULL;
}
@@ -468,15 +502,15 @@ TclpRealloc(
* Finally, perform an expensive malloc/copy/free.
*/
- new = TclpAlloc(reqSize);
- if (new != NULL) {
- if (reqSize > blockPtr->reqSize) {
- reqSize = blockPtr->reqSize;
+ newPtr = TclpAlloc(reqSize);
+ if (newPtr != NULL) {
+ if (reqSize > blockPtr->blockReqSize) {
+ reqSize = blockPtr->blockReqSize;
}
- memcpy(new, ptr, reqSize);
+ memcpy(newPtr, ptr, reqSize);
TclpFree(ptr);
}
- return new;
+ return newPtr;
}
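
For the in-place path in TclpRealloc above, assume bucket sizes of 32, 64, 128, 256, ... (MINALLOC == 32): a block drawn from the 256-byte bucket is left where it is whenever the new total size (request plus header plus guard byte) still falls in (128, 256]. A sketch of that predicate with a hypothetical bucket table:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical bucket sizes, assuming MINALLOC == 32. */
static const size_t blockSize[] = { 32, 64, 128, 256 };

/* Mirror of the "fits in place" test: bigger than the next smaller bucket,
 * but no bigger than the bucket the block actually came from. */
static int
FitsInPlace(int bucket, size_t newTotalSize)
{
    size_t min = (bucket > 0) ? blockSize[bucket - 1] : 0;

    return newTotalSize > min && newTotalSize <= blockSize[bucket];
}

int
main(void)
{
    printf("%d\n", FitsInPlace(3, 220));	/* 1: stays in the 256 bucket */
    printf("%d\n", FitsInPlace(3, 100));	/* 0: gets re-bucketed */
    return 0;
}
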
/*
@@ -493,18 +527,20 @@ TclpRealloc(
* May move Tcl_Obj's from shared cached or allocate new Tcl_Obj's if
* list is empty.
*
+ * Note:
+ * If this code is updated, the changes need to be reflected in the macro
+ * TclAllocObjStorageEx() defined in tclInt.h
+ *
*----------------------------------------------------------------------
*/
Tcl_Obj *
TclThreadAllocObj(void)
{
- register Cache *cachePtr = TclpGetAllocCache();
+ register Cache *cachePtr;
register Tcl_Obj *objPtr;
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
+ GETCACHE(cachePtr);
/*
* Get this thread's obj list structure and move or allocate new objs if
@@ -527,15 +563,17 @@ TclThreadAllocObj(void)
Tcl_Obj *newObjsPtr;
cachePtr->numObjects = numMove = NOBJALLOC;
- newObjsPtr = malloc(sizeof(Tcl_Obj) * numMove);
+ newObjsPtr = TclpSysAlloc(sizeof(Tcl_Obj) * numMove, 0);
if (newObjsPtr == NULL) {
Tcl_Panic("alloc: could not allocate %d new objects", numMove);
}
+ cachePtr->lastPtr = newObjsPtr + numMove - 1;
+ objPtr = cachePtr->firstObjPtr; /* NULL */
while (--numMove >= 0) {
- objPtr = &newObjsPtr[numMove];
- objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
- cachePtr->firstObjPtr = objPtr;
+ newObjsPtr[numMove].internalRep.twoPtrValue.ptr1 = objPtr;
+ objPtr = newObjsPtr + numMove;
}
+ cachePtr->firstObjPtr = newObjsPtr;
}
}
@@ -544,8 +582,8 @@ TclThreadAllocObj(void)
*/
objPtr = cachePtr->firstObjPtr;
- cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
- --cachePtr->numObjects;
+ cachePtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
+ cachePtr->numObjects--;
return objPtr;
}
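
Free objects are threaded through their own internalRep.twoPtrValue.ptr1 field, so the per-thread free list needs no extra storage. A minimal sketch of the same intrusive free-list idea with a hypothetical object type (not Tcl_Obj):

#include <stdio.h>
#include <stddef.h>

typedef struct Obj {
    void *link;			/* Reused as the "next free" pointer while the
				 * object sits on the free list. */
    int value;
} Obj;

static Obj *freeList = NULL;

static void
PushFree(Obj *objPtr)
{
    objPtr->link = freeList;
    freeList = objPtr;
}

static Obj *
PopFree(void)
{
    Obj *objPtr = freeList;

    if (objPtr != NULL) {
	freeList = objPtr->link;
    }
    return objPtr;
}

int
main(void)
{
    Obj a, b;

    PushFree(&a);
    PushFree(&b);
    printf("%s\n", (PopFree() == &b && PopFree() == &a) ? "LIFO" : "?");
    return 0;
}
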
@@ -562,6 +600,10 @@ TclThreadAllocObj(void)
* Side effects:
* May move free Tcl_Obj's to shared list upon hitting high water mark.
*
+ * Note:
+ * If this code is updated, the changes need to be reflected in the macro
+ * TclFreeObjStorageEx() defined in tclInt.h
+ *
*----------------------------------------------------------------------
*/
@@ -569,19 +611,20 @@ void
TclThreadFreeObj(
Tcl_Obj *objPtr)
{
- Cache *cachePtr = TclpGetAllocCache();
+ Cache *cachePtr;
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
+ GETCACHE(cachePtr);
/*
* Get this thread's list and push on the free Tcl_Obj.
*/
- objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+ objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
cachePtr->firstObjPtr = objPtr;
- ++cachePtr->numObjects;
+ if (cachePtr->numObjects == 0) {
+ cachePtr->lastPtr = objPtr;
+ }
+ cachePtr->numObjects++;
/*
* If the number of free objects has exceeded the high water mark, move
@@ -589,9 +632,7 @@ TclThreadFreeObj(
*/
if (cachePtr->numObjects > NOBJHIGH) {
- Tcl_MutexLock(objLockPtr);
- MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
- Tcl_MutexUnlock(objLockPtr);
+ PutObjs(cachePtr, NOBJALLOC);
}
}
@@ -611,13 +652,13 @@ TclThreadFreeObj(
*----------------------------------------------------------------------
*/
-MODULE_SCOPE void
+void
Tcl_GetMemoryInfo(
Tcl_DString *dsPtr)
{
Cache *cachePtr;
char buf[200];
- int n;
+ unsigned int n;
Tcl_MutexLock(listLockPtr);
cachePtr = firstCachePtr;
@@ -680,22 +721,76 @@ MoveObjs(
*/
while (--numMove) {
- objPtr = objPtr->internalRep.otherValuePtr;
+ objPtr = objPtr->internalRep.twoPtrValue.ptr1;
}
- fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
+ fromPtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
/*
* Move all objects as a block - they are already linked to each other, we
* just have to update the first and last.
*/
- objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
+ toPtr->lastPtr = objPtr;
+ objPtr->internalRep.twoPtrValue.ptr1 = toPtr->firstObjPtr; /* NULL */
toPtr->firstObjPtr = fromFirstObjPtr;
}
/*
*----------------------------------------------------------------------
*
+ * PutObjs --
+ *
+ * Move Tcl_Obj's from thread cache to shared cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+PutObjs(
+ Cache *fromPtr,
+ int numMove)
+{
+ int keep = fromPtr->numObjects - numMove;
+ Tcl_Obj *firstPtr, *lastPtr = NULL;
+
+ fromPtr->numObjects = keep;
+ firstPtr = fromPtr->firstObjPtr;
+ if (keep == 0) {
+ fromPtr->firstObjPtr = NULL;
+ } else {
+ do {
+ lastPtr = firstPtr;
+ firstPtr = firstPtr->internalRep.twoPtrValue.ptr1;
+ } while (--keep > 0);
+ lastPtr->internalRep.twoPtrValue.ptr1 = NULL;
+ }
+
+ /*
+ * Move all objects as a block - they are already linked to each other, we
+ * just have to update the first and last.
+ */
+
+ Tcl_MutexLock(objLockPtr);
+ fromPtr->lastPtr->internalRep.twoPtrValue.ptr1 = sharedPtr->firstObjPtr;
+ sharedPtr->firstObjPtr = firstPtr;
+ if (sharedPtr->numObjects == 0) {
+ sharedPtr->lastPtr = fromPtr->lastPtr;
+ }
+ sharedPtr->numObjects += numMove;
+ Tcl_MutexUnlock(objLockPtr);
+
+ fromPtr->lastPtr = lastPtr;
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
* Block2Ptr, Ptr2Block --
*
* Convert between internal blocks and user pointers.
@@ -719,7 +814,7 @@ Block2Ptr(
blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
blockPtr->sourceBucket = bucket;
- blockPtr->reqSize = reqSize;
+ blockPtr->blockReqSize = reqSize;
ptr = ((void *) (blockPtr + 1));
#if RCHECK
((unsigned char *)(ptr))[reqSize] = MAGIC;
@@ -735,14 +830,14 @@ Ptr2Block(
blockPtr = (((Block *) ptr) - 1);
if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x\n",
+ Tcl_Panic("alloc: invalid block: %p: %x %x",
blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
}
#if RCHECK
- if (((unsigned char *) ptr)[blockPtr->reqSize] != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x %x\n",
+ if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) {
+ Tcl_Panic("alloc: invalid block: %p: %x %x %x",
blockPtr, blockPtr->magicNum1, blockPtr->magicNum2,
- ((unsigned char *) ptr)[blockPtr->reqSize]);
+ ((unsigned char *) ptr)[blockPtr->blockReqSize]);
}
#endif
return blockPtr;
@@ -770,17 +865,9 @@ LockBucket(
Cache *cachePtr,
int bucket)
{
-#if 0
- if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
- Tcl_MutexLock(bucketInfo[bucket].lockPtr);
- ++cachePtr->buckets[bucket].numWaits;
- ++sharedPtr->buckets[bucket].numWaits;
- }
-#else
Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-#endif
- ++cachePtr->buckets[bucket].numLocks;
- ++sharedPtr->buckets[bucket].numLocks;
+ cachePtr->buckets[bucket].numLocks++;
+ sharedPtr->buckets[bucket].numLocks++;
}
static void
@@ -813,20 +900,25 @@ PutBlocks(
int bucket,
int numMove)
{
- register Block *lastPtr, *firstPtr;
- register int n = numMove;
-
/*
- * Before acquiring the lock, walk the block list to find the last block
- * to be moved.
+ * We currently have numFree blocks and want to shed numMove of them, so
+ * compute how many blocks to keep.
*/
- firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
- while (--n > 0) {
- lastPtr = lastPtr->nextBlock;
+ int keep = cachePtr->buckets[bucket].numFree - numMove;
+ Block *lastPtr = NULL, *firstPtr;
+
+ cachePtr->buckets[bucket].numFree = keep;
+ firstPtr = cachePtr->buckets[bucket].firstPtr;
+ if (keep == 0) {
+ cachePtr->buckets[bucket].firstPtr = NULL;
+ } else {
+ do {
+ lastPtr = firstPtr;
+ firstPtr = firstPtr->nextBlock;
+ } while (--keep > 0);
+ lastPtr->nextBlock = NULL;
}
- cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
- cachePtr->buckets[bucket].numFree -= numMove;
/*
* Acquire the lock and place the list of blocks at the front of the shared
@@ -834,10 +926,17 @@ PutBlocks(
*/
LockBucket(cachePtr, bucket);
- lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].lastPtr->nextBlock
+ = sharedPtr->buckets[bucket].firstPtr;
sharedPtr->buckets[bucket].firstPtr = firstPtr;
+ if (sharedPtr->buckets[bucket].numFree == 0) {
+ sharedPtr->buckets[bucket].lastPtr
+ = cachePtr->buckets[bucket].lastPtr;
+ }
sharedPtr->buckets[bucket].numFree += numMove;
UnlockBucket(cachePtr, bucket);
+
+ cachePtr->buckets[bucket].lastPtr = lastPtr;
}
/*
@@ -884,6 +983,8 @@ GetBlocks(
if (n >= sharedPtr->buckets[bucket].numFree) {
cachePtr->buckets[bucket].firstPtr =
sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].lastPtr =
+ sharedPtr->buckets[bucket].lastPtr;
cachePtr->buckets[bucket].numFree =
sharedPtr->buckets[bucket].numFree;
sharedPtr->buckets[bucket].firstPtr = NULL;
@@ -897,6 +998,7 @@ GetBlocks(
blockPtr = blockPtr->nextBlock;
}
sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
+ cachePtr->buckets[bucket].lastPtr = blockPtr;
blockPtr->nextBlock = NULL;
}
}
@@ -919,7 +1021,7 @@ GetBlocks(
size = bucketInfo[n].blockSize;
blockPtr = cachePtr->buckets[n].firstPtr;
cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
- --cachePtr->buckets[n].numFree;
+ cachePtr->buckets[n].numFree--;
break;
}
}
@@ -930,7 +1032,7 @@ GetBlocks(
if (blockPtr == NULL) {
size = MAXALLOC;
- blockPtr = malloc(size);
+ blockPtr = TclpSysAlloc(size, 0);
if (blockPtr == NULL) {
return 0;
}
@@ -948,10 +1050,45 @@ GetBlocks(
((char *) blockPtr + bucketInfo[bucket].blockSize);
blockPtr = blockPtr->nextBlock;
}
+ cachePtr->buckets[bucket].lastPtr = blockPtr;
blockPtr->nextBlock = NULL;
}
return 1;
}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclInitThreadAlloc --
+ *
+ * Initializes the allocator cache-maintenance structures.
+ * It is done early, and is protected by the initialization in TclInitSubsystems().
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclInitThreadAlloc(void)
+{
+ unsigned int i;
+
+ listLockPtr = TclpNewAllocMutex();
+ objLockPtr = TclpNewAllocMutex();
+ for (i = 0; i < NBUCKETS; ++i) {
+ bucketInfo[i].blockSize = MINALLOC << i;
+ bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
+ bucketInfo[i].numMove = i < NBUCKETS - 1 ?
+ 1 << (NBUCKETS - 2 - i) : 1;
+ bucketInfo[i].lockPtr = TclpNewAllocMutex();
+ }
+ TclpInitAllocCache();
+}
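
The loop above replaces the static bucketInfo initializer deleted earlier in this diff. Assuming MINALLOC == 32 and NBUCKETS == 10 (a 64-bit build), the table it produces can be reproduced with this standalone sketch:

#include <stdio.h>

/* Assumed values; the real ones are derived from sizeof(Block) and
 * TCL_ALLOCALIGN at compile time. */
#define MINALLOC 32
#define NBUCKETS 10

int
main(void)
{
    unsigned int i;

    /* Prints 32/512/256, 64/256/128, ..., 8192/2/1, 16384/1/1, matching the
     * removed static table from the 32-byte bucket up, with 16384 in place
     * of the old 16284. */
    for (i = 0; i < NBUCKETS; ++i) {
	int blockSize = MINALLOC << i;
	int maxBlocks = 1 << (NBUCKETS - 1 - i);
	int numMove = (i < NBUCKETS - 1) ? 1 << (NBUCKETS - 2 - i) : 1;

	printf("%6d %5d %5d\n", blockSize, maxBlocks, numMove);
    }
    return 0;
}
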
/*
*----------------------------------------------------------------------
@@ -973,10 +1110,11 @@ GetBlocks(
void
TclFinalizeThreadAlloc(void)
{
- int i;
+ unsigned int i;
+
for (i = 0; i < NBUCKETS; ++i) {
- TclpFreeAllocMutex(bucketInfo[i].lockPtr);
- bucketInfo[i].lockPtr = NULL;
+ TclpFreeAllocMutex(bucketInfo[i].lockPtr);
+ bucketInfo[i].lockPtr = NULL;
}
TclpFreeAllocMutex(objLockPtr);
@@ -988,7 +1126,57 @@ TclFinalizeThreadAlloc(void)
TclpFreeAllocCache(NULL);
}
-#else
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclFinalizeThreadAllocThread --
+ *
+ * This procedure is used to destroy single thread private resources
+ * defined in this file. Called either during Tcl_FinalizeThread() or
+ * Tcl_Finalize().
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclFinalizeThreadAllocThread(void)
+{
+ Cache *cachePtr = TclpGetAllocCache();
+ if (cachePtr != NULL) {
+ TclpFreeAllocCache(cachePtr);
+ }
+}
+
+#else /* !(TCL_THREADS && USE_THREAD_ALLOC) */
+/*
+ *----------------------------------------------------------------------
+ *
+ * Tcl_GetMemoryInfo --
+ *
+ * Return a list-of-lists of memory stats.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * List appended to given dstring.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+Tcl_GetMemoryInfo(
+ Tcl_DString *dsPtr)
+{
+ Tcl_Panic("Tcl_GetMemoryInfo called when threaded memory allocator not in use");
+}
+
/*
*----------------------------------------------------------------------
*
@@ -1009,9 +1197,9 @@ TclFinalizeThreadAlloc(void)
void
TclFinalizeThreadAlloc(void)
{
- Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
+ Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use");
}
-#endif /* TCL_THREADS */
+#endif /* TCL_THREADS && USE_THREAD_ALLOC */
/*
* Local Variables: