Diffstat (limited to 'generic/tclThreadAlloc.c')
 generic/tclThreadAlloc.c (mode changed from -rwxr-xr-x to -rw-r--r--) | 573
 1 file changed, 331 insertions(+), 242 deletions(-)
diff --git a/generic/tclThreadAlloc.c b/generic/tclThreadAlloc.c
old mode 100755
new mode 100644
index 553bd4f..ddf888a
--- a/generic/tclThreadAlloc.c
+++ b/generic/tclThreadAlloc.c
@@ -2,24 +2,22 @@
* tclThreadAlloc.c --
*
 * This is a very fast storage allocator for use with threads (designed
- * avoid lock contention). The basic strategy is to allocate memory in
+ * to avoid lock contention). The basic strategy is to allocate memory in
* fixed size blocks from block caches.
*
* The Initial Developer of the Original Code is America Online, Inc.
* Portions created by AOL are Copyright (C) 1999 America Online, Inc.
*
- * See the file "license.terms" for information on usage and redistribution
- * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
- *
- * RCS: @(#) $Id: tclThreadAlloc.c,v 1.14 2004/07/21 01:45:44 hobbs Exp $
+ * See the file "license.terms" for information on usage and redistribution of
+ * this file, and for a DISCLAIMER OF ALL WARRANTIES.
*/
#include "tclInt.h"
#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
-
+
/*
- * If range checking is enabled, an additional byte will be allocated
- * to store the magic number at the end of the requested memory.
+ * If range checking is enabled, an additional byte will be allocated to store
+ * the magic number at the end of the requested memory.
*/
#ifndef RCHECK
@@ -31,123 +29,115 @@
#endif
/*
- * The following define the number of Tcl_Obj's to allocate/move
- * at a time and the high water mark to prune a per-thread cache.
- * On a 32 bit system, sizeof(Tcl_Obj) = 24 so 800 * 24 = ~16k.
+ * The following define the number of Tcl_Obj's to allocate/move at a time and
+ * the high water mark to prune a per-thread cache. On a 32 bit system,
+ * sizeof(Tcl_Obj) = 24 so 800 * 24 = ~19k.
*/
-#define NOBJALLOC 800
-#define NOBJHIGH 1200
+#define NOBJALLOC 800
+
+/* Actual definition moved to tclInt.h */
+#define NOBJHIGH ALLOC_NOBJHIGH
/*
- * The following defines the number of buckets in the bucket
- * cache and those block sizes from (1<<4) to (1<<(3+NBUCKETS))
+ * The following union stores accounting information for each block including
+ * two small magic numbers and a bucket number when in use or a next pointer
+ * when free. The original requested size (not including the Block overhead)
+ * is also maintained.
*/
-#define NBUCKETS 11
-#define MAXALLOC 16284
+typedef union Block {
+ struct {
+ union {
+ union Block *next; /* Next in free list. */
+ struct {
+ unsigned char magic1; /* First magic number. */
+ unsigned char bucket; /* Bucket block allocated from. */
+ unsigned char unused; /* Padding. */
+ unsigned char magic2; /* Second magic number. */
+ } s;
+ } u;
+ size_t reqSize; /* Requested allocation size. */
+ } b;
+ unsigned char padding[TCL_ALLOCALIGN];
+} Block;
+#define nextBlock b.u.next
+#define sourceBucket b.u.s.bucket
+#define magicNum1 b.u.s.magic1
+#define magicNum2 b.u.s.magic2
+#define MAGIC 0xEF
+#define blockReqSize b.reqSize
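/*
 * Illustrative sketch (not part of the commit): the link field and the
 * magic/bucket bytes overlay the same storage, so a Block is either threaded
 * on a free list or stamped as in-use, never both. Names follow the accessor
 * macros defined above; the bucket and size values are made up.
 */
static void
ExampleBlockStates(Block *blockPtr)
{
    blockPtr->nextBlock = NULL;     /* free: the list link is the live member */

    blockPtr->magicNum1 = MAGIC;    /* in use: the magic bytes and the bucket */
    blockPtr->magicNum2 = MAGIC;    /* index reuse the storage of the link    */
    blockPtr->sourceBucket = 3;     /* bucket to return the block to          */
    blockPtr->blockReqSize = 100;   /* size the caller actually requested     */
}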
/*
- * The following union stores accounting information for
- * each block including two small magic numbers and
- * a bucket number when in use or a next pointer when
- * free. The original requested size (not including
- * the Block overhead) is also maintained.
+ * The following defines the minimum and maximum block sizes and the number
+ * of buckets in the bucket cache.
*/
-typedef struct Block {
- union {
- struct Block *next; /* Next in free list. */
- struct {
- unsigned char magic1; /* First magic number. */
- unsigned char bucket; /* Bucket block allocated from. */
- unsigned char unused; /* Padding. */
- unsigned char magic2; /* Second magic number. */
- } s;
- } u;
- size_t reqSize; /* Requested allocation size. */
-} Block;
-#define nextBlock u.next
-#define sourceBucket u.s.bucket
-#define magicNum1 u.s.magic1
-#define magicNum2 u.s.magic2
-#define MAGIC 0xEF
+#define MINALLOC ((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
+#define NBUCKETS (11 - (MINALLOC >> 5))
+#define MAXALLOC (MINALLOC << (NBUCKETS - 1))
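/*
 * Worked example under stated assumptions (these values are not asserted
 * anywhere in the code): on a 64-bit build where TCL_ALLOCALIGN is 16 and
 * sizeof(Block) is 16,
 *
 *     MINALLOC = (16 + 8 + 15) & ~15 = 32
 *     NBUCKETS = 11 - (32 >> 5)      = 10
 *     MAXALLOC = 32 << 9             = 16384
 *
 * while a 32-bit build with TCL_ALLOCALIGN 8 and sizeof(Block) 8 gives
 * MINALLOC 16, NBUCKETS 11 and the same MAXALLOC of 16384. Bucket sizes
 * double from MINALLOC up to that common 16K ceiling either way.
 */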
/*
- * The following structure defines a bucket of blocks with
- * various accounting and statistics information.
+ * The following structure defines a bucket of blocks with various accounting
+ * and statistics information.
*/
typedef struct Bucket {
- Block *firstPtr; /* First block available */
- int numFree; /* Number of blocks available */
+ Block *firstPtr; /* First block available */
+ long numFree; /* Number of blocks available */
/* All fields below for accounting only */
- int numRemoves; /* Number of removes from bucket */
- int numInserts; /* Number of inserts into bucket */
- int numWaits; /* Number of waits to acquire a lock */
- int numLocks; /* Number of locks acquired */
- int totalAssigned; /* Total space assigned to bucket */
+ long numRemoves; /* Number of removes from bucket */
+ long numInserts; /* Number of inserts into bucket */
+ long numWaits; /* Number of waits to acquire a lock */
+ long numLocks; /* Number of locks acquired */
+ long totalAssigned; /* Total space assigned to bucket */
} Bucket;
/*
- * The following structure defines a cache of buckets and objs, of
- * which there will be (at most) one per thread.
+ * The following structure defines a cache of buckets and objs, of which there
+ * will be (at most) one per thread. Any changes need to be reflected in the
+ * struct AllocCache defined in tclInt.h, possibly also in the initialisation
+ * code in Tcl_CreateInterp().
*/
typedef struct Cache {
- struct Cache *nextPtr; /* Linked list of cache entries */
- Tcl_ThreadId owner; /* Which thread's cache is this? */
- Tcl_Obj *firstObjPtr; /* List of free objects for thread */
- int numObjects; /* Number of objects for thread */
- int totalAssigned; /* Total space assigned to thread */
- Bucket buckets[NBUCKETS]; /* The buckets for this thread */
+ struct Cache *nextPtr; /* Linked list of cache entries */
+ Tcl_ThreadId owner; /* Which thread's cache is this? */
+ Tcl_Obj *firstObjPtr; /* List of free objects for thread */
+ int numObjects; /* Number of objects for thread */
+ int totalAssigned; /* Total space assigned to thread */
+ Bucket buckets[NBUCKETS]; /* The buckets for this thread */
} Cache;
/*
- * The following array specifies various per-bucket limits and locks.
- * The values are statically initialized to avoid calculating them
- * repeatedly.
+ * The following array specifies various per-bucket limits and locks. The
+ * values are statically initialized to avoid calculating them repeatedly.
*/
static struct {
- size_t blockSize; /* Bucket blocksize. */
- int maxBlocks; /* Max blocks before move to share. */
- int numMove; /* Num blocks to move to share. */
- Tcl_Mutex *lockPtr; /* Share bucket lock. */
-} bucketInfo[NBUCKETS] = {
- { 16, 1024, 512, NULL},
- { 32, 512, 256, NULL},
- { 64, 256, 128, NULL},
- { 128, 128, 64, NULL},
- { 256, 64, 32, NULL},
- { 512, 32, 16, NULL},
- { 1024, 16, 8, NULL},
- { 2048, 8, 4, NULL},
- { 4096, 4, 2, NULL},
- { 8192, 2, 1, NULL},
- {16284, 1, 1, NULL},
-};
+ size_t blockSize; /* Bucket blocksize. */
+ int maxBlocks; /* Max blocks before move to share. */
+ int numMove; /* Num blocks to move to share. */
+ Tcl_Mutex *lockPtr; /* Share bucket lock. */
+} bucketInfo[NBUCKETS];
/*
* Static functions defined in this file.
*/
-static void LockBucket _ANSI_ARGS_((Cache *cachePtr, int bucket));
-static void UnlockBucket _ANSI_ARGS_((Cache *cachePtr, int bucket));
-static void PutBlocks _ANSI_ARGS_((Cache *cachePtr, int bucket,
- int numMove));
-static int GetBlocks _ANSI_ARGS_((Cache *cachePtr, int bucket));
-static Block * Ptr2Block _ANSI_ARGS_((char *ptr));
-static char * Block2Ptr _ANSI_ARGS_((Block *blockPtr, int bucket,
- unsigned int reqSize));
-static void MoveObjs _ANSI_ARGS_((Cache *fromPtr, Cache *toPtr,
- int numMove));
+static Cache * GetCache(void);
+static void LockBucket(Cache *cachePtr, int bucket);
+static void UnlockBucket(Cache *cachePtr, int bucket);
+static void PutBlocks(Cache *cachePtr, int bucket, int numMove);
+static int GetBlocks(Cache *cachePtr, int bucket);
+static Block * Ptr2Block(char *ptr);
+static char * Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize);
+static void MoveObjs(Cache *fromPtr, Cache *toPtr, int numMove);
/*
- * Local variables defined in this file and initialized at
- * startup.
+ * Local variables defined in this file and initialized at startup.
*/
static Tcl_Mutex *listLockPtr;
@@ -155,6 +145,26 @@ static Tcl_Mutex *objLockPtr;
static Cache sharedCache;
static Cache *sharedPtr = &sharedCache;
static Cache *firstCachePtr = &sharedCache;
+
+#if defined(HAVE_FAST_TSD)
+static __thread Cache *tcachePtr;
+
+# define GETCACHE(cachePtr) \
+ do { \
+ if (!tcachePtr) { \
+ tcachePtr = GetCache(); \
+ } \
+ (cachePtr) = tcachePtr; \
+ } while (0)
+#else
+# define GETCACHE(cachePtr) \
+ do { \
+ (cachePtr) = TclpGetAllocCache(); \
+ if ((cachePtr) == NULL) { \
+ (cachePtr) = GetCache(); \
+ } \
+ } while (0)
+#endif
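/*
 * Sketch of the intended usage (mirrors the callers below; the function name
 * here is hypothetical): each allocator entry point fetches its per-thread
 * cache once, creating it on first use, and then works on it without locks.
 */
static int
ExampleEntryPoint(void)
{
    Cache *cachePtr;

    GETCACHE(cachePtr);             /* fast __thread read, or TSD lookup + GetCache() */
    return cachePtr->numObjects;    /* per-thread state can be read without a lock */
}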
/*
*----------------------------------------------------------------------
@@ -183,7 +193,7 @@ GetCache(void)
if (listLockPtr == NULL) {
Tcl_Mutex *initLockPtr;
- int i;
+ unsigned int i;
initLockPtr = Tcl_GetAllocMutex();
Tcl_MutexLock(initLockPtr);
@@ -191,6 +201,10 @@ GetCache(void)
listLockPtr = TclpNewAllocMutex();
objLockPtr = TclpNewAllocMutex();
for (i = 0; i < NBUCKETS; ++i) {
+ bucketInfo[i].blockSize = MINALLOC << i;
+ bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
+ bucketInfo[i].numMove = i < NBUCKETS - 1 ?
+ 1 << (NBUCKETS - 2 - i) : 1;
bucketInfo[i].lockPtr = TclpNewAllocMutex();
}
}
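/*
 * Sketch (function name and output format are ours, not the commit's): once
 * the loop above has run, the bucket table can be inspected like this. With
 * the assumed 64-bit values (MINALLOC 32, NBUCKETS 10) the rows run from
 * 32/512/256 down to 16384/1/1.
 */
static void
ExampleDumpBuckets(void)
{
    unsigned int i;

    for (i = 0; i < NBUCKETS; ++i) {
	printf("bucket %u: blockSize=%lu maxBlocks=%d numMove=%d\n", i,
		(unsigned long) bucketInfo[i].blockSize,
		bucketInfo[i].maxBlocks, bucketInfo[i].numMove);
    }
}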
@@ -234,12 +248,12 @@ GetCache(void)
*/
void
-TclFreeAllocCache(arg)
- void *arg;
+TclFreeAllocCache(
+ void *arg)
{
Cache *cachePtr = arg;
Cache **nextPtrPtr;
- register int bucket;
+ register unsigned int bucket;
/*
* Flush blocks.
@@ -293,30 +307,40 @@ TclFreeAllocCache(arg)
*/
char *
-TclpAlloc(reqSize)
- unsigned int reqSize;
+TclpAlloc(
+ unsigned int reqSize)
{
- Cache *cachePtr = TclpGetAllocCache();
+ Cache *cachePtr;
Block *blockPtr;
register int bucket;
size_t size;
- if (cachePtr == NULL) {
- cachePtr = GetCache();
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
}
+#endif
+
+ GETCACHE(cachePtr);
/*
- * Increment the requested size to include room for
- * the Block structure. Call malloc() directly if the
- * required amount is greater than the largest block,
- * otherwise pop the smallest block large enough,
+ * Increment the requested size to include room for the Block structure.
+ * Call malloc() directly if the required amount is greater than the
+ * largest block, otherwise pop the smallest block large enough,
* allocating more blocks if necessary.
*/
blockPtr = NULL;
size = reqSize + sizeof(Block);
#if RCHECK
- ++size;
+ size++;
#endif
if (size > MAXALLOC) {
bucket = NBUCKETS;
@@ -327,13 +351,13 @@ TclpAlloc(reqSize)
} else {
bucket = 0;
while (bucketInfo[bucket].blockSize < size) {
- ++bucket;
+ bucket++;
}
if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
blockPtr = cachePtr->buckets[bucket].firstPtr;
cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
- --cachePtr->buckets[bucket].numFree;
- ++cachePtr->buckets[bucket].numRemoves;
+ cachePtr->buckets[bucket].numFree--;
+ cachePtr->buckets[bucket].numRemoves++;
cachePtr->buckets[bucket].totalAssigned += reqSize;
}
}
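/*
 * Worked example of the path above, under the same assumptions as before
 * (sizeof(Block) 16, MINALLOC 32, RCHECK enabled): a 100-byte request becomes
 * size = 100 + 16 + 1 = 117 and lands in the first bucket whose blockSize is
 * >= 117, i.e. bucket 2 (128 bytes). On a 32-bit build the preceding guard
 * rejects reqSize = 0xFFFFFFF8, which would otherwise wrap around to a tiny
 * size once sizeof(Block) and the RCHECK byte are added.
 */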
@@ -360,8 +384,8 @@ TclpAlloc(reqSize)
*/
void
-TclpFree(ptr)
- char *ptr;
+TclpFree(
+ char *ptr)
{
Cache *cachePtr;
Block *blockPtr;
@@ -371,30 +395,28 @@ TclpFree(ptr)
return;
}
- cachePtr = TclpGetAllocCache();
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
+ GETCACHE(cachePtr);
/*
- * Get the block back from the user pointer and call system free
- * directly for large blocks. Otherwise, push the block back on
- * the bucket and move blocks to the shared cache if there are now
- * too many free.
+ * Get the block back from the user pointer and call system free directly
+ * for large blocks. Otherwise, push the block back on the bucket and move
+ * blocks to the shared cache if there are now too many free.
*/
blockPtr = Ptr2Block(ptr);
bucket = blockPtr->sourceBucket;
if (bucket == NBUCKETS) {
- cachePtr->totalAssigned -= blockPtr->reqSize;
+ cachePtr->totalAssigned -= blockPtr->blockReqSize;
free(blockPtr);
return;
}
- cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize;
+
+ cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
cachePtr->buckets[bucket].firstPtr = blockPtr;
- ++cachePtr->buckets[bucket].numFree;
- ++cachePtr->buckets[bucket].numInserts;
+ cachePtr->buckets[bucket].numFree++;
+ cachePtr->buckets[bucket].numInserts++;
+
if (cachePtr != sharedPtr &&
cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove);
@@ -418,13 +440,13 @@ TclpFree(ptr)
*/
char *
-TclpRealloc(ptr, reqSize)
- char *ptr;
- unsigned int reqSize;
+TclpRealloc(
+ char *ptr,
+ unsigned int reqSize)
{
- Cache *cachePtr = TclpGetAllocCache();
+ Cache *cachePtr;
Block *blockPtr;
- void *new;
+ void *newPtr;
size_t size, min;
int bucket;
@@ -432,21 +454,31 @@ TclpRealloc(ptr, reqSize)
return TclpAlloc(reqSize);
}
- if (cachePtr == NULL) {
- cachePtr = GetCache();
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
}
+#endif
+
+ GETCACHE(cachePtr);
/*
- * If the block is not a system block and fits in place,
- * simply return the existing pointer. Otherwise, if the block
- * is a system block and the new size would also require a system
- * block, call realloc() directly.
+ * If the block is not a system block and fits in place, simply return the
+ * existing pointer. Otherwise, if the block is a system block and the new
+ * size would also require a system block, call realloc() directly.
*/
blockPtr = Ptr2Block(ptr);
size = reqSize + sizeof(Block);
#if RCHECK
- ++size;
+ size++;
#endif
bucket = blockPtr->sourceBucket;
if (bucket != NBUCKETS) {
@@ -456,12 +488,12 @@ TclpRealloc(ptr, reqSize)
min = 0;
}
if (size > min && size <= bucketInfo[bucket].blockSize) {
- cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize;
+ cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
cachePtr->buckets[bucket].totalAssigned += reqSize;
return Block2Ptr(blockPtr, bucket, reqSize);
}
} else if (size > MAXALLOC) {
- cachePtr->totalAssigned -= blockPtr->reqSize;
+ cachePtr->totalAssigned -= blockPtr->blockReqSize;
cachePtr->totalAssigned += reqSize;
blockPtr = realloc(blockPtr, size);
if (blockPtr == NULL) {
@@ -474,15 +506,15 @@ TclpRealloc(ptr, reqSize)
* Finally, perform an expensive malloc/copy/free.
*/
- new = TclpAlloc(reqSize);
- if (new != NULL) {
- if (reqSize > blockPtr->reqSize) {
- reqSize = blockPtr->reqSize;
+ newPtr = TclpAlloc(reqSize);
+ if (newPtr != NULL) {
+ if (reqSize > blockPtr->blockReqSize) {
+ reqSize = blockPtr->blockReqSize;
}
- memcpy(new, ptr, reqSize);
+ memcpy(newPtr, ptr, reqSize);
TclpFree(ptr);
}
- return new;
+ return newPtr;
}
/*
@@ -496,8 +528,12 @@ TclpRealloc(ptr, reqSize)
* Pointer to uninitialized Tcl_Obj.
*
* Side effects:
- * May move Tcl_Obj's from shared cached or allocate new Tcl_Obj's
- * if list is empty.
+ * May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's if
+ * list is empty.
+ *
+ * Note:
+ * If this code is updated, the changes need to be reflected in the macro
+ * TclAllocObjStorageEx() defined in tclInt.h
*
*----------------------------------------------------------------------
*/
@@ -505,21 +541,19 @@ TclpRealloc(ptr, reqSize)
Tcl_Obj *
TclThreadAllocObj(void)
{
- register Cache *cachePtr = TclpGetAllocCache();
- register int numMove;
+ register Cache *cachePtr;
register Tcl_Obj *objPtr;
- Tcl_Obj *newObjsPtr;
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
+ GETCACHE(cachePtr);
/*
- * Get this thread's obj list structure and move
- * or allocate new objs if necessary.
+ * Get this thread's obj list structure and move or allocate new objs if
+ * necessary.
*/
if (cachePtr->numObjects == 0) {
+ register int numMove;
+
Tcl_MutexLock(objLockPtr);
numMove = sharedPtr->numObjects;
if (numMove > 0) {
@@ -530,6 +564,8 @@ TclThreadAllocObj(void)
}
Tcl_MutexUnlock(objLockPtr);
if (cachePtr->numObjects == 0) {
+ Tcl_Obj *newObjsPtr;
+
cachePtr->numObjects = numMove = NOBJALLOC;
newObjsPtr = malloc(sizeof(Tcl_Obj) * numMove);
if (newObjsPtr == NULL) {
@@ -537,7 +573,7 @@ TclThreadAllocObj(void)
}
while (--numMove >= 0) {
objPtr = &newObjsPtr[numMove];
- objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+ objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
cachePtr->firstObjPtr = objPtr;
}
}
@@ -548,8 +584,8 @@ TclThreadAllocObj(void)
*/
objPtr = cachePtr->firstObjPtr;
- cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
- --cachePtr->numObjects;
+ cachePtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
+ cachePtr->numObjects--;
return objPtr;
}
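/*
 * Sketch of the free-list linkage used above (function name is ours): a free
 * Tcl_Obj's internal rep is dead storage, so its twoPtrValue.ptr1 slot doubles
 * as the "next free object" link of the per-thread list.
 */
static void
ExamplePushPop(Cache *cachePtr, Tcl_Obj *objPtr)
{
    /* Push: link the object onto the front of the thread's free list. */
    objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    cachePtr->numObjects++;

    /* Pop: unlink the head again, exactly as TclThreadAllocObj() does. */
    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
    cachePtr->numObjects--;
}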
@@ -564,33 +600,34 @@ TclThreadAllocObj(void)
* None.
*
* Side effects:
- * May move free Tcl_Obj's to shared list upon hitting high
- * water mark.
- * May move free Tcl_Obj's to the shared list upon hitting high water mark.
+ *
+ * Note:
+ * If this code is updated, the changes need to be reflected in the macro
+ * TclAllocObjStorageEx() defined in tclInt.h
*
*----------------------------------------------------------------------
*/
void
-TclThreadFreeObj(objPtr)
- Tcl_Obj *objPtr;
+TclThreadFreeObj(
+ Tcl_Obj *objPtr)
{
- Cache *cachePtr = TclpGetAllocCache();
+ Cache *cachePtr;
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
+ GETCACHE(cachePtr);
/*
* Get this thread's list and push on the free Tcl_Obj.
*/
- objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+ objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
cachePtr->firstObjPtr = objPtr;
- ++cachePtr->numObjects;
+ cachePtr->numObjects++;
/*
- * If the number of free objects has exceeded the high
- * water mark, move some blocks to the shared list.
+ * If the number of free objects has exceeded the high water mark, move
+ * some blocks to the shared list.
*/
if (cachePtr->numObjects > NOBJHIGH) {
@@ -617,12 +654,12 @@ TclThreadFreeObj(objPtr)
*/
void
-Tcl_GetMemoryInfo(dsPtr)
- Tcl_DString *dsPtr;
+Tcl_GetMemoryInfo(
+ Tcl_DString *dsPtr)
{
Cache *cachePtr;
char buf[200];
- int n;
+ unsigned int n;
Tcl_MutexLock(listLockPtr);
cachePtr = firstCachePtr;
@@ -635,8 +672,8 @@ Tcl_GetMemoryInfo(dsPtr)
Tcl_DStringAppendElement(dsPtr, buf);
}
for (n = 0; n < NBUCKETS; ++n) {
- sprintf(buf, "%d %d %d %d %d %d %d",
- (int) bucketInfo[n].blockSize,
+ sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
+ (unsigned long) bucketInfo[n].blockSize,
cachePtr->buckets[n].numFree,
cachePtr->buckets[n].numRemoves,
cachePtr->buckets[n].numInserts,
@@ -668,9 +705,10 @@ Tcl_GetMemoryInfo(dsPtr)
*/
static void
-MoveObjs(fromPtr, toPtr, numMove)
- Cache *fromPtr, *toPtr;
- int numMove;
+MoveObjs(
+ Cache *fromPtr,
+ Cache *toPtr,
+ int numMove)
{
register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
Tcl_Obj *fromFirstObjPtr = objPtr;
@@ -679,22 +717,21 @@ MoveObjs(fromPtr, toPtr, numMove)
fromPtr->numObjects -= numMove;
/*
- * Find the last object to be moved; set the next one
- * (the first one not to be moved) as the first object
- * in the 'from' cache.
+ * Find the last object to be moved; set the next one (the first one not
+ * to be moved) as the first object in the 'from' cache.
*/
while (--numMove) {
- objPtr = objPtr->internalRep.otherValuePtr;
+ objPtr = objPtr->internalRep.twoPtrValue.ptr1;
}
- fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
+ fromPtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
/*
- * Move all objects as a block - they are already linked to
- * each other, we just have to update the first and last.
+ * Move all objects as a block - they are already linked to each other, we
+ * just have to update the first and last.
*/
- objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
+ objPtr->internalRep.twoPtrValue.ptr1 = toPtr->firstObjPtr;
toPtr->firstObjPtr = fromFirstObjPtr;
}
@@ -715,16 +752,16 @@ MoveObjs(fromPtr, toPtr, numMove)
*/
static char *
-Block2Ptr(blockPtr, bucket, reqSize)
- Block *blockPtr;
- int bucket;
- unsigned int reqSize;
+Block2Ptr(
+ Block *blockPtr,
+ int bucket,
+ unsigned int reqSize)
{
register void *ptr;
blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
blockPtr->sourceBucket = bucket;
- blockPtr->reqSize = reqSize;
+ blockPtr->blockReqSize = reqSize;
ptr = ((void *) (blockPtr + 1));
#if RCHECK
((unsigned char *)(ptr))[reqSize] = MAGIC;
@@ -733,21 +770,21 @@ Block2Ptr(blockPtr, bucket, reqSize)
}
static Block *
-Ptr2Block(ptr)
- char *ptr;
+Ptr2Block(
+ char *ptr)
{
register Block *blockPtr;
blockPtr = (((Block *) ptr) - 1);
if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x\n",
+ Tcl_Panic("alloc: invalid block: %p: %x %x",
blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
}
#if RCHECK
- if (((unsigned char *) ptr)[blockPtr->reqSize] != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x %x\n",
+ if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) {
+ Tcl_Panic("alloc: invalid block: %p: %x %x %x",
blockPtr, blockPtr->magicNum1, blockPtr->magicNum2,
- ((unsigned char *) ptr)[blockPtr->reqSize]);
+ ((unsigned char *) ptr)[blockPtr->blockReqSize]);
}
#endif
return blockPtr;
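/*
 * Sketch of what the guard byte catches when RCHECK is enabled (hypothetical
 * overrun; the panic text comes from Ptr2Block() above):
 */
static void
ExampleOverrun(void)
{
    char *p = TclpAlloc(10);

    if (p != NULL) {
	p[10] = 'x';		/* one-byte overrun clobbers the trailing MAGIC */
	TclpFree(p);		/* Ptr2Block() panics: "alloc: invalid block" */
    }
}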
@@ -764,34 +801,26 @@ Ptr2Block(ptr)
* None.
*
* Side effects:
- * Lock activity and contention are monitored globally and on
- * a per-cache basis.
+ * Lock activity and contention are monitored globally and on a per-cache
+ * basis.
*
*----------------------------------------------------------------------
*/
static void
-LockBucket(cachePtr, bucket)
- Cache *cachePtr;
- int bucket;
+LockBucket(
+ Cache *cachePtr,
+ int bucket)
{
-#if 0
- if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
- Tcl_MutexLock(bucketInfo[bucket].lockPtr);
- ++cachePtr->buckets[bucket].numWaits;
- ++sharedPtr->buckets[bucket].numWaits;
- }
-#else
Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-#endif
- ++cachePtr->buckets[bucket].numLocks;
- ++sharedPtr->buckets[bucket].numLocks;
+ cachePtr->buckets[bucket].numLocks++;
+ sharedPtr->buckets[bucket].numLocks++;
}
static void
-UnlockBucket(cachePtr, bucket)
- Cache *cachePtr;
- int bucket;
+UnlockBucket(
+ Cache *cachePtr,
+ int bucket)
{
Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
}
@@ -813,16 +842,17 @@ UnlockBucket(cachePtr, bucket)
*/
static void
-PutBlocks(cachePtr, bucket, numMove)
- Cache *cachePtr;
- int bucket, numMove;
+PutBlocks(
+ Cache *cachePtr,
+ int bucket,
+ int numMove)
{
register Block *lastPtr, *firstPtr;
register int n = numMove;
/*
- * Before acquiring the lock, walk the block list to find
- * the last block to be moved.
+ * Before acquiring the lock, walk the block list to find the last block
+ * to be moved.
*/
firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
@@ -833,8 +863,8 @@ PutBlocks(cachePtr, bucket, numMove)
cachePtr->buckets[bucket].numFree -= numMove;
/*
- * Aquire the lock and place the list of blocks at the front
- * of the shared cache bucket.
+ * Acquire the lock and place the list of blocks at the front of the shared
+ * cache bucket.
*/
LockBucket(cachePtr, bucket);
@@ -861,19 +891,18 @@ PutBlocks(cachePtr, bucket, numMove)
*/
static int
-GetBlocks(cachePtr, bucket)
- Cache *cachePtr;
- int bucket;
+GetBlocks(
+ Cache *cachePtr,
+ int bucket)
{
register Block *blockPtr;
register int n;
- register size_t size;
/*
- * First, atttempt to move blocks from the shared cache. Note
- * the potentially dirty read of numFree before acquiring the lock
- * which is a slight performance enhancement. The value is
- * verified after the lock is actually acquired.
+ * First, attempt to move blocks from the shared cache. Note the
+ * potentially dirty read of numFree before acquiring the lock which is a
+ * slight performance enhancement. The value is verified after the lock is
+ * actually acquired.
*/
if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
@@ -881,8 +910,8 @@ GetBlocks(cachePtr, bucket)
if (sharedPtr->buckets[bucket].numFree > 0) {
/*
- * Either move the entire list or walk the list to find
- * the last block to move.
+ * Either move the entire list or walk the list to find the last
+ * block to move.
*/
n = bucketInfo[bucket].numMove;
@@ -909,10 +938,11 @@ GetBlocks(cachePtr, bucket)
}
if (cachePtr->buckets[bucket].numFree == 0) {
+ register size_t size;
/*
- * If no blocks could be moved from shared, first look for a
- * larger block in this cache to split up.
+ * If no blocks could be moved from shared, first look for a larger
+ * block in this cache to split up.
*/
blockPtr = NULL;
@@ -923,7 +953,7 @@ GetBlocks(cachePtr, bucket)
size = bucketInfo[n].blockSize;
blockPtr = cachePtr->buckets[n].firstPtr;
cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
- --cachePtr->buckets[n].numFree;
+ cachePtr->buckets[n].numFree--;
break;
}
}
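/*
 * Summary of the fallback ladder in GetBlocks() (the later carving code is
 * unchanged context not shown in this diff, so this is a sketch rather than
 * the literal source):
 *
 *   1. move up to bucketInfo[bucket].numMove free blocks from the shared
 *      cache, rechecking numFree after the lock is actually taken;
 *   2. failing that, take one free block from a larger bucket in this cache
 *      and carve it into blockSize-sized pieces for the requested bucket;
 *   3. failing that, malloc() a fresh MAXALLOC-sized chunk and carve that up.
 */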
@@ -962,8 +992,8 @@ GetBlocks(cachePtr, bucket)
*
* TclFinalizeThreadAlloc --
*
- * This procedure is used to destroy all private resources used in
- * this file.
+ * This procedure is used to destroy all private resources used in this
+ * file.
*
* Results:
* None.
@@ -975,12 +1005,13 @@ GetBlocks(cachePtr, bucket)
*/
void
-TclFinalizeThreadAlloc()
+TclFinalizeThreadAlloc(void)
{
- int i;
+ unsigned int i;
+
for (i = 0; i < NBUCKETS; ++i) {
- TclpFreeAllocMutex(bucketInfo[i].lockPtr);
- bucketInfo[i].lockPtr = NULL;
+ TclpFreeAllocMutex(bucketInfo[i].lockPtr);
+ bucketInfo[i].lockPtr = NULL;
}
TclpFreeAllocMutex(objLockPtr);
@@ -988,17 +1019,68 @@ TclFinalizeThreadAlloc()
TclpFreeAllocMutex(listLockPtr);
listLockPtr = NULL;
+
+ TclpFreeAllocCache(NULL);
}
-#else
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclFinalizeThreadAllocThread --
+ *
+ *	This procedure is used to destroy the thread-private resources used in
+ *	this file for a single thread. Called from TclpFinalizeThreadData when
+ *	a thread exits (Tcl_FinalizeThread).
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+void
+TclFinalizeThreadAllocThread(void)
+{
+ Cache *cachePtr = TclpGetAllocCache();
+ if (cachePtr != NULL) {
+ TclpFreeAllocCache(cachePtr);
+ }
+}
+
+#else /* !(TCL_THREADS && USE_THREAD_ALLOC) */
+/*
+ *----------------------------------------------------------------------
+ *
+ * Tcl_GetMemoryInfo --
+ *
+ * Return a list-of-lists of memory stats.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * List appended to given dstring.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+Tcl_GetMemoryInfo(
+ Tcl_DString *dsPtr)
+{
+ Tcl_Panic("Tcl_GetMemoryInfo called when threaded memory allocator not in use");
+}
+
/*
*----------------------------------------------------------------------
*
* TclFinalizeThreadAlloc --
*
- * This procedure is used to destroy all private resources used in
- * this file.
+ * This procedure is used to destroy all private resources used in this
+ * file.
*
* Results:
* None.
@@ -1010,9 +1092,16 @@ TclFinalizeThreadAlloc()
*/
void
-TclFinalizeThreadAlloc()
+TclFinalizeThreadAlloc(void)
{
- Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
+ Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use");
}
-
-#endif /* TCL_THREADS */
+#endif /* TCL_THREADS && USE_THREAD_ALLOC */
+
+/*
+ * Local Variables:
+ * mode: c
+ * c-basic-offset: 4
+ * fill-column: 78
+ * End:
+ */