Diffstat (limited to 'generic/tclThreadAlloc.c')
-rwxr-xr-x[-rw-r--r--] | generic/tclThreadAlloc.c | 52 |
1 files changed, 34 insertions, 18 deletions
diff --git a/generic/tclThreadAlloc.c b/generic/tclThreadAlloc.c
index bea85bd..c3acb2a 100644..100755
--- a/generic/tclThreadAlloc.c
+++ b/generic/tclThreadAlloc.c
@@ -35,7 +35,9 @@
  */
 
 #define NOBJALLOC 800
-#define NOBJHIGH 1200
+
+/* Actual definition moved to tclInt.h */
+#define NOBJHIGH ALLOC_NOBJHIGH
 
 /*
  * The following union stores accounting information for each block including
@@ -95,7 +97,9 @@ typedef struct Bucket {
 
 /*
  * The following structure defines a cache of buckets and objs, of which there
- * will be (at most) one per thread.
+ * will be (at most) one per thread. Any changes need to be reflected in the
+ * struct AllocCache defined in tclInt.h, possibly also in the initialisation
+ * code in Tcl_CreateInterp().
  */
 
 typedef struct Cache {
@@ -291,6 +295,7 @@ TclpAlloc(
     register int bucket;
     size_t size;
 
+#ifndef __LP64__
     if (sizeof(int) >= sizeof(size_t)) {
         /* An unsigned int overflow can also be a size_t overflow */
         const size_t zero = 0;
@@ -301,6 +306,7 @@ TclpAlloc(
             return NULL;
         }
     }
+#endif
 
     cachePtr = TclpGetAllocCache();
     if (cachePtr == NULL) {
@@ -317,7 +323,7 @@ TclpAlloc(
         blockPtr = NULL;
         size = reqSize + sizeof(Block);
 #if RCHECK
-        ++size;
+        size++;
 #endif
         if (size > MAXALLOC) {
             bucket = NBUCKETS;
@@ -328,13 +334,13 @@ TclpAlloc(
     } else {
         bucket = 0;
         while (bucketInfo[bucket].blockSize < size) {
-            ++bucket;
+            bucket++;
         }
         if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
             blockPtr = cachePtr->buckets[bucket].firstPtr;
             cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
-            --cachePtr->buckets[bucket].numFree;
-            ++cachePtr->buckets[bucket].numRemoves;
+            cachePtr->buckets[bucket].numFree--;
+            cachePtr->buckets[bucket].numRemoves++;
             cachePtr->buckets[bucket].totalAssigned += reqSize;
         }
     }
@@ -394,8 +400,8 @@ TclpFree(
     cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
     blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
     cachePtr->buckets[bucket].firstPtr = blockPtr;
-    ++cachePtr->buckets[bucket].numFree;
-    ++cachePtr->buckets[bucket].numInserts;
+    cachePtr->buckets[bucket].numFree++;
+    cachePtr->buckets[bucket].numInserts++;
 
     if (cachePtr != sharedPtr &&
             cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
@@ -434,6 +440,7 @@ TclpRealloc(
         return TclpAlloc(reqSize);
     }
 
+#ifndef __LP64__
     if (sizeof(int) >= sizeof(size_t)) {
         /* An unsigned int overflow can also be a size_t overflow */
         const size_t zero = 0;
@@ -444,6 +451,7 @@ TclpRealloc(
             return NULL;
         }
     }
+#endif
 
     cachePtr = TclpGetAllocCache();
     if (cachePtr == NULL) {
@@ -459,7 +467,7 @@ TclpRealloc(
     blockPtr = Ptr2Block(ptr);
     size = reqSize + sizeof(Block);
 #if RCHECK
-    ++size;
+    size++;
 #endif
     bucket = blockPtr->sourceBucket;
     if (bucket != NBUCKETS) {
@@ -512,6 +520,10 @@
  * May move Tcl_Obj's from shared cached or allocate new Tcl_Obj's if
  * list is empty.
  *
+ * Note:
+ * If this code is updated, the changes need to be reflected in the macro
+ * TclAllocObjStorageEx() defined in tclInt.h
+ *
  *----------------------------------------------------------------------
  */
 
@@ -564,7 +576,7 @@ TclThreadAllocObj(void)
 
     objPtr = cachePtr->firstObjPtr;
     cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
-    --cachePtr->numObjects;
+    cachePtr->numObjects--;
     return objPtr;
 }
 
@@ -581,6 +593,10 @@
  * Side effects:
  * May move free Tcl_Obj's to shared list upon hitting high water mark.
  *
+ * Note:
+ * If this code is updated, the changes need to be reflected in the macro
+ * TclAllocObjStorageEx() defined in tclInt.h
+ *
  *----------------------------------------------------------------------
  */
 
@@ -600,7 +616,7 @@ TclThreadFreeObj(
 
     objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
     cachePtr->firstObjPtr = objPtr;
-    ++cachePtr->numObjects;
+    cachePtr->numObjects++;
 
     /*
      * If the number of free objects has exceeded the high water mark, move
@@ -792,14 +808,14 @@ LockBucket(
 #if 0
     if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
         Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-        ++cachePtr->buckets[bucket].numWaits;
-        ++sharedPtr->buckets[bucket].numWaits;
+        cachePtr->buckets[bucket].numWaits++;
+        sharedPtr->buckets[bucket].numWaits++;
     }
 #else
     Tcl_MutexLock(bucketInfo[bucket].lockPtr);
 #endif
-    ++cachePtr->buckets[bucket].numLocks;
-    ++sharedPtr->buckets[bucket].numLocks;
+    cachePtr->buckets[bucket].numLocks++;
+    sharedPtr->buckets[bucket].numLocks++;
 }
 
 static void
@@ -938,7 +954,7 @@ GetBlocks(
             size = bucketInfo[n].blockSize;
             blockPtr = cachePtr->buckets[n].firstPtr;
             cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
-            --cachePtr->buckets[n].numFree;
+            cachePtr->buckets[n].numFree--;
             break;
         }
     }
@@ -995,8 +1011,8 @@ TclFinalizeThreadAlloc(void)
     unsigned int i;
 
     for (i = 0; i < NBUCKETS; ++i) {
-        TclpFreeAllocMutex(bucketInfo[i].lockPtr);
-        bucketInfo[i].lockPtr = NULL;
+	TclpFreeAllocMutex(bucketInfo[i].lockPtr);
+	bucketInfo[i].lockPtr = NULL;
     }
 
     TclpFreeAllocMutex(objLockPtr);
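
The two #ifndef __LP64__ hunks compile the request-size overflow check out of LP64 builds, where size_t is wider than unsigned int and adding the Block header to a requested size cannot wrap. Below is a minimal standalone sketch of that guard pattern; the Header type and checked_alloc_size() helper are hypothetical stand-ins for Tcl's Block and the inline checks in TclpAlloc()/TclpRealloc(), not code taken from the patch.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-allocation header, standing in for Tcl's Block union. */
typedef struct Header {
    size_t reqSize;
} Header;

/*
 * Return reqSize padded with the header size, or 0 if the sum would overflow
 * size_t.  The check can only ever fire where unsigned int is at least as
 * wide as size_t, which is why the patch compiles it out on LP64 builds.
 */
static size_t
checked_alloc_size(unsigned int reqSize)
{
#ifndef __LP64__
    if (sizeof(unsigned int) >= sizeof(size_t)) {
        /* An unsigned int overflow can also be a size_t overflow. */
        const size_t max = (size_t) -1;

        if ((size_t) reqSize > max - sizeof(Header)) {
            return 0;           /* Requested allocation exceeds memory. */
        }
    }
#endif
    return (size_t) reqSize + sizeof(Header);
}

int
main(void)
{
    printf("64 bytes -> %zu bytes total\n", checked_alloc_size(64));
    printf("UINT_MAX -> %zu bytes total\n", checked_alloc_size(UINT_MAX));
    return 0;
}

On an ILP32 build the second call returns 0; on an LP64 build the guard compiles away and the padded size is returned, since a 64-bit size_t can always hold UINT_MAX plus the header.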
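
The notes added to TclThreadAllocObj() and TclThreadFreeObj() warn that their fast paths are mirrored by the TclAllocObjStorageEx() macro in tclInt.h. Those fast paths pop from and push onto a per-thread LIFO free list and keep a count that is later compared against the NOBJHIGH (now ALLOC_NOBJHIGH) high-water mark. The sketch below shows the pattern in isolation, assuming hypothetical Node and ThreadCache types rather than Tcl's Tcl_Obj and Cache, and a plain malloc() fallback where the real code refills from the shared cache.

#include <stdlib.h>

/* Hypothetical stand-ins for Tcl_Obj and the per-thread Cache. */
typedef struct Node {
    struct Node *next;          /* Plays the role of internalRep.otherValuePtr. */
} Node;

typedef struct ThreadCache {
    Node *firstObjPtr;          /* Head of the per-thread free list. */
    long numObjects;            /* Number of free objects on the list. */
} ThreadCache;

/* Pop a node from the thread's free list, or fall back to the heap when empty. */
static Node *
CacheAllocNode(ThreadCache *cachePtr)
{
    Node *nodePtr = cachePtr->firstObjPtr;

    if (nodePtr == NULL) {
        return (Node *) malloc(sizeof(Node));   /* Real code refills from the shared cache. */
    }
    cachePtr->firstObjPtr = nodePtr->next;
    cachePtr->numObjects--;
    return nodePtr;
}

/* Push a node back onto the thread's free list. */
static void
CacheFreeNode(ThreadCache *cachePtr, Node *nodePtr)
{
    nodePtr->next = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = nodePtr;
    cachePtr->numObjects++;
    /* Real code moves objects to the shared list once the count exceeds the high-water mark. */
}

int
main(void)
{
    ThreadCache cache = { NULL, 0 };
    Node *n = CacheAllocNode(&cache);   /* Heap fallback: the list starts empty. */

    CacheFreeNode(&cache, n);           /* Now on the free list... */
    n = CacheAllocNode(&cache);         /* ...and reused without another malloc(). */
    free(n);
    return 0;
}

Because each thread owns its cache, neither fast path takes a lock; only the refill and overflow paths, which touch the shared cache, acquire a mutex.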