Diffstat (limited to 'generic/tclThreadAlloc.c')
 generic/tclThreadAlloc.c (mode -rwxr-xr-x -> -rw-r--r--) | 215
1 file changed, 146 insertions, 69 deletions
diff --git a/generic/tclThreadAlloc.c b/generic/tclThreadAlloc.c
index 2cde913..ddf888a 100755..100644
--- a/generic/tclThreadAlloc.c
+++ b/generic/tclThreadAlloc.c
@@ -10,8 +10,6 @@
  *
  * See the file "license.terms" for information on usage and redistribution of
  * this file, and for a DISCLAIMER OF ALL WARRANTIES.
- *
- * RCS: @(#) $Id: tclThreadAlloc.c,v 1.22 2007/06/29 03:17:05 das Exp $
  */
 
 #include "tclInt.h"
@@ -37,17 +35,9 @@
  */
 
 #define NOBJALLOC	800
-#define NOBJHIGH	1200
-
-/*
- * Alignment for allocated memory.
- */
-#if defined(__APPLE__)
-#define ALLOCALIGN	16
-#else
-#define ALLOCALIGN	8
-#endif
+/* Actual definition moved to tclInt.h */
+#define NOBJHIGH	ALLOC_NOBJHIGH
 
 /*
  * The following union stores accounting information for each block including
@@ -69,7 +59,7 @@ typedef union Block {
 	} u;
 	size_t reqSize;			/* Requested allocation size. */
     } b;
-    unsigned char padding[ALLOCALIGN];
+    unsigned char padding[TCL_ALLOCALIGN];
 } Block;
 #define nextBlock	b.u.next
 #define sourceBucket	b.u.s.bucket
@@ -83,7 +73,7 @@ typedef union Block {
  * of buckets in the bucket cache.
  */
 
-#define MINALLOC	((sizeof(Block) + 8 + (ALLOCALIGN-1)) & ~(ALLOCALIGN-1))
+#define MINALLOC	((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
 #define NBUCKETS	(11 - (MINALLOC >> 5))
 #define MAXALLOC	(MINALLOC << (NBUCKETS - 1))
@@ -107,7 +97,9 @@ typedef struct Bucket {
 /*
  * The following structure defines a cache of buckets and objs, of which there
- * will be (at most) one per thread.
+ * will be (at most) one per thread. Any changes need to be reflected in the
+ * struct AllocCache defined in tclInt.h, possibly also in the initialisation
+ * code in Tcl_CreateInterp().
  */
 
 typedef struct Cache {
@@ -153,6 +145,26 @@ static Tcl_Mutex *objLockPtr;
 static Cache sharedCache;
 static Cache *sharedPtr = &sharedCache;
 static Cache *firstCachePtr = &sharedCache;
+
+#if defined(HAVE_FAST_TSD)
+static __thread Cache *tcachePtr;
+
+# define GETCACHE(cachePtr)			\
+    do {					\
+	if (!tcachePtr) {			\
+	    tcachePtr = GetCache();		\
+	}					\
+	(cachePtr) = tcachePtr;			\
+    } while (0)
+#else
+# define GETCACHE(cachePtr)			\
+    do {					\
+	(cachePtr) = TclpGetAllocCache();	\
+	if ((cachePtr) == NULL) {		\
+	    (cachePtr) = GetCache();		\
+	}					\
+    } while (0)
+#endif
 
 /*
  *----------------------------------------------------------------------
@@ -298,14 +310,25 @@ char *
 TclpAlloc(
     unsigned int reqSize)
 {
-    Cache *cachePtr = TclpGetAllocCache();
+    Cache *cachePtr;
     Block *blockPtr;
     register int bucket;
     size_t size;
 
-    if (cachePtr == NULL) {
-	cachePtr = GetCache();
+#ifndef __LP64__
+    if (sizeof(int) >= sizeof(size_t)) {
+	/* An unsigned int overflow can also be a size_t overflow */
+	const size_t zero = 0;
+	const size_t max = ~zero;
+
+	if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
+	    /* Requested allocation exceeds memory */
+	    return NULL;
+	}
     }
+#endif
+
+    GETCACHE(cachePtr);
 
     /*
      * Increment the requested size to include room for the Block structure.
@@ -317,7 +340,7 @@ TclpAlloc(
     blockPtr = NULL;
     size = reqSize + sizeof(Block);
 #if RCHECK
-    ++size;
+    size++;
 #endif
     if (size > MAXALLOC) {
 	bucket = NBUCKETS;
@@ -328,13 +351,13 @@ TclpAlloc(
     } else {
 	bucket = 0;
 	while (bucketInfo[bucket].blockSize < size) {
-	    ++bucket;
+	    bucket++;
 	}
 	if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
 	    blockPtr = cachePtr->buckets[bucket].firstPtr;
 	    cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
-	    --cachePtr->buckets[bucket].numFree;
-	    ++cachePtr->buckets[bucket].numRemoves;
+	    cachePtr->buckets[bucket].numFree--;
+	    cachePtr->buckets[bucket].numRemoves++;
 	    cachePtr->buckets[bucket].totalAssigned += reqSize;
 	}
     }
@@ -372,10 +395,7 @@ TclpFree(
 	return;
     }
 
-    cachePtr = TclpGetAllocCache();
-    if (cachePtr == NULL) {
-	cachePtr = GetCache();
-    }
+    GETCACHE(cachePtr);
 
     /*
      * Get the block back from the user pointer and call system free directly
@@ -394,8 +414,8 @@ TclpFree(
     cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
     blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
     cachePtr->buckets[bucket].firstPtr = blockPtr;
-    ++cachePtr->buckets[bucket].numFree;
-    ++cachePtr->buckets[bucket].numInserts;
+    cachePtr->buckets[bucket].numFree++;
+    cachePtr->buckets[bucket].numInserts++;
 
     if (cachePtr != sharedPtr &&
 	    cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
@@ -424,9 +444,9 @@ TclpRealloc(
     char *ptr,
     unsigned int reqSize)
 {
-    Cache *cachePtr = TclpGetAllocCache();
+    Cache *cachePtr;
     Block *blockPtr;
-    void *new;
+    void *newPtr;
     size_t size, min;
     int bucket;
 
@@ -434,9 +454,20 @@ TclpRealloc(
 	return TclpAlloc(reqSize);
     }
 
-    if (cachePtr == NULL) {
-	cachePtr = GetCache();
+#ifndef __LP64__
+    if (sizeof(int) >= sizeof(size_t)) {
+	/* An unsigned int overflow can also be a size_t overflow */
+	const size_t zero = 0;
+	const size_t max = ~zero;
+
+	if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
+	    /* Requested allocation exceeds memory */
+	    return NULL;
+	}
     }
+#endif
+
+    GETCACHE(cachePtr);
 
     /*
      * If the block is not a system block and fits in place, simply return the
@@ -447,7 +478,7 @@ TclpRealloc(
     blockPtr = Ptr2Block(ptr);
     size = reqSize + sizeof(Block);
 #if RCHECK
-    ++size;
+    size++;
 #endif
     bucket = blockPtr->sourceBucket;
     if (bucket != NBUCKETS) {
@@ -475,15 +506,15 @@ TclpRealloc(
      * Finally, perform an expensive malloc/copy/free.
      */
 
-    new = TclpAlloc(reqSize);
-    if (new != NULL) {
+    newPtr = TclpAlloc(reqSize);
+    if (newPtr != NULL) {
 	if (reqSize > blockPtr->blockReqSize) {
 	    reqSize = blockPtr->blockReqSize;
 	}
-	memcpy(new, ptr, reqSize);
+	memcpy(newPtr, ptr, reqSize);
 	TclpFree(ptr);
     }
-    return new;
+    return newPtr;
 }
 
 /*
@@ -500,18 +531,20 @@ TclpRealloc(
  *	May move Tcl_Obj's from shared cached or allocate new Tcl_Obj's if
  *	list is empty.
  *
+ * Note:
+ *	If this code is updated, the changes need to be reflected in the macro
+ *	TclAllocObjStorageEx() defined in tclInt.h
+ *
  *----------------------------------------------------------------------
  */
 
 Tcl_Obj *
 TclThreadAllocObj(void)
 {
-    register Cache *cachePtr = TclpGetAllocCache();
+    register Cache *cachePtr;
     register Tcl_Obj *objPtr;
 
-    if (cachePtr == NULL) {
-	cachePtr = GetCache();
-    }
+    GETCACHE(cachePtr);
 
     /*
      * Get this thread's obj list structure and move or allocate new objs if
@@ -540,7 +573,7 @@ TclThreadAllocObj(void)
 	    }
 	    while (--numMove >= 0) {
 		objPtr = &newObjsPtr[numMove];
-		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+		objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
 		cachePtr->firstObjPtr = objPtr;
 	    }
 	}
@@ -551,8 +584,8 @@
      */
 
     objPtr = cachePtr->firstObjPtr;
-    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
-    --cachePtr->numObjects;
+    cachePtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
+    cachePtr->numObjects--;
     return objPtr;
 }
 
@@ -569,6 +602,10 @@
  * Side effects:
  *	May move free Tcl_Obj's to shared list upon hitting high water mark.
  *
+ * Note:
+ *	If this code is updated, the changes need to be reflected in the macro
+ *	TclAllocObjStorageEx() defined in tclInt.h
+ *
  *----------------------------------------------------------------------
  */
@@ -576,19 +613,17 @@
 void
 TclThreadFreeObj(
     Tcl_Obj *objPtr)
 {
-    Cache *cachePtr = TclpGetAllocCache();
+    Cache *cachePtr;
 
-    if (cachePtr == NULL) {
-	cachePtr = GetCache();
-    }
+    GETCACHE(cachePtr);
 
     /*
      * Get this thread's list and push on the free Tcl_Obj.
      */
 
-    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+    objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
     cachePtr->firstObjPtr = objPtr;
-    ++cachePtr->numObjects;
+    cachePtr->numObjects++;
 
     /*
      * If the number of free objects has exceeded the high water mark, move
@@ -618,7 +653,7 @@ TclThreadFreeObj(
  *----------------------------------------------------------------------
  */
 
-MODULE_SCOPE void
+void
 Tcl_GetMemoryInfo(
     Tcl_DString *dsPtr)
 {
@@ -687,16 +722,16 @@ MoveObjs(
      */
 
     while (--numMove) {
-	objPtr = objPtr->internalRep.otherValuePtr;
+	objPtr = objPtr->internalRep.twoPtrValue.ptr1;
     }
-    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
+    fromPtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
 
     /*
      * Move all objects as a block - they are already linked to each other, we
     * just have to update the first and last.
      */
 
-    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
+    objPtr->internalRep.twoPtrValue.ptr1 = toPtr->firstObjPtr;
     toPtr->firstObjPtr = fromFirstObjPtr;
 }
 
@@ -777,17 +812,9 @@ LockBucket(
     Cache *cachePtr,
     int bucket)
 {
-#if 0
-    if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
-	Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-	++cachePtr->buckets[bucket].numWaits;
-	++sharedPtr->buckets[bucket].numWaits;
-    }
-#else
     Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-#endif
-    ++cachePtr->buckets[bucket].numLocks;
-    ++sharedPtr->buckets[bucket].numLocks;
+    cachePtr->buckets[bucket].numLocks++;
+    sharedPtr->buckets[bucket].numLocks++;
 }
 
 static void
@@ -926,7 +953,7 @@ GetBlocks(
 		size = bucketInfo[n].blockSize;
 		blockPtr = cachePtr->buckets[n].firstPtr;
 		cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
-		--cachePtr->buckets[n].numFree;
+		cachePtr->buckets[n].numFree--;
 		break;
 	    }
 	}
@@ -983,8 +1010,8 @@ TclFinalizeThreadAlloc(void)
     unsigned int i;
 
     for (i = 0; i < NBUCKETS; ++i) {
-        TclpFreeAllocMutex(bucketInfo[i].lockPtr);
-        bucketInfo[i].lockPtr = NULL;
+	TclpFreeAllocMutex(bucketInfo[i].lockPtr);
+	bucketInfo[i].lockPtr = NULL;
     }
 
     TclpFreeAllocMutex(objLockPtr);
@@ -996,7 +1023,57 @@ TclFinalizeThreadAlloc(void)
     TclpFreeAllocCache(NULL);
 }
 
-#else
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclFinalizeThreadAllocThread --
+ *
+ *	This procedure is used to destroy single thread private resources used
+ *	in this file.
+ * Called in TclpFinalizeThreadData when a thread exits (Tcl_FinalizeThread).
+ *
+ * Results:
+ *	None.
+ *
+ * Side effects:
+ *	None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclFinalizeThreadAllocThread(void)
+{
+    Cache *cachePtr = TclpGetAllocCache();
+    if (cachePtr != NULL) {
+	TclpFreeAllocCache(cachePtr);
+    }
+}
+
+#else /* !(TCL_THREADS && USE_THREAD_ALLOC) */
+/*
+ *----------------------------------------------------------------------
+ *
+ * Tcl_GetMemoryInfo --
+ *
+ *	Return a list-of-lists of memory stats.
+ *
+ * Results:
+ *	None.
+ *
+ * Side effects:
+ *	List appended to given dstring.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+Tcl_GetMemoryInfo(
+    Tcl_DString *dsPtr)
+{
+    Tcl_Panic("Tcl_GetMemoryInfo called when threaded memory allocator not in use");
+}
+
 /*
  *----------------------------------------------------------------------
  *
@@ -1019,7 +1096,7 @@ TclFinalizeThreadAlloc(void)
 {
     Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use");
 }
-#endif /* TCL_THREADS */
+#endif /* TCL_THREADS && USE_THREAD_ALLOC */
 
 /*
  * Local Variables:
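
The MINALLOC / NBUCKETS / MAXALLOC macros touched above size the bucket cache from the shared TCL_ALLOCALIGN alignment: the smallest bucket is the aligned size of a Block header plus 8 bytes, and each following bucket doubles the block size. The following is a minimal sketch that only prints the resulting geometry, assuming a 16-byte alignment and a 16-byte header; both values (DEMO_ALLOCALIGN, DEMO_BLOCK_SIZE) are hypothetical stand-ins, as the real numbers depend on the platform and on sizeof(Block).

    /*
     * Sketch of the bucket geometry behind MINALLOC/NBUCKETS/MAXALLOC,
     * with assumed 16-byte alignment and a 16-byte header.
     */
    #include <stdio.h>

    #define DEMO_ALLOCALIGN 16      /* stand-in for TCL_ALLOCALIGN */
    #define DEMO_BLOCK_SIZE 16      /* stand-in for sizeof(Block) */

    #define DEMO_MINALLOC \
        ((DEMO_BLOCK_SIZE + 8 + (DEMO_ALLOCALIGN - 1)) & ~(DEMO_ALLOCALIGN - 1))
    #define DEMO_NBUCKETS  (11 - (DEMO_MINALLOC >> 5))
    #define DEMO_MAXALLOC  (DEMO_MINALLOC << (DEMO_NBUCKETS - 1))

    int
    main(void)
    {
        int i, size = DEMO_MINALLOC;

        printf("MINALLOC=%d NBUCKETS=%d MAXALLOC=%d\n",
                DEMO_MINALLOC, DEMO_NBUCKETS, DEMO_MAXALLOC);
        for (i = 0; i < DEMO_NBUCKETS; i++, size <<= 1) {
            printf("bucket %2d: blockSize %d\n", i, size);
        }
        return 0;
    }

With these assumed values the sketch reports MINALLOC=32, NBUCKETS=10 and MAXALLOC=16384, i.e. ten power-of-two buckets from 32 to 16384 bytes.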
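The GETCACHE macro is the heart of this change: with HAVE_FAST_TSD the per-thread Cache pointer lives in a compiler-supported __thread variable and costs a single load, otherwise the macro falls back to the existing TclpGetAllocCache() lookup. Below is a minimal sketch of that pattern outside of Tcl, using pthread TSD for the slow path; DemoCache and the demo helper functions are hypothetical, only __thread and the pthread calls behave as shown.

    /*
     * Fast-path thread-local cache lookup vs. a pthread_getspecific()
     * fallback. Build with: cc -pthread demo.c [-DHAVE_FAST_TSD]
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    typedef struct DemoCache {
        long numObjects;                    /* stand-in for real bookkeeping */
    } DemoCache;

    #if defined(HAVE_FAST_TSD)
    static __thread DemoCache *tcachePtr;   /* GCC/Clang thread-local; never freed in this sketch */
    #else
    static pthread_key_t cacheKey;
    static pthread_once_t keyOnce = PTHREAD_ONCE_INIT;
    static void MakeKey(void) { pthread_key_create(&cacheKey, free); }
    #endif

    static DemoCache *
    CreateCache(void)                       /* slow path: allocate and register */
    {
        DemoCache *cachePtr = calloc(1, sizeof(DemoCache));
    #if !defined(HAVE_FAST_TSD)
        pthread_once(&keyOnce, MakeKey);
        pthread_setspecific(cacheKey, cachePtr);
    #endif
        return cachePtr;
    }

    static DemoCache *
    GetCacheForThread(void)
    {
    #if defined(HAVE_FAST_TSD)
        if (!tcachePtr) {
            tcachePtr = CreateCache();
        }
        return tcachePtr;
    #else
        DemoCache *cachePtr;

        pthread_once(&keyOnce, MakeKey);
        cachePtr = pthread_getspecific(cacheKey);
        if (cachePtr == NULL) {
            cachePtr = CreateCache();
        }
        return cachePtr;
    #endif
    }

    int
    main(void)
    {
        DemoCache *cachePtr = GetCacheForThread();

        cachePtr->numObjects++;
        printf("per-thread cache at %p, numObjects=%ld\n",
                (void *) cachePtr, cachePtr->numObjects);
        return 0;
    }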
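The new #ifndef __LP64__ block in TclpAlloc() and TclpRealloc() rejects requests that would wrap around once sizeof(Block) and the RCHECK byte are added, which is only possible where int is at least as wide as size_t. A sketch of the same arithmetic with stand-in names (DemoBlock and DEMO_RCHECK are hypothetical); the point is that rearranging the comparison keeps the check itself from overflowing.

    /*
     * Overflow guard sketch: refuse reqSize when
     * reqSize + sizeof(Block) + RCHECK would exceed SIZE_MAX.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define DEMO_RCHECK 1                           /* stand-in for RCHECK */
    typedef struct DemoBlock { void *next; size_t reqSize; } DemoBlock;

    static int
    RequestTooLarge(unsigned int reqSize)
    {
        if (sizeof(int) >= sizeof(size_t)) {        /* only possible on 32-bit */
            const size_t max = (size_t) -1;         /* largest size_t value */

            if ((size_t) reqSize > max - sizeof(DemoBlock) - DEMO_RCHECK) {
                return 1;                           /* addition would wrap */
            }
        }
        return 0;
    }

    int
    main(void)
    {
        /* On an LP64 build both calls report 0; on ILP32 the second reports 1. */
        printf("%u too large? %d\n", 100u, RequestTooLarge(100u));
        printf("%u too large? %d\n", 0xFFFFFFF0u, RequestTooLarge(0xFFFFFFF0u));
        return 0;
    }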
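TclThreadAllocObj() and TclThreadFreeObj() keep freed Tcl_Obj structures on a per-thread singly linked free list threaded through the object's own internal representation, which this commit switches from internalRep.otherValuePtr to internalRep.twoPtrValue.ptr1. A simplified push/pop sketch of that list; DemoObj and DemoCache only mimic the fields involved and are not Tcl types.

    /*
     * Freed objects are chained through an otherwise-unused pointer slot,
     * so the free list needs no extra memory.
     */
    #include <stdio.h>

    typedef struct DemoObj {
        union {
            struct { void *ptr1, *ptr2; } twoPtrValue;
        } internalRep;
    } DemoObj;

    typedef struct DemoCache {
        DemoObj *firstObjPtr;       /* head of the per-thread free list */
        long numObjects;
    } DemoCache;

    static void
    PushFreeObj(DemoCache *cachePtr, DemoObj *objPtr)
    {
        objPtr->internalRep.twoPtrValue.ptr1 = cachePtr->firstObjPtr;
        cachePtr->firstObjPtr = objPtr;
        cachePtr->numObjects++;
    }

    static DemoObj *
    PopFreeObj(DemoCache *cachePtr)
    {
        DemoObj *objPtr = cachePtr->firstObjPtr;

        if (objPtr != NULL) {
            cachePtr->firstObjPtr = objPtr->internalRep.twoPtrValue.ptr1;
            cachePtr->numObjects--;
        }
        return objPtr;
    }

    int
    main(void)
    {
        DemoCache cache = { NULL, 0 };
        DemoObj a, b;

        PushFreeObj(&cache, &a);
        PushFreeObj(&cache, &b);
        printf("cached %ld objs, pop -> %p (expect &b = %p)\n",
                cache.numObjects, (void *) PopFreeObj(&cache), (void *) &b);
        return 0;
    }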
