-rw-r--r--  ChangeLog           |   8
-rw-r--r--  generic/tclAlloc.c  | 948
-rw-r--r--  generic/tclCmdAH.c  |   4
-rw-r--r--  generic/tclCmdMZ.c  |  19
-rw-r--r--  generic/tclInt.h    |  45
-rw-r--r--  generic/tclObj.c    |  31
6 files changed, 1011 insertions(+), 44 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 9a025d1..faf06d5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,13 @@
2002-02-26 Jeff Hobbs <jeffh@ActiveState.com>
+ * generic/tclCmdMZ.c (Tcl_RegsubObjCmd): prevent obj leak in
+ remedial regsub case.
+
+ * generic/tclFileName.c (Tcl_TranslateFileName): decr refcount for
+ error case to prevent mem leak.
+
+ * generic/tclVar.c (Tcl_ArrayObjCmd): removed extra obj allocation.
+
* unix/tclUnixSock.c (Tcl_GetHostName): added an extra
gethostbyname check to guard against failure with truncated names
returned by uname.
diff --git a/generic/tclAlloc.c b/generic/tclAlloc.c
index 3b39d72..6452ec5 100644
--- a/generic/tclAlloc.c
+++ b/generic/tclAlloc.c
@@ -15,13 +15,957 @@
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
- * RCS: @(#) $Id: tclAlloc.c,v 1.13 2002/02/26 02:19:13 hobbs Exp $
+ * RCS: @(#) $Id: tclAlloc.c,v 1.14 2002/02/27 06:39:26 hobbs Exp $
*/
#include "tclInt.h"
#include "tclPort.h"
-#if USE_TCLALLOC
+#if defined(TCL_THREADS) && !defined(MAC_TCL) && !defined(WIN32)
+
+/*
+ * tclAlloc.c --
+ *
+ * Special purpose allocator for threaded Tcl.
+ *
+ * Fast cache memory allocator designed to avoid lock
+ * contention. The basic strategy is to allocate memory in
+ * fixed size blocks from block caches.
+ */
+
+#include "tclInt.h"
+#include <pthread.h>
+#include <errno.h>
+
+/*
+ * If range checking is enabled, an additional byte will be allocated
+ * to store the magic number at the end of the requested memory.
+ */
+
+#ifndef RCHECK
+#ifdef NDEBUG
+#define RCHECK 0
+#else
+#define RCHECK 1
+#endif
+#endif
+
+/*
+ * The following define the number of Tcl_Obj's to allocate/move
+ * at a time and the high water mark to prune a per-thread cache.
+ * On a 32 bit system, sizeof(Tcl_Obj) = 24, so 800 * 24 = 19200 bytes (about 19k).
+ */
+
+#define NOBJALLOC 800
+#define NOBJHIGH 1200
+
+/*
+ * The following defines the number of buckets in the bucket
+ * cache and their block sizes, which range from (1<<4) to (1<<(3+NBUCKETS)).
+ */
+
+#define NBUCKETS 11
+#define MAXALLOC 16384
+
+/*
+ * The following structure stores accounting information for
+ * each block, including two small magic numbers and
+ * a bucket number when in use, or a next pointer when
+ * free. The original requested size (not including
+ * the Block overhead) is also maintained.
+ */
+
+typedef struct Block {
+ union {
+ struct Block *next; /* Next in free list. */
+ struct {
+ unsigned char magic1; /* First magic number. */
+ unsigned char bucket; /* Bucket block allocated from. */
+ unsigned char unused; /* Padding. */
+ unsigned char magic2; /* Second magic number. */
+ } b_s;
+ } b_u;
+ size_t b_reqsize; /* Requested allocation size. */
+} Block;
+#define b_next b_u.next
+#define b_bucket b_u.b_s.bucket
+#define b_magic1 b_u.b_s.magic1
+#define b_magic2 b_u.b_s.magic2
+#define MAGIC 0xef
+
+/*
+ * The following structure defines a bucket of blocks with
+ * various accounting and statistics information.
+ */
+
+typedef struct Bucket {
+ Block *firstPtr;
+ int nfree;
+ int nget;
+ int nput;
+ int nwait;
+ int nlock;
+ int nrequest;
+} Bucket;
+
+/*
+ * The following structure defines a cache of buckets and objs.
+ */
+
+typedef struct Cache {
+ struct Cache *nextPtr;
+ pthread_t owner;
+ int busy;
+ Tcl_Obj *firstObjPtr;
+ int nobjs;
+ int nsysalloc;
+ Bucket buckets[NBUCKETS];
+} Cache;
+
+/*
+ * The following array specifies various per-bucket
+ * limits and locks. The values are statically initialized
+ * to avoid calculating them repeatedly.
+ */
+
+struct binfo {
+ size_t blocksize; /* Bucket blocksize. */
+ int maxblocks; /* Max blocks before move to share. */
+ int nmove; /* Num blocks to move to share. */
+ pthread_mutex_t lock; /* Share bucket lock. */
+} binfo[NBUCKETS] = {
+ { 16, 1024, 512, PTHREAD_MUTEX_INITIALIZER},
+ { 32, 512, 256, PTHREAD_MUTEX_INITIALIZER},
+ { 64, 256, 128, PTHREAD_MUTEX_INITIALIZER},
+ { 128, 128, 64, PTHREAD_MUTEX_INITIALIZER},
+ { 256, 64, 32, PTHREAD_MUTEX_INITIALIZER},
+ { 512, 32, 16, PTHREAD_MUTEX_INITIALIZER},
+ { 1024, 16, 8, PTHREAD_MUTEX_INITIALIZER},
+ { 2048, 8, 4, PTHREAD_MUTEX_INITIALIZER},
+ { 4096, 4, 2, PTHREAD_MUTEX_INITIALIZER},
+ { 8192, 2, 1, PTHREAD_MUTEX_INITIALIZER},
+ {16384, 1, 1, PTHREAD_MUTEX_INITIALIZER},
+};
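
To make the bucket sizing concrete, here is a small illustrative helper (SizeToBucket is a hypothetical name, not part of this patch; the real lookup is the loop inside TclpAlloc below). It pads a request with the Block header and the optional RCHECK guard byte, then picks the smallest bucket that covers it:

    static int
    SizeToBucket(size_t reqsize)
    {
        size_t size = reqsize + sizeof(Block) + RCHECK; /* header + optional guard byte */
        int bucket = 0;

        if (size > MAXALLOC) {
            return -1;              /* too large for any bucket: plain malloc() */
        }
        while (binfo[bucket].blocksize < size) {
            ++bucket;
        }
        return bucket;              /* e.g. a 100-byte request maps to the 128-byte bucket */
    }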
+
+/*
+ * Static functions defined in this file.
+ */
+
+static void LockBucket(Cache *cachePtr, int bucket);
+static void UnlockBucket(Cache *cachePtr, int bucket);
+static void PutBlocks(Cache *cachePtr, int bucket, int nmove);
+static int GetBlocks(Cache *cachePtr, int bucket);
+static Block *Ptr2Block(char *ptr);
+static char *Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
+static void FreeCache(void *arg);
+static void MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);
+
+/*
+ * Local variables defined in this file and initialized at
+ * startup.
+ */
+
+static int initialized = 0;
+static pthread_mutex_t listlock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t objlock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_once_t once = PTHREAD_ONCE_INIT;
+static pthread_key_t key;
+static Cache sharedCache;
+static Cache *sharedPtr = &sharedCache;
+static Cache *firstCachePtr = &sharedCache;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * GetCache --
+ *
+ * Gets per-thread memory cache, allocating it if necessary.
+ *
+ * Results:
+ * Pointer to cache.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+Init(void)
+{
+ int err;
+
+ err = pthread_key_create(&key, FreeCache);
+ if (err != 0) {
+ panic("alloc: pthread_key_create: %s", strerror(err));
+ }
+ initialized = 1;
+}
+
+static Cache *
+GetCache(void)
+{
+ Cache *cachePtr;
+ int err;
+
+ /*
+ * Check for first-time initialization.
+ */
+
+ if (!initialized) {
+ pthread_once(&once, Init);
+ }
+
+ /*
+ * Get this thread's cache, allocating if necessary.
+ */
+
+ cachePtr = pthread_getspecific(key);
+ if (cachePtr == NULL) {
+ cachePtr = calloc(1, sizeof(Cache));
+ if (cachePtr == NULL) {
+ panic("alloc: could not allocate new cache");
+ }
+ pthread_mutex_lock(&listlock);
+ cachePtr->nextPtr = firstCachePtr;
+ firstCachePtr = cachePtr;
+ pthread_mutex_unlock(&listlock);
+ cachePtr->owner = pthread_self();
+ err = pthread_setspecific(key, cachePtr);
+ if (err != 0) {
+ panic("alloc: pthread_setspecific: %s", strerror(err));
+ }
+ }
+ cachePtr->busy = 1;
+ return cachePtr;
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * FreeCache --
+ *
+ * Flush and delete a cache, removing from all-caches list.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+FreeCache(void *arg)
+{
+ Cache *cachePtr = arg;
+ Cache **nextPtrPtr;
+ register int bucket;
+ int err;
+
+ /*
+ * Reset the cache and continue to finalize the thread until
+ * it appears idle.
+ */
+
+ if (cachePtr->busy) {
+ err = pthread_setspecific(key, cachePtr);
+ if (err != 0) {
+ panic("alloc: pthread_setspecific: %s", strerror(err));
+ }
+ Tcl_FinalizeThread();
+ cachePtr->busy = 0;
+ return;
+ }
+
+ /*
+ * Flush blocks.
+ */
+
+ for (bucket = 0; bucket < NBUCKETS; ++bucket) {
+ if (cachePtr->buckets[bucket].nfree > 0) {
+ PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
+ }
+ }
+
+ /*
+ * Flush objs.
+ */
+
+ if (cachePtr->nobjs > 0) {
+ pthread_mutex_lock(&objlock);
+ MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
+ pthread_mutex_unlock(&objlock);
+ }
+
+ /*
+ * Remove from pool list.
+ */
+
+ pthread_mutex_lock(&listlock);
+ nextPtrPtr = &firstCachePtr;
+ while (*nextPtrPtr != cachePtr) {
+ nextPtrPtr = &(*nextPtrPtr)->nextPtr;
+ }
+ *nextPtrPtr = cachePtr->nextPtr;
+ cachePtr->nextPtr = NULL;
+ pthread_mutex_unlock(&listlock);
+
+ free(cachePtr);
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclpAlloc --
+ *
+ * Allocate memory.
+ *
+ * Results:
+ * Pointer to memory just beyond Block pointer.
+ *
+ * Side effects:
+ * May allocate more blocks for a bucket.
+ *
+ *----------------------------------------------------------------------
+ */
+
+char *
+TclpAlloc(unsigned int reqsize)
+{
+ Cache *cachePtr = GetCache();
+ Block *blockPtr;
+ register int bucket;
+ size_t size;
+
+ /*
+ * Increment the requested size to include room for
+ * the Block structure. Call malloc() directly if the
+ * required amount is greater than the largest block,
+ * otherwise pop the smallest block large enough,
+ * allocating more blocks if necessary.
+ */
+
+ blockPtr = NULL;
+ size = reqsize + sizeof(Block);
+#if RCHECK
+ ++size;
+#endif
+ if (size > MAXALLOC) {
+ bucket = NBUCKETS;
+ blockPtr = malloc(size);
+ if (blockPtr != NULL) {
+ cachePtr->nsysalloc += reqsize;
+ }
+ } else {
+ bucket = 0;
+ while (binfo[bucket].blocksize < size) {
+ ++bucket;
+ }
+ if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
+ blockPtr = cachePtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
+ --cachePtr->buckets[bucket].nfree;
+ ++cachePtr->buckets[bucket].nget;
+ cachePtr->buckets[bucket].nrequest += reqsize;
+ }
+ }
+ if (blockPtr == NULL) {
+ return NULL;
+ }
+ return Block2Ptr(blockPtr, bucket, reqsize);
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclpFree --
+ *
+ * Return blocks to the thread block cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * May move blocks to shared cache.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclpFree(char *ptr)
+{
+ Cache *cachePtr = GetCache();
+ Block *blockPtr;
+ int bucket;
+
+ if (ptr == NULL) {
+ return;
+ }
+
+ /*
+ * Get the block back from the user pointer and
+ * call system free directly for large blocks.
+ * Otherwise, push the block back on the bucket and
+ * move blocks to the shared cache if there are now
+ * too many free.
+ */
+
+ blockPtr = Ptr2Block(ptr);
+ bucket = blockPtr->b_bucket;
+ if (bucket == NBUCKETS) {
+ cachePtr->nsysalloc -= blockPtr->b_reqsize;
+ free(blockPtr);
+ } else {
+ cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
+ blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ ++cachePtr->buckets[bucket].nfree;
+ ++cachePtr->buckets[bucket].nput;
+ if (cachePtr != sharedPtr &&
+ cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
+ PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
+ }
+ }
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclpRealloc --
+ *
+ * Re-allocate memory to a larger or smaller size.
+ *
+ * Results:
+ * Pointer to memory just beyond Block pointer.
+ *
+ * Side effects:
+ * Previous memory, if any, may be freed.
+ *
+ *----------------------------------------------------------------------
+ */
+
+char *
+TclpRealloc(char *ptr, unsigned int reqsize)
+{
+ Cache *cachePtr = GetCache();
+ Block *blockPtr;
+ void *new;
+ size_t size, min;
+ int bucket;
+
+ if (ptr == NULL) {
+ return (TclpAlloc(reqsize));
+ }
+
+ /*
+ * If the block is not a system block and fits in place,
+ * simply return the existing pointer. Otherwise, if the block
+ * is a system block and the new size would also require a system
+ * block, call realloc() directly.
+ */
+
+ blockPtr = Ptr2Block(ptr);
+ size = reqsize + sizeof(Block);
+#if RCHECK
+ ++size;
+#endif
+ bucket = blockPtr->b_bucket;
+ if (bucket != NBUCKETS) {
+ if (bucket > 0) {
+ min = binfo[bucket-1].blocksize;
+ } else {
+ min = 0;
+ }
+ if (size > min && size <= binfo[bucket].blocksize) {
+ cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
+ cachePtr->buckets[bucket].nrequest += reqsize;
+ return Block2Ptr(blockPtr, bucket, reqsize);
+ }
+ } else if (size > MAXALLOC) {
+ cachePtr->nsysalloc -= blockPtr->b_reqsize;
+ cachePtr->nsysalloc += reqsize;
+ blockPtr = realloc(blockPtr, size);
+ if (blockPtr == NULL) {
+ return NULL;
+ }
+ return Block2Ptr(blockPtr, NBUCKETS, reqsize);
+ }
+
+ /*
+ * Finally, perform an expensive malloc/copy/free.
+ */
+
+ new = TclpAlloc(reqsize);
+ if (new != NULL) {
+ if (reqsize > blockPtr->b_reqsize) {
+ reqsize = blockPtr->b_reqsize;
+ }
+ memcpy(new, ptr, reqsize);
+ TclpFree(ptr);
+ }
+ return new;
+}
+
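
A minimal round trip through the three entry points above, as a sketch (whether a realloc stays in place depends on sizeof(Block), RCHECK and the bucket bounds):

    char *p = TclpAlloc(100);       /* small request, served from a per-thread bucket */

    if (p != NULL) {
        p = TclpRealloc(p, 90);     /* still fits the same bucket, so returned in place */
        TclpFree(p);                /* pushes the block back on this thread's free list */
    }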
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclThreadAllocObj --
+ *
+ * Allocate a Tcl_Obj from the per-thread cache.
+ *
+ * Results:
+ * Pointer to uninitialized Tcl_Obj.
+ *
+ * Side effects:
+ * May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's
+ * if the list is empty.
+ *
+ *----------------------------------------------------------------------
+ */
+
+Tcl_Obj *
+TclThreadAllocObj(void)
+{
+ register Cache *cachePtr = GetCache();
+ register int nmove;
+ register Tcl_Obj *objPtr;
+ Tcl_Obj *newObjsPtr;
+
+ /*
+ * Get this thread's obj list structure and move
+ * or allocate new objs if necessary.
+ */
+
+ if (cachePtr->nobjs == 0) {
+ pthread_mutex_lock(&objlock);
+ nmove = sharedPtr->nobjs;
+ if (nmove > 0) {
+ if (nmove > NOBJALLOC) {
+ nmove = NOBJALLOC;
+ }
+ MoveObjs(sharedPtr, cachePtr, nmove);
+ }
+ pthread_mutex_unlock(&objlock);
+ if (cachePtr->nobjs == 0) {
+ cachePtr->nobjs = nmove = NOBJALLOC;
+ newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
+ if (newObjsPtr == NULL) {
+ panic("alloc: could not allocate %d new objects", nmove);
+ }
+ while (--nmove >= 0) {
+ objPtr = &newObjsPtr[nmove];
+ objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+ cachePtr->firstObjPtr = objPtr;
+ }
+ }
+ }
+
+ /*
+ * Pop the first object.
+ */
+
+ objPtr = cachePtr->firstObjPtr;
+ cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
+ --cachePtr->nobjs;
+ return objPtr;
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclThreadFreeObj --
+ *
+ * Return a free Tcl_Obj to the per-thread cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * May move free Tcl_Obj's to the shared list upon hitting the
+ * high water mark.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclThreadFreeObj(Tcl_Obj *objPtr)
+{
+ Cache *cachePtr = GetCache();
+
+ /*
+ * Get this thread's list and push on the free Tcl_Obj.
+ */
+
+ objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
+ cachePtr->firstObjPtr = objPtr;
+
+ /*
+ * If the number of free objects has exceeded the high
+ * water mark, move some blocks to the shared list.
+ */
+
+ ++cachePtr->nobjs;
+ if (cachePtr->nobjs > NOBJHIGH) {
+ pthread_mutex_lock(&objlock);
+ MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
+ pthread_mutex_unlock(&objlock);
+ }
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * Tcl_GetMemoryInfo --
+ *
+ * Return a list-of-lists of memory stats.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * List appended to given dstring.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
+{
+ Cache *cachePtr;
+ char buf[200];
+ int n;
+
+ pthread_mutex_lock(&listlock);
+ cachePtr = firstCachePtr;
+ while (cachePtr != NULL) {
+ Tcl_DStringStartSublist(dsPtr);
+ if (cachePtr == sharedPtr) {
+ Tcl_DStringAppendElement(dsPtr, "shared");
+ } else {
+ sprintf(buf, "thread%d", (int) cachePtr->owner);
+ Tcl_DStringAppendElement(dsPtr, buf);
+ }
+ for (n = 0; n < NBUCKETS; ++n) {
+ sprintf(buf, "%d %d %d %d %d %d %d",
+ binfo[n].blocksize,
+ cachePtr->buckets[n].nfree,
+ cachePtr->buckets[n].nget,
+ cachePtr->buckets[n].nput,
+ cachePtr->buckets[n].nrequest,
+ cachePtr->buckets[n].nlock,
+ cachePtr->buckets[n].nwait);
+ Tcl_DStringAppendElement(dsPtr, buf);
+ }
+ Tcl_DStringEndSublist(dsPtr);
+ cachePtr = cachePtr->nextPtr;
+ }
+ pthread_mutex_unlock(&listlock);
+}
+
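
The statistics land in a caller-supplied Tcl_DString, one sublist per cache; a minimal caller might look like this (a sketch, not part of the patch):

    Tcl_DString ds;

    Tcl_DStringInit(&ds);
    Tcl_GetMemoryInfo(&ds);                  /* appends one sublist per cache */
    printf("%s\n", Tcl_DStringValue(&ds));   /* or hand the string to an interp result */
    Tcl_DStringFree(&ds);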
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * MoveObjs --
+ *
+ * Move Tcl_Obj's between caches.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
+{
+ register Tcl_Obj *objPtr;
+
+ toPtr->nobjs += nmove;
+ fromPtr->nobjs -= nmove;
+ while (--nmove >= 0) {
+ objPtr = fromPtr->firstObjPtr;
+ fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
+ objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
+ toPtr->firstObjPtr = objPtr;
+ }
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * Block2Ptr, Ptr2Block --
+ *
+ * Convert between internal blocks and user pointers.
+ *
+ * Results:
+ * User pointer or internal block.
+ *
+ * Side effects:
+ * Invalid blocks will cause a panic that aborts the process.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static char *
+Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
+{
+ register void *ptr;
+
+ blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
+ blockPtr->b_bucket = bucket;
+ blockPtr->b_reqsize = reqsize;
+ ptr = ((void *) (blockPtr + 1));
+#if RCHECK
+ ((unsigned char *)(ptr))[reqsize] = MAGIC;
+#endif
+ return (char *) ptr;
+}
+
+static Block *
+Ptr2Block(char *ptr)
+{
+ register Block *blockPtr;
+
+ blockPtr = (((Block *) ptr) - 1);
+ if (blockPtr->b_magic1 != MAGIC
+#if RCHECK
+ || ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
+#endif
+ || blockPtr->b_magic2 != MAGIC) {
+ panic("alloc: invalid block: %p: %x %x %x\n",
+ blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
+ ((unsigned char *) ptr)[blockPtr->b_reqsize]);
+ }
+ return blockPtr;
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * LockBucket, UnlockBucket --
+ *
+ * Set/unset the lock to access a bucket in the shared cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Lock activity and contention are monitored globally and on
+ * a per-cache basis.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+LockBucket(Cache *cachePtr, int bucket)
+{
+ int err;
+
+ err = pthread_mutex_trylock(&binfo[bucket].lock);
+ if (err == EBUSY) {
+ err = pthread_mutex_lock(&binfo[bucket].lock);
+ if (err != 0) {
+ panic("alloc: pthread_mutex_lock: %s", strerror(err));
+ }
+ ++cachePtr->buckets[bucket].nwait;
+ ++sharedPtr->buckets[bucket].nwait;
+ } else if (err != 0) {
+ panic("alloc: pthread_mutex_trylock: %s", strerror(err));
+ }
+ ++cachePtr->buckets[bucket].nlock;
+ ++sharedPtr->buckets[bucket].nlock;
+}
+
+
+static void
+UnlockBucket(Cache *cachePtr, int bucket)
+{
+ int err;
+
+ err = pthread_mutex_unlock(&binfo[bucket].lock);
+ if (err != 0) {
+ panic("alloc: pthread_mutex_unlock: %s", strerror(err));
+ }
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * PutBlocks --
+ *
+ * Return unused blocks to the shared cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static void
+PutBlocks(Cache *cachePtr, int bucket, int nmove)
+{
+ register Block *lastPtr, *firstPtr;
+ register int n = nmove;
+
+ /*
+ * Before acquiring the lock, walk the block list to find
+ * the last block to be moved.
+ */
+
+ firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
+ while (--n > 0) {
+ lastPtr = lastPtr->b_next;
+ }
+ cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
+ cachePtr->buckets[bucket].nfree -= nmove;
+
+ /*
+ * Acquire the lock and place the list of blocks at the front
+ * of the shared cache bucket.
+ */
+
+ LockBucket(cachePtr, bucket);
+ lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
+ sharedPtr->buckets[bucket].firstPtr = firstPtr;
+ sharedPtr->buckets[bucket].nfree += nmove;
+ UnlockBucket(cachePtr, bucket);
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * GetBlocks --
+ *
+ * Get more blocks for a bucket.
+ *
+ * Results:
+ * 1 if blocks were allocated, 0 otherwise.
+ *
+ * Side effects:
+ * Cache may be filled with available blocks.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static int
+GetBlocks(Cache *cachePtr, int bucket)
+{
+ register Block *blockPtr;
+ register int n;
+ register size_t size;
+
+ /*
+ * First, attempt to move blocks from the shared cache. Note
+ * the potentially dirty read of nfree before acquiring the lock,
+ * which is a slight performance enhancement. The value is
+ * verified after the lock is actually acquired.
+ */
+
+ if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
+ LockBucket(cachePtr, bucket);
+ if (sharedPtr->buckets[bucket].nfree > 0) {
+
+ /*
+ * Either move the entire list or walk the list to find
+ * the last block to move.
+ */
+
+ n = binfo[bucket].nmove;
+ if (n >= sharedPtr->buckets[bucket].nfree) {
+ cachePtr->buckets[bucket].firstPtr = sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].nfree = sharedPtr->buckets[bucket].nfree;
+ sharedPtr->buckets[bucket].firstPtr = NULL;
+ sharedPtr->buckets[bucket].nfree = 0;
+ } else {
+ blockPtr = sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ sharedPtr->buckets[bucket].nfree -= n;
+ cachePtr->buckets[bucket].nfree = n;
+ while (--n > 0) {
+ blockPtr = blockPtr->b_next;
+ }
+ sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
+ blockPtr->b_next = NULL;
+ }
+ }
+ UnlockBucket(cachePtr, bucket);
+ }
+
+ if (cachePtr->buckets[bucket].nfree == 0) {
+
+ /*
+ * If no blocks could be moved from the shared cache, first look
+ * for a larger block in this cache to split up.
+ */
+
+ size = 0; /* lint */
+ blockPtr = NULL;
+ n = NBUCKETS;
+ while (--n > bucket) {
+ if (cachePtr->buckets[n].nfree > 0) {
+ size = binfo[n].blocksize;
+ blockPtr = cachePtr->buckets[n].firstPtr;
+ cachePtr->buckets[n].firstPtr = blockPtr->b_next;
+ --cachePtr->buckets[n].nfree;
+ break;
+ }
+ }
+
+ /*
+ * Otherwise, allocate a big new block directly.
+ */
+
+ if (blockPtr == NULL) {
+ size = MAXALLOC;
+ blockPtr = malloc(size);
+ if (blockPtr == NULL) {
+ return 0;
+ }
+ }
+
+ /*
+ * Split the larger block into smaller blocks for this bucket.
+ */
+
+ n = size / binfo[bucket].blocksize;
+ cachePtr->buckets[bucket].nfree = n;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ while (--n > 0) {
+ blockPtr->b_next = (Block *)
+ ((char *) blockPtr + binfo[bucket].blocksize);
+ blockPtr = blockPtr->b_next;
+ }
+ blockPtr->b_next = NULL;
+ }
+ return 1;
+}
+
+/*
+ * END TCL_THREADS MEMORY ALLOCATOR
+ */
+
+#elif USE_TCLALLOC
#ifdef TCL_DEBUG
# define DEBUG
diff --git a/generic/tclCmdAH.c b/generic/tclCmdAH.c
index d3928e2..661bb4c 100644
--- a/generic/tclCmdAH.c
+++ b/generic/tclCmdAH.c
@@ -11,7 +11,7 @@
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
- * RCS: @(#) $Id: tclCmdAH.c,v 1.22 2002/02/19 10:26:24 dkf Exp $
+ * RCS: @(#) $Id: tclCmdAH.c,v 1.23 2002/02/27 06:39:26 hobbs Exp $
*/
#include "tclInt.h"
@@ -1441,6 +1441,8 @@ StoreStatData(interp, varName, statPtr)
STORE_ARY("mode", Tcl_NewIntObj(mode));
STORE_ARY("type", Tcl_NewStringObj(GetTypeFromMode(mode), -1));
#undef STORE_ARY
+ Tcl_DecrRefCount(var);
+ Tcl_DecrRefCount(field);
return TCL_OK;
}
diff --git a/generic/tclCmdMZ.c b/generic/tclCmdMZ.c
index 04c6a6c..d78e29c 100644
--- a/generic/tclCmdMZ.c
+++ b/generic/tclCmdMZ.c
@@ -14,7 +14,7 @@
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
- * RCS: @(#) $Id: tclCmdMZ.c,v 1.61 2002/02/22 14:52:45 dkf Exp $
+ * RCS: @(#) $Id: tclCmdMZ.c,v 1.62 2002/02/27 06:39:26 hobbs Exp $
*/
#include "tclInt.h"
@@ -584,14 +584,16 @@ Tcl_RegsubObjCmd(dummy, interp, objc, objv)
* regsub behavior for "" matches between each character.
* 'string map' skips the "" case.
*/
- resultPtr = Tcl_NewUnicodeObj(wstring, 0);
- Tcl_IncrRefCount(resultPtr);
- for (; wstring < wend; wstring++) {
- Tcl_AppendUnicodeToObj(resultPtr, wsubspec, wsublen);
- Tcl_AppendUnicodeToObj(resultPtr, wstring, 1);
- numMatches++;
+ if (wstring < wend) {
+ resultPtr = Tcl_NewUnicodeObj(wstring, 0);
+ Tcl_IncrRefCount(resultPtr);
+ for (; wstring < wend; wstring++) {
+ Tcl_AppendUnicodeToObj(resultPtr, wsubspec, wsublen);
+ Tcl_AppendUnicodeToObj(resultPtr, wstring, 1);
+ numMatches++;
+ }
+ wlen = 0;
}
- wlen = 0;
} else {
wsrclc = Tcl_UniCharToLower(*wsrc);
for (p = wfirstChar = wstring; wstring < wend; wstring++) {
@@ -776,7 +778,6 @@ Tcl_RegsubObjCmd(dummy, interp, objc, objv)
* On zero matches, just ignore the offset, since it shouldn't
* matter to us in this case, and the user may have skewed it.
*/
- /*Tcl_AppendUnicodeToObj(resultPtr, wstring, wlen);*/
resultPtr = objv[1];
Tcl_IncrRefCount(resultPtr);
} else if (offset < wlen) {
diff --git a/generic/tclInt.h b/generic/tclInt.h
index 5c9ffbc..145d6b8 100644
--- a/generic/tclInt.h
+++ b/generic/tclInt.h
@@ -12,7 +12,7 @@
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
- * RCS: @(#) $Id: tclInt.h,v 1.81 2002/02/22 22:36:09 dgp Exp $
+ * RCS: @(#) $Id: tclInt.h,v 1.82 2002/02/27 06:39:26 hobbs Exp $
*/
#ifndef _TCLINT
@@ -2125,6 +2125,8 @@ EXTERN int TclCompileForCmd _ANSI_ARGS_((Tcl_Interp *interp,
Tcl_Parse *parsePtr, struct CompileEnv *envPtr));
EXTERN int TclCompileForeachCmd _ANSI_ARGS_((Tcl_Interp *interp,
Tcl_Parse *parsePtr, struct CompileEnv *envPtr));
+EXTERN int TclCompileGetsCmd _ANSI_ARGS_((Tcl_Interp *interp,
+ Tcl_Parse *parsePtr, struct CompileEnv *envPtr));
EXTERN int TclCompileIfCmd _ANSI_ARGS_((Tcl_Interp *interp,
Tcl_Parse *parsePtr, struct CompileEnv *envPtr));
EXTERN int TclCompileIncrCmd _ANSI_ARGS_((Tcl_Interp *interp,
@@ -2244,15 +2246,39 @@ EXTERN int TclCompileWhileCmd _ANSI_ARGS_((Tcl_Interp *interp,
TclIncrObjsFreed(); \
}
-#else /* not TCL_MEM_DEBUG */
+#elif defined(TCL_THREADS)
-#ifdef TCL_THREADS
-/* declared in tclObj.c */
-extern Tcl_Mutex tclObjMutex;
-#endif
+/*
+ * The TCL_THREADS mode is like the regular mode but allocates Tcl_Obj's
+ * from per-thread caches.
+ */
+
+EXTERN Tcl_Obj *TclThreadAllocObj _ANSI_ARGS_((void));
+EXTERN void TclThreadFreeObj _ANSI_ARGS_((Tcl_Obj *));
+
+# define TclNewObj(objPtr) \
+ (objPtr) = TclThreadAllocObj(); \
+ (objPtr)->refCount = 0; \
+ (objPtr)->bytes = tclEmptyStringRep; \
+ (objPtr)->length = 0; \
+ (objPtr)->typePtr = NULL
+
+# define TclDecrRefCount(objPtr) \
+ if (--(objPtr)->refCount <= 0) { \
+ if (((objPtr)->bytes != NULL) \
+ && ((objPtr)->bytes != tclEmptyStringRep)) { \
+ ckfree((char *) (objPtr)->bytes); \
+ } \
+ if (((objPtr)->typePtr != NULL) \
+ && ((objPtr)->typePtr->freeIntRepProc != NULL)) { \
+ (objPtr)->typePtr->freeIntRepProc(objPtr); \
+ } \
+ TclThreadFreeObj((objPtr)); \
+ }
+
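
Code in the core uses these exactly like the unthreaded macros; a minimal sketch of the lifecycle they implement:

    Tcl_Obj *objPtr;

    TclNewObj(objPtr);              /* pops an obj from the per-thread cache */
    Tcl_IncrRefCount(objPtr);
    /* ... use objPtr ... */
    TclDecrRefCount(objPtr);        /* eventually hands it back via TclThreadFreeObj() */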
+#else /* not TCL_MEM_DEBUG */
# define TclNewObj(objPtr) \
- Tcl_MutexLock(&tclObjMutex); \
if (tclFreeObjList == NULL) { \
TclAllocateFreeObjects(); \
} \
@@ -2263,8 +2289,7 @@ extern Tcl_Mutex tclObjMutex;
(objPtr)->bytes = tclEmptyStringRep; \
(objPtr)->length = 0; \
(objPtr)->typePtr = NULL; \
- TclIncrObjsAllocated(); \
- Tcl_MutexUnlock(&tclObjMutex)
+ TclIncrObjsAllocated();
# define TclDecrRefCount(objPtr) \
if (--(objPtr)->refCount <= 0) { \
@@ -2276,11 +2301,9 @@ extern Tcl_Mutex tclObjMutex;
&& ((objPtr)->typePtr->freeIntRepProc != NULL)) { \
(objPtr)->typePtr->freeIntRepProc(objPtr); \
} \
- Tcl_MutexLock(&tclObjMutex); \
(objPtr)->internalRep.otherValuePtr = (VOID *) tclFreeObjList; \
tclFreeObjList = (objPtr); \
TclIncrObjsFreed(); \
- Tcl_MutexUnlock(&tclObjMutex); \
}
#endif /* TCL_MEM_DEBUG */
diff --git a/generic/tclObj.c b/generic/tclObj.c
index a2eb282..8bf059b 100644
--- a/generic/tclObj.c
+++ b/generic/tclObj.c
@@ -11,7 +11,7 @@
* See the file "license.terms" for information on usage and redistribution
* of this file, and for a DISCLAIMER OF ALL WARRANTIES.
*
- * RCS: @(#) $Id: tclObj.c,v 1.30 2002/02/22 22:36:09 dgp Exp $
+ * RCS: @(#) $Id: tclObj.c,v 1.31 2002/02/27 06:39:27 hobbs Exp $
*/
#include "tclInt.h"
@@ -30,15 +30,8 @@ TCL_DECLARE_MUTEX(tableMutex)
* Head of the list of free Tcl_Obj structs we maintain.
*/
+#ifndef TCL_THREADS
Tcl_Obj *tclFreeObjList = NULL;
-
-/*
- * The object allocator is single threaded. This mutex is referenced
- * by the TclNewObj macro, however, so must be visible.
- */
-
-#ifdef TCL_THREADS
-Tcl_Mutex tclObjMutex;
#endif
/*
@@ -246,7 +239,6 @@ TclInitObjSubsystem()
Tcl_RegisterObjType(&tclCmdNameType);
#ifdef TCL_COMPILE_STATS
- Tcl_MutexLock(&tclObjMutex);
tclObjsAlloced = 0;
tclObjsFreed = 0;
{
@@ -255,7 +247,6 @@ TclInitObjSubsystem()
tclObjsShared[i] = 0;
}
}
- Tcl_MutexUnlock(&tclObjMutex);
#endif
}
@@ -286,9 +277,9 @@ TclFinalizeCompExecEnv()
typeTableInitialized = 0;
}
Tcl_MutexUnlock(&tableMutex);
- Tcl_MutexLock(&tclObjMutex);
+#ifndef TCL_THREADS
tclFreeObjList = NULL;
- Tcl_MutexUnlock(&tclObjMutex);
+#endif
TclFinalizeCompilation();
TclFinalizeExecution();
@@ -520,9 +511,10 @@ Tcl_NewObj()
* we maintain.
*/
- Tcl_MutexLock(&tclObjMutex);
#ifdef PURIFY
objPtr = (Tcl_Obj *) Tcl_Ckalloc(sizeof(Tcl_Obj));
+#elif defined(TCL_THREADS)
+ objPtr = TclThreadAllocObj();
#else
if (tclFreeObjList == NULL) {
TclAllocateFreeObjects();
@@ -537,7 +529,6 @@ Tcl_NewObj()
#ifdef TCL_COMPILE_STATS
tclObjsAlloced++;
#endif /* TCL_COMPILE_STATS */
- Tcl_MutexUnlock(&tclObjMutex);
return objPtr;
}
#endif /* TCL_MEM_DEBUG */
@@ -592,9 +583,7 @@ Tcl_DbNewObj(file, line)
objPtr->length = 0;
objPtr->typePtr = NULL;
#ifdef TCL_COMPILE_STATS
- Tcl_MutexLock(&tclObjMutex);
tclObjsAlloced++;
- Tcl_MutexUnlock(&tclObjMutex);
#endif /* TCL_COMPILE_STATS */
return objPtr;
}
@@ -638,6 +627,7 @@ Tcl_DbNewObj(file, line)
void
TclAllocateFreeObjects()
{
+#ifndef TCL_THREADS
size_t bytesToAlloc = (OBJS_TO_ALLOC_EACH_TIME * sizeof(Tcl_Obj));
char *basePtr;
register Tcl_Obj *prevPtr, *objPtr;
@@ -661,6 +651,7 @@ TclAllocateFreeObjects()
objPtr++;
}
tclFreeObjList = prevPtr;
+#endif
}
#undef OBJS_TO_ALLOC_EACH_TIME
@@ -712,9 +703,10 @@ TclFreeObj(objPtr)
* Tcl_Obj structs we maintain.
*/
- Tcl_MutexLock(&tclObjMutex);
#if defined(TCL_MEM_DEBUG) || defined(PURIFY)
ckfree((char *) objPtr);
+#elif defined(TCL_THREADS)
+ TclThreadFreeObj(objPtr);
#else
objPtr->internalRep.otherValuePtr = (VOID *) tclFreeObjList;
tclFreeObjList = objPtr;
@@ -723,7 +715,6 @@ TclFreeObj(objPtr)
#ifdef TCL_COMPILE_STATS
tclObjsFreed++;
#endif /* TCL_COMPILE_STATS */
- Tcl_MutexUnlock(&tclObjMutex);
}
/*
@@ -2586,7 +2577,6 @@ Tcl_DbIsShared(objPtr, file, line)
}
#endif
#ifdef TCL_COMPILE_STATS
- Tcl_MutexLock(&tclObjMutex);
if ((objPtr)->refCount <= 1) {
tclObjsShared[1]++;
} else if ((objPtr)->refCount < TCL_MAX_SHARED_OBJ_STATS) {
@@ -2594,7 +2584,6 @@ Tcl_DbIsShared(objPtr, file, line)
} else {
tclObjsShared[0]++;
}
- Tcl_MutexUnlock(&tclObjMutex);
#endif
return ((objPtr)->refCount > 1);
}