diff options
Diffstat (limited to 'generic')
35 files changed, 2720 insertions, 1565 deletions
diff --git a/generic/tclAlloc.c b/generic/tclAlloc.c index e641e97..6fff92b 100644 --- a/generic/tclAlloc.c +++ b/generic/tclAlloc.c @@ -1,427 +1,253 @@ /* * tclAlloc.c -- * - * This is a very flexible storage allocator for Tcl, for use with or - * without threads. Depending on the compile flags, it builds as: + * This is a very fast storage allocator. It allocates blocks of a small + * number of different sizes, and keeps free lists of each size. Blocks + * that don't exactly fit are passed up to the next larger size. Blocks + * over a certain size are directly allocated from the system. * - * (1) Build flags: TCL_ALLOC_NATIVE - * NATIVE: use the native malloc and a per-thread Tcl_Obj pool, with - * inter-thread recycling of objects. The per-thread pool can be - * disabled at startup with an env var, thus providing the PURIFY - * behaviour that is useful for valgrind and similar tools. Note that - * the PURIFY costs are negligible when disabled, but when enabled - * Tcl_Obj allocs will be even slower than in a full PURIFY build - * NOTE: the obj pool shares all code with zippy's smallest allocs! - * It does look overcomplicated for this particular case, but - * keeping them together allows simpler maintenance and avoids - * the need for separate debugging - * TODO: in this case build ZIPPY as a preloadable malloc-replacement + * Copyright (c) 1983 Regents of the University of California. + * Copyright (c) 1996-1997 Sun Microsystems, Inc. + * Copyright (c) 1998-1999 by Scriptics Corporation. * - * (2) Build flags: TCL_ALLOC_ZIPPY - * ZIPPY: use the ex-tclThreadAlloc, essentially aolserver's - * fast threaded allocator. 
Mods with respect to the original: - * - change in the block sizes, so that the smallest alloc is - * Tcl_Obj-sized - * - share the Tcl_Obj pool with the smallest allocs pool for - * improved cache usage - * - split blocks in the shared pool before mallocing again for - * improved cache usage - * - ?change in the number of blocks to move to/from the shared - * cache: it used to be a fixed number, it is now computed - * to leave a fixed number in the thread's pool. This improves - * sharing behaviour when one thread uses a lot of memory once - * and rarely again (eg, at startup), at the cost of slowing - * slightly threads that allocate/free large numbers of blocks - * repeatedly - * - stats and Tcl_GetMemoryInfo disabled per default, enable with - * -DZIPPY_STATS - * - adapt for unthreaded usage as replacement of the ex tclAlloc - * - -DHAVE_FAST_TSD: use fast TSD via __thread where available - * - (TODO!) build zippy as a pre-loadable library to use with a - * native build as a malloc replacement. Difficulties are: - * (a) make that portable (easy enough on modern elf/unix, to - * be researched on win and mac) - * (b) coordinate the Tcl_Obj pool and the smallest allocs, - * as they are now addressed from different files. This - * might require a special Tcl build with no - * TclSmallAlloc, and a separate preloadable for use with - * native builds? Or else separate them again, but that's - * not really good I think. - * - * NOTES: - * . this would be the best option, instead of MULTI. It - * could be built in two versions (perf, debug/stats) - * . would a preloaded zippy be slower than builtin? - * Possibly, due to extra indirection. - * - * (3) Build flags: TCL_ALLOC_MULTI - * MULTI: all of the above, selectable at startup with an env - * var. This build will be very slightly slower than the specific - * builds above, but is completely portable: it does not depend on - * any help from the loader or such. 
- * - * All variants can be built for both threaded and unthreaded Tcl. - * - * The Initial Developer of the Original Code is America Online, Inc. - * Portions created by AOL are Copyright (C) 1999 America Online, Inc. - * - * Copyright (c) 2008-2011 by Miguel Sofer. All rights reserved. + * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson. * * See the file "license.terms" for information on usage and redistribution of * this file, and for a DISCLAIMER OF ALL WARRANTIES. */ -#include "tclInt.h" - /* - * This macro is used to properly align the memory allocated by Tcl, giving - * the same alignment as the native malloc. + * Windows and Unix use an alternative allocator when building with threads + * that has significantly reduced lock contention. */ -#if defined(__APPLE__) -#define TCL_ALLOCALIGN 16 -#else -#define TCL_ALLOCALIGN (2*sizeof(void *)) -#endif - -#undef TclpAlloc -#undef TclpRealloc -#undef TclpFree -#undef TclSmallAlloc -#undef TclSmallFree - -#if (TCL_ALLOCATOR == aNATIVE) || (TCL_ALLOCATOR == aPURIFY) -/* - * Not much of this file is needed, most things are dealt with in the - * macros. Just shunt the allocators for use by the library, the core - * never calls this. - * - * This is all that is needed for a TCL_ALLOC_PURIFY build, a native build - * needs the Tcl_Obj pools too. 
- */ - -char * -TclpAlloc( - unsigned int reqSize) -{ - return malloc(reqSize); -} +#include "tclInt.h" +#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC) -char * -TclpRealloc( - char *ptr, - unsigned int reqSize) -{ - return realloc(ptr, reqSize); -} +#if USE_TCLALLOC -void -TclpFree( - char *ptr) -{ - free(ptr); -} - -#endif /* end of common code for PURIFY and NATIVE*/ +#ifdef TCL_DEBUG +# define DEBUG +/* #define MSTATS */ +# define RCHECK +#endif -#if TCL_ALLOCATOR != aPURIFY /* - * The rest of this file deals with ZIPPY and MULTI builds, as well as the - * Tcl_Obj pools for NATIVE + * We should really make use of AC_CHECK_TYPE(caddr_t) here, but it can wait + * until Tcl uses config.h properly. */ +#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__) +typedef unsigned long caddr_t; +#endif + /* - * Note: we rely on the optimizer to remove unneeded code, instead of setting - * up a maze of #ifdefs all over the code. - * We should insure that debug builds do at least this much optimization, right? + * The overhead on a block is at least 8 bytes. When free, this space contains + * a pointer to the next free block, and the bottom two bits must be zero. + * When in use, the first byte is set to MAGIC, and the second byte is the + * size index. The remaining bytes are for alignment. If range checking is + * enabled then a second word holds the size of the requested block, less 1, + * rounded up to a multiple of sizeof(RMAGIC). The order of elements is + * critical: ov.magic must overlay the low order bits of ov.next, and ov.magic + * can not be a valid ov.next bit pattern. */ -#if TCL_ALLOCATOR == aZIPPY -# define allocator aZIPPY -# define ALLOCATOR_BASE aZIPPY -#elif TCL_ALLOCATOR == aNATIVE -/* Keep the option to switch PURIFY mode on! 
*/ -static int allocator = aNONE; -# define ALLOCATOR_BASE aNATIVE -# define RCHECK 0 -# undef ZIPPY_STATS +union overhead { + union overhead *next; /* when free */ + unsigned char padding[TCL_ALLOCALIGN]; /* align struct to TCL_ALLOCALIGN bytes */ + struct { + unsigned char magic0; /* magic number */ + unsigned char index; /* bucket # */ + unsigned char unused; /* unused */ + unsigned char magic1; /* other magic number */ +#ifdef RCHECK + unsigned short rmagic; /* range magic number */ + unsigned long size; /* actual block size */ + unsigned short unused2; /* padding to 8-byte align */ +#endif + } ovu; +#define overMagic0 ovu.magic0 +#define overMagic1 ovu.magic1 +#define bucketIndex ovu.index +#define rangeCheckMagic ovu.rmagic +#define realBlockSize ovu.size +}; + + +#define MAGIC 0xef /* magic # on accounting info */ +#define RMAGIC 0x5555 /* magic # on range info */ + +#ifdef RCHECK +#define RSLOP sizeof(unsigned short) #else -/* MULTI */ - static int allocator = aNONE; -# define ALLOCATOR_BASE aZIPPY -#endif - -#if TCL_ALLOCATOR != aZIPPY -static void ChooseAllocator(); +#define RSLOP 0 #endif +#define OVERHEAD (sizeof(union overhead) + RSLOP) /* - * If range checking is enabled, an additional byte will be allocated to store - * the magic number at the end of the requested memory. + * Macro to make it easier to refer to the end-of-block guard magic. */ -#ifndef RCHECK -# ifdef NDEBUG -# define RCHECK 0 -# else -# define RCHECK 1 -# endif -#endif +#define BLOCK_END(overPtr) \ + (*(unsigned short *)((caddr_t)((overPtr) + 1) + (overPtr)->realBlockSize)) /* - * The following struct stores accounting information for each block including - * two small magic numbers and a bucket number when in use or a next pointer - * when free. The original requested size (not including the Block overhead) - * is also maintained. + * nextf[i] is the pointer to the next free block of size 2^(i+3). The + * smallest allocatable block is MINBLOCK bytes. 
The overhead information + * precedes the data area returned to the user. */ -typedef struct Block { - union { - struct Block *next; /* Next in free list. */ - struct { - unsigned char magic1; /* First magic number. */ - unsigned char bucket; /* Bucket block allocated from. */ - unsigned char unused; /* Padding. */ - unsigned char magic2; /* Second magic number. */ - } s; - } u; - size_t reqSize; /* Requested allocation size. */ -} Block; - -#define ALIGN(x) (((x) + TCL_ALLOCALIGN - 1) & ~(TCL_ALLOCALIGN - 1)) -#define OFFSET ALIGN(sizeof(Block)) - -#define nextBlock u.next -#define sourceBucket u.s.bucket -#define magicNum1 u.s.magic1 -#define magicNum2 u.s.magic2 -#define MAGIC 0xEF +#define MINBLOCK ((sizeof(union overhead) + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1)) +#define NBUCKETS (13 - (MINBLOCK >> 4)) +#define MAXMALLOC (1<<(NBUCKETS+2)) +static union overhead *nextf[NBUCKETS]; /* - * The following defines the minimum and maximum block sizes and the number - * of buckets in the bucket cache. - * 32b 64b Apple-32b - * TCL_ALLOCALIGN 8 16 16 - * sizeof(Block) 8 16 16 - * OFFSET 8 16 16 - * sizeof(Tcl_Obj) 24 48 24 - * ALLOCBASE 24 48 24 - * MINALLOC 24 48 24 - * NBUCKETS 11 10 11 - * MAXALLOC 24576 24576 24576 - * small allocs 1024 512 1024 - * at a time + * The following structure is used to keep track of all system memory + * currently owned by Tcl. When finalizing, all this memory will be returned + * to the system. */ -#if TCL_ALLOCATOR == aNATIVE -#define MINALLOC MAX(OFFSET, sizeof(Tcl_Obj)) -#else -#define MINALLOC ALIGN(MAX(OFFSET+8, sizeof(Tcl_Obj))) -#endif - -#define NBUCKETS 10 /* previously (11 - (MINALLOC >> 5)) */ -#define MAXALLOC (MINALLOC << (NBUCKETS - 1)) +struct block { + struct block *nextPtr; /* Linked list. */ + struct block *prevPtr; /* Linked list for big blocks, ensures 8-byte + * alignment for suballocated blocks. 
*/ +}; -#if TCL_ALLOCATOR == aNATIVE -# define NBUCKETS_0 1 -# define nBuckets 1 -#else -# define NBUCKETS_0 NBUCKETS -# if TCL_ALLOCATOR == aZIPPY -# define nBuckets NBUCKETS -# else - static int nBuckets = NBUCKETS; -# endif -#endif +static struct block *blockList; /* Tracks the suballocated blocks. */ +static struct block bigBlocks={ /* Big blocks aren't suballocated. */ + &bigBlocks, &bigBlocks +}; /* - * The following structure defines a bucket of blocks, optionally with various - * accounting and statistics information. + * The allocator is protected by a special mutex that must be explicitly + * initialized. Futhermore, because Tcl_Alloc may be used before anything else + * in Tcl, we make this module self-initializing after all with the allocInit + * variable. */ -typedef struct Bucket { - Block *firstPtr; /* First block available */ - long numFree; /* Number of blocks available */ -#ifdef ZIPPY_STATS - /* All fields below for accounting only */ - - long numRemoves; /* Number of removes from bucket */ - long numInserts; /* Number of inserts into bucket */ - long numWaits; /* Number of waits to acquire a lock */ - long numLocks; /* Number of locks acquired */ - long totalAssigned; /* Total space assigned to bucket */ +#ifdef TCL_THREADS +static Tcl_Mutex *allocMutexPtr; #endif -} Bucket; +static int allocInit = 0; + +#ifdef MSTATS /* - * The following structure defines a cache of buckets, at most one per - * thread. + * numMallocs[i] is the difference between the number of mallocs and frees for + * a given block size. */ -typedef struct Cache { -#if defined(TCL_THREADS) - struct Cache *nextPtr; /* Linked list of cache entries */ -#ifdef ZIPPY_STATS - Tcl_ThreadId owner; /* Which thread's cache is this? 
*/ -#endif +static unsigned int numMallocs[NBUCKETS+1]; #endif -#ifdef ZIPPY_STATS - int totalAssigned; /* Total space assigned to thread */ -#endif - Bucket buckets[1]; /* The buckets for this thread */ -} Cache; +#if defined(DEBUG) || defined(RCHECK) +#define ASSERT(p) if (!(p)) Tcl_Panic(# p) +#define RANGE_ASSERT(p) if (!(p)) Tcl_Panic(# p) +#else +#define ASSERT(p) +#define RANGE_ASSERT(p) +#endif /* - * The following array specifies various per-bucket limits and locks. The - * values are statically initialized to avoid calculating them repeatedly. + * Prototypes for functions used only in this file. */ -static struct { - size_t blockSize; /* Bucket blocksize. */ -#if defined(TCL_THREADS) - int maxBlocks; /* Max blocks before move to share. */ - int numMove; /* Num blocks to move to share. */ - Tcl_Mutex *lockPtr; /* Share bucket lock. */ -#endif -} bucketInfo[NBUCKETS_0]; - +static void MoreCore(int bucket); + /* - * Static functions defined in this file. + *------------------------------------------------------------------------- + * + * TclInitAlloc -- + * + * Initialize the memory system. + * + * Results: + * None. + * + * Side effects: + * Initialize the mutex used to serialize allocations. 
+ * + *------------------------------------------------------------------------- */ -static Cache * GetCache(void); -static int GetBlocks(Cache *cachePtr, int bucket); -static inline Block * Ptr2Block(char *ptr); -static inline char * Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize); - -#if defined(TCL_THREADS) - -static Cache *firstCachePtr = NULL; -static Cache *sharedPtr = NULL; - -static Tcl_Mutex *listLockPtr; -static Tcl_Mutex *objLockPtr; - -static void LockBucket(Cache *cachePtr, int bucket); -static void UnlockBucket(Cache *cachePtr, int bucket); -static void PutBlocks(Cache *cachePtr, int bucket, int numMove); - -#if defined(HAVE_FAST_TSD) -static __thread Cache *tcachePtr; -static __thread int allocInitialized = 0; - -# define GETCACHE(cachePtr) \ - do { \ - if (!allocInitialized) { \ - allocInitialized = 1; \ - tcachePtr = GetCache(); \ - } \ - (cachePtr) = tcachePtr; \ - } while (0) -#else -# define GETCACHE(cachePtr) \ - do { \ - (cachePtr) = TclpGetAllocCache(); \ - if ((cachePtr) == NULL) { \ - (cachePtr) = GetCache(); \ - } \ - } while (0) -#endif -#else /* NOT THREADS! 
*/ - -static int allocInitialized = 0; - -#define TclpSetAllocCache() -#define PutBlocks(cachePtr, bucket, numMove) -#define firstCachePtr sharedCachePtr - -# define GETCACHE(cachePtr) \ - do { \ - if (!allocInitialized) { \ - allocInitialized = 1; \ - GetCache(); \ - } \ - (cachePtr) = sharedPtr; \ - } while (0) - -static void * -TclpGetAllocCache(void) +void +TclInitAlloc(void) { - if (!allocInitialized) { - allocInitialized = 1; - GetCache(); + if (!allocInit) { + allocInit = 1; +#ifdef TCL_THREADS + allocMutexPtr = Tcl_GetAllocMutex(); +#endif } - return sharedPtr; } -#endif - /* - *---------------------------------------------------------------------- + *------------------------------------------------------------------------- + * + * TclFinalizeAllocSubsystem -- * - * Block2Ptr, Ptr2Block -- + * Release all resources being used by this subsystem, including + * aggressively freeing all memory allocated by TclpAlloc() that has not + * yet been released with TclpFree(). * - * Convert between internal blocks and user pointers. + * After this function is called, all memory allocated with TclpAlloc() + * should be considered unusable. * * Results: - * User pointer or internal block. + * None. * * Side effects: - * Invalid blocks will abort the server. + * This subsystem is self-initializing, since memory can be allocated + * before Tcl is formally initialized. After this call, this subsystem + * has been reset to its initial state and is usable again. 
* - *---------------------------------------------------------------------- + *------------------------------------------------------------------------- */ -static inline char * -Block2Ptr( - Block *blockPtr, - int bucket, - unsigned int reqSize) +void +TclFinalizeAllocSubsystem(void) { - register void *ptr; - - blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC; - blockPtr->sourceBucket = bucket; - blockPtr->reqSize = reqSize; - ptr = (void *) (((char *)blockPtr) + OFFSET); -#if RCHECK - ((unsigned char *)(ptr))[reqSize] = MAGIC; -#endif - return (char *) ptr; -} + unsigned int i; + struct block *blockPtr, *nextPtr; -static inline Block * -Ptr2Block( - char *ptr) -{ - register Block *blockPtr; + Tcl_MutexLock(allocMutexPtr); + for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) { + nextPtr = blockPtr->nextPtr; + TclpSysFree(blockPtr); + } + blockList = NULL; - blockPtr = (Block *) (((char *) ptr) - OFFSET); - if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) { - Tcl_Panic("alloc: invalid block: %p: %x %x", - blockPtr, blockPtr->magicNum1, blockPtr->magicNum2); + for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) { + nextPtr = blockPtr->nextPtr; + TclpSysFree(blockPtr); + blockPtr = nextPtr; } -#if RCHECK - if (((unsigned char *) ptr)[blockPtr->reqSize] != MAGIC) { - Tcl_Panic("alloc: invalid block: %p: %x %x %x", - blockPtr, blockPtr->magicNum1, blockPtr->magicNum2, - ((unsigned char *) ptr)[blockPtr->reqSize]); + bigBlocks.nextPtr = &bigBlocks; + bigBlocks.prevPtr = &bigBlocks; + + for (i=0 ; i<NBUCKETS ; i++) { + nextf[i] = NULL; +#ifdef MSTATS + numMallocs[i] = 0; +#endif } +#ifdef MSTATS + numMallocs[i] = 0; #endif - return blockPtr; + Tcl_MutexUnlock(allocMutexPtr); } /* *---------------------------------------------------------------------- * - * GetCache --- + * TclpAlloc -- * - * Gets per-thread memory cache, allocating it if necessary. + * Allocate more memory. * * Results: - * Pointer to cache. + * None. 
* * Side effects: * None. @@ -429,237 +255,183 @@ Ptr2Block( *---------------------------------------------------------------------- */ -static Cache * -GetCache(void) +char * +TclpAlloc( + unsigned int numBytes) /* Number of bytes to allocate. */ { - Cache *cachePtr; - unsigned int i; -#if TCL_ALLOCATOR == aZIPPY -#define allocSize (sizeof(Cache) + (NBUCKETS -1)*sizeof(Bucket)) -#elif TCL_ALLOCATOR == aNATIVE -#define allocSize sizeof(Cache) -#else - unsigned int allocSize; -#endif - - /* - * Set the params for the correct allocator - */ - -#if TCL_ALLOCATOR != aZIPPY - if (allocator == aNONE) { - /* This insures that it is set just once, as any changes after - * initialization guarantee a hard crash + register union overhead *overPtr; + register long bucket; + register unsigned amount; + struct block *bigBlockPtr = NULL; + + if (!allocInit) { + /* + * We have to make the "self initializing" because Tcl_Alloc may be + * used before any other part of Tcl. E.g., see main() for tclsh! */ - - ChooseAllocator(); - } -#if TCL_ALLOCATOR == aMULTI - if (allocator == aZIPPY) { - allocSize = (sizeof(Cache) + (NBUCKETS -1)*sizeof(Bucket)); - nBuckets = NBUCKETS; - } else { - allocSize = sizeof(Cache); - nBuckets = 1; + TclInitAlloc(); } -#endif -#endif + Tcl_MutexLock(allocMutexPtr); /* - * Check for first-time initialization. + * First the simple case: we simple allocate big blocks directly. */ -#if defined(TCL_THREADS) - if (listLockPtr == NULL) { - Tcl_Mutex *initLockPtr; - initLockPtr = Tcl_GetAllocMutex(); - Tcl_MutexLock(initLockPtr); - if (listLockPtr == NULL) { - listLockPtr = TclpNewAllocMutex(); - objLockPtr = TclpNewAllocMutex(); -#endif - for (i = 0; i < nBuckets; ++i) { - bucketInfo[i].blockSize = MINALLOC << i; -#if defined(TCL_THREADS) - /* TODO: clearer logic? Change move to keep? */ - bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i); - bucketInfo[i].numMove = i < NBUCKETS - 1 ? 
- 1 << (NBUCKETS - 2 - i) : 1; - bucketInfo[i].lockPtr = TclpNewAllocMutex(); -#endif - } -#if defined(TCL_THREADS) - sharedPtr = calloc(1, allocSize); - firstCachePtr = sharedPtr; + if (numBytes >= MAXMALLOC - OVERHEAD) { + if (numBytes <= UINT_MAX - OVERHEAD -sizeof(struct block)) { + bigBlockPtr = (struct block *) TclpSysAlloc((unsigned) + (sizeof(struct block) + OVERHEAD + numBytes), 0); } - Tcl_MutexUnlock(initLockPtr); - } + if (bigBlockPtr == NULL) { + Tcl_MutexUnlock(allocMutexPtr); + return NULL; + } + bigBlockPtr->nextPtr = bigBlocks.nextPtr; + bigBlocks.nextPtr = bigBlockPtr; + bigBlockPtr->prevPtr = &bigBlocks; + bigBlockPtr->nextPtr->prevPtr = bigBlockPtr; + + overPtr = (union overhead *) (bigBlockPtr + 1); + overPtr->overMagic0 = overPtr->overMagic1 = MAGIC; + overPtr->bucketIndex = 0xff; +#ifdef MSTATS + numMallocs[NBUCKETS]++; #endif - if (allocator == aPURIFY) { - bucketInfo[0].maxBlocks = 0; +#ifdef RCHECK + /* + * Record allocated size of block and bound space with magic numbers. + */ + + overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); + overPtr->rangeCheckMagic = RMAGIC; + BLOCK_END(overPtr) = RMAGIC; +#endif + + Tcl_MutexUnlock(allocMutexPtr); + return (void *)(overPtr+1); } - + /* - * Get this thread's cache, allocating if necessary. + * Convert amount of memory requested into closest block size stored in + * hash buckets which satisfies request. Account for space used per block + * for accounting. 
*/ - cachePtr = TclpGetAllocCache(); - if (cachePtr == NULL) { - cachePtr = calloc(1, allocSize); - if (cachePtr == NULL) { - Tcl_Panic("alloc: could not allocate new cache"); + amount = MINBLOCK; /* size of first bucket */ + bucket = MINBLOCK >> 4; + + while (numBytes + OVERHEAD > amount) { + amount <<= 1; + if (amount == 0) { + Tcl_MutexUnlock(allocMutexPtr); + return NULL; } -#if defined(TCL_THREADS) - Tcl_MutexLock(listLockPtr); - cachePtr->nextPtr = firstCachePtr; - firstCachePtr = cachePtr; - Tcl_MutexUnlock(listLockPtr); -#ifdef ZIPPY_STATS - cachePtr->owner = Tcl_GetCurrentThread(); -#endif - TclpSetAllocCache(cachePtr); -#endif + bucket++; } - return cachePtr; -} - -#if defined(TCL_THREADS) -/* - *---------------------------------------------------------------------- - * - * TclFreeAllocCache -- - * - * Flush and delete a cache, removing from list of caches. - * - * Results: - * None. - * - * Side effects: - * None. - * - *---------------------------------------------------------------------- - */ - -void -TclFreeAllocCache( - void *arg) -{ - Cache *cachePtr = arg; - Cache **nextPtrPtr; - register unsigned int bucket; + ASSERT(bucket < NBUCKETS); /* - * Flush blocks. + * If nothing in hash bucket right now, request more memory from the + * system. */ - for (bucket = 0; bucket < nBuckets; ++bucket) { - if (cachePtr->buckets[bucket].numFree > 0) { - PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree); + if ((overPtr = nextf[bucket]) == NULL) { + MoreCore(bucket); + if ((overPtr = nextf[bucket]) == NULL) { + Tcl_MutexUnlock(allocMutexPtr); + return NULL; } } /* - * Remove from pool list. 
+ * Remove from linked list */ - Tcl_MutexLock(listLockPtr); - nextPtrPtr = &firstCachePtr; - while (*nextPtrPtr != cachePtr) { - nextPtrPtr = &(*nextPtrPtr)->nextPtr; - } - *nextPtrPtr = cachePtr->nextPtr; - cachePtr->nextPtr = NULL; - Tcl_MutexUnlock(listLockPtr); - free(cachePtr); -} + nextf[bucket] = overPtr->next; + overPtr->overMagic0 = overPtr->overMagic1 = MAGIC; + overPtr->bucketIndex = (unsigned char) bucket; + +#ifdef MSTATS + numMallocs[bucket]++; #endif + +#ifdef RCHECK + /* + * Record allocated size of block and bound space with magic numbers. + */ + + overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); + overPtr->rangeCheckMagic = RMAGIC; + BLOCK_END(overPtr) = RMAGIC; +#endif + + Tcl_MutexUnlock(allocMutexPtr); + return ((char *)(overPtr + 1)); +} -#if TCL_ALLOCATOR != aNATIVE /* *---------------------------------------------------------------------- * - * TclpAlloc -- + * MoreCore -- + * + * Allocate more memory to the indicated bucket. * - * Allocate memory. + * Assumes Mutex is already held. * * Results: - * Pointer to memory just beyond Block pointer. + * None. * * Side effects: - * May allocate more blocks for a bucket. + * Attempts to get more memory from the system. * *---------------------------------------------------------------------- */ -char * -TclpAlloc( - unsigned int reqSize) +static void +MoreCore( + int bucket) /* What bucket to allocat to. */ { - Cache *cachePtr; - Block *blockPtr; - register int bucket; - size_t size; + register union overhead *overPtr; + register long size; /* size of desired block */ + long amount; /* amount to allocate */ + int numBlocks; /* how many blocks we get */ + struct block *blockPtr; - if (allocator < aNONE) { - return (void *) malloc(reqSize); - } - - GETCACHE(cachePtr); + /* + * sbrk_size <= 0 only for big, FLUFFY, requests (about 2^30 bytes on a + * VAX, I think) or for a negative arg. 
+ */ -#ifndef __LP64__ - if (sizeof(int) >= sizeof(size_t)) { - /* An unsigned int overflow can also be a size_t overflow */ - const size_t zero = 0; - const size_t max = ~zero; + size = 1 << (bucket + 3); + ASSERT(size > 0); - if (((size_t) reqSize) > max - OFFSET - RCHECK) { - /* Requested allocation exceeds memory */ - return NULL; - } + amount = MAXMALLOC; + numBlocks = amount / size; + ASSERT(numBlocks*size == amount); + + blockPtr = (struct block *) TclpSysAlloc((unsigned) + (sizeof(struct block) + amount), 1); + /* no more room! */ + if (blockPtr == NULL) { + return; } -#endif + blockPtr->nextPtr = blockList; + blockList = blockPtr; + + overPtr = (union overhead *) (blockPtr + 1); /* - * Increment the requested size to include room for the Block structure. - * Call malloc() directly if the required amount is greater than the - * largest block, otherwise pop the smallest block large enough, - * allocating more blocks if necessary. + * Add new memory allocated to that on free list for this hash bucket. 
*/ - size = reqSize + OFFSET; -#if RCHECK - size++; -#endif - if (size > MAXALLOC) { - bucket = nBuckets; - blockPtr = malloc(size); -#ifdef ZIPPY_STATS - if (blockPtr != NULL) { - cachePtr->totalAssigned += reqSize; - } -#endif - } else { - blockPtr = NULL; - bucket = 0; - while (bucketInfo[bucket].blockSize < size) { - bucket++; - } - if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) { - blockPtr = cachePtr->buckets[bucket].firstPtr; - cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock; - cachePtr->buckets[bucket].numFree--; -#ifdef ZIPPY_STATS - cachePtr->buckets[bucket].numRemoves++; - cachePtr->buckets[bucket].totalAssigned += reqSize; -#endif - } - if (blockPtr == NULL) { - return NULL; - } + nextf[bucket] = overPtr; + while (--numBlocks > 0) { + overPtr->next = (union overhead *)((caddr_t)overPtr + size); + overPtr = (union overhead *)((caddr_t)overPtr + size); } - return Block2Ptr(blockPtr, bucket, reqSize); + overPtr->next = NULL; } /* @@ -667,72 +439,64 @@ TclpAlloc( * * TclpFree -- * - * Return blocks to the thread block cache. + * Free memory. * * Results: * None. * * Side effects: - * May move blocks to shared cache. + * None. * *---------------------------------------------------------------------- */ void TclpFree( - char *ptr) + char *oldPtr) /* Pointer to memory to free. 
*/ { - Cache *cachePtr; - Block *blockPtr; - int bucket; + register long size; + register union overhead *overPtr; + struct block *bigBlockPtr; - if (ptr == NULL) { + if (oldPtr == NULL) { return; } - if (allocator < aNONE) { - return free((char *) ptr); + Tcl_MutexLock(allocMutexPtr); + overPtr = (union overhead *)((caddr_t)oldPtr - sizeof(union overhead)); + + ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */ + ASSERT(overPtr->overMagic1 == MAGIC); + if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) { + Tcl_MutexUnlock(allocMutexPtr); + return; } -#ifdef ZIPPY_STATS - GETCACHE(cachePtr); + RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC); + RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC); + size = overPtr->bucketIndex; + if (size == 0xff) { +#ifdef MSTATS + numMallocs[NBUCKETS]--; #endif - /* - * Get the block back from the user pointer and call system free directly - * for large blocks. Otherwise, push the block back on the bucket and move - * blocks to the shared cache if there are now too many free. 
- */ + bigBlockPtr = (struct block *) overPtr - 1; + bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr; + bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr; + TclpSysFree(bigBlockPtr); - blockPtr = Ptr2Block(ptr); - bucket = blockPtr->sourceBucket; - if (bucket == nBuckets) { -#ifdef ZIPPY_STATS - cachePtr->totalAssigned -= blockPtr->reqSize; -#endif - free(blockPtr); + Tcl_MutexUnlock(allocMutexPtr); return; } + ASSERT(size < NBUCKETS); + overPtr->next = nextf[size]; /* also clobbers overMagic */ + nextf[size] = overPtr; -#ifndef ZIPPY_STATS - GETCACHE(cachePtr); +#ifdef MSTATS + numMallocs[size]--; #endif -#ifdef ZIPPY_STATS - cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize; -#endif - blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr; - cachePtr->buckets[bucket].firstPtr = blockPtr; - cachePtr->buckets[bucket].numFree++; -#ifdef ZIPPY_STATS - cachePtr->buckets[bucket].numInserts++; -#endif -#if defined(TCL_THREADS) - if (cachePtr != sharedPtr && - cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) { - PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove); - } -#endif + Tcl_MutexUnlock(allocMutexPtr); } /* @@ -740,366 +504,190 @@ TclpFree( * * TclpRealloc -- * - * Re-allocate memory to a larger or smaller size. + * Reallocate memory. * * Results: - * Pointer to memory just beyond Block pointer. + * None. * * Side effects: - * Previous memory, if any, may be freed. + * None. * *---------------------------------------------------------------------- */ char * -TclpRealloc( - char *ptr, - unsigned int reqSize) +TclpRealloc( + char *oldPtr, /* Pointer to alloced block. */ + unsigned int numBytes) /* New size of memory. 
*/ { - Cache *cachePtr; - Block *blockPtr; - void *newPtr; - size_t size, min; - int bucket; - - if (allocator < aNONE) { - return (void *) realloc((char *) ptr, reqSize); - } + int i; + union overhead *overPtr; + struct block *bigBlockPtr; + int expensive; + unsigned long maxSize; - GETCACHE(cachePtr); - - if (ptr == NULL) { - return TclpAlloc(reqSize); + if (oldPtr == NULL) { + return TclpAlloc(numBytes); } -#ifndef __LP64__ - if (sizeof(int) >= sizeof(size_t)) { - /* An unsigned int overflow can also be a size_t overflow */ - const size_t zero = 0; - const size_t max = ~zero; + Tcl_MutexLock(allocMutexPtr); - if (((size_t) reqSize) > max - OFFSET - RCHECK) { - /* Requested allocation exceeds memory */ - return NULL; - } + overPtr = (union overhead *)((caddr_t)oldPtr - sizeof(union overhead)); + + ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */ + ASSERT(overPtr->overMagic1 == MAGIC); + if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) { + Tcl_MutexUnlock(allocMutexPtr); + return NULL; } -#endif + + RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC); + RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC); + i = overPtr->bucketIndex; /* - * If the block is not a system block and fits in place, simply return the - * existing pointer. Otherwise, if the block is a system block and the new - * size would also require a system block, call realloc() directly. + * If the block isn't in a bin, just realloc it. 
*/ - blockPtr = Ptr2Block(ptr); - size = reqSize + OFFSET; -#if RCHECK - size++; -#endif - bucket = blockPtr->sourceBucket; - if (bucket != nBuckets) { - if (bucket > 0) { - min = bucketInfo[bucket-1].blockSize; - } else { - min = 0; - } - if (size > min && size <= bucketInfo[bucket].blockSize) { -#ifdef ZIPPY_STATS - cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize; - cachePtr->buckets[bucket].totalAssigned += reqSize; -#endif - return Block2Ptr(blockPtr, bucket, reqSize); - } - } else if (size > MAXALLOC) { -#ifdef ZIPPY_STATS - cachePtr->totalAssigned -= blockPtr->reqSize; - cachePtr->totalAssigned += reqSize; -#endif - blockPtr = realloc(blockPtr, size); - if (blockPtr == NULL) { + if (i == 0xff) { + struct block *prevPtr, *nextPtr; + bigBlockPtr = (struct block *) overPtr - 1; + prevPtr = bigBlockPtr->prevPtr; + nextPtr = bigBlockPtr->nextPtr; + bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr, + sizeof(struct block) + OVERHEAD + numBytes); + if (bigBlockPtr == NULL) { + Tcl_MutexUnlock(allocMutexPtr); return NULL; } - return Block2Ptr(blockPtr, nBuckets, reqSize); - } - /* - * Finally, perform an expensive malloc/copy/free. - */ + if (prevPtr->nextPtr != bigBlockPtr) { + /* + * If the block has moved, splice the new block into the list + * where the old block used to be. + */ - newPtr = TclpAlloc(reqSize); - if (newPtr != NULL) { - if (reqSize > blockPtr->reqSize) { - reqSize = blockPtr->reqSize; + prevPtr->nextPtr = bigBlockPtr; + nextPtr->prevPtr = bigBlockPtr; } - memcpy(newPtr, ptr, reqSize); - TclpFree(ptr); - } - return newPtr; -} - -/* - *---------------------------------------------------------------------- - * - * TclAllocMaximize -- - * - * Given a TclpAlloc'ed pointer, it returns the maximal size that can be used - * by the allocated memory. This is almost always larger than the requested - * size, as it corresponds to the bucket's size. - * - * Results: - * New size. 
- * - *---------------------------------------------------------------------- - */ - unsigned int - TclAllocMaximize( - void *ptr) -{ - Block *blockPtr; - int bucket; - size_t oldSize, newSize; - if (allocator < aNONE) { - /* - * No info, return UINT_MAX as a signal. - */ - - return UINT_MAX; - } - - blockPtr = Ptr2Block(ptr); - bucket = blockPtr->sourceBucket; - - if (bucket == nBuckets) { + overPtr = (union overhead *) (bigBlockPtr + 1); + +#ifdef MSTATS + numMallocs[NBUCKETS]++; +#endif + +#ifdef RCHECK /* - * System malloc'ed: no info + * Record allocated size of block and update magic number bounds. */ - - return UINT_MAX; - } - oldSize = blockPtr->reqSize; - newSize = bucketInfo[bucket].blockSize - OFFSET - RCHECK; - blockPtr->reqSize = newSize; -#if RCHECK - ((unsigned char *)(ptr))[newSize] = MAGIC; + overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); + BLOCK_END(overPtr) = RMAGIC; #endif -#ifdef ZIPPY_STATS - { - Cache *cachePtr; - GETCACHE(cachePtr); - cachePtr->buckets[bucket].totalAssigned += (newSize - oldSize); + + Tcl_MutexUnlock(allocMutexPtr); + return (char *)(overPtr+1); + } + maxSize = 1 << (i+3); + expensive = 0; + if (numBytes+OVERHEAD > maxSize) { + expensive = 1; + } else if (i>0 && numBytes+OVERHEAD < maxSize/2) { + expensive = 1; } -#endif - return newSize; -} -#ifdef ZIPPY_STATS - -/* - *---------------------------------------------------------------------- - * - * Tcl_GetMemoryInfo -- - * - * Return a list-of-lists of memory stats. - * - * Results: - * None. - * - * Side effects: - * List appended to given dstring. 
- * - *---------------------------------------------------------------------- - */ + if (expensive) { + void *newPtr; -void -Tcl_GetMemoryInfo( - Tcl_DString *dsPtr) -{ - Cache *cachePtr; - char buf[200]; - unsigned int n; - - Tcl_MutexLock(listLockPtr); - cachePtr = firstCachePtr; - while (cachePtr != NULL) { - Tcl_DStringStartSublist(dsPtr); -#if defined(TCL_THREADS) - if (cachePtr == sharedPtr) { - Tcl_DStringAppendElement(dsPtr, "shared"); - } else { - sprintf(buf, "thread%p", cachePtr->owner); - Tcl_DStringAppendElement(dsPtr, buf); + Tcl_MutexUnlock(allocMutexPtr); + + newPtr = TclpAlloc(numBytes); + if (newPtr == NULL) { + return NULL; } -#else - Tcl_DStringAppendElement(dsPtr, "unthreaded"); -#endif - for (n = 0; n < nBuckets; ++n) { - sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld", - (unsigned long) bucketInfo[n].blockSize, - cachePtr->buckets[n].numFree, - cachePtr->buckets[n].numRemoves, - cachePtr->buckets[n].numInserts, - cachePtr->buckets[n].totalAssigned, - cachePtr->buckets[n].numLocks, - cachePtr->buckets[n].numWaits); - Tcl_DStringAppendElement(dsPtr, buf); + maxSize -= OVERHEAD; + if (maxSize < numBytes) { + numBytes = maxSize; } - Tcl_DStringEndSublist(dsPtr); -#if defined(TCL_THREADS) - cachePtr = cachePtr->nextPtr; -#else - cachePtr = NULL; -#endif + memcpy(newPtr, oldPtr, (size_t) numBytes); + TclpFree(oldPtr); + return newPtr; } - Tcl_MutexUnlock(listLockPtr); -} -#endif /* ZIPPY_STATS */ -#endif /* code above only for NATIVE allocator */ - -/* - *---------------------------------------------------------------------- - * - * TclSmallAlloc -- - * - * Allocate a Tcl_Obj sized block from the per-thread cache. - * - * Results: - * Pointer to uninitialized memory. - * - * Side effects: - * May move blocks from shared cached or allocate new blocks if - * list is empty. 
- * - *---------------------------------------------------------------------- - */ -void * -TclSmallAlloc(void) -{ - Cache *cachePtr; - Block *blockPtr; - Bucket *bucketPtr; - - GETCACHE(cachePtr); - bucketPtr = &cachePtr->buckets[0]; - - blockPtr = bucketPtr->firstPtr; - if (bucketPtr->numFree || GetBlocks(cachePtr, 0)) { - blockPtr = bucketPtr->firstPtr; - bucketPtr->firstPtr = blockPtr->nextBlock; - bucketPtr->numFree--; -#ifdef ZIPPY_STATS - bucketPtr->numRemoves++; - bucketPtr->totalAssigned += sizeof(Tcl_Obj); + /* + * Ok, we don't have to copy, it fits as-is + */ + +#ifdef RCHECK + overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); + BLOCK_END(overPtr) = RMAGIC; #endif - } - return blockPtr; + + Tcl_MutexUnlock(allocMutexPtr); + return(oldPtr); } /* *---------------------------------------------------------------------- * - * TclSmallFree -- + * mstats -- * - * Return a free Tcl_Obj-sized block to the per-thread cache. + * Prints two lines of numbers, one showing the length of the free list + * for each size category, the second showing the number of mallocs - + * frees for each size category. * * Results: * None. * * Side effects: - * May move free blocks to shared list upon hitting high water mark. + * None. * *---------------------------------------------------------------------- */ +#ifdef MSTATS void -TclSmallFree( - void *ptr) +mstats( + char *s) /* Where to write info. 
*/ { - Cache *cachePtr; - Block *blockPtr = ptr; - Bucket *bucketPtr; + register int i, j; + register union overhead *overPtr; + int totalFree = 0, totalUsed = 0; - GETCACHE(cachePtr); - bucketPtr = &cachePtr->buckets[0]; + Tcl_MutexLock(allocMutexPtr); -#ifdef ZIPPY_STATS - bucketPtr->totalAssigned -= sizeof(Tcl_Obj); -#endif - blockPtr->nextBlock = bucketPtr->firstPtr; - bucketPtr->firstPtr = blockPtr; - bucketPtr->numFree++; -#ifdef ZIPPY_STATS - bucketPtr->numInserts++; -#endif - - if (bucketPtr->numFree > bucketInfo[0].maxBlocks) { - if (allocator == aPURIFY) { - /* undo */ - bucketPtr->numFree = 0; - bucketPtr->firstPtr = NULL; - free((char *) blockPtr); - return; + fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s); + for (i = 0; i < NBUCKETS; i++) { + for (j=0, overPtr=nextf[i]; overPtr; overPtr=overPtr->next, j++) { + fprintf(stderr, " %d", j); } -#if defined(TCL_THREADS) - PutBlocks(cachePtr, 0, bucketInfo[0].numMove); -#endif + totalFree += j * (1 << (i + 3)); } -} - -#if defined(TCL_THREADS) -/* - *---------------------------------------------------------------------- - * - * LockBucket, UnlockBucket -- - * - * Set/unset the lock to access a bucket in the shared cache. - * - * Results: - * None. - * - * Side effects: - * Lock activity and contention are monitored globally and on a per-cache - * basis. 
- * - *---------------------------------------------------------------------- - */ -static void -LockBucket( - Cache *cachePtr, - int bucket) -{ -#if 0 - if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) { - Tcl_MutexLock(bucketInfo[bucket].lockPtr); - cachePtr->buckets[bucket].numWaits++; - sharedPtr->buckets[bucket].numWaits++; + fprintf(stderr, "\nused:\t"); + for (i = 0; i < NBUCKETS; i++) { + fprintf(stderr, " %d", numMallocs[i]); + totalUsed += numMallocs[i] * (1 << (i + 3)); } -#else - Tcl_MutexLock(bucketInfo[bucket].lockPtr); -#endif -#ifdef ZIPPY_STATS - cachePtr->buckets[bucket].numLocks++; - sharedPtr->buckets[bucket].numLocks++; -#endif -} -static void -UnlockBucket( - Cache *cachePtr, - int bucket) -{ - Tcl_MutexUnlock(bucketInfo[bucket].lockPtr); + fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n", + totalUsed, totalFree); + fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n", + MAXMALLOC, numMallocs[NBUCKETS]); + + Tcl_MutexUnlock(allocMutexPtr); } +#endif + +#else /* !USE_TCLALLOC */ /* *---------------------------------------------------------------------- * - * PutBlocks -- + * TclpAlloc -- * - * Return unused blocks to the shared cache. + * Allocate more memory. * * Results: * None. @@ -1110,212 +698,43 @@ UnlockBucket( *---------------------------------------------------------------------- */ -static void -PutBlocks( - Cache *cachePtr, - int bucket, - int numMove) +char * +TclpAlloc( + unsigned int numBytes) /* Number of bytes to allocate. */ { - register Block *lastPtr, *firstPtr; - register int n = numMove; - - /* - * Before acquiring the lock, walk the block list to find the last block - * to be moved. 
- */ - - firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr; - while (--n > 0) { - lastPtr = lastPtr->nextBlock; - } - cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock; - cachePtr->buckets[bucket].numFree -= numMove; - - /* - * Aquire the lock and place the list of blocks at the front of the shared - * cache bucket. - */ - - LockBucket(cachePtr, bucket); - lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr; - sharedPtr->buckets[bucket].firstPtr = firstPtr; - sharedPtr->buckets[bucket].numFree += numMove; - UnlockBucket(cachePtr, bucket); + return (char *) malloc(numBytes); } -#endif /* *---------------------------------------------------------------------- * - * GetBlocks -- - * - * Get more blocks for a bucket. - * - * Results: - * 1 if blocks where allocated, 0 otherwise. - * - * Side effects: - * Cache may be filled with available blocks. - * - *---------------------------------------------------------------------- - */ - -static int -GetBlocks( - Cache *cachePtr, - int bucket) -{ - register Block *blockPtr = NULL; - register int n; - - if (allocator == aPURIFY) { - if (bucket) { - Tcl_Panic("purify mode asking for blocks?"); - } - cachePtr->buckets[0].firstPtr = (Block *) calloc(1, MINALLOC); - cachePtr->buckets[0].numFree = 1; - return 1; - } - -#if defined(TCL_THREADS) - /* - * First, atttempt to move blocks from the shared cache. Note the - * potentially dirty read of numFree before acquiring the lock which is a - * slight performance enhancement. The value is verified after the lock is - * actually acquired. - */ - - if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) { - LockBucket(cachePtr, bucket); - if (sharedPtr->buckets[bucket].numFree > 0) { - - /* - * Either move the entire list or walk the list to find the last - * block to move. 
- */ - - n = bucketInfo[bucket].numMove; - if (n >= sharedPtr->buckets[bucket].numFree) { - cachePtr->buckets[bucket].firstPtr = - sharedPtr->buckets[bucket].firstPtr; - cachePtr->buckets[bucket].numFree = - sharedPtr->buckets[bucket].numFree; - sharedPtr->buckets[bucket].firstPtr = NULL; - sharedPtr->buckets[bucket].numFree = 0; - } else { - blockPtr = sharedPtr->buckets[bucket].firstPtr; - cachePtr->buckets[bucket].firstPtr = blockPtr; - sharedPtr->buckets[bucket].numFree -= n; - cachePtr->buckets[bucket].numFree = n; - while (--n > 0) { - blockPtr = blockPtr->nextBlock; - } - sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock; - blockPtr->nextBlock = NULL; - } - } - UnlockBucket(cachePtr, bucket); - } -#endif - - if (cachePtr->buckets[bucket].numFree == 0) { - register size_t size; - -#if TCL_ALLOCATOR != aNATIVE - /* - * If no blocks could be moved from shared, first look for a larger - * block in this cache OR the shared cache to split up. - */ - - n = nBuckets; - size = 0; /* lint */ - while (--n > bucket) { - if (cachePtr->buckets[n].numFree > 0) { - size = bucketInfo[n].blockSize; - blockPtr = cachePtr->buckets[n].firstPtr; - cachePtr->buckets[n].firstPtr = blockPtr->nextBlock; - cachePtr->buckets[n].numFree--; - break; - } - } -#if defined(TCL_THREADS) - if (blockPtr == NULL) { - n = nBuckets; - size = 0; /* lint */ - while (--n > bucket) { - if (sharedPtr->buckets[n].numFree > 0) { - size = bucketInfo[n].blockSize; - LockBucket(cachePtr, n); - if (sharedPtr->buckets[n].numFree > 0) { - blockPtr = sharedPtr->buckets[n].firstPtr; - sharedPtr->buckets[n].firstPtr = blockPtr->nextBlock; - sharedPtr->buckets[n].numFree--; - UnlockBucket(cachePtr, n); - break; - } - UnlockBucket(cachePtr, n); - } - } - } -#endif -#endif - /* - * Otherwise, allocate a big new block directly. 
- */ - - if (blockPtr == NULL) { - size = MAXALLOC; - blockPtr = malloc(size); - if (blockPtr == NULL) { - return 0; - } - } - - /* - * Split the larger block into smaller blocks for this bucket. - */ - - n = size / bucketInfo[bucket].blockSize; - cachePtr->buckets[bucket].numFree = n; - cachePtr->buckets[bucket].firstPtr = blockPtr; - while (--n > 0) { - blockPtr->nextBlock = (Block *) - ((char *) blockPtr + bucketInfo[bucket].blockSize); - blockPtr = blockPtr->nextBlock; - } - blockPtr->nextBlock = NULL; - } - return 1; -} - -/* - *------------------------------------------------------------------------- - * - * TclInitAlloc -- + * TclpFree -- * - * Initialize the memory system. + * Free memory. * * Results: * None. * * Side effects: - * Initialize the mutex used to serialize allocations. + * None. * - *------------------------------------------------------------------------- + *---------------------------------------------------------------------- */ void -TclInitAlloc(void) +TclpFree( + char *oldPtr) /* Pointer to memory to free. */ { + free(oldPtr); + return; } /* *---------------------------------------------------------------------- * - * TclFinalizeAlloc -- + * TclpRealloc -- * - * This procedure is used to destroy all private resources used in this - * file. + * Reallocate memory. * * Results: * None. @@ -1326,55 +745,16 @@ TclInitAlloc(void) *---------------------------------------------------------------------- */ -void -TclFinalizeAlloc(void) -{ -#if defined(TCL_THREADS) - unsigned int i; - - for (i = 0; i < nBuckets; ++i) { - TclpFreeAllocMutex(bucketInfo[i].lockPtr); - bucketInfo[i].lockPtr = NULL; - } - - TclpFreeAllocMutex(objLockPtr); - objLockPtr = NULL; - - TclpFreeAllocMutex(listLockPtr); - listLockPtr = NULL; - - TclpFreeAllocCache(NULL); -#endif -} - -#if TCL_ALLOCATOR != aZIPPY -static void -ChooseAllocator() +char * +TclpRealloc( + char *oldPtr, /* Pointer to alloced block. */ + unsigned int numBytes) /* New size of memory. 
*/ { - char *choice = getenv("TCL_ALLOCATOR"); - - /* - * This is only called with ALLOCATOR_BASE aZIPPY (when compiled with - * aMULTI) or aNATIVE (when compiled with aNATIVE). - */ - - allocator = ALLOCATOR_BASE; - - if (choice) { - /* - * Only override the base when requesting native or purify - */ - - if (!strcmp(choice, "aNATIVE")) { - allocator = aNATIVE; - } else if (!strcmp(choice, "aPURIFY")) { - allocator = aPURIFY; - } - } + return (char *) realloc(oldPtr, numBytes); } -#endif -#endif /* end of !PURIFY */ +#endif /* !USE_TCLALLOC */ +#endif /* !TCL_THREADS */ /* * Local Variables: diff --git a/generic/tclAssembly.c b/generic/tclAssembly.c index 2562558..754941f 100644 --- a/generic/tclAssembly.c +++ b/generic/tclAssembly.c @@ -1095,9 +1095,11 @@ NewAssemblyEnv( * generation*/ int flags) /* Compilation flags (TCL_EVAL_DIRECT) */ { - AssemblyEnv* assemEnvPtr = ckalloc(sizeof(AssemblyEnv)); + Tcl_Interp* interp = (Tcl_Interp*) envPtr->iPtr; + /* Tcl interpreter */ + AssemblyEnv* assemEnvPtr = TclStackAlloc(interp, sizeof(AssemblyEnv)); /* Assembler environment under construction */ - Tcl_Parse* parsePtr = ckalloc(sizeof(Tcl_Parse)); + Tcl_Parse* parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse)); /* Parse of one line of assembly code */ assemEnvPtr->envPtr = envPtr; @@ -1142,6 +1144,11 @@ static void FreeAssemblyEnv( AssemblyEnv* assemEnvPtr) /* Environment to free */ { + CompileEnv* envPtr = assemEnvPtr->envPtr; + /* Compilation environment being used for code + * generation */ + Tcl_Interp* interp = (Tcl_Interp*) envPtr->iPtr; + /* Tcl interpreter */ BasicBlock* thisBB; /* Pointer to a basic block being deleted */ BasicBlock* nextBB; /* Pointer to a deleted basic block's * successor */ @@ -1184,8 +1191,8 @@ FreeAssemblyEnv( * Dispose what's left. 
*/ - ckfree(assemEnvPtr->parsePtr); - ckfree(assemEnvPtr); + TclStackFree(interp, assemEnvPtr->parsePtr); + TclStackFree(interp, assemEnvPtr); } /* diff --git a/generic/tclBasic.c b/generic/tclBasic.c index 5e676ba..5f2b301 100644 --- a/generic/tclBasic.c +++ b/generic/tclBasic.c @@ -728,6 +728,11 @@ Tcl_CreateInterp(void) * cache was already initialised by the call to alloc the interp struct. */ +#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) + iPtr->allocCache = TclpGetAllocCache(); +#else + iPtr->allocCache = NULL; +#endif iPtr->pendingObjDataPtr = NULL; iPtr->asyncReadyPtr = TclGetAsyncReadyPtr(); iPtr->deferredCallbacks = NULL; @@ -2314,7 +2319,8 @@ TclInvokeStringCommand( { Command *cmdPtr = clientData; int i, result; - const char **argv = ckalloc((unsigned)(objc + 1) * sizeof(char *)); + const char **argv = + TclStackAlloc(interp, (unsigned)(objc + 1) * sizeof(char *)); for (i = 0; i < objc; i++) { argv[i] = Tcl_GetString(objv[i]); @@ -2327,7 +2333,7 @@ TclInvokeStringCommand( result = cmdPtr->proc(cmdPtr->clientData, interp, objc, argv); - ckfree((void *) argv); + TclStackFree(interp, (void *) argv); return result; } @@ -2362,7 +2368,8 @@ TclInvokeObjectCommand( Command *cmdPtr = clientData; Tcl_Obj *objPtr; int i, length, result; - Tcl_Obj **objv = ckalloc((unsigned)(argc * sizeof(Tcl_Obj *))); + Tcl_Obj **objv = + TclStackAlloc(interp, (unsigned)(argc * sizeof(Tcl_Obj *))); for (i = 0; i < argc; i++) { length = strlen(argv[i]); @@ -2398,7 +2405,7 @@ TclInvokeObjectCommand( objPtr = objv[i]; Tcl_DecrRefCount(objPtr); } - ckfree(objv); + TclStackFree(interp, objv); return result; } @@ -4556,7 +4563,7 @@ TEOV_NotFound( Tcl_ListObjGetElements(NULL, currNsPtr->unknownHandlerPtr, &handlerObjc, &handlerObjv); newObjc = objc + handlerObjc; - newObjv = ckalloc((int) sizeof(Tcl_Obj *) * newObjc); + newObjv = TclStackAlloc(interp, (int) sizeof(Tcl_Obj *) * newObjc); /* * Copy command prefix from unknown handler and add on the real command's @@ -4595,7 +4602,7 
@@ TEOV_NotFound( for (i = 0; i < handlerObjc; ++i) { Tcl_DecrRefCount(newObjv[i]); } - ckfree(newObjv); + TclStackFree(interp, newObjv); return TCL_ERROR; } @@ -4633,7 +4640,7 @@ TEOV_NotFoundCallback( for (i = 0; i < objc; ++i) { Tcl_DecrRefCount(objv[i]); } - ckfree(objv); + TclStackFree(interp, objv); return result; } @@ -4930,11 +4937,12 @@ TclEvalEx( * state has been allocated while evaluating * the script, so that it can be freed * properly if an error occurs. */ - Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse)); - CmdFrame *eeFramePtr = ckalloc(sizeof(CmdFrame)); - Tcl_Obj **stackObjArray = ckalloc(minObjs * sizeof(Tcl_Obj *)); - int *expandStack = ckalloc(minObjs * sizeof(int)); - int *linesStack = ckalloc(minObjs * sizeof(int)); + Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse)); + CmdFrame *eeFramePtr = TclStackAlloc(interp, sizeof(CmdFrame)); + Tcl_Obj **stackObjArray = + TclStackAlloc(interp, minObjs * sizeof(Tcl_Obj *)); + int *expandStack = TclStackAlloc(interp, minObjs * sizeof(int)); + int *linesStack = TclStackAlloc(interp, minObjs * sizeof(int)); /* TIP #280 Structures for tracking of command * locations. */ int *clNext = NULL; /* Pointer for the tracking of invisible @@ -5330,11 +5338,11 @@ TclEvalEx( if (eeFramePtr->type == TCL_LOCATION_SOURCE) { Tcl_DecrRefCount(eeFramePtr->data.eval.path); } - ckfree(linesStack); - ckfree(expandStack); - ckfree(stackObjArray); - ckfree(eeFramePtr); - ckfree(parsePtr); + TclStackFree(interp, linesStack); + TclStackFree(interp, expandStack); + TclStackFree(interp, stackObjArray); + TclStackFree(interp, eeFramePtr); + TclStackFree(interp, parsePtr); return code; } @@ -5972,7 +5980,7 @@ TclNREvalObjEx( * should be pushed, as needed by alias and ensemble redirections. 
*/ - eoFramePtr = ckalloc(sizeof(CmdFrame)); + eoFramePtr = TclStackAlloc(interp, sizeof(CmdFrame)); eoFramePtr->nline = 0; eoFramePtr->line = NULL; @@ -6090,7 +6098,7 @@ TclNREvalObjEx( */ int pc = 0; - CmdFrame *ctxPtr = ckalloc(sizeof(CmdFrame)); + CmdFrame *ctxPtr = TclStackAlloc(interp, sizeof(CmdFrame)); *ctxPtr = *invoker; if (invoker->type == TCL_LOCATION_BC) { @@ -6131,7 +6139,7 @@ TclNREvalObjEx( Tcl_DecrRefCount(ctxPtr->data.eval.path); } - ckfree(ctxPtr); + TclStackFree(interp, ctxPtr); } /* @@ -6210,7 +6218,7 @@ TEOEx_ListCallback( if (eoFramePtr) { iPtr->cmdFramePtr = eoFramePtr->nextPtr; - ckfree(eoFramePtr); + TclStackFree(interp, eoFramePtr); } TclDecrRefCount(listPtr); diff --git a/generic/tclCkalloc.c b/generic/tclCkalloc.c index afc6594..056841d 100644 --- a/generic/tclCkalloc.c +++ b/generic/tclCkalloc.c @@ -1296,6 +1296,10 @@ TclFinalizeMemorySubsystem(void) Tcl_MutexUnlock(ckallocMutexPtr); #endif + +#if USE_TCLALLOC + TclFinalizeAllocSubsystem(); +#endif } /* diff --git a/generic/tclCmdAH.c b/generic/tclCmdAH.c index b4afdef..3edfa54 100644 --- a/generic/tclCmdAH.c +++ b/generic/tclCmdAH.c @@ -2348,7 +2348,7 @@ TclNRForObjCmd( return TCL_ERROR; } - TclCkSmallAlloc(sizeof(ForIterData), iterPtr); + TclSmallAllocEx(interp, sizeof(ForIterData), iterPtr); iterPtr->cond = objv[2]; iterPtr->body = objv[4]; iterPtr->next = objv[3]; @@ -2376,7 +2376,7 @@ ForSetupCallback( if (result == TCL_ERROR) { Tcl_AddErrorInfo(interp, "\n (\"for\" initial command)"); } - TclSmallFree(iterPtr); + TclSmallFreeEx(interp, iterPtr); return result; } TclNRAddCallback(interp, TclNRForIterCallback, iterPtr, NULL, NULL, NULL); @@ -2414,7 +2414,7 @@ TclNRForIterCallback( Tcl_AppendObjToErrorInfo(interp, Tcl_ObjPrintf(iterPtr->msg, Tcl_GetErrorLine(interp))); } - TclSmallFree(iterPtr); + TclSmallFreeEx(interp, iterPtr); return result; } @@ -2431,11 +2431,11 @@ ForCondCallback( if (result != TCL_OK) { Tcl_DecrRefCount(boolObj); - TclSmallFree(iterPtr); + 
TclSmallFreeEx(interp, iterPtr); return result; } else if (Tcl_GetBooleanFromObj(interp, boolObj, &value) != TCL_OK) { Tcl_DecrRefCount(boolObj); - TclSmallFree(iterPtr); + TclSmallFreeEx(interp, iterPtr); return TCL_ERROR; } Tcl_DecrRefCount(boolObj); @@ -2452,7 +2452,7 @@ ForCondCallback( return TclNREvalObjEx(interp, iterPtr->body, 0, iPtr->cmdFramePtr, iterPtr->word); } - TclSmallFree(iterPtr); + TclSmallFreeEx(interp, iterPtr); return result; } @@ -2492,7 +2492,7 @@ ForPostNextCallback( if ((result != TCL_BREAK) && (result != TCL_OK)) { if (result == TCL_ERROR) { Tcl_AddErrorInfo(interp, "\n (\"for\" loop-end command)"); - TclSmallFree(iterPtr); + TclSmallFreeEx(interp, iterPtr); } return result; } @@ -2560,7 +2560,7 @@ TclNRForeachCmd( * allocation for better performance. */ - statePtr = ckalloc( + statePtr = TclStackAlloc(interp, sizeof(struct ForeachState) + 3 * numLists * sizeof(int) + 2 * numLists * (sizeof(Tcl_Obj **) + sizeof(Tcl_Obj *))); memset(statePtr, 0, @@ -2754,7 +2754,7 @@ ForeachCleanup( TclDecrRefCount(statePtr->aCopyList[i]); } } - ckfree(statePtr); + TclStackFree(interp, statePtr); } /* diff --git a/generic/tclCmdIL.c b/generic/tclCmdIL.c index cd4a72b..b38ec9f 100644 --- a/generic/tclCmdIL.c +++ b/generic/tclCmdIL.c @@ -1313,7 +1313,7 @@ TclInfoFrame( * Execution of bytecode. Talk to the BC engine to fill out the frame. 
*/ - CmdFrame *fPtr = ckalloc(sizeof(CmdFrame)); + CmdFrame *fPtr = TclStackAlloc(interp, sizeof(CmdFrame)); *fPtr = *framePtr; @@ -1347,7 +1347,7 @@ TclInfoFrame( ADD_PAIR("cmd", Tcl_NewStringObj(fPtr->cmd.str.cmd, fPtr->cmd.str.len)); - ckfree(fPtr); + TclStackFree(interp, fPtr); break; } @@ -3016,7 +3016,7 @@ Tcl_LsearchObjCmd( int j; if (sortInfo.indexc > 1) { - ckfree(sortInfo.indexv); + TclStackFree(interp, sortInfo.indexv); } if (i > objc-4) { if (startPtr != NULL) { @@ -3051,7 +3051,7 @@ Tcl_LsearchObjCmd( break; default: sortInfo.indexv = - ckalloc(sizeof(int) * sortInfo.indexc); + TclStackAlloc(interp, sizeof(int) * sortInfo.indexc); } /* @@ -3158,7 +3158,7 @@ Tcl_LsearchObjCmd( if (offset > listc-1) { if (sortInfo.indexc > 1) { - ckfree(sortInfo.indexv); + TclStackFree(interp, sortInfo.indexv); } if (allMatches || inlineReturn) { Tcl_ResetResult(interp); @@ -3483,7 +3483,7 @@ Tcl_LsearchObjCmd( done: if (sortInfo.indexc > 1) { - ckfree(sortInfo.indexv); + TclStackFree(interp, sortInfo.indexv); } return result; } @@ -3770,7 +3770,7 @@ Tcl_LsortObjCmd( break; default: sortInfo.indexv = - ckalloc(sizeof(int) * sortInfo.indexc); + TclStackAlloc(interp, sizeof(int) * sortInfo.indexc); allocatedIndexVector = 1; /* Cannot use indexc field, as it * might be decreased by 1 later. */ } @@ -3865,7 +3865,6 @@ Tcl_LsortObjCmd( /* * Do not shrink the actual memory block used; that doesn't * work with TclStackAlloc-allocated memory. [Bug 2918962] - * FIXME: TclStackAlloc is now retired, we could shrink it. */ for (i = 0; i < sortInfo.indexc; i++) { @@ -3903,7 +3902,7 @@ Tcl_LsortObjCmd( * begins sorting it into the sublists as it appears. 
*/ - elementArray = ckalloc(length * sizeof(SortElement)); + elementArray = TclStackAlloc(interp, length * sizeof(SortElement)); for (i=0; i < length; i++){ idx = groupSize * i + groupOffset; @@ -4027,7 +4026,7 @@ Tcl_LsortObjCmd( } done1: - ckfree(elementArray); + TclStackFree(interp, elementArray); done: if (sortInfo.sortMode == SORTMODE_COMMAND) { @@ -4037,7 +4036,7 @@ Tcl_LsortObjCmd( } done2: if (allocatedIndexVector) { - ckfree(sortInfo.indexv); + TclStackFree(interp, sortInfo.indexv); } return sortInfo.resultCode; } diff --git a/generic/tclCmdMZ.c b/generic/tclCmdMZ.c index d85cd83..05f2e5d 100644 --- a/generic/tclCmdMZ.c +++ b/generic/tclCmdMZ.c @@ -1835,7 +1835,7 @@ StringMapCmd( * adapt this code... */ - mapElemv = ckalloc(sizeof(Tcl_Obj *) * mapElemc); + mapElemv = TclStackAlloc(interp, sizeof(Tcl_Obj *) * mapElemc); Tcl_DictObjFirst(interp, objv[objc-2], &search, mapElemv+0, mapElemv+1, &done); for (i=2 ; i<mapElemc ; i+=2) { @@ -1944,10 +1944,10 @@ StringMapCmd( * case. */ - mapStrings = ckalloc(mapElemc*2*sizeof(Tcl_UniChar *)); - mapLens = ckalloc(mapElemc * 2 * sizeof(int)); + mapStrings = TclStackAlloc(interp, mapElemc*2*sizeof(Tcl_UniChar *)); + mapLens = TclStackAlloc(interp, mapElemc * 2 * sizeof(int)); if (nocase) { - u2lc = ckalloc(mapElemc * sizeof(Tcl_UniChar)); + u2lc = TclStackAlloc(interp, mapElemc * sizeof(Tcl_UniChar)); } for (index = 0; index < mapElemc; index++) { mapStrings[index] = Tcl_GetUnicodeFromObj(mapElemv[index], @@ -1997,10 +1997,10 @@ StringMapCmd( } } if (nocase) { - ckfree(u2lc); + TclStackFree(interp, u2lc); } - ckfree(mapLens); - ckfree(mapStrings); + TclStackFree(interp, mapLens); + TclStackFree(interp, mapStrings); } if (p != ustring1) { /* @@ -2012,7 +2012,7 @@ StringMapCmd( Tcl_SetObjResult(interp, resultPtr); done: if (mapWithDict) { - ckfree(mapElemv); + TclStackFree(interp, mapElemv); } if (copySource) { Tcl_DecrRefCount(sourceObj); @@ -3849,7 +3849,7 @@ TclNRSwitchObjCmd( */ matchFound: - ctxPtr = 
ckalloc(sizeof(CmdFrame)); + ctxPtr = TclStackAlloc(interp, sizeof(CmdFrame)); *ctxPtr = *iPtr->cmdFramePtr; if (splitObjs) { @@ -3966,7 +3966,7 @@ SwitchPostProc( (overflow ? limit : patternLength), pattern, (overflow ? "..." : ""), Tcl_GetErrorLine(interp))); } - ckfree(ctxPtr); + TclStackFree(interp, ctxPtr); return result; } @@ -4729,7 +4729,7 @@ TclNRWhileObjCmd( * We reuse [for]'s callback, passing a NULL for the 'next' script. */ - TclCkSmallAlloc(sizeof(ForIterData), iterPtr); + TclSmallAllocEx(interp, sizeof(ForIterData), iterPtr); iterPtr->cond = objv[1]; iterPtr->body = objv[2]; iterPtr->next = NULL; diff --git a/generic/tclCompCmds.c b/generic/tclCompCmds.c index 2fda2b9..083f530 100644 --- a/generic/tclCompCmds.c +++ b/generic/tclCompCmds.c @@ -1021,7 +1021,8 @@ TclCompileDictUpdateCmd( duiPtr = ckalloc(sizeof(DictUpdateInfo) + sizeof(int) * (numVars - 1)); duiPtr->length = numVars; - keyTokenPtrs = ckalloc(sizeof(Tcl_Token *) * numVars); + keyTokenPtrs = TclStackAlloc(interp, + sizeof(Tcl_Token *) * numVars); tokenPtr = TokenAfter(dictVarTokenPtr); for (i=0 ; i<numVars ; i++) { @@ -1059,7 +1060,7 @@ TclCompileDictUpdateCmd( if (tokenPtr->type != TCL_TOKEN_SIMPLE_WORD) { failedUpdateInfoAssembly: ckfree(duiPtr); - ckfree(keyTokenPtrs); + TclStackFree(interp, keyTokenPtrs); return TCL_ERROR; } bodyTokenPtr = tokenPtr; @@ -1123,7 +1124,7 @@ TclCompileDictUpdateCmd( Tcl_Panic("TclCompileDictCmd(update): bad jump distance %d", (int) (CurrentOffset(envPtr) - jumpFixup.codeOffset)); } - ckfree(keyTokenPtrs); + TclStackFree(interp, keyTokenPtrs); return TCL_OK; } @@ -1636,9 +1637,10 @@ TclCompileForeachCmd( */ numLists = (numWords - 2)/2; - varcList = ckalloc(numLists * sizeof(int)); + varcList = TclStackAlloc(interp, numLists * sizeof(int)); memset(varcList, 0, numLists * sizeof(int)); - varvList = (const char ***) ckalloc(numLists * sizeof(const char **)); + varvList = (const char ***) TclStackAlloc(interp, + numLists * sizeof(const char **)); 
memset((char*) varvList, 0, numLists * sizeof(const char **)); /* @@ -1865,8 +1867,8 @@ TclCompileForeachCmd( ckfree(varvList[loopIndex]); } } - ckfree((void *)varvList); - ckfree(varcList); + TclStackFree(interp, (void *)varvList); + TclStackFree(interp, varcList); return code; } @@ -3514,7 +3516,7 @@ TclCompileReturnCmd( * Allocate some working space. */ - objv = ckalloc(numOptionWords * sizeof(Tcl_Obj *)); + objv = TclStackAlloc(interp, numOptionWords * sizeof(Tcl_Obj *)); /* * Scan through the return options. If any are unknown at compile time, @@ -3538,7 +3540,7 @@ TclCompileReturnCmd( while (--objc >= 0) { TclDecrRefCount(objv[objc]); } - ckfree(objv); + TclStackFree(interp, objv); if (TCL_ERROR == status) { /* * Something was bogus in the return options. Clear the error message, @@ -4026,7 +4028,7 @@ PushVarName( * assemble the corresponding token. */ - elemTokenPtr = ckalloc(sizeof(Tcl_Token)); + elemTokenPtr = TclStackAlloc(interp, sizeof(Tcl_Token)); allocedTokens = 1; elemTokenPtr->type = TCL_TOKEN_TEXT; elemTokenPtr->start = elName; @@ -4079,7 +4081,7 @@ PushVarName( * token. 
*/ - elemTokenPtr = ckalloc(n * sizeof(Tcl_Token)); + elemTokenPtr = TclStackAlloc(interp, n * sizeof(Tcl_Token)); allocedTokens = 1; elemTokenPtr->type = TCL_TOKEN_TEXT; elemTokenPtr->start = elName; @@ -4167,7 +4169,7 @@ PushVarName( varTokenPtr[removedParen].size++; } if (allocedTokens) { - ckfree(elemTokenPtr); + TclStackFree(interp, elemTokenPtr); } *localIndexPtr = localIndex; *simpleVarNamePtr = simpleVarName; diff --git a/generic/tclCompCmdsSZ.c b/generic/tclCompCmdsSZ.c index ff494f2..d956819 100644 --- a/generic/tclCompCmdsSZ.c +++ b/generic/tclCompCmdsSZ.c @@ -595,7 +595,7 @@ TclCompileSubstCmd( return TCL_ERROR; } - objv = ckalloc(/*numArgs*/ numOpts * sizeof(Tcl_Obj *)); + objv = TclStackAlloc(interp, /*numArgs*/ numOpts * sizeof(Tcl_Obj *)); for (objc = 0; objc < /*numArgs*/ numOpts; objc++) { objv[objc] = Tcl_NewObj(); @@ -628,7 +628,7 @@ TclCompileSubstCmd( while (--objc >= 0) { TclDecrRefCount(objv[objc]); } - ckfree(objv); + TclStackFree(interp, objv); if (/*toSubst == NULL*/ code != TCL_OK) { return TCL_ERROR; } @@ -1320,8 +1320,8 @@ IssueSwitchChainedTests( contFixIndex = -1; contFixCount = 0; - fixupArray = ckalloc(sizeof(JumpFixup) * numBodyTokens); - fixupTargetArray = ckalloc(sizeof(int) * numBodyTokens); + fixupArray = TclStackAlloc(interp, sizeof(JumpFixup) * numBodyTokens); + fixupTargetArray = TclStackAlloc(interp, sizeof(int) * numBodyTokens); memset(fixupTargetArray, 0, numBodyTokens * sizeof(int)); fixupCount = 0; foundDefault = 0; @@ -1520,8 +1520,8 @@ IssueSwitchChainedTests( } } } - ckfree(fixupTargetArray); - ckfree(fixupArray); + TclStackFree(interp, fixupTargetArray); + TclStackFree(interp, fixupArray); envPtr->currStackDepth = savedStackDepth + 1; } @@ -1582,7 +1582,7 @@ IssueSwitchJumpTable( jtPtr = ckalloc(sizeof(JumptableInfo)); Tcl_InitHashTable(&jtPtr->hashTable, TCL_STRING_KEYS); infoIndex = TclCreateAuxData(jtPtr, &tclJumptableInfoType, envPtr); - finalFixups = ckalloc(sizeof(int) * (numBodyTokens/2)); + finalFixups = 
TclStackAlloc(interp, sizeof(int) * (numBodyTokens/2)); foundDefault = 0; mustGenerate = 1; @@ -1720,7 +1720,7 @@ IssueSwitchJumpTable( * Clean up all our temporary space and return. */ - ckfree(finalFixups); + TclStackFree(interp, finalFixups); } /* @@ -1975,12 +1975,12 @@ TclCompileTryCmd( numHandlers = numWords >> 2; numWords -= numHandlers * 4; if (numHandlers > 0) { - handlerTokens = ckalloc(sizeof(Tcl_Token*)*numHandlers); - matchClauses = ckalloc(sizeof(Tcl_Obj *) * numHandlers); + handlerTokens = TclStackAlloc(interp, sizeof(Tcl_Token*)*numHandlers); + matchClauses = TclStackAlloc(interp, sizeof(Tcl_Obj *) * numHandlers); memset(matchClauses, 0, sizeof(Tcl_Obj *) * numHandlers); - matchCodes = ckalloc(sizeof(int) * numHandlers); - resultVarIndices = ckalloc(sizeof(int) * numHandlers); - optionVarIndices = ckalloc(sizeof(int) * numHandlers); + matchCodes = TclStackAlloc(interp, sizeof(int) * numHandlers); + resultVarIndices = TclStackAlloc(interp, sizeof(int) * numHandlers); + optionVarIndices = TclStackAlloc(interp, sizeof(int) * numHandlers); for (i=0 ; i<numHandlers ; i++) { Tcl_Obj *tmpObj, **objv; @@ -2139,11 +2139,11 @@ TclCompileTryCmd( TclDecrRefCount(matchClauses[i]); } } - ckfree(optionVarIndices); - ckfree(resultVarIndices); - ckfree(matchCodes); - ckfree(matchClauses); - ckfree(handlerTokens); + TclStackFree(interp, optionVarIndices); + TclStackFree(interp, resultVarIndices); + TclStackFree(interp, matchCodes); + TclStackFree(interp, matchClauses); + TclStackFree(interp, handlerTokens); } return result; } @@ -2219,8 +2219,8 @@ IssueTryInstructions( * Slight overallocation, but reduces size of this function. 
*/ - addrsToFix = ckalloc(sizeof(int)*numHandlers); - forwardsToFix = ckalloc(sizeof(int)*numHandlers); + addrsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers); + forwardsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers); for (i=0 ; i<numHandlers ; i++) { sprintf(buf, "%d", matchCodes[i]); @@ -2307,8 +2307,8 @@ IssueTryInstructions( for (i=0 ; i<numHandlers ; i++) { FIXJUMP(addrsToFix[i]); } - ckfree(forwardsToFix); - ckfree(addrsToFix); + TclStackFree(interp, forwardsToFix); + TclStackFree(interp, addrsToFix); return TCL_OK; } @@ -2370,8 +2370,8 @@ IssueTryFinallyInstructions( * Slight overallocation, but reduces size of this function. */ - addrsToFix = ckalloc(sizeof(int)*numHandlers); - forwardsToFix = ckalloc(sizeof(int)*numHandlers); + addrsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers); + forwardsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers); for (i=0 ; i<numHandlers ; i++) { sprintf(buf, "%d", matchCodes[i]); @@ -2503,8 +2503,8 @@ IssueTryFinallyInstructions( for (i=0 ; i<numHandlers-1 ; i++) { FIXJUMP(addrsToFix[i]); } - ckfree(forwardsToFix); - ckfree(addrsToFix); + TclStackFree(interp, forwardsToFix); + TclStackFree(interp, addrsToFix); } /* @@ -2900,7 +2900,7 @@ PushVarName( * assemble the corresponding token. */ - elemTokenPtr = ckalloc(sizeof(Tcl_Token)); + elemTokenPtr = TclStackAlloc(interp, sizeof(Tcl_Token)); allocedTokens = 1; elemTokenPtr->type = TCL_TOKEN_TEXT; elemTokenPtr->start = elName; @@ -2953,7 +2953,7 @@ PushVarName( * token. 
*/ - elemTokenPtr = ckalloc(n * sizeof(Tcl_Token)); + elemTokenPtr = TclStackAlloc(interp, n * sizeof(Tcl_Token)); allocedTokens = 1; elemTokenPtr->type = TCL_TOKEN_TEXT; elemTokenPtr->start = elName; @@ -3041,7 +3041,7 @@ PushVarName( varTokenPtr[removedParen].size++; } if (allocedTokens) { - ckfree(elemTokenPtr); + TclStackFree(interp, elemTokenPtr); } *localIndexPtr = localIndex; *simpleVarNamePtr = simpleVarName; diff --git a/generic/tclCompExpr.c b/generic/tclCompExpr.c index 396448b..a07d6df 100644 --- a/generic/tclCompExpr.c +++ b/generic/tclCompExpr.c @@ -920,7 +920,7 @@ ParseExpr( case SCRIPT: { Tcl_Parse *nestedPtr = - ckalloc(sizeof(Tcl_Parse)); + TclStackAlloc(interp, sizeof(Tcl_Parse)); tokenPtr = parsePtr->tokenPtr + parsePtr->numTokens; tokenPtr->type = TCL_TOKEN_COMMAND; @@ -955,7 +955,7 @@ ParseExpr( break; } } - ckfree(nestedPtr); + TclStackFree(interp, nestedPtr); end = start; start = tokenPtr->start; scanned = end - start; @@ -1821,7 +1821,7 @@ Tcl_ParseExpr( OpNode *opTree = NULL; /* Will point to the tree of operators. */ Tcl_Obj *litList = Tcl_NewObj(); /* List to hold the literals. */ Tcl_Obj *funcList = Tcl_NewObj(); /* List to hold the functon names. */ - Tcl_Parse *exprParsePtr = ckalloc(sizeof(Tcl_Parse)); + Tcl_Parse *exprParsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse)); /* Holds the Tcl_Tokens of substitutions. 
*/ if (numBytes < 0) { @@ -1843,7 +1843,7 @@ Tcl_ParseExpr( } Tcl_FreeParse(exprParsePtr); - ckfree(exprParsePtr); + TclStackFree(interp, exprParsePtr); ckfree(opTree); return code; } @@ -2072,7 +2072,7 @@ TclCompileExpr( OpNode *opTree = NULL; /* Will point to the tree of operators */ Tcl_Obj *litList = Tcl_NewObj(); /* List to hold the literals */ Tcl_Obj *funcList = Tcl_NewObj(); /* List to hold the functon names*/ - Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse)); + Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse)); /* Holds the Tcl_Tokens of substitutions */ int code = ParseExpr(interp, script, numBytes, &opTree, litList, @@ -2100,7 +2100,7 @@ TclCompileExpr( } Tcl_FreeParse(parsePtr); - ckfree(parsePtr); + TclStackFree(interp, parsePtr); Tcl_DecrRefCount(funcList); Tcl_DecrRefCount(litList); ckfree(opTree); @@ -2143,7 +2143,7 @@ ExecConstantExprTree( * bytecode, so there's no need to tend to TIP 280 issues. */ - envPtr = ckalloc(sizeof(CompileEnv)); + envPtr = TclStackAlloc(interp, sizeof(CompileEnv)); TclInitCompileEnv(interp, envPtr, NULL, 0, NULL, 0); CompileExprTree(interp, nodes, index, litObjvPtr, NULL, NULL, envPtr, 0 /* optimize */); @@ -2151,7 +2151,7 @@ ExecConstantExprTree( Tcl_IncrRefCount(byteCodeObj); TclInitByteCodeObj(byteCodeObj, envPtr); TclFreeCompileEnv(envPtr); - ckfree(envPtr); + TclStackFree(interp, envPtr); byteCodePtr = (ByteCode *) byteCodeObj->internalRep.otherValuePtr; TclNRExecuteByteCode(interp, byteCodePtr); code = TclNRRunCallbacks(interp, TCL_OK, rootPtr); @@ -2208,10 +2208,10 @@ CompileExprTree( switch (nodePtr->lexeme) { case QUESTION: - newJump = ckalloc(sizeof(JumpList)); + newJump = TclStackAlloc(interp, sizeof(JumpList)); newJump->next = jumpPtr; jumpPtr = newJump; - newJump = ckalloc(sizeof(JumpList)); + newJump = TclStackAlloc(interp, sizeof(JumpList)); newJump->next = jumpPtr; jumpPtr = newJump; jumpPtr->depth = envPtr->currStackDepth; @@ -2219,13 +2219,13 @@ CompileExprTree( break; case AND: case OR: - 
newJump = ckalloc(sizeof(JumpList)); + newJump = TclStackAlloc(interp, sizeof(JumpList)); newJump->next = jumpPtr; jumpPtr = newJump; - newJump = ckalloc(sizeof(JumpList)); + newJump = TclStackAlloc(interp, sizeof(JumpList)); newJump->next = jumpPtr; jumpPtr = newJump; - newJump = ckalloc(sizeof(JumpList)); + newJump = TclStackAlloc(interp, sizeof(JumpList)); newJump->next = jumpPtr; jumpPtr = newJump; jumpPtr->depth = envPtr->currStackDepth; @@ -2331,10 +2331,10 @@ CompileExprTree( envPtr->currStackDepth = jumpPtr->depth + 1; freePtr = jumpPtr; jumpPtr = jumpPtr->next; - ckfree(freePtr); + TclStackFree(interp, freePtr); freePtr = jumpPtr; jumpPtr = jumpPtr->next; - ckfree(freePtr); + TclStackFree(interp, freePtr); break; case AND: case OR: @@ -2358,13 +2358,13 @@ CompileExprTree( envPtr->currStackDepth = jumpPtr->depth + 1; freePtr = jumpPtr; jumpPtr = jumpPtr->next; - ckfree(freePtr); + TclStackFree(interp, freePtr); freePtr = jumpPtr; jumpPtr = jumpPtr->next; - ckfree(freePtr); + TclStackFree(interp, freePtr); freePtr = jumpPtr; jumpPtr = jumpPtr->next; - ckfree(freePtr); + TclStackFree(interp, freePtr); break; default: TclEmitOpcode(instruction[nodePtr->lexeme], envPtr); @@ -2541,8 +2541,9 @@ TclSortingOpCmd( Tcl_SetObjResult(interp, Tcl_NewBooleanObj(1)); } else { TclOpCmdClientData *occdPtr = clientData; - Tcl_Obj **litObjv = ckalloc(2 * (objc-2) * sizeof(Tcl_Obj *)); - OpNode *nodes = ckalloc(2 * (objc-2) * sizeof(OpNode)); + Tcl_Obj **litObjv = TclStackAlloc(interp, + 2 * (objc-2) * sizeof(Tcl_Obj *)); + OpNode *nodes = TclStackAlloc(interp, 2 * (objc-2) * sizeof(OpNode)); unsigned char lexeme; int i, lastAnd = 1; Tcl_Obj *const *litObjPtrPtr = litObjv; @@ -2582,8 +2583,8 @@ TclSortingOpCmd( code = ExecConstantExprTree(interp, nodes, 0, &litObjPtrPtr); - ckfree(nodes); - ckfree(litObjv); + TclStackFree(interp, nodes); + TclStackFree(interp, litObjv); } return code; } @@ -2669,7 +2670,7 @@ TclVariadicOpCmd( return code; } else { Tcl_Obj *const *litObjv = 
objv + 1; - OpNode *nodes = ckalloc((objc-1) * sizeof(OpNode)); + OpNode *nodes = TclStackAlloc(interp, (objc-1) * sizeof(OpNode)); int i, lastOp = OT_LITERAL; nodes[0].lexeme = START; @@ -2702,7 +2703,7 @@ TclVariadicOpCmd( code = ExecConstantExprTree(interp, nodes, 0, &litObjv); - ckfree(nodes); + TclStackFree(interp, nodes); return code; } } diff --git a/generic/tclCompile.c b/generic/tclCompile.c index 4d6bf33..aed9e3b 100644 --- a/generic/tclCompile.c +++ b/generic/tclCompile.c @@ -1202,7 +1202,7 @@ TclInitCompileEnv( * ...) which may make change the type as well. */ - CmdFrame *ctxPtr = ckalloc(sizeof(CmdFrame)); + CmdFrame *ctxPtr = TclStackAlloc(interp, sizeof(CmdFrame)); int pc = 0; *ctxPtr = *invoker; @@ -1255,7 +1255,7 @@ TclInitCompileEnv( } } - ckfree(ctxPtr); + TclStackFree(interp, ctxPtr); } envPtr->extCmdMapPtr->start = envPtr->line; @@ -1461,7 +1461,7 @@ TclCompileScript( /* TIP #280 */ ExtCmdLoc *eclPtr = envPtr->extCmdMapPtr; int *wlines, wlineat, cmdLine, *clNext; - Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse)); + Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse)); Tcl_DStringInit(&ds); @@ -1877,7 +1877,7 @@ TclCompileScript( } envPtr->numSrcBytes = p - script; - ckfree(parsePtr); + TclStackFree(interp, parsePtr); Tcl_DStringFree(&ds); } diff --git a/generic/tclDictObj.c b/generic/tclDictObj.c index 4ed3fe6..3da91a3 100644 --- a/generic/tclDictObj.c +++ b/generic/tclDictObj.c @@ -2425,14 +2425,14 @@ DictForNRCmd( TCL_STATIC); return TCL_ERROR; } - searchPtr = ckalloc(sizeof(Tcl_DictSearch)); + searchPtr = TclStackAlloc(interp, sizeof(Tcl_DictSearch)); if (Tcl_DictObjFirst(interp, objv[2], searchPtr, &keyObj, &valueObj, &done) != TCL_OK) { - ckfree(searchPtr); + TclStackFree(interp, searchPtr); return TCL_ERROR; } if (done) { - ckfree(searchPtr); + TclStackFree(interp, searchPtr); return TCL_OK; } TclListObjGetElements(NULL, objv[1], &varc, &varv); @@ -2488,7 +2488,7 @@ DictForNRCmd( TclDecrRefCount(valueVarObj); 
TclDecrRefCount(scriptObj); Tcl_DictObjDone(searchPtr); - ckfree(searchPtr); + TclStackFree(interp, searchPtr); return TCL_ERROR; } @@ -2574,7 +2574,7 @@ DictForLoopCallback( TclDecrRefCount(valueVarObj); TclDecrRefCount(scriptObj); Tcl_DictObjDone(searchPtr); - ckfree(searchPtr); + TclStackFree(interp, searchPtr); return result; } diff --git a/generic/tclEvent.c b/generic/tclEvent.c index 49e8137..78bd7b8 100644 --- a/generic/tclEvent.c +++ b/generic/tclEvent.c @@ -1032,7 +1032,9 @@ TclInitSubsystems(void) TclInitThreadStorage(); /* Creates master hash table for * thread local storage */ +#if USE_TCLALLOC TclInitAlloc(); /* Process wide mutex init */ +#endif #ifdef TCL_MEM_DEBUG TclInitDbCkalloc(); /* Process wide mutex init */ #endif @@ -1209,7 +1211,9 @@ Tcl_Finalize(void) * Close down the thread-specific object allocator. */ - TclFinalizeAlloc(); +#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) + TclFinalizeThreadAlloc(); +#endif /* * We defer unloading of packages until very late to avoid memory access diff --git a/generic/tclExecute.c b/generic/tclExecute.c index 2ed1537..26d3e04 100644 --- a/generic/tclExecute.c +++ b/generic/tclExecute.c @@ -171,21 +171,19 @@ static BuiltinFunc const tclBuiltinFuncTable[] = { typedef struct TEBCdata { ByteCode *codePtr; /* Constant until the BC returns */ /* -----------------------------------------*/ - Tcl_Obj **tosPtr; const unsigned char *pc; /* These fields are used on return TO this */ - int catchDepth; /* this level: they record the state when a */ + ptrdiff_t *catchTop; /* this level: they record the state when a */ int cleanup; /* new codePtr was received for NR */ Tcl_Obj *auxObjList; /* execution. 
*/ int checkInterp; CmdFrame cmdFrame; - unsigned int capacity; void * stack[1]; /* Start of the actual combined catch and obj * stacks; the struct will be expanded as * necessary */ } TEBCdata; #define TEBC_YIELD() \ - TD->tosPtr = tosPtr; \ + esPtr->tosPtr = tosPtr; \ TD->pc = pc; \ TD->cleanup = cleanup; \ TclNRAddCallback(interp, TEBCresume, TD, \ @@ -194,7 +192,7 @@ typedef struct TEBCdata { #define TEBC_DATA_DIG() \ pc = TD->pc; \ cleanup = TD->cleanup; \ - tosPtr = TD->tosPtr + tosPtr = esPtr->tosPtr #define PUSH_TAUX_OBJ(objPtr) \ @@ -298,6 +296,20 @@ VarHashCreateVar( } while (0) /* + * Macros used to cache often-referenced Tcl evaluation stack information + * in local variables. Note that a DECACHE_STACK_INFO()-CACHE_STACK_INFO() + * pair must surround any call inside TclNRExecuteByteCode (and a few other + * procedures that use this scheme) that could result in a recursive call + * to TclNRExecuteByteCode. + */ + +#define CACHE_STACK_INFO() \ + checkInterp = 1 + +#define DECACHE_STACK_INFO() \ + esPtr->tosPtr = tosPtr + +/* * Macros used to access items on the Tcl evaluation stack. PUSH_OBJECT * increments the object's ref count since it makes the stack have another * reference pointing to the object. 
However, POP_OBJECT does not decrement @@ -671,6 +683,7 @@ static void ValidatePcAndStackTop(ByteCode *codePtr, int stackLowerBound, int checkStack); #endif /* TCL_COMPILE_DEBUG */ static ByteCode * CompileExprObj(Tcl_Interp *interp, Tcl_Obj *objPtr); +static void DeleteExecStack(ExecStack *esPtr); static void DupExprCodeInternalRep(Tcl_Obj *srcPtr, Tcl_Obj *copyPtr); MODULE_SCOPE int TclCompareTwoNumbers(Tcl_Obj *valuePtr, @@ -686,10 +699,16 @@ static ExceptionRange * GetExceptRangeForPc(const unsigned char *pc, static const char * GetSrcInfoForPc(const unsigned char *pc, ByteCode *codePtr, int *lengthPtr, const unsigned char **pcBeg); +static Tcl_Obj ** GrowEvaluationStack(ExecEnv *eePtr, int growth, + int move); static void IllegalExprOperandType(Tcl_Interp *interp, const unsigned char *pc, Tcl_Obj *opndPtr); static void InitByteCodeExecution(Tcl_Interp *interp); +static inline int OFFSET(void *ptr); static void ReleaseDictIterator(Tcl_Obj *objPtr); +/* Useful elsewhere, make available in tclInt.h or stubs? 
*/ +static Tcl_Obj ** StackAllocWords(Tcl_Interp *interp, int numWords); +static Tcl_Obj ** StackReallocWords(Tcl_Interp *interp, int numWords); static Tcl_NRPostProc CopyCallback; static Tcl_NRPostProc ExprObjCallback; @@ -826,7 +845,10 @@ TclCreateExecEnv( * [sizeof(Tcl_Obj*)] */ { ExecEnv *eePtr = ckalloc(sizeof(ExecEnv)); + ExecStack *esPtr = ckalloc(sizeof(ExecStack) + + (size_t) (size-1) * sizeof(Tcl_Obj *)); + eePtr->execStackPtr = esPtr; TclNewBooleanObj(eePtr->constants[0], 0); Tcl_IncrRefCount(eePtr->constants[0]); TclNewBooleanObj(eePtr->constants[1], 1); @@ -836,6 +858,12 @@ TclCreateExecEnv( eePtr->corPtr = NULL; eePtr->rewind = 0; + esPtr->prevPtr = NULL; + esPtr->nextPtr = NULL; + esPtr->markerPtr = NULL; + esPtr->endPtr = &esPtr->stackWords[size-1]; + esPtr->tosPtr = &esPtr->stackWords[-1]; + Tcl_MutexLock(&execMutex); if (!execInitialized) { TclInitAuxDataTypeTable(); @@ -864,14 +892,42 @@ TclCreateExecEnv( *---------------------------------------------------------------------- */ +static void +DeleteExecStack( + ExecStack *esPtr) +{ + if (esPtr->markerPtr) { + Tcl_Panic("freeing an execStack which is still in use"); + } + + if (esPtr->prevPtr) { + esPtr->prevPtr->nextPtr = esPtr->nextPtr; + } + if (esPtr->nextPtr) { + esPtr->nextPtr->prevPtr = esPtr->prevPtr; + } + ckfree(esPtr); +} + void TclDeleteExecEnv( ExecEnv *eePtr) /* Execution environment to free. */ { + ExecStack *esPtr = eePtr->execStackPtr, *tmpPtr; + /* * Delete all stacks in this exec env. */ + while (esPtr->nextPtr) { + esPtr = esPtr->nextPtr; + } + while (esPtr) { + tmpPtr = esPtr; + esPtr = tmpPtr->prevPtr; + DeleteExecStack(tmpPtr); + } + TclDecrRefCount(eePtr->constants[0]); TclDecrRefCount(eePtr->constants[1]); if (eePtr->callbackPtr) { @@ -911,6 +967,339 @@ TclFinalizeExecution(void) } /* + * Auxiliary code to insure that GrowEvaluationStack always returns correctly + * aligned memory. 
+ * + * WALLOCALIGN represents the alignment reqs in words, just as TCL_ALLOCALIGN + * represents the reqs in bytes. This assumes that TCL_ALLOCALIGN is a + * multiple of the wordsize 'sizeof(Tcl_Obj *)'. + */ + +#define WALLOCALIGN \ + (TCL_ALLOCALIGN/sizeof(Tcl_Obj *)) + +/* + * OFFSET computes how many words have to be skipped until the next aligned + * word. Note that we are only interested in the low order bits of ptr, so + * that any possible information loss in PTR2INT is of no consequence. + */ + +static inline int +OFFSET( + void *ptr) +{ + int mask = TCL_ALLOCALIGN-1; + int base = PTR2INT(ptr) & mask; + return (TCL_ALLOCALIGN - base)/sizeof(Tcl_Obj *); +} + +/* + * Given a marker, compute where the following aligned memory starts. + */ + +#define MEMSTART(markerPtr) \ + ((markerPtr) + OFFSET(markerPtr)) + +/* + *---------------------------------------------------------------------- + * + * GrowEvaluationStack -- + * + * This procedure grows a Tcl evaluation stack stored in an ExecEnv, + * copying over the words since the last mark if so requested. A mark is + * set at the beginning of the new area when no copying is requested. + * + * Results: + * Returns a pointer to the first usable word in the (possibly) grown + * stack. + * + * Side effects: + * The size of the evaluation stack may be grown, a marker is set + * + *---------------------------------------------------------------------- + */ + +static Tcl_Obj ** +GrowEvaluationStack( + ExecEnv *eePtr, /* Points to the ExecEnv with an evaluation + * stack to enlarge. */ + int growth, /* How much larger than the current used + * size. */ + int move) /* 1 if move words since last marker. 
*/ +{ + ExecStack *esPtr = eePtr->execStackPtr, *oldPtr = NULL; + int newBytes, newElems, currElems; + int needed = growth - (esPtr->endPtr - esPtr->tosPtr); + Tcl_Obj **markerPtr = esPtr->markerPtr, **memStart; + int moveWords = 0; + + if (move) { + if (!markerPtr) { + Tcl_Panic("STACK: Reallocating with no previous alloc"); + } + if (needed <= 0) { + return MEMSTART(markerPtr); + } + } else { + Tcl_Obj **tmpMarkerPtr = esPtr->tosPtr + 1; + int offset = OFFSET(tmpMarkerPtr); + + if (needed + offset < 0) { + /* + * Put a marker pointing to the previous marker in this stack, and + * store it in esPtr as the current marker. Return a pointer to + * the start of aligned memory. + */ + + esPtr->markerPtr = tmpMarkerPtr; + memStart = tmpMarkerPtr + offset; + esPtr->tosPtr = memStart - 1; + *esPtr->markerPtr = (Tcl_Obj *) markerPtr; + return memStart; + } + } + + /* + * Reset move to hold the number of words to be moved to new stack (if + * any) and growth to hold the complete stack requirements: add one for + * the marker, (WALLOCALIGN-1) for the maximal possible offset. + */ + + if (move) { + moveWords = esPtr->tosPtr - MEMSTART(markerPtr) + 1; + } + needed = growth + moveWords + WALLOCALIGN; + + /* + * Check if there is enough room in the next stack (if there is one, it + * should be both empty and the last one!) + */ + + if (esPtr->nextPtr) { + oldPtr = esPtr; + esPtr = oldPtr->nextPtr; + currElems = esPtr->endPtr - &esPtr->stackWords[-1]; + if (esPtr->markerPtr || (esPtr->tosPtr != &esPtr->stackWords[-1])) { + Tcl_Panic("STACK: Stack after current is in use"); + } + if (esPtr->nextPtr) { + Tcl_Panic("STACK: Stack after current is not last"); + } + if (needed <= currElems) { + goto newStackReady; + } + DeleteExecStack(esPtr); + esPtr = oldPtr; + } else { + currElems = esPtr->endPtr - &esPtr->stackWords[-1]; + } + + /* + * We need to allocate a new stack! It needs to store 'growth' words, + * including the elements to be copied over and the new marker. 
+ */ + + newElems = 2*currElems; + while (needed > newElems) { + newElems *= 2; + } + newBytes = sizeof(ExecStack) + (newElems-1) * sizeof(Tcl_Obj *); + + oldPtr = esPtr; + esPtr = ckalloc(newBytes); + + oldPtr->nextPtr = esPtr; + esPtr->prevPtr = oldPtr; + esPtr->nextPtr = NULL; + esPtr->endPtr = &esPtr->stackWords[newElems-1]; + + newStackReady: + eePtr->execStackPtr = esPtr; + + /* + * Store a NULL marker at the beginning of the stack, to indicate that + * this is the first marker in this stack and that rewinding to here + * should actually be a return to the previous stack. + */ + + esPtr->stackWords[0] = NULL; + esPtr->markerPtr = &esPtr->stackWords[0]; + memStart = MEMSTART(esPtr->markerPtr); + esPtr->tosPtr = memStart - 1; + + if (move) { + memcpy(memStart, MEMSTART(markerPtr), moveWords*sizeof(Tcl_Obj *)); + esPtr->tosPtr += moveWords; + oldPtr->markerPtr = (Tcl_Obj **) *markerPtr; + oldPtr->tosPtr = markerPtr-1; + } + + /* + * Free the old stack if it is now unused. + */ + + if (!oldPtr->markerPtr) { + DeleteExecStack(oldPtr); + } + + return memStart; +} + +/* + *-------------------------------------------------------------- + * + * TclStackAlloc, TclStackRealloc, TclStackFree -- + * + * Allocate memory from the execution stack; it has to be returned later + * with a call to TclStackFree. + * + * Results: + * A pointer to the first byte allocated, or panics if the allocation did + * not succeed. + * + * Side effects: + * The execution stack may be grown. + * + *-------------------------------------------------------------- + */ + +static Tcl_Obj ** +StackAllocWords( + Tcl_Interp *interp, + int numWords) +{ + /* + * Note that GrowEvaluationStack sets a marker in the stack. This marker + * is read when rewinding, e.g., by TclStackFree. 
+ */ + + Interp *iPtr = (Interp *) interp; + ExecEnv *eePtr = iPtr->execEnvPtr; + Tcl_Obj **resPtr = GrowEvaluationStack(eePtr, numWords, 0); + + eePtr->execStackPtr->tosPtr += numWords; + return resPtr; +} + +static Tcl_Obj ** +StackReallocWords( + Tcl_Interp *interp, + int numWords) +{ + Interp *iPtr = (Interp *) interp; + ExecEnv *eePtr = iPtr->execEnvPtr; + Tcl_Obj **resPtr = GrowEvaluationStack(eePtr, numWords, 1); + + eePtr->execStackPtr->tosPtr += numWords; + return resPtr; +} + +void +TclStackFree( + Tcl_Interp *interp, + void *freePtr) +{ + Interp *iPtr = (Interp *) interp; + ExecEnv *eePtr; + ExecStack *esPtr; + Tcl_Obj **markerPtr, *marker; + + if (iPtr == NULL || iPtr->execEnvPtr == NULL) { + Tcl_Free((char *) freePtr); + return; + } + + /* + * Rewind the stack to the previous marker position. The current marker, + * as set in the last call to GrowEvaluationStack, contains a pointer to + * the previous marker. + */ + + eePtr = iPtr->execEnvPtr; + esPtr = eePtr->execStackPtr; + markerPtr = esPtr->markerPtr; + marker = *markerPtr; + + if ((freePtr != NULL) && (MEMSTART(markerPtr) != (Tcl_Obj **)freePtr)) { + Tcl_Panic("TclStackFree: incorrect freePtr (%p != %p). Call out of sequence?", + freePtr, MEMSTART(markerPtr)); + } + + esPtr->tosPtr = markerPtr - 1; + esPtr->markerPtr = (Tcl_Obj **) marker; + if (marker) { + return; + } + + /* + * Return to previous active stack. Note that repeated expansions or + * reallocs could have generated several unused intervening stacks: free + * them too. 
+ */ + + while (esPtr->nextPtr) { + esPtr = esPtr->nextPtr; + } + esPtr->tosPtr = &esPtr->stackWords[-1]; + while (esPtr->prevPtr) { + ExecStack *tmpPtr = esPtr->prevPtr; + if (tmpPtr->tosPtr == &tmpPtr->stackWords[-1]) { + DeleteExecStack(tmpPtr); + } else { + break; + } + } + if (esPtr->prevPtr) { + eePtr->execStackPtr = esPtr->prevPtr; + } else { + eePtr->execStackPtr = esPtr; + } +} + +void * +TclStackAlloc( + Tcl_Interp *interp, + int numBytes) +{ + Interp *iPtr = (Interp *) interp; + int numWords = (numBytes + (sizeof(Tcl_Obj *) - 1))/sizeof(Tcl_Obj *); + + if (iPtr == NULL || iPtr->execEnvPtr == NULL) { + return (void *) Tcl_Alloc(numBytes); + } + + return (void *) StackAllocWords(interp, numWords); +} + +void * +TclStackRealloc( + Tcl_Interp *interp, + void *ptr, + int numBytes) +{ + Interp *iPtr = (Interp *) interp; + ExecEnv *eePtr; + ExecStack *esPtr; + Tcl_Obj **markerPtr; + int numWords; + + if (iPtr == NULL || iPtr->execEnvPtr == NULL) { + return (void *) Tcl_Realloc((char *) ptr, numBytes); + } + + eePtr = iPtr->execEnvPtr; + esPtr = eePtr->execStackPtr; + markerPtr = esPtr->markerPtr; + + if (MEMSTART(markerPtr) != (Tcl_Obj **)ptr) { + Tcl_Panic("TclStackRealloc: incorrect ptr. 
Call out of sequence?"); + } + + numWords = (numBytes + (sizeof(Tcl_Obj *) - 1))/sizeof(Tcl_Obj *); + return (void *) StackReallocWords(interp, numWords); +} + +/* *-------------------------------------------------------------- * * Tcl_ExprObj -- @@ -1308,7 +1697,7 @@ TclCompileObj( int redo = 0; if (invoker) { - CmdFrame *ctxPtr = ckalloc(sizeof(CmdFrame)); + CmdFrame *ctxPtr = TclStackAlloc(interp,sizeof(CmdFrame)); *ctxPtr = *invoker; if (invoker->type == TCL_LOCATION_BC) { @@ -1347,7 +1736,7 @@ TclCompileObj( && (ctxPtr->type == TCL_LOCATION_SOURCE)); } - ckfree(ctxPtr); + TclStackFree(interp, ctxPtr); } if (redo) { @@ -1532,15 +1921,10 @@ TclIncrObj( *---------------------------------------------------------------------- */ #define bcFramePtr (&TD->cmdFrame) -#define catchStack (TD->stack) -#define initTosPtr ((Tcl_Obj **) &TD->stack[codePtr->maxExceptDepth - 1]) - -#define capacity2size(cap) \ - (sizeof(TEBCdata) + sizeof(void *)*(cap + codePtr->maxExceptDepth - 1)) +#define initCatchTop ((ptrdiff_t *) (&TD->stack[-1])) +#define initTosPtr ((Tcl_Obj **) (initCatchTop+codePtr->maxExceptDepth)) +#define esPtr (iPtr->execEnvPtr->execStackPtr) -#define size2capacity(s) \ - (((s - sizeof(TEBCdata))/sizeof(void *)) - codePtr->maxExceptDepth + 1) - int TclNRExecuteByteCode( Tcl_Interp *interp, /* Token for command interpreter. */ @@ -1548,7 +1932,10 @@ TclNRExecuteByteCode( { Interp *iPtr = (Interp *) interp; TEBCdata *TD; - unsigned int size = capacity2size(codePtr->maxStackDepth); + int size = sizeof(TEBCdata) -1 + + + (codePtr->maxStackDepth + codePtr->maxExceptDepth) + *(sizeof(void *)); + int numWords = (size + sizeof(Tcl_Obj *) - 1)/sizeof(Tcl_Obj *); if (iPtr->execEnvPtr->rewind) { return TCL_ERROR; @@ -1568,19 +1955,12 @@ TclNRExecuteByteCode( * execution stack is large enough to execute this ByteCode. 
*/ - TD = ckalloc(size); - size = TclAllocMaximize(TD); - if (size == UINT_MAX) { - TD->capacity = codePtr->maxStackDepth; - } else { - TD->capacity = size2capacity(size); - } - - TD->tosPtr = initTosPtr; + TD = (TEBCdata *) GrowEvaluationStack(iPtr->execEnvPtr, numWords, 0); + esPtr->tosPtr = initTosPtr; TD->codePtr = codePtr; TD->pc = codePtr->codeStart; - TD->catchDepth = -1; + TD->catchTop = initCatchTop; TD->cleanup = 0; TD->auxObjList = NULL; TD->checkInterp = 0; @@ -1668,11 +2048,11 @@ TEBCresume( TEBCdata *TD = data[0]; #define auxObjList (TD->auxObjList) -#define catchDepth (TD->catchDepth) +#define catchTop (TD->catchTop) #define codePtr (TD->codePtr) #define checkInterp (TD->checkInterp) /* Indicates when a check of interp readyness - * is necessary. Set by checkInterp = 1 */ + * is necessary. Set by CACHE_STACK_INFO() */ /* * Globals: variables that store state, must remain valid at all times. @@ -1733,7 +2113,7 @@ TEBCresume( codePtr->flags &= ~TCL_BYTECODE_RECOMPILE; } - checkInterp = 1; + CACHE_STACK_INFO(); if (result == TCL_OK) { #ifndef TCL_COMPILE_DEBUG if (*pc == INST_POP) { @@ -1873,28 +2253,29 @@ TEBCresume( */ if ((instructionCount++ & ASYNC_CHECK_COUNT_MASK) == 0) { + DECACHE_STACK_INFO(); if (TclAsyncReady(iPtr)) { result = Tcl_AsyncInvoke(interp, result); if (result == TCL_ERROR) { - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } } if (TclCanceled(iPtr)) { if (Tcl_Canceled(interp, TCL_LEAVE_ERR_MSG) == TCL_ERROR) { - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } } if (TclLimitReady(iPtr->limit)) { if (Tcl_LimitCheck(interp) == TCL_ERROR) { - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } } - checkInterp = 1; + CACHE_STACK_INFO(); } TCL_DTRACE_INST_NEXT(); @@ -2262,7 +2643,7 @@ TEBCresume( case INST_EXPAND_STKTOP: { int i; - unsigned int reqWords; + ptrdiff_t moved; /* * Make sure that the element at stackTop is a list; if not, just @@ -2276,6 +2657,7 @@ TEBCresume( Tcl_GetObjResult(interp)); goto gotError; } + 
(void) POP_OBJECT(); /* * Make sure there is enough room in the stack to expand this list @@ -2284,30 +2666,24 @@ TEBCresume( * stack depth, as seen by the compiler. */ - reqWords = - /* how many were needed originally */ - codePtr->maxStackDepth - /* plus how many we already consumed in previous expansions */ - + (CURR_DEPTH - TclGetInt4AtPtr(pc+1)) - /* plus how many are needed for this expansion */ - + objc - 1; + length = objc + (codePtr->maxStackDepth - TclGetInt4AtPtr(pc+1)); + DECACHE_STACK_INFO(); + moved = GrowEvaluationStack(iPtr->execEnvPtr, length, 1) + - (Tcl_Obj **) TD; + if (moved) { + /* + * Change the global data to point to the new stack: move the + * TEBCdataPtr TD, recompute the position of every other + * stack-allocated parameter, update the stack pointers. + */ - (void) POP_OBJECT(); - if (reqWords > TD->capacity) { - ptrdiff_t depth; - unsigned int size = capacity2size(reqWords); - - depth = tosPtr - initTosPtr; - TD = ckrealloc(TD, size); - size = TclAllocMaximize(TD); - if (size == UINT_MAX) { - TD->capacity = reqWords; - } else { - TD->capacity = size2capacity(size); - } - tosPtr = initTosPtr + depth; + esPtr = iPtr->execEnvPtr->execStackPtr; + TD = (TEBCdata *) (((Tcl_Obj **)TD) + moved); + + catchTop += moved; + tosPtr += moved; } - + /* * Expand the list at stacktop onto the stack; free the list. Knowing * that it has a freeIntRepProc we use Tcl_DecrRefCount(). @@ -2326,8 +2702,9 @@ TEBCresume( bcFramePtr->data.tebc.pc = (char *) pc; iPtr->cmdFramePtr = bcFramePtr; + DECACHE_STACK_INFO(); newCodePtr = CompileExprObj(interp, OBJ_AT_TOS); - checkInterp = 1; + CACHE_STACK_INFO(); cleanup = 1; pc++; TEBC_YIELD(); @@ -2413,6 +2790,8 @@ TEBCresume( codePtr, bcFramePtr, pc - codePtr->codeStart); } + DECACHE_STACK_INFO(); + pc += pcAdjustment; TEBC_YIELD(); return TclNREvalObjv(interp, objc, objv, @@ -2637,9 +3016,10 @@ TEBCresume( * TclPtrGetVar to process fully. 
*/ + DECACHE_STACK_INFO(); objResultPtr = TclPtrGetVar(interp, varPtr, arrayPtr, part1Ptr, part2Ptr, TCL_LEAVE_ERR_MSG, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); if (!objResultPtr) { TRACE_APPEND(("ERROR: %.30s\n", O2S(Tcl_GetObjResult(interp)))); goto gotError; @@ -2883,9 +3263,10 @@ TEBCresume( part1Ptr = part2Ptr = NULL; doCallPtrSetVar: + DECACHE_STACK_INFO(); objResultPtr = TclPtrSetVar(interp, varPtr, arrayPtr, part1Ptr, part2Ptr, valuePtr, storeFlags, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); if (!objResultPtr) { TRACE_APPEND(("ERROR: %.30s\n", O2S(Tcl_GetObjResult(interp)))); goto gotError; @@ -3146,9 +3527,10 @@ TEBCresume( } Tcl_DecrRefCount(incrPtr); } else { + DECACHE_STACK_INFO(); objResultPtr = TclPtrIncrObjVar(interp, varPtr, arrayPtr, part1Ptr, part2Ptr, incrPtr, TCL_LEAVE_ERR_MSG, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); Tcl_DecrRefCount(incrPtr); if (objResultPtr == NULL) { TRACE_APPEND(("ERROR: %.30s\n", @@ -3180,9 +3562,10 @@ TEBCresume( } TRACE(("%u => ", opnd)); if (ReadTraced(varPtr)) { + DECACHE_STACK_INFO(); TclObjCallVarTraces(iPtr, NULL, varPtr, NULL, NULL, TCL_TRACE_READS, 0, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); if (TclIsVarUndefined(varPtr)) { TclCleanupVar(varPtr, NULL); varPtr = NULL; @@ -3215,9 +3598,10 @@ TEBCresume( 0, 1, arrayPtr, opnd); if (varPtr) { if (ReadTraced(varPtr) || (arrayPtr && ReadTraced(arrayPtr))) { + DECACHE_STACK_INFO(); TclObjCallVarTraces(iPtr, arrayPtr, varPtr, NULL, part2Ptr, TCL_TRACE_READS, 0, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); } if (TclIsVarUndefined(varPtr)) { TclCleanupVar(varPtr, arrayPtr); @@ -3247,9 +3631,10 @@ TEBCresume( /*createPart1*/0, /*createPart2*/1, &arrayPtr); if (varPtr) { if (ReadTraced(varPtr) || (arrayPtr && ReadTraced(arrayPtr))) { + DECACHE_STACK_INFO(); TclObjCallVarTraces(iPtr, arrayPtr, varPtr, part1Ptr,part2Ptr, TCL_TRACE_READS, 0, -1); - checkInterp = 1; + CACHE_STACK_INFO(); } if (TclIsVarUndefined(varPtr)) { TclCleanupVar(varPtr, 
arrayPtr); @@ -3293,11 +3678,12 @@ TEBCresume( } slowUnsetScalar: + DECACHE_STACK_INFO(); if (TclPtrUnsetVar(interp, varPtr, NULL, NULL, NULL, flags, opnd) != TCL_OK && flags) { goto errorInUnset; } - checkInterp = 1; + CACHE_STACK_INFO(); NEXT_INST_F(6, 0, 0); case INST_UNSET_ARRAY: @@ -3334,6 +3720,7 @@ TEBCresume( } } slowUnsetArray: + DECACHE_STACK_INFO(); varPtr = TclLookupArrayElement(interp, NULL, part2Ptr, flags, "unset", 0, 0, arrayPtr, opnd); if (!varPtr) { @@ -3344,7 +3731,7 @@ TEBCresume( flags, opnd) != TCL_OK && (flags & TCL_LEAVE_ERR_MSG)) { goto errorInUnset; } - checkInterp = 1; + CACHE_STACK_INFO(); NEXT_INST_F(6, 1, 0); case INST_UNSET_ARRAY_STK: @@ -3364,15 +3751,16 @@ TEBCresume( TRACE(("%s \"%.30s\"\n", (flags?"normal":"noerr"), O2S(part1Ptr))); doUnsetStk: + DECACHE_STACK_INFO(); if (TclObjUnsetVar2(interp, part1Ptr, part2Ptr, flags) != TCL_OK && (flags & TCL_LEAVE_ERR_MSG)) { goto errorInUnset; } - checkInterp = 1; + CACHE_STACK_INFO(); NEXT_INST_V(2, cleanup, 0); errorInUnset: - checkInterp = 1; + CACHE_STACK_INFO(); TRACE_APPEND(("ERROR: %.30s\n", O2S(Tcl_GetObjResult(interp)))); goto gotError; @@ -3393,8 +3781,9 @@ TEBCresume( } varPtr->value.objPtr = NULL; } else { + DECACHE_STACK_INFO(); TclPtrUnsetVar(interp, varPtr, NULL, NULL, NULL, 0, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); } NEXT_INST_F(5, 0, 0); } @@ -3635,16 +4024,18 @@ TEBCresume( if (TclGetBooleanFromObj(NULL, valuePtr, &i1) != TCL_OK) { TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr), (valuePtr->typePtr? valuePtr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } if (TclGetBooleanFromObj(NULL, value2Ptr, &i2) != TCL_OK) { TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(value2Ptr), (value2Ptr->typePtr? 
value2Ptr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, value2Ptr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } @@ -4421,8 +4812,9 @@ TEBCresume( TRACE(("%.20s %.20s => ILLEGAL 1st TYPE %s\n", O2S(valuePtr), O2S(value2Ptr), (valuePtr->typePtr? valuePtr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } @@ -4431,8 +4823,9 @@ TEBCresume( TRACE(("%.20s %.20s => ILLEGAL 2nd TYPE %s\n", O2S(valuePtr), O2S(value2Ptr), (value2Ptr->typePtr? value2Ptr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, value2Ptr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } @@ -4490,10 +4883,11 @@ TEBCresume( Tcl_SetResult(interp, "negative shift argument", TCL_STATIC); #if 0 + DECACHE_STACK_INFO(); Tcl_SetErrorCode(interp, "ARITH", "DOMAIN", "domain error: argument not in valid range", NULL); - checkInterp = 1; + CACHE_STACK_INFO(); #endif goto gotError; } else if (l1 == 0) { @@ -4537,10 +4931,11 @@ TEBCresume( Tcl_SetResult(interp, "negative shift argument", TCL_STATIC); #if 0 + DECACHE_STACK_INFO(); Tcl_SetErrorCode(interp, "ARITH", "DOMAIN", "domain error: argument not in valid range", NULL); - checkInterp = 1; + CACHE_STACK_INFO(); #endif goto gotError; } else if (l1 == 0) { @@ -4560,9 +4955,10 @@ TEBCresume( "integer value too large to represent", TCL_STATIC); #if 0 + DECACHE_STACK_INFO(); Tcl_SetErrorCode(interp, "ARITH", "IOVERFLOW", "integer value too large to represent", NULL); - checkInterp = 1; + CACHE_STACK_INFO(); #endif goto gotError; } else { @@ -4645,8 +5041,9 @@ TEBCresume( TRACE(("%.20s %.20s => ILLEGAL 1st TYPE %s\n", O2S(value2Ptr), O2S(valuePtr), (valuePtr->typePtr? 
valuePtr->typePtr->name: "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } @@ -4665,8 +5062,9 @@ TEBCresume( TRACE(("%.20s %.20s => ILLEGAL 2nd TYPE %s\n", O2S(value2Ptr), O2S(valuePtr), (value2Ptr->typePtr? value2Ptr->typePtr->name: "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, value2Ptr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } @@ -4813,8 +5211,9 @@ TEBCresume( if (TclGetBooleanFromObj(NULL, valuePtr, &b) != TCL_OK) { TRACE(("\"%.20s\" => ILLEGAL TYPE %s\n", O2S(valuePtr), (valuePtr->typePtr? valuePtr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } /* TODO: Consider peephole opt. */ @@ -4832,8 +5231,9 @@ TEBCresume( TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr), (valuePtr->typePtr? valuePtr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } if (type1 == TCL_NUMBER_LONG) { @@ -4858,8 +5258,9 @@ TEBCresume( || IsErroringNaNType(type1)) { TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr), (valuePtr->typePtr? valuePtr->typePtr->name : "null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } switch (type1) { @@ -4903,8 +5304,9 @@ TEBCresume( TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr), (valuePtr->typePtr? valuePtr->typePtr->name:"null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } @@ -4920,8 +5322,9 @@ TEBCresume( TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr), (valuePtr->typePtr? 
valuePtr->typePtr->name:"null"))); + DECACHE_STACK_INFO(); IllegalExprOperandType(interp, pc, valuePtr); - checkInterp = 1; + CACHE_STACK_INFO(); } else { /* * Numeric conversion of NaN -> error. @@ -4929,8 +5332,9 @@ TEBCresume( TRACE(("\"%.20s\" => IEEE FLOATING PT ERROR\n", O2S(objResultPtr))); + DECACHE_STACK_INFO(); TclExprFloatError(interp, *((const double *) ptr1)); - checkInterp = 1; + CACHE_STACK_INFO(); } goto gotError; } @@ -4975,8 +5379,9 @@ TEBCresume( case INST_BREAK: /* + DECACHE_STACK_INFO(); Tcl_ResetResult(interp); - checkInterp = 1; + CACHE_STACK_INFO(); */ result = TCL_BREAK; cleanup = 0; @@ -4984,8 +5389,9 @@ TEBCresume( case INST_CONTINUE: /* + DECACHE_STACK_INFO(); Tcl_ResetResult(interp); - checkInterp = 1; + CACHE_STACK_INFO(); */ result = TCL_CONTINUE; cleanup = 0; @@ -5118,16 +5524,17 @@ TEBCresume( Tcl_IncrRefCount(valuePtr); } } else { + DECACHE_STACK_INFO(); if (TclPtrSetVar(interp, varPtr, NULL, NULL, NULL, valuePtr, TCL_LEAVE_ERR_MSG, varIndex)==NULL){ - checkInterp = 1; + CACHE_STACK_INFO(); TRACE_WITH_OBJ(( "%u => ERROR init. index temp %d: ", opnd,varIndex), Tcl_GetObjResult(interp)); TclDecrRefCount(listPtr); goto gotError; } - checkInterp = 1; + CACHE_STACK_INFO(); } valIndex++; } @@ -5159,18 +5566,19 @@ TEBCresume( * stack. 
*/ - catchStack[++catchDepth] = INT2PTR(CURR_DEPTH); - TRACE(("%u => catchDepth=%d, stackTop=%d\n", - TclGetUInt4AtPtr(pc+1), (int) (catchDepth), + *(++catchTop) = CURR_DEPTH; + TRACE(("%u => catchTop=%d, stackTop=%d\n", + TclGetUInt4AtPtr(pc+1), (int) (catchTop - initCatchTop - 1), (int) CURR_DEPTH)); NEXT_INST_F(5, 0, 0); case INST_END_CATCH: - catchDepth--; + catchTop--; + DECACHE_STACK_INFO(); Tcl_ResetResult(interp); - checkInterp = 1; + CACHE_STACK_INFO(); result = TCL_OK; - TRACE(("=> catchDepth=%d\n", (int) (catchDepth))); + TRACE(("=> catchTop=%d\n", (int) (catchTop - initCatchTop - 1))); NEXT_INST_F(1, 0, 0); case INST_PUSH_RESULT: @@ -5192,8 +5600,9 @@ TEBCresume( NEXT_INST_F(1, 0, 1); case INST_PUSH_RETURN_OPTIONS: + DECACHE_STACK_INFO(); objResultPtr = Tcl_GetReturnOptions(interp, result); - checkInterp = 1; + CACHE_STACK_INFO(); TRACE_WITH_OBJ(("=> "), objResultPtr); NEXT_INST_F(1, 0, 1); @@ -5245,12 +5654,13 @@ TEBCresume( TRACE_APPEND(("%.30s\n", O2S(objResultPtr))); NEXT_INST_V(5, opnd+1, 1); } + DECACHE_STACK_INFO(); Tcl_ResetResult(interp); Tcl_AppendResult(interp, "key \"", TclGetString(OBJ_AT_TOS), "\" not known in dictionary", NULL); Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "DICT", TclGetString(OBJ_AT_TOS), NULL); - checkInterp = 1; + CACHE_STACK_INFO(); TRACE_WITH_OBJ(("%u => ERROR ", opnd), Tcl_GetObjResult(interp)); } else { TRACE_WITH_OBJ(( @@ -5273,8 +5683,9 @@ TEBCresume( if (TclIsVarDirectReadable(varPtr)) { dictPtr = varPtr->value.objPtr; } else { + DECACHE_STACK_INFO(); dictPtr = TclPtrGetVar(interp, varPtr, NULL,NULL,NULL, 0, opnd2); - checkInterp = 1; + CACHE_STACK_INFO(); } if (dictPtr == NULL) { TclNewObj(dictPtr); @@ -5346,9 +5757,10 @@ TEBCresume( objResultPtr = dictPtr; } else { Tcl_IncrRefCount(dictPtr); + DECACHE_STACK_INFO(); objResultPtr = TclPtrSetVar(interp, varPtr, NULL, NULL, NULL, dictPtr, TCL_LEAVE_ERR_MSG, opnd2); - checkInterp = 1; + CACHE_STACK_INFO(); TclDecrRefCount(dictPtr); if (objResultPtr == NULL) { 
TRACE_APPEND(("ERROR: %.30s\n", @@ -5375,8 +5787,9 @@ TEBCresume( if (TclIsVarDirectReadable(varPtr)) { dictPtr = varPtr->value.objPtr; } else { + DECACHE_STACK_INFO(); dictPtr = TclPtrGetVar(interp, varPtr, NULL, NULL, NULL, 0, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); } if (dictPtr == NULL) { TclNewObj(dictPtr); @@ -5480,9 +5893,10 @@ TEBCresume( objResultPtr = dictPtr; } else { Tcl_IncrRefCount(dictPtr); + DECACHE_STACK_INFO(); objResultPtr = TclPtrSetVar(interp, varPtr, NULL, NULL, NULL, dictPtr, TCL_LEAVE_ERR_MSG, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); TclDecrRefCount(dictPtr); if (objResultPtr == NULL) { TRACE_APPEND(("ERROR: %.30s\n", @@ -5584,9 +5998,10 @@ TEBCresume( if (TclIsVarDirectReadable(varPtr)) { dictPtr = varPtr->value.objPtr; } else { + DECACHE_STACK_INFO(); dictPtr = TclPtrGetVar(interp, varPtr, NULL, NULL, NULL, TCL_LEAVE_ERR_MSG, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); if (dictPtr == NULL) { goto gotError; } @@ -5607,6 +6022,7 @@ TEBCresume( while (TclIsVarLink(varPtr)) { varPtr = varPtr->value.linkPtr; } + DECACHE_STACK_INFO(); if (valuePtr == NULL) { TclObjUnsetVar2(interp, localName(iPtr->varFramePtr, duiPtr->varIndices[i]), @@ -5614,10 +6030,10 @@ TEBCresume( } else if (TclPtrSetVar(interp, varPtr, NULL, NULL, NULL, valuePtr, TCL_LEAVE_ERR_MSG, duiPtr->varIndices[i]) == NULL) { - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; } - checkInterp = 1; + CACHE_STACK_INFO(); } NEXT_INST_F(9, 0, 0); @@ -5633,8 +6049,9 @@ TEBCresume( if (TclIsVarDirectReadable(varPtr)) { dictPtr = varPtr->value.objPtr; } else { + DECACHE_STACK_INFO(); dictPtr = TclPtrGetVar(interp, varPtr, NULL, NULL, NULL, 0, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); } if (dictPtr == NULL) { NEXT_INST_F(9, 1, 0); @@ -5660,9 +6077,10 @@ TEBCresume( if (TclIsVarDirectReadable(var2Ptr)) { valuePtr = var2Ptr->value.objPtr; } else { + DECACHE_STACK_INFO(); valuePtr = TclPtrGetVar(interp, var2Ptr, NULL, NULL, NULL, 0, duiPtr->varIndices[i]); - 
checkInterp = 1; + CACHE_STACK_INFO(); } if (valuePtr == NULL) { Tcl_DictObjRemove(interp, dictPtr, keyPtrPtr[i]); @@ -5678,9 +6096,10 @@ TEBCresume( TclDecrRefCount(varPtr->value.objPtr); varPtr->value.objPtr = dictPtr; } else { + DECACHE_STACK_INFO(); objResultPtr = TclPtrSetVar(interp, varPtr, NULL, NULL, NULL, dictPtr, TCL_LEAVE_ERR_MSG, opnd); - checkInterp = 1; + CACHE_STACK_INFO(); if (objResultPtr == NULL) { if (allocdict) { TclDecrRefCount(dictPtr); @@ -5796,9 +6215,10 @@ TEBCresume( */ divideByZero: + DECACHE_STACK_INFO(); Tcl_SetResult(interp, "divide by zero", TCL_STATIC); Tcl_SetErrorCode(interp, "ARITH", "DIVZERO", "divide by zero", NULL); - checkInterp = 1; + CACHE_STACK_INFO(); goto gotError; /* @@ -5807,11 +6227,12 @@ TEBCresume( */ exponOfZero: + DECACHE_STACK_INFO(); Tcl_SetResult(interp, "exponentiation of zero by negative power", TCL_STATIC); Tcl_SetErrorCode(interp, "ARITH", "DOMAIN", "exponentiation of zero by negative power", NULL); - checkInterp = 1; + CACHE_STACK_INFO(); /* * Almost all error paths feed through here rather than assigning to @@ -5837,8 +6258,9 @@ TEBCresume( const unsigned char *pcBeg; bytes = GetSrcInfoForPc(pc, codePtr, &length, &pcBeg); + DECACHE_STACK_INFO(); TclLogCommandInfo(interp, codePtr->source, bytes, bytes ? length : 0, pcBeg, tosPtr); - checkInterp = 1; + CACHE_STACK_INFO(); } iPtr->flags &= ~ERR_ALREADY_LOGGED; @@ -5848,8 +6270,8 @@ TEBCresume( */ while (auxObjList) { - if ((catchDepth >=0) && (PTR2INT(catchStack[catchDepth]) > - PTR2INT(auxObjList->internalRep.twoPtrValue.ptr1))) { + if ((catchTop != initCatchTop) && (*catchTop > + (ptrdiff_t) auxObjList->internalRep.twoPtrValue.ptr1)) { break; } POP_TAUX_OBJ(); @@ -5889,7 +6311,7 @@ TEBCresume( #endif goto abnormalReturn; } - if (catchDepth == -1) { + if (catchTop == initCatchTop) { #ifdef TCL_COMPILE_DEBUG if (traceInstructions) { fprintf(stdout, " ... 
no enclosing catch, returning %s\n", @@ -5924,16 +6346,16 @@ TEBCresume( */ processCatch: - while (CURR_DEPTH > PTR2INT(catchStack[catchDepth])) { + while (CURR_DEPTH > *catchTop) { valuePtr = POP_OBJECT(); TclDecrRefCount(valuePtr); } #ifdef TCL_COMPILE_DEBUG if (traceInstructions) { - fprintf(stdout, " ... found catch at %d, catchDepth=%d, " + fprintf(stdout, " ... found catch at %d, catchTop=%d, " "unwound to %ld, new pc %u\n", - rangePtr->codeOffset, (int) catchDepth, - PTR2INT(catchStack[catchDepth]), (unsigned) rangePtr->catchOffset); + rangePtr->codeOffset, (int) (catchTop - initCatchTop - 1), + (long) *catchTop, (unsigned) rangePtr->catchOffset); } #endif pc = (codePtr->codeStart + rangePtr->catchOffset); @@ -5982,7 +6404,7 @@ TEBCresume( if (--codePtr->refCount <= 0) { TclCleanupByteCode(codePtr); } - ckfree(TD); /* free my stack */ + TclStackFree(interp, TD); /* free my stack */ return result; } @@ -5990,9 +6412,10 @@ TEBCresume( #undef codePtr #undef iPtr #undef bcFramePtr +#undef initCatchTop #undef initTosPtr #undef auxObjList -#undef catchDepth +#undef catchTop #undef TCONST /* diff --git a/generic/tclFCmd.c b/generic/tclFCmd.c index 52ad278..6d3c013 100644 --- a/generic/tclFCmd.c +++ b/generic/tclFCmd.c @@ -999,7 +999,7 @@ TclFileAttrsCmd( goto end; } attributeStringsAllocated = (const char **) - ckalloc((1+numObjStrings) * sizeof(char *)); + TclStackAlloc(interp, (1+numObjStrings) * sizeof(char *)); for (index = 0; index < numObjStrings; index++) { Tcl_ListObjIndex(interp, objStrings, index, &objPtr); attributeStringsAllocated[index] = TclGetString(objPtr); @@ -1110,7 +1110,7 @@ TclFileAttrsCmd( * Free up the array we allocated. */ - ckfree((void *) attributeStringsAllocated); + TclStackFree(interp, (void *) attributeStringsAllocated); /* * We don't need this object that was passed to us any more. 
diff --git a/generic/tclFileName.c b/generic/tclFileName.c index eff1010..d53c271 100644 --- a/generic/tclFileName.c +++ b/generic/tclFileName.c @@ -1422,7 +1422,7 @@ Tcl_GlobObjCmd( if (length <= 0) { goto skipTypes; } - globTypes = ckalloc(sizeof(Tcl_GlobTypeData)); + globTypes = TclStackAlloc(interp, sizeof(Tcl_GlobTypeData)); globTypes->type = 0; globTypes->perm = 0; globTypes->macType = NULL; @@ -1638,7 +1638,7 @@ Tcl_GlobObjCmd( if (globTypes->macCreator != NULL) { Tcl_DecrRefCount(globTypes->macCreator); } - ckfree(globTypes); + TclStackFree(interp, globTypes); } return result; } diff --git a/generic/tclIOCmd.c b/generic/tclIOCmd.c index ffa172a..1f0e4a9 100644 --- a/generic/tclIOCmd.c +++ b/generic/tclIOCmd.c @@ -929,7 +929,7 @@ Tcl_ExecObjCmd( */ argc = objc - skip; - argv = ckalloc((unsigned)(argc + 1) * sizeof(char *)); + argv = TclStackAlloc(interp, (unsigned)(argc + 1) * sizeof(char *)); /* * Copy the string conversions of each (post option) object into the @@ -947,7 +947,7 @@ Tcl_ExecObjCmd( * Free the argv array. 
*/ - ckfree((void *) argv); + TclStackFree(interp, (void *) argv); if (chan == NULL) { return TCL_ERROR; diff --git a/generic/tclIndexObj.c b/generic/tclIndexObj.c index f9511af..d98842e 100644 --- a/generic/tclIndexObj.c +++ b/generic/tclIndexObj.c @@ -952,12 +952,12 @@ Tcl_WrongNumArgs( len = Tcl_ScanCountedElement(elementStr, elemLen, &flags); if (MAY_QUOTE_WORD && len != elemLen) { - char *quotedElementStr = ckalloc((unsigned)len); + char *quotedElementStr = TclStackAlloc(interp, (unsigned)len); len = Tcl_ConvertCountedElement(elementStr, elemLen, quotedElementStr, flags); Tcl_AppendToObj(objPtr, quotedElementStr, len); - ckfree(quotedElementStr); + TclStackFree(interp, quotedElementStr); } else { Tcl_AppendToObj(objPtr, elementStr, elemLen); } @@ -1006,12 +1006,12 @@ Tcl_WrongNumArgs( len = Tcl_ScanCountedElement(elementStr, elemLen, &flags); if (MAY_QUOTE_WORD && len != elemLen) { - char *quotedElementStr = ckalloc((unsigned) len); + char *quotedElementStr = TclStackAlloc(interp,(unsigned) len); len = Tcl_ConvertCountedElement(elementStr, elemLen, quotedElementStr, flags); Tcl_AppendToObj(objPtr, quotedElementStr, len); - ckfree(quotedElementStr); + TclStackFree(interp, quotedElementStr); } else { Tcl_AppendToObj(objPtr, elementStr, elemLen); } diff --git a/generic/tclInt.decls b/generic/tclInt.decls index 4da999e..df60dae 100644 --- a/generic/tclInt.decls +++ b/generic/tclInt.decls @@ -35,9 +35,9 @@ scspec EXTERN #declare 2 { # int TclAccessInsertProc(TclAccessProc_ *proc) #} -#declare 3 { -# void TclAllocateFreeObjects(void) -#} +declare 3 { + void TclAllocateFreeObjects(void) +} # Replaced by TclpChdir in 8.1: # declare 4 { # int TclChdir(Tcl_Interp *interp, char *dirName) @@ -290,9 +290,9 @@ declare 64 { #declare 68 { # int TclpAccess(const char *path, int mode) #} -#declare 69 { -# char *TclpAlloc(unsigned int size) -#} +declare 69 { + char *TclpAlloc(unsigned int size) +} #declare 70 { # int TclpCopyFile(const char *source, const char *dest) #} @@ 
-306,9 +306,9 @@ declare 64 { #declare 73 { # int TclpDeleteFile(const char *path) #} -#declare 74 { -# void TclpFree(char *ptr) -#} +declare 74 { + void TclpFree(char *ptr) +} declare 75 { unsigned long TclpGetClicks(void) } @@ -332,9 +332,9 @@ declare 78 { # Tcl_Channel TclpOpenFileChannel(Tcl_Interp *interp, char *fileName, # char *modeString, int permissions) #} -#declare 81 { -# char *TclpRealloc(char *ptr, unsigned int size) -#} +declare 81 { + char *TclpRealloc(char *ptr, unsigned int size) +} #declare 82 { # int TclpRemoveDirectory(const char *path, int recursive, # Tcl_DString *errorPtr) @@ -867,12 +867,12 @@ declare 213 { declare 214 { void TclSetObjNameOfExecutable(Tcl_Obj *name, Tcl_Encoding encoding) } -#declare 215 { -# void *TclStackAlloc(Tcl_Interp *interp, unsigned int numBytes) -#} -#declare 216 { -# void TclStackFree(Tcl_Interp *interp, void *freePtr) -#} +declare 215 { + void *TclStackAlloc(Tcl_Interp *interp, int numBytes) +} +declare 216 { + void TclStackFree(Tcl_Interp *interp, void *freePtr) +} declare 217 { int TclPushStackFrame(Tcl_Interp *interp, Tcl_CallFrame **framePtrPtr, Tcl_Namespace *namespacePtr, int isProcCallFrame) @@ -891,9 +891,9 @@ declare 225 { Tcl_Obj *TclTraceDictPath(Tcl_Interp *interp, Tcl_Obj *rootPtr, int keyc, Tcl_Obj *const keyv[], int flags) } -#declare 226 { -# int TclObjBeingDeleted(Tcl_Obj *objPtr) -#} +declare 226 { + int TclObjBeingDeleted(Tcl_Obj *objPtr) +} declare 227 { void TclSetNsPath(Namespace *nsPtr, int pathLength, Tcl_Namespace *pathAry[]) diff --git a/generic/tclInt.h b/generic/tclInt.h index a22348f..53e4323 100644 --- a/generic/tclInt.h +++ b/generic/tclInt.h @@ -10,7 +10,7 @@ * Copyright (c) 2001, 2002 by Kevin B. Kenny. All rights reserved. * Copyright (c) 2007 Daniel A. Steffen <das@users.sourceforge.net> * Copyright (c) 2006-2008 by Joe Mistachkin. All rights reserved. - * Copyright (c) 2008-2011 by Miguel Sofer. All rights reserved. + * Copyright (c) 2008 by Miguel Sofer. All rights reserved. 
* * See the file "license.terms" for information on usage and redistribution of * this file, and for a DISCLAIMER OF ALL WARRANTIES. @@ -1390,6 +1390,13 @@ MODULE_SCOPE void TclThreadDataKeySet(Tcl_ThreadDataKey *keyPtr, (ThreadSpecificData *)Tcl_GetThreadData((keyPtr), sizeof(ThreadSpecificData)) /* + *---------------------------------------------------------------- + * Data structures related to bytecode compilation and execution. These are + * used primarily in tclCompile.c, tclExecute.c, and tclBasic.c. + *---------------------------------------------------------------- + */ + +/* * Forward declaration to prevent errors when the forward references to * Tcl_Parse and CompileEnv are encountered in the procedure type CompileProc * declared below. @@ -1431,6 +1438,19 @@ typedef int (CompileHookProc)(Tcl_Interp *interp, struct CompileEnv *compEnvPtr, ClientData clientData); /* + * The data structure for a (linked list of) execution stacks. + */ + +typedef struct ExecStack { + struct ExecStack *prevPtr; + struct ExecStack *nextPtr; + Tcl_Obj **markerPtr; + Tcl_Obj **endPtr; + Tcl_Obj **tosPtr; + Tcl_Obj *stackWords[1]; +} ExecStack; + +/* * The data structure defining the execution environment for ByteCode's. * There is one ExecEnv structure per Tcl interpreter. It holds the evaluation * stack that holds command operands and results. The stack grows towards @@ -1467,6 +1487,8 @@ typedef struct CoroutineData { } CoroutineData; typedef struct ExecEnv { + ExecStack *execStackPtr; /* Points to the first item in the evaluation + * stack on the heap. */ Tcl_Obj *constants[2]; /* Pointers to constant "0" and "1" objs. */ struct Tcl_Interp *interp; struct NRE_callback *callbackPtr; @@ -1747,6 +1769,24 @@ enum PkgPreferOptions { /* *---------------------------------------------------------------- + * This structure shadows the first few fields of the memory cache for the + * allocator defined in tclThreadAlloc.c; it has to be kept in sync with the + * definition there. 
+ * Some macros require knowledge of some fields in the struct in order to + * avoid hitting the TSD unnecessarily. In order to facilitate this, a pointer + * to the relevant fields is kept in the objCache field in struct Interp. + *---------------------------------------------------------------- + */ + +typedef struct AllocCache { + struct Cache *nextPtr; /* Linked list of cache entries. */ + Tcl_ThreadId owner; /* Which thread's cache is this? */ + Tcl_Obj *firstObjPtr; /* List of free objects for thread. */ + int numObjects; /* Number of objects for thread. */ +} AllocCache; + +/* + *---------------------------------------------------------------- * This structure defines an interpreter, which is a collection of commands * plus other state information related to interpreting commands, such as * variable storage. Primary responsibility for this data structure is in @@ -2078,6 +2118,7 @@ typedef struct Interp { * They are used by the macros defined below. */ + AllocCache *allocCache; void *pendingObjDataPtr; /* Pointer to the Cache and PendingObjData * structs for this interp's thread; see * tclObj.c and tclThreadAlloc.c */ @@ -2310,6 +2351,17 @@ struct LimitHandler { #define UCHAR(c) ((unsigned char) (c)) /* + * This macro is used to properly align the memory allocated by Tcl, giving + * the same alignment as the native malloc. + */ + +#if defined(__APPLE__) +#define TCL_ALLOCALIGN 16 +#else +#define TCL_ALLOCALIGN (2*sizeof(void *)) +#endif + +/* * This macro is used to determine the offset needed to safely allocate any * data structure in memory. 
Given a starting offset or size, it "rounds up" * or "aligns" the offset to the next 8-byte boundary so that any data @@ -2668,6 +2720,13 @@ MODULE_SCOPE const Tcl_HashKeyType tclOneWordHashKeyType; MODULE_SCOPE const Tcl_HashKeyType tclStringHashKeyType; MODULE_SCOPE const Tcl_HashKeyType tclObjHashKeyType; +/* + * The head of the list of free Tcl objects, and the total number of Tcl + * objects ever allocated and freed. + */ + +MODULE_SCOPE Tcl_Obj * tclFreeObjList; + #ifdef TCL_COMPILE_STATS MODULE_SCOPE long tclObjsAlloced; MODULE_SCOPE long tclObjsFreed; @@ -2843,6 +2902,7 @@ MODULE_SCOPE void TclCreateLateExitHandler(Tcl_ExitProc *proc, ClientData clientData); MODULE_SCOPE void TclDeleteLateExitHandler(Tcl_ExitProc *proc, ClientData clientData); +MODULE_SCOPE void TclFinalizeAllocSubsystem(void); MODULE_SCOPE void TclFinalizeAsync(void); MODULE_SCOPE void TclFinalizeDoubleConversion(void); MODULE_SCOPE void TclFinalizeEncodingSubsystem(void); @@ -2859,6 +2919,7 @@ MODULE_SCOPE void TclFinalizeNotifier(void); MODULE_SCOPE void TclFinalizeObjects(void); MODULE_SCOPE void TclFinalizePreserve(void); MODULE_SCOPE void TclFinalizeSynchronization(void); +MODULE_SCOPE void TclFinalizeThreadAlloc(void); MODULE_SCOPE void TclFinalizeThreadData(void); MODULE_SCOPE void TclFinalizeThreadObjects(void); MODULE_SCOPE double TclFloor(const mp_int *a); @@ -3036,6 +3097,8 @@ MODULE_SCOPE void TclSetDuplicateObj(Tcl_Obj *dupPtr, Tcl_Obj *objPtr); MODULE_SCOPE void TclSetProcessGlobalValue(ProcessGlobalValue *pgvPtr, Tcl_Obj *newValue, Tcl_Encoding encoding); MODULE_SCOPE void TclSignalExitThread(Tcl_ThreadId id, int result); +MODULE_SCOPE void * TclStackRealloc(Tcl_Interp *interp, void *ptr, + int numBytes); MODULE_SCOPE int TclStringMatch(const char *str, int strLen, const char *pattern, int ptnLen, int flags); MODULE_SCOPE int TclStringMatchObj(Tcl_Obj *stringObj, @@ -3745,10 +3808,10 @@ typedef const char *TclDTraceStr; #endif /* TCL_COMPILE_STATS */ # define 
TclAllocObjStorage(objPtr) \ - (objPtr) = TclSmallAlloc() + TclAllocObjStorageEx(NULL, (objPtr)) # define TclFreeObjStorage(objPtr) \ - TclSmallFree(objPtr) + TclFreeObjStorageEx(NULL, (objPtr)) #ifndef TCL_MEM_DEBUG # define TclNewObj(objPtr) \ @@ -3783,125 +3846,128 @@ typedef const char *TclDTraceStr; } \ } -#else /* TCL_MEM_DEBUG */ -MODULE_SCOPE void TclDbInitNewObj(Tcl_Obj *objPtr, const char *file, - int line); +#if defined(PURIFY) -# define TclDbNewObj(objPtr, file, line) \ - do { \ - TclIncrObjsAllocated(); \ - (objPtr) = (Tcl_Obj *) \ - Tcl_DbCkalloc(sizeof(Tcl_Obj), (file), (line)); \ - TclDbInitNewObj((objPtr), (file), (line)); \ - TCL_DTRACE_OBJ_CREATE(objPtr); \ - } while (0) - -# define TclNewObj(objPtr) \ - TclDbNewObj(objPtr, __FILE__, __LINE__); - -# define TclDecrRefCount(objPtr) \ - Tcl_DbDecrRefCount(objPtr, __FILE__, __LINE__) +/* + * The PURIFY mode is like the regular mode, but instead of doing block + * Tcl_Obj allocation and keeping a freed list for efficiency, it always + * allocates and frees a single Tcl_Obj so that tools like Purify can better + * track memory leaks. + */ -# define TclNewListObjDirect(objc, objv) \ - TclDbNewListObjDirect(objc, objv, __FILE__, __LINE__) +# define TclAllocObjStorageEx(interp, objPtr) \ + (objPtr) = (Tcl_Obj *) Tcl_Alloc(sizeof(Tcl_Obj)) -#endif /* TCL_MEM_DEBUG */ +# define TclFreeObjStorageEx(interp, objPtr) \ + ckfree((char *) (objPtr)) -/* - * Macros that drive the allocator behaviour - */ +#undef USE_THREAD_ALLOC +#elif defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) -#if defined(TCL_THREADS) /* * The TCL_THREADS mode is like the regular mode but allocates Tcl_Obj's from * per-thread caches. 
*/ -MODULE_SCOPE void TclpFreeAllocCache(void *); + +MODULE_SCOPE Tcl_Obj * TclThreadAllocObj(void); +MODULE_SCOPE void TclThreadFreeObj(Tcl_Obj *); +MODULE_SCOPE Tcl_Mutex *TclpNewAllocMutex(void); +MODULE_SCOPE void TclFreeAllocCache(void *); MODULE_SCOPE void * TclpGetAllocCache(void); MODULE_SCOPE void TclpSetAllocCache(void *); -MODULE_SCOPE void TclFreeAllocCache(void *); MODULE_SCOPE void TclpFreeAllocMutex(Tcl_Mutex *mutex); -MODULE_SCOPE Tcl_Mutex *TclpNewAllocMutex(void); -#endif +MODULE_SCOPE void TclpFreeAllocCache(void *); /* - * List of valid allocators. Have to respect the following convention: - * - allocators that shunt TclpAlloc to malloc are below aNONE - * - allocators that use zippy are above aNONE + * These macros need to be kept in sync with the code of TclThreadAllocObj() + * and TclThreadFreeObj(). + * + * Note that the optimiser should resolve the case (interp==NULL) at compile + * time. */ -#define aNATIVE 0 -#define aPURIFY 1 -#define aNONE 2 -#define aZIPPY 3 -#define aMULTI 4 +# define ALLOC_NOBJHIGH 1200 -#if defined(TCL_ALLOCATOR) && ((TCL_ALLOCATOR < 0) || (TCL_ALLOCATOR > aMULTI)) -#undef TCL_ALLOCATOR -#endif +# define TclAllocObjStorageEx(interp, objPtr) \ + do { \ + AllocCache *cachePtr; \ + if (((interp) == NULL) || \ + ((cachePtr = ((Interp *)(interp))->allocCache), \ + (cachePtr->numObjects == 0))) { \ + (objPtr) = TclThreadAllocObj(); \ + } else { \ + (objPtr) = cachePtr->firstObjPtr; \ + cachePtr->firstObjPtr = (objPtr)->internalRep.otherValuePtr; \ + --cachePtr->numObjects; \ + } \ + } while (0) -#ifdef PURIFY -# undef TCL_ALLOCATOR -# define TCL_ALLOCATOR aPURIFY -#endif +# define TclFreeObjStorageEx(interp, objPtr) \ + do { \ + AllocCache *cachePtr; \ + if (((interp) == NULL) || \ + ((cachePtr = ((Interp *)(interp))->allocCache), \ + (cachePtr->numObjects >= ALLOC_NOBJHIGH))) { \ + TclThreadFreeObj(objPtr); \ + } else { \ + (objPtr)->internalRep.otherValuePtr = cachePtr->firstObjPtr; \ + cachePtr->firstObjPtr = objPtr; \ 
+ ++cachePtr->numObjects; \ + } \ + } while (0) -#if !defined(TCL_ALLOCATOR) -# if defined(USE_THREAD_ALLOC) || defined(USE_TCLALLOC) -# define TCL_ALLOCATOR aZIPPY -# else -# define TCL_ALLOCATOR aNATIVE -# endif -#endif +#else /* not PURIFY or USE_THREAD_ALLOC */ -#if TCL_ALLOCATOR < aNONE /* native or purify */ -# define TclpAlloc(size) malloc(size) -# define TclpRealloc(ptr, size) realloc((ptr),(size)) -# define TclpFree(size) free(size) -# define TclAllocMaximize(ptr) UINT_MAX -#else - MODULE_SCOPE char * TclpAlloc(unsigned int size); - MODULE_SCOPE char * TclpRealloc(char * ptr, unsigned int size); - MODULE_SCOPE void TclpFree(char * ptr); - MODULE_SCOPE unsigned int TclAllocMaximize(void *ptr); +#ifdef TCL_THREADS +/* declared in tclObj.c */ +MODULE_SCOPE Tcl_Mutex tclObjMutex; #endif -#if TCL_ALLOCATOR == aPURIFY -# define TclSmallAlloc() ckalloc(sizeof(Tcl_Obj)) -# define TclSmallFree(ptr) ckfree(ptr) -# define TclInitAlloc() -# define TclFinalizeAlloc() -# define TclFreeAllocCache(ptr) -#else - MODULE_SCOPE void * TclSmallAlloc(); - MODULE_SCOPE void TclSmallFree(void *ptr); - MODULE_SCOPE void TclInitAlloc(void); - MODULE_SCOPE void TclFinalizeAlloc(void); +# define TclAllocObjStorageEx(interp, objPtr) \ + do { \ + Tcl_MutexLock(&tclObjMutex); \ + if (tclFreeObjList == NULL) { \ + TclAllocateFreeObjects(); \ + } \ + (objPtr) = tclFreeObjList; \ + tclFreeObjList = (Tcl_Obj *) \ + tclFreeObjList->internalRep.otherValuePtr; \ + Tcl_MutexUnlock(&tclObjMutex); \ + } while (0) + +# define TclFreeObjStorageEx(interp, objPtr) \ + do { \ + Tcl_MutexLock(&tclObjMutex); \ + (objPtr)->internalRep.otherValuePtr = (void *) tclFreeObjList; \ + tclFreeObjList = (objPtr); \ + Tcl_MutexUnlock(&tclObjMutex); \ + } while (0) #endif -#define TclCkSmallAlloc(nbytes, memPtr) \ - do { \ - TCL_CT_ASSERT((nbytes)<=sizeof(Tcl_Obj)); \ - memPtr = TclSmallAlloc(); \ +#else /* TCL_MEM_DEBUG */ +MODULE_SCOPE void TclDbInitNewObj(Tcl_Obj *objPtr, const char *file, + int line); + +# 
define TclDbNewObj(objPtr, file, line) \ + do { \ + TclIncrObjsAllocated(); \ + (objPtr) = (Tcl_Obj *) \ + Tcl_DbCkalloc(sizeof(Tcl_Obj), (file), (line)); \ + TclDbInitNewObj((objPtr), (file), (line)); \ + TCL_DTRACE_OBJ_CREATE(objPtr); \ } while (0) -/* - * Support for Clang Static Analyzer <http://clang-analyzer.llvm.org> - */ +# define TclNewObj(objPtr) \ + TclDbNewObj(objPtr, __FILE__, __LINE__); -#if (TCL_ALLOCATOR == aPURIFY) && defined(__clang__) -#if __has_feature(attribute_analyzer_noreturn) && \ - !defined(Tcl_Panic) && defined(Tcl_Panic_TCL_DECLARED) -void Tcl_Panic(const char *, ...) __attribute__((analyzer_noreturn)); -#endif -#if !defined(CLANG_ASSERT) -#include <assert.h> -#define CLANG_ASSERT(x) assert(x) -#endif -#elif !defined(CLANG_ASSERT) - #define CLANG_ASSERT(x) -#endif /* PURIFY && __clang__ */ +# define TclDecrRefCount(objPtr) \ + Tcl_DbDecrRefCount(objPtr, __FILE__, __LINE__) +# define TclNewListObjDirect(objc, objv) \ + TclDbNewListObjDirect(objc, objv, __FILE__, __LINE__) +#undef USE_THREAD_ALLOC +#endif /* TCL_MEM_DEBUG */ /* *---------------------------------------------------------------- @@ -4405,11 +4471,73 @@ MODULE_SCOPE Tcl_PackageInitProc Procbodytest_SafeInit; {enum { ct_assert_value = 1/(!!(e)) };} /* + *---------------------------------------------------------------- + * Allocator for small structs (<=sizeof(Tcl_Obj)) using the Tcl_Obj pool. + * Only checked at compile time. + * + * ONLY USE FOR CONSTANT nBytes. 
+ * + * DO NOT LET THEM CROSS THREAD BOUNDARIES + *---------------------------------------------------------------- + */ + +#define TclSmallAlloc(nbytes, memPtr) \ + TclSmallAllocEx(NULL, (nbytes), (memPtr)) + +#define TclSmallFree(memPtr) \ + TclSmallFreeEx(NULL, (memPtr)) + +#ifndef TCL_MEM_DEBUG +#define TclSmallAllocEx(interp, nbytes, memPtr) \ + do { \ + Tcl_Obj *objPtr; \ + TCL_CT_ASSERT((nbytes)<=sizeof(Tcl_Obj)); \ + TclIncrObjsAllocated(); \ + TclAllocObjStorageEx((interp), (objPtr)); \ + memPtr = (ClientData) (objPtr); \ + } while (0) + +#define TclSmallFreeEx(interp, memPtr) \ + do { \ + TclFreeObjStorageEx((interp), (Tcl_Obj *) (memPtr)); \ + TclIncrObjsFreed(); \ + } while (0) + +#else /* TCL_MEM_DEBUG */ +#define TclSmallAllocEx(interp, nbytes, memPtr) \ + do { \ + Tcl_Obj *objPtr; \ + TCL_CT_ASSERT((nbytes)<=sizeof(Tcl_Obj)); \ + TclNewObj(objPtr); \ + memPtr = (ClientData) objPtr; \ + } while (0) + +#define TclSmallFreeEx(interp, memPtr) \ + do { \ + Tcl_Obj *objPtr = (Tcl_Obj *) memPtr; \ + objPtr->bytes = NULL; \ + objPtr->typePtr = NULL; \ + objPtr->refCount = 1; \ + TclDecrRefCount(objPtr); \ + } while (0) +#endif /* TCL_MEM_DEBUG */ + +/* * Support for Clang Static Analyzer <http://clang-analyzer.llvm.org> */ +#if defined(PURIFY) && defined(__clang__) +#if __has_feature(attribute_analyzer_noreturn) && \ + !defined(Tcl_Panic) && defined(Tcl_Panic_TCL_DECLARED) +void Tcl_Panic(const char *, ...) 
__attribute__((analyzer_noreturn)); +#endif +#if !defined(CLANG_ASSERT) +#include <assert.h> +#define CLANG_ASSERT(x) assert(x) +#endif +#elif !defined(CLANG_ASSERT) #define CLANG_ASSERT(x) - +#endif /* PURIFY && __clang__ */ /* *---------------------------------------------------------------- @@ -4482,8 +4610,8 @@ typedef struct NRE_callback { #if NRE_USE_SMALL_ALLOC #define TCLNR_ALLOC(interp, ptr) \ - TclCkSmallAlloc(sizeof(NRE_callback), (ptr)) -#define TCLNR_FREE(interp, ptr) TclSmallFree(ptr) + TclSmallAllocEx(interp, sizeof(NRE_callback), (ptr)) +#define TCLNR_FREE(interp, ptr) TclSmallFreeEx((interp), (ptr)) #else #define TCLNR_ALLOC(interp, ptr) \ (ptr = ((ClientData) ckalloc(sizeof(NRE_callback)))) diff --git a/generic/tclIntDecls.h b/generic/tclIntDecls.h index 0e9d54f..b294e4f 100644 --- a/generic/tclIntDecls.h +++ b/generic/tclIntDecls.h @@ -58,7 +58,8 @@ /* Slot 0 is reserved */ /* Slot 1 is reserved */ /* Slot 2 is reserved */ -/* Slot 3 is reserved */ +/* 3 */ +EXTERN void TclAllocateFreeObjects(void); /* Slot 4 is reserved */ /* 5 */ EXTERN int TclCleanupChildren(Tcl_Interp *interp, int numPids, @@ -199,12 +200,14 @@ EXTERN int TclObjInvoke(Tcl_Interp *interp, int objc, /* Slot 66 is reserved */ /* Slot 67 is reserved */ /* Slot 68 is reserved */ -/* Slot 69 is reserved */ +/* 69 */ +EXTERN char * TclpAlloc(unsigned int size); /* Slot 70 is reserved */ /* Slot 71 is reserved */ /* Slot 72 is reserved */ /* Slot 73 is reserved */ -/* Slot 74 is reserved */ +/* 74 */ +EXTERN void TclpFree(char *ptr); /* 75 */ EXTERN unsigned long TclpGetClicks(void); /* 76 */ @@ -215,7 +218,8 @@ EXTERN void TclpGetTime(Tcl_Time *time); EXTERN int TclpGetTimeZone(unsigned long time); /* Slot 79 is reserved */ /* Slot 80 is reserved */ -/* Slot 81 is reserved */ +/* 81 */ +EXTERN char * TclpRealloc(char *ptr, unsigned int size); /* Slot 82 is reserved */ /* Slot 83 is reserved */ /* Slot 84 is reserved */ @@ -502,8 +506,10 @@ EXTERN Tcl_Obj * 
TclGetObjNameOfExecutable(void); /* 214 */ EXTERN void TclSetObjNameOfExecutable(Tcl_Obj *name, Tcl_Encoding encoding); -/* Slot 215 is reserved */ -/* Slot 216 is reserved */ +/* 215 */ +EXTERN void * TclStackAlloc(Tcl_Interp *interp, int numBytes); +/* 216 */ +EXTERN void TclStackFree(Tcl_Interp *interp, void *freePtr); /* 217 */ EXTERN int TclPushStackFrame(Tcl_Interp *interp, Tcl_CallFrame **framePtrPtr, @@ -522,7 +528,8 @@ EXTERN TclPlatformType * TclGetPlatform(void); EXTERN Tcl_Obj * TclTraceDictPath(Tcl_Interp *interp, Tcl_Obj *rootPtr, int keyc, Tcl_Obj *const keyv[], int flags); -/* Slot 226 is reserved */ +/* 226 */ +EXTERN int TclObjBeingDeleted(Tcl_Obj *objPtr); /* 227 */ EXTERN void TclSetNsPath(Namespace *nsPtr, int pathLength, Tcl_Namespace *pathAry[]); @@ -602,7 +609,7 @@ typedef struct TclIntStubs { void (*reserved0)(void); void (*reserved1)(void); void (*reserved2)(void); - void (*reserved3)(void); + void (*tclAllocateFreeObjects) (void); /* 3 */ void (*reserved4)(void); int (*tclCleanupChildren) (Tcl_Interp *interp, int numPids, Tcl_Pid *pidPtr, Tcl_Channel errorChan); /* 5 */ void (*tclCleanupCommand) (Command *cmdPtr); /* 6 */ @@ -668,19 +675,19 @@ typedef struct TclIntStubs { void (*reserved66)(void); void (*reserved67)(void); void (*reserved68)(void); - void (*reserved69)(void); + char * (*tclpAlloc) (unsigned int size); /* 69 */ void (*reserved70)(void); void (*reserved71)(void); void (*reserved72)(void); void (*reserved73)(void); - void (*reserved74)(void); + void (*tclpFree) (char *ptr); /* 74 */ unsigned long (*tclpGetClicks) (void); /* 75 */ unsigned long (*tclpGetSeconds) (void); /* 76 */ void (*tclpGetTime) (Tcl_Time *time); /* 77 */ int (*tclpGetTimeZone) (unsigned long time); /* 78 */ void (*reserved79)(void); void (*reserved80)(void); - void (*reserved81)(void); + char * (*tclpRealloc) (char *ptr, unsigned int size); /* 81 */ void (*reserved82)(void); void (*reserved83)(void); void (*reserved84)(void); @@ -814,8 +821,8 @@ typedef 
struct TclIntStubs { void (*tclpFindExecutable) (const char *argv0); /* 212 */ Tcl_Obj * (*tclGetObjNameOfExecutable) (void); /* 213 */ void (*tclSetObjNameOfExecutable) (Tcl_Obj *name, Tcl_Encoding encoding); /* 214 */ - void (*reserved215)(void); - void (*reserved216)(void); + void * (*tclStackAlloc) (Tcl_Interp *interp, int numBytes); /* 215 */ + void (*tclStackFree) (Tcl_Interp *interp, void *freePtr); /* 216 */ int (*tclPushStackFrame) (Tcl_Interp *interp, Tcl_CallFrame **framePtrPtr, Tcl_Namespace *namespacePtr, int isProcCallFrame); /* 217 */ void (*tclPopStackFrame) (Tcl_Interp *interp); /* 218 */ void (*reserved219)(void); @@ -825,7 +832,7 @@ typedef struct TclIntStubs { void (*reserved223)(void); TclPlatformType * (*tclGetPlatform) (void); /* 224 */ Tcl_Obj * (*tclTraceDictPath) (Tcl_Interp *interp, Tcl_Obj *rootPtr, int keyc, Tcl_Obj *const keyv[], int flags); /* 225 */ - void (*reserved226)(void); + int (*tclObjBeingDeleted) (Tcl_Obj *objPtr); /* 226 */ void (*tclSetNsPath) (Namespace *nsPtr, int pathLength, Tcl_Namespace *pathAry[]); /* 227 */ void (*reserved228)(void); int (*tclPtrMakeUpvar) (Tcl_Interp *interp, Var *otherP1Ptr, const char *myName, int myFlags, int index); /* 229 */ @@ -869,7 +876,8 @@ extern const TclIntStubs *tclIntStubsPtr; /* Slot 0 is reserved */ /* Slot 1 is reserved */ /* Slot 2 is reserved */ -/* Slot 3 is reserved */ +#define TclAllocateFreeObjects \ + (tclIntStubsPtr->tclAllocateFreeObjects) /* 3 */ /* Slot 4 is reserved */ #define TclCleanupChildren \ (tclIntStubsPtr->tclCleanupChildren) /* 5 */ @@ -973,12 +981,14 @@ extern const TclIntStubs *tclIntStubsPtr; /* Slot 66 is reserved */ /* Slot 67 is reserved */ /* Slot 68 is reserved */ -/* Slot 69 is reserved */ +#define TclpAlloc \ + (tclIntStubsPtr->tclpAlloc) /* 69 */ /* Slot 70 is reserved */ /* Slot 71 is reserved */ /* Slot 72 is reserved */ /* Slot 73 is reserved */ -/* Slot 74 is reserved */ +#define TclpFree \ + (tclIntStubsPtr->tclpFree) /* 74 */ #define 
TclpGetClicks \ (tclIntStubsPtr->tclpGetClicks) /* 75 */ #define TclpGetSeconds \ @@ -989,7 +999,8 @@ extern const TclIntStubs *tclIntStubsPtr; (tclIntStubsPtr->tclpGetTimeZone) /* 78 */ /* Slot 79 is reserved */ /* Slot 80 is reserved */ -/* Slot 81 is reserved */ +#define TclpRealloc \ + (tclIntStubsPtr->tclpRealloc) /* 81 */ /* Slot 82 is reserved */ /* Slot 83 is reserved */ /* Slot 84 is reserved */ @@ -1205,8 +1216,10 @@ extern const TclIntStubs *tclIntStubsPtr; (tclIntStubsPtr->tclGetObjNameOfExecutable) /* 213 */ #define TclSetObjNameOfExecutable \ (tclIntStubsPtr->tclSetObjNameOfExecutable) /* 214 */ -/* Slot 215 is reserved */ -/* Slot 216 is reserved */ +#define TclStackAlloc \ + (tclIntStubsPtr->tclStackAlloc) /* 215 */ +#define TclStackFree \ + (tclIntStubsPtr->tclStackFree) /* 216 */ #define TclPushStackFrame \ (tclIntStubsPtr->tclPushStackFrame) /* 217 */ #define TclPopStackFrame \ @@ -1220,7 +1233,8 @@ extern const TclIntStubs *tclIntStubsPtr; (tclIntStubsPtr->tclGetPlatform) /* 224 */ #define TclTraceDictPath \ (tclIntStubsPtr->tclTraceDictPath) /* 225 */ -/* Slot 226 is reserved */ +#define TclObjBeingDeleted \ + (tclIntStubsPtr->tclObjBeingDeleted) /* 226 */ #define TclSetNsPath \ (tclIntStubsPtr->tclSetNsPath) /* 227 */ /* Slot 228 is reserved */ diff --git a/generic/tclInterp.c b/generic/tclInterp.c index 46a5f42..67761ed 100644 --- a/generic/tclInterp.c +++ b/generic/tclInterp.c @@ -1169,7 +1169,7 @@ Tcl_CreateAlias( int i; int result; - objv = ckalloc((unsigned) sizeof(Tcl_Obj *) * argc); + objv = TclStackAlloc(slaveInterp, (unsigned) sizeof(Tcl_Obj *) * argc); for (i = 0; i < argc; i++) { objv[i] = Tcl_NewStringObj(argv[i], -1); Tcl_IncrRefCount(objv[i]); @@ -1187,7 +1187,7 @@ Tcl_CreateAlias( for (i = 0; i < argc; i++) { Tcl_DecrRefCount(objv[i]); } - ckfree(objv); + TclStackFree(slaveInterp, objv); Tcl_DecrRefCount(targetObjPtr); Tcl_DecrRefCount(slaveObjPtr); @@ -1863,7 +1863,7 @@ AliasObjCmd( if (cmdc <= ALIAS_CMDV_PREALLOC) { cmdv = 
cmdArr; } else { - cmdv = ckalloc(cmdc * sizeof(Tcl_Obj *)); + cmdv = TclStackAlloc(interp, cmdc * sizeof(Tcl_Obj *)); } prefv = &aliasPtr->objPtr; @@ -1930,7 +1930,7 @@ AliasObjCmd( Tcl_DecrRefCount(cmdv[i]); } if (cmdv != cmdArr) { - ckfree(cmdv); + TclStackFree(interp, cmdv); } return result; #undef ALIAS_CMDV_PREALLOC diff --git a/generic/tclListObj.c b/generic/tclListObj.c index 4c1e219..46710d6 100644 --- a/generic/tclListObj.c +++ b/generic/tclListObj.c @@ -67,20 +67,13 @@ const Tcl_ObjType tclListType = { *---------------------------------------------------------------------- */ -#define Elems2Size(n) \ - (sizeof(List) - sizeof(Tcl_Obj *) + n*sizeof(Tcl_Obj *)) - -#define Size2Elems(s) \ - (s - (sizeof(List) - sizeof(Tcl_Obj *)))/sizeof(Tcl_Obj *) - static List * NewListIntRep( int objc, Tcl_Obj *const objv[]) { List *listRepPtr; - unsigned int allocSize; - + if (objc <= 0) { return NULL; } @@ -96,17 +89,14 @@ NewListIntRep( return NULL; } - listRepPtr = attemptckalloc(Elems2Size(objc)); + listRepPtr = attemptckalloc(sizeof(List) + ((objc-1) * sizeof(Tcl_Obj*))); if (listRepPtr == NULL) { return NULL; } - allocSize = TclAllocMaximize(listRepPtr); - + listRepPtr->canonicalFlag = 0; listRepPtr->refCount = 0; - listRepPtr->maxElemCount = (allocSize == UINT_MAX) - ? objc - : Size2Elems(allocSize); + listRepPtr->maxElemCount = objc; if (objv) { Tcl_Obj **elemPtrs; @@ -586,7 +576,7 @@ Tcl_ListObjAppendElement( if (numRequired > listRepPtr->maxElemCount){ newMax = 2 * numRequired; - newSize = Elems2Size(newMax); + newSize = sizeof(List) + ((newMax-1) * sizeof(Tcl_Obj *)); } else { newMax = listRepPtr->maxElemCount; newSize = 0; @@ -611,10 +601,7 @@ Tcl_ListObjAppendElement( oldListRepPtr->refCount--; } else if (newSize) { listRepPtr = ckrealloc(listRepPtr, newSize); - newSize = TclAllocMaximize(listRepPtr); - listRepPtr->maxElemCount = (newSize == UINT_MAX) - ? 
newMax - : Size2Elems(newSize); + listRepPtr->maxElemCount = newMax; } listPtr->internalRep.twoPtrValue.ptr1 = listRepPtr; diff --git a/generic/tclNamesp.c b/generic/tclNamesp.c index 08a9443..ad233b9 100644 --- a/generic/tclNamesp.c +++ b/generic/tclNamesp.c @@ -465,7 +465,7 @@ TclPushStackFrame( * treated as references to namespace * variables. */ { - *framePtrPtr = ckalloc(sizeof(CallFrame)); + *framePtrPtr = TclStackAlloc(interp, sizeof(CallFrame)); return Tcl_PushCallFrame(interp, *framePtrPtr, namespacePtr, isProcCallFrame); } @@ -477,7 +477,7 @@ TclPopStackFrame( CallFrame *freePtr = ((Interp *) interp)->framePtr; Tcl_PopCallFrame(interp); - ckfree(freePtr); + TclStackFree(interp, freePtr); } /* @@ -2632,7 +2632,8 @@ TclResetShadowedCmdRefs( int found, i; int trailFront = -1; int trailSize = 5; /* Formerly NUM_TRAIL_ELEMS. */ - Namespace **trailPtr = ckalloc(trailSize * sizeof(Namespace *)); + Namespace **trailPtr = TclStackAlloc(interp, + trailSize * sizeof(Namespace *)); /* * Start at the namespace containing the new command, and work up through @@ -2721,12 +2722,13 @@ TclResetShadowedCmdRefs( if (trailFront == trailSize) { int newSize = 2 * trailSize; - trailPtr = ckrealloc(trailPtr, newSize * sizeof(Namespace *)); + trailPtr = TclStackRealloc(interp, trailPtr, + newSize * sizeof(Namespace *)); trailSize = newSize; } trailPtr[trailFront] = nsPtr; } - ckfree(trailPtr); + TclStackFree(interp, trailPtr); } /* @@ -3968,7 +3970,8 @@ NamespacePathCmd( goto badNamespace; } if (nsObjc != 0) { - namespaceList = ckalloc(sizeof(Tcl_Namespace *) * nsObjc); + namespaceList = TclStackAlloc(interp, + sizeof(Tcl_Namespace *) * nsObjc); for (i=0 ; i<nsObjc ; i++) { if (TclGetNamespaceFromObj(interp, nsObjv[i], @@ -3987,7 +3990,7 @@ NamespacePathCmd( result = TCL_OK; badNamespace: if (namespaceList != NULL) { - ckfree(namespaceList); + TclStackFree(interp, namespaceList); } return result; } diff --git a/generic/tclOOCall.c b/generic/tclOOCall.c index 8814819..1e8d1a3 
100644 --- a/generic/tclOOCall.c +++ b/generic/tclOOCall.c @@ -104,7 +104,7 @@ TclOODeleteContext( register Object *oPtr = contextPtr->oPtr; TclOODeleteChain(contextPtr->callPtr); - ckfree(contextPtr); + TclStackFree(oPtr->fPtr->interp, contextPtr); DelRef(oPtr); } @@ -1087,7 +1087,7 @@ TclOOGetCallContext( } returnContext: - contextPtr = ckalloc(sizeof(CallContext)); + contextPtr = TclStackAlloc(oPtr->fPtr->interp, sizeof(CallContext)); contextPtr->oPtr = oPtr; AddRef(oPtr); contextPtr->callPtr = callPtr; diff --git a/generic/tclOODefineCmds.c b/generic/tclOODefineCmds.c index cc3a0ad..8d8eb85 100644 --- a/generic/tclOODefineCmds.c +++ b/generic/tclOODefineCmds.c @@ -455,7 +455,7 @@ TclOOUnknownDefinition( * Got one match, and only one match! */ - Tcl_Obj **newObjv = ckalloc(sizeof(Tcl_Obj*)*(objc-1)); + Tcl_Obj **newObjv = TclStackAlloc(interp, sizeof(Tcl_Obj*)*(objc-1)); int result; newObjv[0] = Tcl_NewStringObj(matchedStr, -1); @@ -465,7 +465,7 @@ TclOOUnknownDefinition( } result = Tcl_EvalObjv(interp, objc-1, newObjv, 0); Tcl_DecrRefCount(newObjv[0]); - ckfree(newObjv); + TclStackFree(interp, newObjv); return result; } @@ -1546,7 +1546,7 @@ TclOODefineMixinObjCmd( Tcl_AppendResult(interp, "attempt to misuse API", NULL); return TCL_ERROR; } - mixins = ckalloc(sizeof(Class *) * (objc-1)); + mixins = TclStackAlloc(interp, sizeof(Class *) * (objc-1)); for (i=1 ; i<objc ; i++) { Class *clsPtr = GetClassInOuterContext(interp, objv[i], @@ -1568,11 +1568,11 @@ TclOODefineMixinObjCmd( TclOOClassSetMixins(interp, oPtr->classPtr, objc-1, mixins); } - ckfree(mixins); + TclStackFree(interp, mixins); return TCL_OK; freeAndError: - ckfree(mixins); + TclStackFree(interp, mixins); return TCL_ERROR; } diff --git a/generic/tclOOMethod.c b/generic/tclOOMethod.c index 0996eab..112d663 100644 --- a/generic/tclOOMethod.c +++ b/generic/tclOOMethod.c @@ -686,7 +686,7 @@ InvokeProcedureMethod( * Allocate the special frame data. 
*/ - fdPtr = ckalloc(sizeof(PMFrameData)); + fdPtr = TclStackAlloc(interp, sizeof(PMFrameData)); /* * Create a call frame for this method. @@ -695,7 +695,7 @@ InvokeProcedureMethod( result = PushMethodCallFrame(interp, (CallContext *) context, pmPtr, objc, objv, fdPtr); if (result != TCL_OK) { - ckfree(fdPtr); + TclStackFree(interp, fdPtr); return result; } pmPtr->refCount++; @@ -719,11 +719,11 @@ InvokeProcedureMethod( pmPtr->procPtr->cmdPtr = fdPtr->oldCmdPtr; Tcl_PopCallFrame(interp); - ckfree(fdPtr->framePtr); + TclStackFree(interp, fdPtr->framePtr); if (--pmPtr->refCount < 1) { DeleteProcedureMethodRecord(pmPtr); } - ckfree(fdPtr); + TclStackFree(interp, fdPtr); return result; } } @@ -774,7 +774,7 @@ FinalizePMCall( if (--pmPtr->refCount < 1) { DeleteProcedureMethodRecord(pmPtr); } - ckfree(fdPtr); + TclStackFree(interp, fdPtr); return result; } @@ -1447,7 +1447,7 @@ FinalizeForwardCall( { Tcl_Obj **argObjs = data[0]; - ckfree(argObjs); + TclStackFree(interp, argObjs); return result; } @@ -1576,7 +1576,7 @@ InitEnsembleRewrite( Tcl_Obj **argObjs; unsigned len = rewriteLength + objc - toRewrite; - argObjs = ckalloc(sizeof(Tcl_Obj *) * len); + argObjs = TclStackAlloc(interp, sizeof(Tcl_Obj *) * len); memcpy(argObjs, rewriteObjs, rewriteLength * sizeof(Tcl_Obj *)); memcpy(argObjs + rewriteLength, objv + toRewrite, sizeof(Tcl_Obj *) * (objc - toRewrite)); diff --git a/generic/tclObj.c b/generic/tclObj.c index 4298f62..3bc6f12 100644 --- a/generic/tclObj.c +++ b/generic/tclObj.c @@ -26,8 +26,20 @@ static Tcl_HashTable typeTable; static int typeTableInitialized = 0; /* 0 means not yet initialized. */ TCL_DECLARE_MUTEX(tableMutex) -#if defined(TCL_THREADS) && defined(TCL_COMPILE_STATS) -static Tcl_Mutex tclObjMutex; +/* + * Head of the list of free Tcl_Obj structs we maintain. + */ + +Tcl_Obj *tclFreeObjList = NULL; + +/* + * The object allocator is single threaded. This mutex is referenced by the + * TclNewObj macro, however, so must be visible. 
+ */ + +#ifdef TCL_THREADS +MODULE_SCOPE Tcl_Mutex tclObjMutex; +Tcl_Mutex tclObjMutex; #endif /* @@ -483,6 +495,15 @@ TclFinalizeObjects(void) typeTableInitialized = 0; } Tcl_MutexUnlock(&tableMutex); + + /* + * All we do here is reset the head pointer of the linked list of free + * Tcl_Obj's to NULL; the memory finalization will take care of releasing + * memory for us. + */ + Tcl_MutexLock(&tclObjMutex); + tclFreeObjList = NULL; + Tcl_MutexUnlock(&tclObjMutex); } /* @@ -1217,6 +1238,59 @@ Tcl_DbNewObj( /* *---------------------------------------------------------------------- * + * TclAllocateFreeObjects -- + * + * Function to allocate a number of free Tcl_Objs. This is done using a + * single ckalloc to reduce the overhead for Tcl_Obj allocation. + * + * Assumes mutex is held. + * + * Results: + * None. + * + * Side effects: + * tclFreeObjList, the head of the list of free Tcl_Objs, is set to the + * first of a number of free Tcl_Obj's linked together by their + * internalRep.otherValuePtrs. + * + *---------------------------------------------------------------------- + */ + +#define OBJS_TO_ALLOC_EACH_TIME 100 + +void +TclAllocateFreeObjects(void) +{ + size_t bytesToAlloc = (OBJS_TO_ALLOC_EACH_TIME * sizeof(Tcl_Obj)); + char *basePtr; + register Tcl_Obj *prevPtr, *objPtr; + register int i; + + /* + * This has been noted by Purify to be a potential leak. The problem is + * that Tcl, when not TCL_MEM_DEBUG compiled, keeps around all allocated + * Tcl_Obj's, pointed to by tclFreeObjList, when freed instead of actually + * freeing the memory. TclFinalizeObjects() does not ckfree() this memory, + * but leaves it to Tcl's memory subsystem finalization to release it. + * Purify apparently can't figure that out, and fires a false alarm. 
+ */ + + basePtr = ckalloc(bytesToAlloc); + + prevPtr = NULL; + objPtr = (Tcl_Obj *) basePtr; + for (i = 0; i < OBJS_TO_ALLOC_EACH_TIME; i++) { + objPtr->internalRep.otherValuePtr = prevPtr; + prevPtr = objPtr; + objPtr++; + } + tclFreeObjList = prevPtr; +} +#undef OBJS_TO_ALLOC_EACH_TIME + +/* + *---------------------------------------------------------------------- + * * TclFreeObj -- * * This function frees the memory associated with the argument object. @@ -1262,6 +1336,7 @@ TclFreeObj( */ TclInvalidateStringRep(objPtr); + objPtr->length = -1; if (ObjDeletePending(context)) { PushObjToDelete(context, objPtr); @@ -1329,6 +1404,7 @@ TclFreeObj( */ TclInvalidateStringRep(objPtr); + objPtr->length = -1; if (!objPtr->typePtr || !objPtr->typePtr->freeIntRepProc) { /* @@ -1411,6 +1487,31 @@ TclFreeObj( /* *---------------------------------------------------------------------- * + * TclObjBeingDeleted -- + * + * This function returns 1 when the Tcl_Obj is being deleted. It is + * provided for the rare cases where the reason for the loss of an + * internal rep might be relevant. [FR 1512138] + * + * Results: + * 1 if being deleted, 0 otherwise. + * + * Side effects: + * None. 
+ * + *---------------------------------------------------------------------- + */ + +int +TclObjBeingDeleted( + Tcl_Obj *objPtr) +{ + return (objPtr->length == -1); +} + +/* + *---------------------------------------------------------------------- + * * Tcl_DuplicateObj -- * * Create and return a new object that is a duplicate of the argument diff --git a/generic/tclParse.c b/generic/tclParse.c index afd4c0b..9bfe608 100644 --- a/generic/tclParse.c +++ b/generic/tclParse.c @@ -1129,14 +1129,14 @@ ParseTokens( src++; numBytes--; - nestedPtr = ckalloc(sizeof(Tcl_Parse)); + nestedPtr = TclStackAlloc(parsePtr->interp, sizeof(Tcl_Parse)); while (1) { if (Tcl_ParseCommand(parsePtr->interp, src, numBytes, 1, nestedPtr) != TCL_OK) { parsePtr->errorType = nestedPtr->errorType; parsePtr->term = nestedPtr->term; parsePtr->incomplete = nestedPtr->incomplete; - ckfree(nestedPtr); + TclStackFree(parsePtr->interp, nestedPtr); return TCL_ERROR; } src = nestedPtr->commandStart + nestedPtr->commandSize; @@ -1162,11 +1162,11 @@ ParseTokens( parsePtr->errorType = TCL_PARSE_MISSING_BRACKET; parsePtr->term = tokenPtr->start; parsePtr->incomplete = 1; - ckfree(nestedPtr); + TclStackFree(parsePtr->interp, nestedPtr); return TCL_ERROR; } } - ckfree(nestedPtr); + TclStackFree(parsePtr->interp, nestedPtr); tokenPtr->type = TCL_TOKEN_COMMAND; tokenPtr->size = src - tokenPtr->start; parsePtr->numTokens++; @@ -1526,10 +1526,10 @@ Tcl_ParseVar( { register Tcl_Obj *objPtr; int code; - Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse)); + Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse)); if (Tcl_ParseVarName(interp, start, -1, parsePtr, 0) != TCL_OK) { - ckfree(parsePtr); + TclStackFree(interp, parsePtr); return NULL; } @@ -1541,13 +1541,13 @@ Tcl_ParseVar( * There isn't a variable name after all: the $ is just a $. 
*/ - ckfree(parsePtr); + TclStackFree(interp, parsePtr); return "$"; } code = TclSubstTokens(interp, parsePtr->tokenPtr, parsePtr->numTokens, NULL, 1, NULL, NULL); - ckfree(parsePtr); + TclStackFree(interp, parsePtr); if (code != TCL_OK) { return NULL; } @@ -2008,7 +2008,7 @@ TclSubstParse( Tcl_Token *tokenPtr; const char *lastTerm = parsePtr->term; Tcl_Parse *nestedPtr = - ckalloc(sizeof(Tcl_Parse)); + TclStackAlloc(interp, sizeof(Tcl_Parse)); while (TCL_OK == Tcl_ParseCommand(NULL, p, length, 0, nestedPtr)) { @@ -2026,7 +2026,7 @@ TclSubstParse( } lastTerm = nestedPtr->term; } - ckfree(nestedPtr); + TclStackFree(interp, nestedPtr); if (lastTerm == parsePtr->term) { /* diff --git a/generic/tclProc.c b/generic/tclProc.c index 63dd61d..6cd5bb2 100644 --- a/generic/tclProc.c +++ b/generic/tclProc.c @@ -222,7 +222,7 @@ Tcl_ProcObjCmd( */ if (iPtr->cmdFramePtr) { - CmdFrame *contextPtr = ckalloc(sizeof(CmdFrame)); + CmdFrame *contextPtr = TclStackAlloc(interp, sizeof(CmdFrame)); *contextPtr = *iPtr->cmdFramePtr; if (contextPtr->type == TCL_LOCATION_BC) { @@ -300,7 +300,7 @@ Tcl_ProcObjCmd( Tcl_DecrRefCount(contextPtr->data.eval.path); contextPtr->data.eval.path = NULL; } - ckfree(contextPtr); + TclStackFree(interp, contextPtr); } /* @@ -1096,7 +1096,8 @@ ProcWrongNumArgs( */ numArgs = framePtr->procPtr->numArgs; - desiredObjs = ckalloc((int) sizeof(Tcl_Obj *) * (numArgs+1)); + desiredObjs = TclStackAlloc(interp, + (int) sizeof(Tcl_Obj *) * (numArgs+1)); if (framePtr->isProcCallFrame & FRAME_IS_LAMBDA) { desiredObjs[0] = Tcl_NewStringObj("lambdaExpr", -1); @@ -1134,7 +1135,7 @@ ProcWrongNumArgs( for (i=0 ; i<=numArgs ; i++) { Tcl_DecrRefCount(desiredObjs[i]); } - ckfree(desiredObjs); + TclStackFree(interp, desiredObjs); return TCL_ERROR; } @@ -1448,7 +1449,7 @@ InitArgsAndLocals( * parameters. 
*/ - varPtr = ckalloc((int)(localCt * sizeof(Var))); + varPtr = TclStackAlloc(interp, (int)(localCt * sizeof(Var))); framePtr->compiledLocals = varPtr; framePtr->numCompiledLocals = localCt; @@ -1739,9 +1740,9 @@ TclNRInterpProcCore( if (result != TCL_OK) { freePtr = iPtr->framePtr; Tcl_PopCallFrame(interp); /* Pop but do not free. */ - ckfree(freePtr->compiledLocals); + TclStackFree(interp, freePtr->compiledLocals); /* Free compiledLocals. */ - ckfree(freePtr); /* Free CallFrame. */ + TclStackFree(interp, freePtr); /* Free CallFrame. */ return TCL_ERROR; } @@ -1911,9 +1912,9 @@ InterpProcNR2( freePtr = iPtr->framePtr; Tcl_PopCallFrame(interp); /* Pop but do not free. */ - ckfree(freePtr->compiledLocals); + TclStackFree(interp, freePtr->compiledLocals); /* Free compiledLocals. */ - ckfree(freePtr); /* Free CallFrame. */ + TclStackFree(interp, freePtr); /* Free CallFrame. */ return result; } @@ -2515,7 +2516,7 @@ SetLambdaFromAny( */ if (iPtr->cmdFramePtr) { - CmdFrame *contextPtr = ckalloc(sizeof(CmdFrame)); + CmdFrame *contextPtr = TclStackAlloc(interp, sizeof(CmdFrame)); *contextPtr = *iPtr->cmdFramePtr; if (contextPtr->type == TCL_LOCATION_BC) { @@ -2579,7 +2580,7 @@ SetLambdaFromAny( Tcl_DecrRefCount(contextPtr->data.eval.path); } - ckfree(contextPtr); + TclStackFree(interp, contextPtr); } /* @@ -2716,7 +2717,7 @@ TclNRApplyObjCmd( return TCL_ERROR; } - extraPtr = ckalloc(sizeof(ApplyExtraData)); + extraPtr = TclStackAlloc(interp, sizeof(ApplyExtraData)); memset(&extraPtr->cmd, 0, sizeof(Command)); procPtr->cmdPtr = &extraPtr->cmd; extraPtr->cmd.nsPtr = (Namespace *) nsPtr; @@ -2767,7 +2768,7 @@ ApplyNR2( ((Interp *) interp)->ensembleRewrite.sourceObjs = NULL; } - ckfree(extraPtr); + TclStackFree(interp, extraPtr); return result; } diff --git a/generic/tclScan.c b/generic/tclScan.c index 45f970d..c862be4 100644 --- a/generic/tclScan.c +++ b/generic/tclScan.c @@ -259,7 +259,7 @@ ValidateFormat( char *end; Tcl_UniChar ch; int objIndex, xpgSize, nspace = numVars; 
- int *nassign = ckalloc(nspace * sizeof(int)); + int *nassign = TclStackAlloc(interp, nspace * sizeof(int)); char buf[TCL_UTF_MAX+1]; /* @@ -465,7 +465,8 @@ ValidateFormat( } else { nspace += 16; /* formerly STATIC_LIST_SIZE */ } - nassign = ckrealloc(nassign, nspace * sizeof(int)); + nassign = TclStackRealloc(interp, nassign, + nspace * sizeof(int)); for (i = value; i < nspace; i++) { nassign[i] = 0; } @@ -508,7 +509,7 @@ ValidateFormat( } } - ckfree(nassign); + TclStackFree(interp, nassign); return TCL_OK; badIndex: @@ -522,7 +523,7 @@ ValidateFormat( } error: - ckfree(nassign); + TclStackFree(interp, nassign); return TCL_ERROR; } diff --git a/generic/tclStubInit.c b/generic/tclStubInit.c index dcf6005..eb9a9be 100644 --- a/generic/tclStubInit.c +++ b/generic/tclStubInit.c @@ -57,7 +57,7 @@ static const TclIntStubs tclIntStubs = { 0, /* 0 */ 0, /* 1 */ 0, /* 2 */ - 0, /* 3 */ + TclAllocateFreeObjects, /* 3 */ 0, /* 4 */ TclCleanupChildren, /* 5 */ TclCleanupCommand, /* 6 */ @@ -123,19 +123,19 @@ static const TclIntStubs tclIntStubs = { 0, /* 66 */ 0, /* 67 */ 0, /* 68 */ - 0, /* 69 */ + TclpAlloc, /* 69 */ 0, /* 70 */ 0, /* 71 */ 0, /* 72 */ 0, /* 73 */ - 0, /* 74 */ + TclpFree, /* 74 */ TclpGetClicks, /* 75 */ TclpGetSeconds, /* 76 */ TclpGetTime, /* 77 */ TclpGetTimeZone, /* 78 */ 0, /* 79 */ 0, /* 80 */ - 0, /* 81 */ + TclpRealloc, /* 81 */ 0, /* 82 */ 0, /* 83 */ 0, /* 84 */ @@ -269,8 +269,8 @@ static const TclIntStubs tclIntStubs = { TclpFindExecutable, /* 212 */ TclGetObjNameOfExecutable, /* 213 */ TclSetObjNameOfExecutable, /* 214 */ - 0, /* 215 */ - 0, /* 216 */ + TclStackAlloc, /* 215 */ + TclStackFree, /* 216 */ TclPushStackFrame, /* 217 */ TclPopStackFrame, /* 218 */ 0, /* 219 */ @@ -280,7 +280,7 @@ static const TclIntStubs tclIntStubs = { 0, /* 223 */ TclGetPlatform, /* 224 */ TclTraceDictPath, /* 225 */ - 0, /* 226 */ + TclObjBeingDeleted, /* 226 */ TclSetNsPath, /* 227 */ 0, /* 228 */ TclPtrMakeUpvar, /* 229 */ diff --git a/generic/tclTest.c 
b/generic/tclTest.c index 2878c8d..b757185 100644 --- a/generic/tclTest.c +++ b/generic/tclTest.c @@ -6720,7 +6720,7 @@ TestNRELevels( Interp *iPtr = (Interp *) interp; static ptrdiff_t *refDepth = NULL; ptrdiff_t depth; - Tcl_Obj *levels[5]; + Tcl_Obj *levels[6]; int i = 0; NRE_callback *cbPtr = iPtr->execEnvPtr->callbackPtr; @@ -6734,14 +6734,16 @@ TestNRELevels( levels[1] = Tcl_NewIntObj(iPtr->numLevels); levels[2] = Tcl_NewIntObj(iPtr->cmdFramePtr->level); levels[3] = Tcl_NewIntObj(iPtr->varFramePtr->level); + levels[4] = Tcl_NewIntObj(iPtr->execEnvPtr->execStackPtr->tosPtr + - iPtr->execEnvPtr->execStackPtr->stackWords); while (cbPtr) { i++; cbPtr = cbPtr->nextPtr; } - levels[4] = Tcl_NewIntObj(i); + levels[5] = Tcl_NewIntObj(i); - Tcl_SetObjResult(interp, Tcl_NewListObj(5, levels)); + Tcl_SetObjResult(interp, Tcl_NewListObj(6, levels)); return TCL_OK; } diff --git a/generic/tclThreadAlloc.c b/generic/tclThreadAlloc.c new file mode 100755 index 0000000..18ae9cc --- /dev/null +++ b/generic/tclThreadAlloc.c @@ -0,0 +1,1090 @@ +/* + * tclThreadAlloc.c -- + * + * This is a very fast storage allocator for used with threads (designed + * avoid lock contention). The basic strategy is to allocate memory in + * fixed size blocks from block caches. + * + * The Initial Developer of the Original Code is America Online, Inc. + * Portions created by AOL are Copyright (C) 1999 America Online, Inc. + * + * See the file "license.terms" for information on usage and redistribution of + * this file, and for a DISCLAIMER OF ALL WARRANTIES. + */ + +#include "tclInt.h" +#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) + +/* + * If range checking is enabled, an additional byte will be allocated to store + * the magic number at the end of the requested memory. 
+ */ + +#ifndef RCHECK +#ifdef NDEBUG +#define RCHECK 0 +#else +#define RCHECK 1 +#endif +#endif + +/* + * The following define the number of Tcl_Obj's to allocate/move at a time and + * the high water mark to prune a per-thread cache. On a 32 bit system, + * sizeof(Tcl_Obj) = 24 so 800 * 24 = ~16k. + */ + +#define NOBJALLOC 800 + +/* Actual definition moved to tclInt.h */ +#define NOBJHIGH ALLOC_NOBJHIGH + +/* + * The following union stores accounting information for each block including + * two small magic numbers and a bucket number when in use or a next pointer + * when free. The original requested size (not including the Block overhead) + * is also maintained. + */ + +typedef union Block { + struct { + union { + union Block *next; /* Next in free list. */ + struct { + unsigned char magic1; /* First magic number. */ + unsigned char bucket; /* Bucket block allocated from. */ + unsigned char unused; /* Padding. */ + unsigned char magic2; /* Second magic number. */ + } s; + } u; + size_t reqSize; /* Requested allocation size. */ + } b; + unsigned char padding[TCL_ALLOCALIGN]; +} Block; +#define nextBlock b.u.next +#define sourceBucket b.u.s.bucket +#define magicNum1 b.u.s.magic1 +#define magicNum2 b.u.s.magic2 +#define MAGIC 0xEF +#define blockReqSize b.reqSize + +/* + * The following defines the minimum and and maximum block sizes and the number + * of buckets in the bucket cache. + */ + +#define MINALLOC ((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1)) +#define NBUCKETS (11 - (MINALLOC >> 5)) +#define MAXALLOC (MINALLOC << (NBUCKETS - 1)) + +/* + * The following structure defines a bucket of blocks with various accounting + * and statistics information. 
+ */ + +typedef struct Bucket { + Block *firstPtr; /* First block available */ + long numFree; /* Number of blocks available */ + + /* All fields below for accounting only */ + + long numRemoves; /* Number of removes from bucket */ + long numInserts; /* Number of inserts into bucket */ + long numWaits; /* Number of waits to acquire a lock */ + long numLocks; /* Number of locks acquired */ + long totalAssigned; /* Total space assigned to bucket */ +} Bucket; + +/* + * The following structure defines a cache of buckets and objs, of which there + * will be (at most) one per thread. Any changes need to be reflected in the + * struct AllocCache defined in tclInt.h, possibly also in the initialisation + * code in Tcl_CreateInterp(). + */ + +typedef struct Cache { + struct Cache *nextPtr; /* Linked list of cache entries */ + Tcl_ThreadId owner; /* Which thread's cache is this? */ + Tcl_Obj *firstObjPtr; /* List of free objects for thread */ + int numObjects; /* Number of objects for thread */ + int totalAssigned; /* Total space assigned to thread */ + Bucket buckets[NBUCKETS]; /* The buckets for this thread */ +} Cache; + +/* + * The following array specifies various per-bucket limits and locks. The + * values are statically initialized to avoid calculating them repeatedly. + */ + +static struct { + size_t blockSize; /* Bucket blocksize. */ + int maxBlocks; /* Max blocks before move to share. */ + int numMove; /* Num blocks to move to share. */ + Tcl_Mutex *lockPtr; /* Share bucket lock. */ +} bucketInfo[NBUCKETS]; + +/* + * Static functions defined in this file. 
+ */ + +static Cache * GetCache(void); +static void LockBucket(Cache *cachePtr, int bucket); +static void UnlockBucket(Cache *cachePtr, int bucket); +static void PutBlocks(Cache *cachePtr, int bucket, int numMove); +static int GetBlocks(Cache *cachePtr, int bucket); +static Block * Ptr2Block(char *ptr); +static char * Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize); +static void MoveObjs(Cache *fromPtr, Cache *toPtr, int numMove); + +/* + * Local variables defined in this file and initialized at startup. + */ + +static Tcl_Mutex *listLockPtr; +static Tcl_Mutex *objLockPtr; +static Cache sharedCache; +static Cache *sharedPtr = &sharedCache; +static Cache *firstCachePtr = &sharedCache; + +#if defined(HAVE_FAST_TSD) +static __thread Cache *tcachePtr; +static __thread int allocInitialized = 0; + +# define GETCACHE(cachePtr) \ + do { \ + if (!allocInitialized) { \ + allocInitialized = 1; \ + tcachePtr = GetCache(); \ + } \ + (cachePtr) = tcachePtr; \ + } while (0) +#else +# define GETCACHE(cachePtr) \ + do { \ + (cachePtr) = TclpGetAllocCache(); \ + if ((cachePtr) == NULL) { \ + (cachePtr) = GetCache(); \ + } \ + } while (0) +#endif + +/* + *---------------------------------------------------------------------- + * + * GetCache --- + * + * Gets per-thread memory cache, allocating it if necessary. + * + * Results: + * Pointer to cache. + * + * Side effects: + * None. + * + *---------------------------------------------------------------------- + */ + +static Cache * +GetCache(void) +{ + Cache *cachePtr; + + /* + * Check for first-time initialization. 
+ */ + + if (listLockPtr == NULL) { + Tcl_Mutex *initLockPtr; + unsigned int i; + + initLockPtr = Tcl_GetAllocMutex(); + Tcl_MutexLock(initLockPtr); + if (listLockPtr == NULL) { + listLockPtr = TclpNewAllocMutex(); + objLockPtr = TclpNewAllocMutex(); + for (i = 0; i < NBUCKETS; ++i) { + bucketInfo[i].blockSize = MINALLOC << i; + bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i); + bucketInfo[i].numMove = i < NBUCKETS - 1 ? + 1 << (NBUCKETS - 2 - i) : 1; + bucketInfo[i].lockPtr = TclpNewAllocMutex(); + } + } + Tcl_MutexUnlock(initLockPtr); + } + + /* + * Get this thread's cache, allocating if necessary. + */ + + cachePtr = TclpGetAllocCache(); + if (cachePtr == NULL) { + cachePtr = calloc(1, sizeof(Cache)); + if (cachePtr == NULL) { + Tcl_Panic("alloc: could not allocate new cache"); + } + Tcl_MutexLock(listLockPtr); + cachePtr->nextPtr = firstCachePtr; + firstCachePtr = cachePtr; + Tcl_MutexUnlock(listLockPtr); + cachePtr->owner = Tcl_GetCurrentThread(); + TclpSetAllocCache(cachePtr); + } + return cachePtr; +} + +/* + *---------------------------------------------------------------------- + * + * TclFreeAllocCache -- + * + * Flush and delete a cache, removing from list of caches. + * + * Results: + * None. + * + * Side effects: + * None. + * + *---------------------------------------------------------------------- + */ + +void +TclFreeAllocCache( + void *arg) +{ + Cache *cachePtr = arg; + Cache **nextPtrPtr; + register unsigned int bucket; + + /* + * Flush blocks. + */ + + for (bucket = 0; bucket < NBUCKETS; ++bucket) { + if (cachePtr->buckets[bucket].numFree > 0) { + PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree); + } + } + + /* + * Flush objs. + */ + + if (cachePtr->numObjects > 0) { + Tcl_MutexLock(objLockPtr); + MoveObjs(cachePtr, sharedPtr, cachePtr->numObjects); + Tcl_MutexUnlock(objLockPtr); + } + + /* + * Remove from pool list. 
+ */ + + Tcl_MutexLock(listLockPtr); + nextPtrPtr = &firstCachePtr; + while (*nextPtrPtr != cachePtr) { + nextPtrPtr = &(*nextPtrPtr)->nextPtr; + } + *nextPtrPtr = cachePtr->nextPtr; + cachePtr->nextPtr = NULL; + Tcl_MutexUnlock(listLockPtr); + free(cachePtr); +} + +/* + *---------------------------------------------------------------------- + * + * TclpAlloc -- + * + * Allocate memory. + * + * Results: + * Pointer to memory just beyond Block pointer. + * + * Side effects: + * May allocate more blocks for a bucket. + * + *---------------------------------------------------------------------- + */ + +char * +TclpAlloc( + unsigned int reqSize) +{ + Cache *cachePtr; + Block *blockPtr; + register int bucket; + size_t size; + +#ifndef __LP64__ + if (sizeof(int) >= sizeof(size_t)) { + /* An unsigned int overflow can also be a size_t overflow */ + const size_t zero = 0; + const size_t max = ~zero; + + if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) { + /* Requested allocation exceeds memory */ + return NULL; + } + } +#endif + + GETCACHE(cachePtr); + + /* + * Increment the requested size to include room for the Block structure. + * Call malloc() directly if the required amount is greater than the + * largest block, otherwise pop the smallest block large enough, + * allocating more blocks if necessary. 
+ */ + + blockPtr = NULL; + size = reqSize + sizeof(Block); +#if RCHECK + size++; +#endif + if (size > MAXALLOC) { + bucket = NBUCKETS; + blockPtr = malloc(size); + if (blockPtr != NULL) { + cachePtr->totalAssigned += reqSize; + } + } else { + bucket = 0; + while (bucketInfo[bucket].blockSize < size) { + bucket++; + } + if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) { + blockPtr = cachePtr->buckets[bucket].firstPtr; + cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock; + cachePtr->buckets[bucket].numFree--; + cachePtr->buckets[bucket].numRemoves++; + cachePtr->buckets[bucket].totalAssigned += reqSize; + } + } + if (blockPtr == NULL) { + return NULL; + } + return Block2Ptr(blockPtr, bucket, reqSize); +} + +/* + *---------------------------------------------------------------------- + * + * TclpFree -- + * + * Return blocks to the thread block cache. + * + * Results: + * None. + * + * Side effects: + * May move blocks to shared cache. + * + *---------------------------------------------------------------------- + */ + +void +TclpFree( + char *ptr) +{ + Cache *cachePtr; + Block *blockPtr; + int bucket; + + if (ptr == NULL) { + return; + } + + GETCACHE(cachePtr); + + /* + * Get the block back from the user pointer and call system free directly + * for large blocks. Otherwise, push the block back on the bucket and move + * blocks to the shared cache if there are now too many free. 
+ */ + + blockPtr = Ptr2Block(ptr); + bucket = blockPtr->sourceBucket; + if (bucket == NBUCKETS) { + cachePtr->totalAssigned -= blockPtr->blockReqSize; + free(blockPtr); + return; + } + + cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize; + blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr; + cachePtr->buckets[bucket].firstPtr = blockPtr; + cachePtr->buckets[bucket].numFree++; + cachePtr->buckets[bucket].numInserts++; + + if (cachePtr != sharedPtr && + cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) { + PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove); + } +} + +/* + *---------------------------------------------------------------------- + * + * TclpRealloc -- + * + * Re-allocate memory to a larger or smaller size. + * + * Results: + * Pointer to memory just beyond Block pointer. + * + * Side effects: + * Previous memory, if any, may be freed. + * + *---------------------------------------------------------------------- + */ + +char * +TclpRealloc( + char *ptr, + unsigned int reqSize) +{ + Cache *cachePtr; + Block *blockPtr; + void *newPtr; + size_t size, min; + int bucket; + + if (ptr == NULL) { + return TclpAlloc(reqSize); + } + +#ifndef __LP64__ + if (sizeof(int) >= sizeof(size_t)) { + /* An unsigned int overflow can also be a size_t overflow */ + const size_t zero = 0; + const size_t max = ~zero; + + if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) { + /* Requested allocation exceeds memory */ + return NULL; + } + } +#endif + + GETCACHE(cachePtr); + + /* + * If the block is not a system block and fits in place, simply return the + * existing pointer. Otherwise, if the block is a system block and the new + * size would also require a system block, call realloc() directly. 
+ */ + + blockPtr = Ptr2Block(ptr); + size = reqSize + sizeof(Block); +#if RCHECK + size++; +#endif + bucket = blockPtr->sourceBucket; + if (bucket != NBUCKETS) { + if (bucket > 0) { + min = bucketInfo[bucket-1].blockSize; + } else { + min = 0; + } + if (size > min && size <= bucketInfo[bucket].blockSize) { + cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize; + cachePtr->buckets[bucket].totalAssigned += reqSize; + return Block2Ptr(blockPtr, bucket, reqSize); + } + } else if (size > MAXALLOC) { + cachePtr->totalAssigned -= blockPtr->blockReqSize; + cachePtr->totalAssigned += reqSize; + blockPtr = realloc(blockPtr, size); + if (blockPtr == NULL) { + return NULL; + } + return Block2Ptr(blockPtr, NBUCKETS, reqSize); + } + + /* + * Finally, perform an expensive malloc/copy/free. + */ + + newPtr = TclpAlloc(reqSize); + if (newPtr != NULL) { + if (reqSize > blockPtr->blockReqSize) { + reqSize = blockPtr->blockReqSize; + } + memcpy(newPtr, ptr, reqSize); + TclpFree(ptr); + } + return newPtr; +} + +/* + *---------------------------------------------------------------------- + * + * TclThreadAllocObj -- + * + * Allocate a Tcl_Obj from the per-thread cache. + * + * Results: + * Pointer to uninitialized Tcl_Obj. + * + * Side effects: + * May move Tcl_Obj's from shared cached or allocate new Tcl_Obj's if + * list is empty. + * + * Note: + * If this code is updated, the changes need to be reflected in the macro + * TclAllocObjStorageEx() defined in tclInt.h + * + *---------------------------------------------------------------------- + */ + +Tcl_Obj * +TclThreadAllocObj(void) +{ + register Cache *cachePtr; + register Tcl_Obj *objPtr; + + GETCACHE(cachePtr); + + /* + * Get this thread's obj list structure and move or allocate new objs if + * necessary. 
+ */ + + if (cachePtr->numObjects == 0) { + register int numMove; + + Tcl_MutexLock(objLockPtr); + numMove = sharedPtr->numObjects; + if (numMove > 0) { + if (numMove > NOBJALLOC) { + numMove = NOBJALLOC; + } + MoveObjs(sharedPtr, cachePtr, numMove); + } + Tcl_MutexUnlock(objLockPtr); + if (cachePtr->numObjects == 0) { + Tcl_Obj *newObjsPtr; + + cachePtr->numObjects = numMove = NOBJALLOC; + newObjsPtr = malloc(sizeof(Tcl_Obj) * numMove); + if (newObjsPtr == NULL) { + Tcl_Panic("alloc: could not allocate %d new objects", numMove); + } + while (--numMove >= 0) { + objPtr = &newObjsPtr[numMove]; + objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr; + cachePtr->firstObjPtr = objPtr; + } + } + } + + /* + * Pop the first object. + */ + + objPtr = cachePtr->firstObjPtr; + cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr; + cachePtr->numObjects--; + return objPtr; +} + +/* + *---------------------------------------------------------------------- + * + * TclThreadFreeObj -- + * + * Return a free Tcl_Obj to the per-thread cache. + * + * Results: + * None. + * + * Side effects: + * May move free Tcl_Obj's to shared list upon hitting high water mark. + * + * Note: + * If this code is updated, the changes need to be reflected in the macro + * TclAllocObjStorageEx() defined in tclInt.h + * + *---------------------------------------------------------------------- + */ + +void +TclThreadFreeObj( + Tcl_Obj *objPtr) +{ + Cache *cachePtr; + + GETCACHE(cachePtr); + + /* + * Get this thread's list and push on the free Tcl_Obj. + */ + + objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr; + cachePtr->firstObjPtr = objPtr; + cachePtr->numObjects++; + + /* + * If the number of free objects has exceeded the high water mark, move + * some blocks to the shared list. 
+ */ + + if (cachePtr->numObjects > NOBJHIGH) { + Tcl_MutexLock(objLockPtr); + MoveObjs(cachePtr, sharedPtr, NOBJALLOC); + Tcl_MutexUnlock(objLockPtr); + } +} + +/* + *---------------------------------------------------------------------- + * + * Tcl_GetMemoryInfo -- + * + * Return a list-of-lists of memory stats. + * + * Results: + * None. + * + * Side effects: + * List appended to given dstring. + * + *---------------------------------------------------------------------- + */ + +void +Tcl_GetMemoryInfo( + Tcl_DString *dsPtr) +{ + Cache *cachePtr; + char buf[200]; + unsigned int n; + + Tcl_MutexLock(listLockPtr); + cachePtr = firstCachePtr; + while (cachePtr != NULL) { + Tcl_DStringStartSublist(dsPtr); + if (cachePtr == sharedPtr) { + Tcl_DStringAppendElement(dsPtr, "shared"); + } else { + sprintf(buf, "thread%p", cachePtr->owner); + Tcl_DStringAppendElement(dsPtr, buf); + } + for (n = 0; n < NBUCKETS; ++n) { + sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld", + (unsigned long) bucketInfo[n].blockSize, + cachePtr->buckets[n].numFree, + cachePtr->buckets[n].numRemoves, + cachePtr->buckets[n].numInserts, + cachePtr->buckets[n].totalAssigned, + cachePtr->buckets[n].numLocks, + cachePtr->buckets[n].numWaits); + Tcl_DStringAppendElement(dsPtr, buf); + } + Tcl_DStringEndSublist(dsPtr); + cachePtr = cachePtr->nextPtr; + } + Tcl_MutexUnlock(listLockPtr); +} + +/* + *---------------------------------------------------------------------- + * + * MoveObjs -- + * + * Move Tcl_Obj's between caches. + * + * Results: + * None. + * + * Side effects: + * None. 
+ * + *---------------------------------------------------------------------- + */ + +static void +MoveObjs( + Cache *fromPtr, + Cache *toPtr, + int numMove) +{ + register Tcl_Obj *objPtr = fromPtr->firstObjPtr; + Tcl_Obj *fromFirstObjPtr = objPtr; + + toPtr->numObjects += numMove; + fromPtr->numObjects -= numMove; + + /* + * Find the last object to be moved; set the next one (the first one not + * to be moved) as the first object in the 'from' cache. + */ + + while (--numMove) { + objPtr = objPtr->internalRep.otherValuePtr; + } + fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr; + + /* + * Move all objects as a block - they are already linked to each other, we + * just have to update the first and last. + */ + + objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr; + toPtr->firstObjPtr = fromFirstObjPtr; +} + +/* + *---------------------------------------------------------------------- + * + * Block2Ptr, Ptr2Block -- + * + * Convert between internal blocks and user pointers. + * + * Results: + * User pointer or internal block. + * + * Side effects: + * Invalid blocks will abort the server. 
+ * + *---------------------------------------------------------------------- + */ + +static char * +Block2Ptr( + Block *blockPtr, + int bucket, + unsigned int reqSize) +{ + register void *ptr; + + blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC; + blockPtr->sourceBucket = bucket; + blockPtr->blockReqSize = reqSize; + ptr = ((void *) (blockPtr + 1)); +#if RCHECK + ((unsigned char *)(ptr))[reqSize] = MAGIC; +#endif + return (char *) ptr; +} + +static Block * +Ptr2Block( + char *ptr) +{ + register Block *blockPtr; + + blockPtr = (((Block *) ptr) - 1); + if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) { + Tcl_Panic("alloc: invalid block: %p: %x %x", + blockPtr, blockPtr->magicNum1, blockPtr->magicNum2); + } +#if RCHECK + if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) { + Tcl_Panic("alloc: invalid block: %p: %x %x %x", + blockPtr, blockPtr->magicNum1, blockPtr->magicNum2, + ((unsigned char *) ptr)[blockPtr->blockReqSize]); + } +#endif + return blockPtr; +} + +/* + *---------------------------------------------------------------------- + * + * LockBucket, UnlockBucket -- + * + * Set/unset the lock to access a bucket in the shared cache. + * + * Results: + * None. + * + * Side effects: + * Lock activity and contention are monitored globally and on a per-cache + * basis. 
+ * + *---------------------------------------------------------------------- + */ + +static void +LockBucket( + Cache *cachePtr, + int bucket) +{ +#if 0 + if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) { + Tcl_MutexLock(bucketInfo[bucket].lockPtr); + cachePtr->buckets[bucket].numWaits++; + sharedPtr->buckets[bucket].numWaits++; + } +#else + Tcl_MutexLock(bucketInfo[bucket].lockPtr); +#endif + cachePtr->buckets[bucket].numLocks++; + sharedPtr->buckets[bucket].numLocks++; +} + +static void +UnlockBucket( + Cache *cachePtr, + int bucket) +{ + Tcl_MutexUnlock(bucketInfo[bucket].lockPtr); +} + +/* + *---------------------------------------------------------------------- + * + * PutBlocks -- + * + * Return unused blocks to the shared cache. + * + * Results: + * None. + * + * Side effects: + * None. + * + *---------------------------------------------------------------------- + */ + +static void +PutBlocks( + Cache *cachePtr, + int bucket, + int numMove) +{ + register Block *lastPtr, *firstPtr; + register int n = numMove; + + /* + * Before acquiring the lock, walk the block list to find the last block + * to be moved. + */ + + firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr; + while (--n > 0) { + lastPtr = lastPtr->nextBlock; + } + cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock; + cachePtr->buckets[bucket].numFree -= numMove; + + /* + * Acquire the lock and place the list of blocks at the front of the shared + * cache bucket. + */ + + LockBucket(cachePtr, bucket); + lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr; + sharedPtr->buckets[bucket].firstPtr = firstPtr; + sharedPtr->buckets[bucket].numFree += numMove; + UnlockBucket(cachePtr, bucket); +} + +/* + *---------------------------------------------------------------------- + * + * GetBlocks -- + * + * Get more blocks for a bucket. + * + * Results: + * 1 if blocks were allocated, 0 otherwise. + * + * Side effects: + * Cache may be filled with available blocks.
+ * + *---------------------------------------------------------------------- + */ + +static int +GetBlocks( + Cache *cachePtr, + int bucket) +{ + register Block *blockPtr; + register int n; + + /* + * First, attempt to move blocks from the shared cache. Note the + * potentially dirty read of numFree before acquiring the lock which is a + * slight performance enhancement. The value is verified after the lock is + * actually acquired. + */ + + if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) { + LockBucket(cachePtr, bucket); + if (sharedPtr->buckets[bucket].numFree > 0) { + + /* + * Either move the entire list or walk the list to find the last + * block to move. + */ + + n = bucketInfo[bucket].numMove; + if (n >= sharedPtr->buckets[bucket].numFree) { + cachePtr->buckets[bucket].firstPtr = + sharedPtr->buckets[bucket].firstPtr; + cachePtr->buckets[bucket].numFree = + sharedPtr->buckets[bucket].numFree; + sharedPtr->buckets[bucket].firstPtr = NULL; + sharedPtr->buckets[bucket].numFree = 0; + } else { + blockPtr = sharedPtr->buckets[bucket].firstPtr; + cachePtr->buckets[bucket].firstPtr = blockPtr; + sharedPtr->buckets[bucket].numFree -= n; + cachePtr->buckets[bucket].numFree = n; + while (--n > 0) { + blockPtr = blockPtr->nextBlock; + } + sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock; + blockPtr->nextBlock = NULL; + } + } + UnlockBucket(cachePtr, bucket); + } + + if (cachePtr->buckets[bucket].numFree == 0) { + register size_t size; + + /* + * If no blocks could be moved from shared, first look for a larger + * block in this cache to split up. + */ + + blockPtr = NULL; + n = NBUCKETS; + size = 0; /* lint */ + while (--n > bucket) { + if (cachePtr->buckets[n].numFree > 0) { + size = bucketInfo[n].blockSize; + blockPtr = cachePtr->buckets[n].firstPtr; + cachePtr->buckets[n].firstPtr = blockPtr->nextBlock; + cachePtr->buckets[n].numFree--; + break; + } + } + + /* + * Otherwise, allocate a big new block directly.
+ */ + + if (blockPtr == NULL) { + size = MAXALLOC; + blockPtr = malloc(size); + if (blockPtr == NULL) { + return 0; + } + } + + /* + * Split the larger block into smaller blocks for this bucket. + */ + + n = size / bucketInfo[bucket].blockSize; + cachePtr->buckets[bucket].numFree = n; + cachePtr->buckets[bucket].firstPtr = blockPtr; + while (--n > 0) { + blockPtr->nextBlock = (Block *) + ((char *) blockPtr + bucketInfo[bucket].blockSize); + blockPtr = blockPtr->nextBlock; + } + blockPtr->nextBlock = NULL; + } + return 1; +} + +/* + *---------------------------------------------------------------------- + * + * TclFinalizeThreadAlloc -- + * + * This procedure is used to destroy all private resources used in this + * file. + * + * Results: + * None. + * + * Side effects: + * None. + * + *---------------------------------------------------------------------- + */ + +void +TclFinalizeThreadAlloc(void) +{ + unsigned int i; + + for (i = 0; i < NBUCKETS; ++i) { + TclpFreeAllocMutex(bucketInfo[i].lockPtr); + bucketInfo[i].lockPtr = NULL; + } + + TclpFreeAllocMutex(objLockPtr); + objLockPtr = NULL; + + TclpFreeAllocMutex(listLockPtr); + listLockPtr = NULL; + + TclpFreeAllocCache(NULL); +} + +#else /* !(TCL_THREADS && USE_THREAD_ALLOC) */ +/* + *---------------------------------------------------------------------- + * + * Tcl_GetMemoryInfo -- + * + * Return a list-of-lists of memory stats. + * + * Results: + * None. + * + * Side effects: + * List appended to given dstring. + * + *---------------------------------------------------------------------- + */ + +void +Tcl_GetMemoryInfo( + Tcl_DString *dsPtr) +{ + Tcl_Panic("Tcl_GetMemoryInfo called when threaded memory allocator not in use"); +} + +/* + *---------------------------------------------------------------------- + * + * TclFinalizeThreadAlloc -- + * + * This procedure is used to destroy all private resources used in this + * file. + * + * Results: + * None. + * + * Side effects: + * None. 
+ * + *---------------------------------------------------------------------- + */ + +void +TclFinalizeThreadAlloc(void) +{ + Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use"); +} +#endif /* TCL_THREADS && USE_THREAD_ALLOC */ + +/* + * Local Variables: + * mode: c + * c-basic-offset: 4 + * fill-column: 78 + * End: + */ diff --git a/generic/tclTrace.c b/generic/tclTrace.c index ffbaa17..d5fb6f6 100644 --- a/generic/tclTrace.c +++ b/generic/tclTrace.c @@ -1650,7 +1650,7 @@ CallTraceFunction( * Copy the command characters into a new string. */ - commandCopy = ckalloc((unsigned) numChars + 1); + commandCopy = TclStackAlloc(interp, (unsigned) numChars + 1); memcpy(commandCopy, command, (size_t) numChars); commandCopy[numChars] = '\0'; @@ -1661,7 +1661,7 @@ CallTraceFunction( traceCode = tracePtr->proc(tracePtr->clientData, (Tcl_Interp *) iPtr, iPtr->numLevels, commandCopy, (Tcl_Command) cmdPtr, objc, objv); - ckfree(commandCopy); + TclStackFree(interp, commandCopy); return traceCode; } @@ -2237,7 +2237,7 @@ StringTraceProc( * which uses strings for everything. */ - argv = (const char **) ckalloc( + argv = (const char **) TclStackAlloc(interp, (unsigned) ((objc + 1) * sizeof(const char *))); for (i = 0; i < objc; i++) { argv[i] = Tcl_GetString(objv[i]); @@ -2252,7 +2252,7 @@ StringTraceProc( data->proc(data->clientData, interp, level, (char *) command, cmdPtr->proc, cmdPtr->clientData, objc, argv); - ckfree((void *) argv); + TclStackFree(interp, (void *) argv); return TCL_OK; } |