author     mig <mig>    2011-03-18 12:54:54 (GMT)
committer  mig <mig>    2011-03-18 12:54:54 (GMT)
commit     faf9def2a7c70743d49dd1e923d82b8dc0f9d718 (patch)
tree       fa0e823ccf0388ab9b5df5c91c91aa1247cf1624
parent     df469c8ffaea0347ffe69bd2b776e7840a25d645 (diff)
development branch for allocator changes
-rw-r--r--  README.mig-alloc-reform    |   65
-rw-r--r--  generic/tclAlloc.c         | 1484
-rw-r--r--  generic/tclAssembly.c      |   15
-rw-r--r--  generic/tclBasic.c         |   50
-rw-r--r--  generic/tclCkalloc.c       |    4
-rw-r--r--  generic/tclCmdAH.c         |   18
-rw-r--r--  generic/tclCmdIL.c         |   21
-rw-r--r--  generic/tclCmdMZ.c         |   22
-rw-r--r--  generic/tclCompCmds.c      |   26
-rw-r--r--  generic/tclCompCmdsSZ.c    |   58
-rw-r--r--  generic/tclCompExpr.c      |   49
-rw-r--r--  generic/tclCompile.c       |    8
-rw-r--r--  generic/tclDictObj.c       |   10
-rw-r--r--  generic/tclEvent.c         |    6
-rw-r--r--  generic/tclExecute.c       |  642
-rw-r--r--  generic/tclFCmd.c          |    4
-rw-r--r--  generic/tclFileName.c      |    4
-rw-r--r--  generic/tclIOCmd.c         |    4
-rw-r--r--  generic/tclIndexObj.c      |    8
-rw-r--r--  generic/tclInt.decls       |   18
-rw-r--r--  generic/tclInt.h           |  310
-rw-r--r--  generic/tclIntDecls.h      |   24
-rw-r--r--  generic/tclInterp.c        |    8
-rw-r--r--  generic/tclNamesp.c        |   17
-rw-r--r--  generic/tclOOCall.c        |    4
-rw-r--r--  generic/tclOODefineCmds.c  |   10
-rw-r--r--  generic/tclOOMethod.c      |   14
-rw-r--r--  generic/tclObj.c           |   71
-rw-r--r--  generic/tclParse.c         |   20
-rw-r--r--  generic/tclProc.c          |   27
-rw-r--r--  generic/tclScan.c          |    9
-rw-r--r--  generic/tclStubInit.c      |    6
-rw-r--r--  generic/tclTest.c          |    8
-rwxr-xr-x  generic/tclThreadAlloc.c   | 1081
-rw-r--r--  generic/tclTrace.c         |    8
-rw-r--r--  tests/nre.test             |    4
-rw-r--r--  tests/tailcall.test        |   18
-rw-r--r--  unix/Makefile.in           |   11
-rw-r--r--  unix/tclUnixPipe.c         |    8
-rw-r--r--  unix/tclUnixThrd.c         |    7
40 files changed, 1519 insertions(+), 2662 deletions(-)
diff --git a/README.mig-alloc-reform b/README.mig-alloc-reform
new file mode 100644
index 0000000..139af2e
--- /dev/null
+++ b/README.mig-alloc-reform
@@ -0,0 +1,65 @@
+What is mig-alloc-reform?
+ 1. A massive simplification of the memory management in Tcl core.
+ a. removal of the Tcl stack; each BC allocates its own stacklet
+ b. TclStackAlloc is gone, replaced with ckalloc; goodbye to the
+ sometimes hard sync problems
+ c. removal of the allocCache slot in struct Interp
+ d. retirement of the (unused) Tcl allocator USE_TCLALLOC; replacement
+ with a single-thread special case of zippy
+ e. unify all allocator options in a single file tclAlloc.c
+ f. exploit fast TSD via __thread where available (autoconferry still
+ missing, enable by hand with -DHAVE_FAST_TSD)
+ g. small improvement in zippy's memory usage: try to split blocks in
+ the shared cache before allocating new ones from the system
+
+ 2. New allocator options
+ a. purify build (largely superseded by the startup switches described
+ below). This is suitable for use with a preloaded malloc replacement
+ b. (~NEW) native build: calls the system malloc, but maintains zippy's
+ Tcl_Obj caches (per thread, if threads are enabled). Can be switched to
+ run as a purify build via an env var at startup. This is suitable for
+ use with a preloaded malloc replacement. The threaded variant is new.
+ c. zippy build
+ d. (NEW) multi build: a build that can function as any of the
+ other three. By default it runs as zippy, but can be switched to
+ native or purify via an env var at startup. It may or may not be used
+ for deployment, but it will definitely be very useful for
+ development: no need to recompile in order to run under valgrind,
+ just set an env var!
+
+ How do you use it? Options are:
+ 1. Don't pay any attention to it and build as always. You will get the
+ same allocator as before.
+ 2. Select the build you want with compiler flags
+ -DTCL_ALLOCATOR=(aNATIVE|aPURIFY|aZIPPY|aMULTI)
+ 3. Select behaviour at startup: native can be switched to purify, multi
+ can be switched to any of the others. Define the env var
+ TCL_ALLOCATOR when starting up and you're good to go (see the sketch
+ below).
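+
+ For example (a sketch only; the tclsh path is illustrative, and the
+ value names are the ones recognized by ChooseAllocator() in tclAlloc.c):
+
+ # compile-time selection of the multi build
+ ./configure CFLAGS=-DTCL_ALLOCATOR=aMULTI && make
+
+ # startup-time switch of that build to purify behaviour, e.g. for a
+ # valgrind run, no recompile needed
+ TCL_ALLOCATOR=aPURIFY valgrind ./tclsh myscript.tcl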
+
+
+** PERFORMANCE NOTES **
+ * not measured yet, but purify, native and zippy builds should be just
+ as fast as before. The obj-alloc macros have been removed while
+ developing. It is not certain that they provide a speedup; this will
+ be measured and acted on accordingly
+ * the multi build should be only a tad slower, and may even be suitable
+ as the default build on all platforms
+
+
+** TO DO LIST **
+ * DEFINITELY
+ - test like crazy
+ - timings: versus older version (in unthreaded, fast-tsd and slow-tsd
+ builds). Determine if the obj-alloc macros should be reenabled
+ - autoconferry to auto-detect HAVE_FAST_TSD
+ - autoconferry to choose allocator flags? Keep USE_THREAD_ALLOC and
+ USE_TCLALLOC for back compat with external build scripts only (and
+ set them too!), but also set the new variants
+ TCL_ALLOCATOR=(aNATIVE|aPURIFY|aZIPPY|aMULTI)
+ - Makefile.in and autoconferry changes in windows, mac
+ - choose allocators from the command line instead of env vars?
+ - verify interaction with memdebug (should be 'none', but ...)
+
+ * MAYBE
+ - build zippy as malloc-replacement, compile always aNATIVE and
+ preload alternatives
diff --git a/generic/tclAlloc.c b/generic/tclAlloc.c
index 6fff92b..782a12b 100644
--- a/generic/tclAlloc.c
+++ b/generic/tclAlloc.c
@@ -1,253 +1,428 @@
/*
* tclAlloc.c --
*
- * This is a very fast storage allocator. It allocates blocks of a small
- * number of different sizes, and keeps free lists of each size. Blocks
- * that don't exactly fit are passed up to the next larger size. Blocks
- * over a certain size are directly allocated from the system.
+ * This is a very flexible storage allocator for Tcl, for use with or
+ * without threads. Depending on the compile flags, it builds as:
*
- * Copyright (c) 1983 Regents of the University of California.
- * Copyright (c) 1996-1997 Sun Microsystems, Inc.
- * Copyright (c) 1998-1999 by Scriptics Corporation.
+ * (1) Build flags: TCL_ALLOC_NATIVE
+ * NATIVE: use the native malloc and a per-thread Tcl_Obj pool, with
+ * inter-thread recycling of objects. The per-thread pool can be
+ * disabled at startup with an env var, thus providing the PURIFY
+ * behaviour that is useful for valgrind and similar tools. Note that
+ * the PURIFY costs are negligible when disabled, but when enabled
+ * Tcl_Obj allocs will be even slower than in a full PURIFY build
+ * NOTE: the obj pool shares all code with zippy's smallest allocs!
+ * It does look overcomplicated for this particular case, but
+ * keeping them together allows simpler maintenance and avoids
+ * the need for separate debugging
+ * TODO: in this case build ZIPPY as a preloadable malloc-replacement
*
- * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
+ * (2) Build flags: TCL_ALLOC_ZIPPY
+ * ZIPPY: use the ex-tclThreadAlloc, essentially aolserver's
+ * fast threaded allocator. Mods with respect to the original:
+ * - change in the block sizes, so that the smallest alloc is
+ * Tcl_Obj-sized
+ * - share the Tcl_Obj pool with the smallest allocs pool for
+ * improved cache usage
+ * - split blocks in the shared pool before mallocing again for
+ * improved cache usage
+ * - ?change in the number of blocks to move to/from the shared
+ * cache: it used to be a fixed number; it is now computed
+ * to leave a fixed number in the thread's pool. This improves
+ * sharing behaviour when one thread uses a lot of memory once
+ * and rarely again (e.g., at startup), at the cost of slightly
+ * slowing threads that allocate/free large numbers of blocks
+ * repeatedly
+ * - stats and Tcl_GetMemoryInfo disabled per default, enable with
+ * -DZIPPY_STATS
+ * - adapt for unthreaded usage as replacement of the ex tclAlloc
+ * - -DHAVE_FAST_TSD: use fast TSD via __thread where available
+ * - (TODO!) build zippy as a pre-loadable library to use with a
+ * native build as a malloc replacement. Difficulties are:
+ * (a) make that portable (easy enough on modern elf/unix, to
+ * be researched on win and mac)
+ * (b) coordinate the Tcl_Obj pool and the smallest allocs,
+ * as they are now addressed from different files. This
+ * might require a special Tcl build with no
+ * TclSmallAlloc, and a separate preloadable for use with
+ * native builds? Or else separate them again, but that's
+ * not really good I think.
+ *
+ * NOTES:
+ * . this would be the best option, instead of MULTI. It
+ * could be built in two versions (perf, debug/stats)
+ * . would a preloaded zippy be slower than builtin?
+ * Possibly, due to extra indirection.
+ *
+ * (3) Build flags: TCL_ALLOC_MULTI
+ * MULTI: all of the above, selectable at startup with an env
+ * var. This build will be very slightly slower than the specific
+ * builds above, but is completely portable: it does not depend on
+ * any help from the loader or such.
+ *
+ * All variants can be built for both threaded and unthreaded Tcl.
+ *
+ * The Initial Developer of the Original Code is America Online, Inc.
+ * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
+ *
+ * Copyright (c) 2008-2011 by Miguel Sofer. All rights reserved.
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
*/
-/*
- * Windows and Unix use an alternative allocator when building with threads
- * that has significantly reduced lock contention.
- */
-
#include "tclInt.h"
-#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC)
-#if USE_TCLALLOC
+/*
+ * This macro is used to properly align the memory allocated by Tcl, giving
+ * the same alignment as the native malloc.
+ */
-#ifdef TCL_DEBUG
-# define DEBUG
-/* #define MSTATS */
-# define RCHECK
+#if defined(__APPLE__)
+#define TCL_ALLOCALIGN 16
+#else
+#define TCL_ALLOCALIGN (2*sizeof(void *))
#endif
+#undef TclpAlloc
+#undef TclpRealloc
+#undef TclpFree
+#undef TclSmallAlloc
+#undef TclSmallFree
+
+#if (TCL_ALLOCATOR == aNATIVE) || (TCL_ALLOCATOR == aPURIFY)
/*
- * We should really make use of AC_CHECK_TYPE(caddr_t) here, but it can wait
- * until Tcl uses config.h properly.
+ * Not much of this file is needed; most things are dealt with in the
+ * macros. Just shunt the allocators for use by the library; the core
+ * never calls them directly.
+ *
+ * This is all that is needed for a TCL_ALLOC_PURIFY build; a native build
+ * needs the Tcl_Obj pools too.
*/
+
+char *
+TclpAlloc(
+ unsigned int reqSize)
+{
+ return malloc(reqSize);
+}
-#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
-typedef unsigned long caddr_t;
-#endif
+char *
+TclpRealloc(
+ char *ptr,
+ unsigned int reqSize)
+{
+ return realloc(ptr, reqSize);
+}
+
+void
+TclpFree(
+ char *ptr)
+{
+ free(ptr);
+}
+
+#endif /* end of common code for PURIFY and NATIVE */
+
+#if TCL_ALLOCATOR != aPURIFY
+/*
+ * The rest of this file deals with ZIPPY and MULTI builds, as well as the
+ * Tcl_Obj pools for NATIVE
+ */
/*
- * The overhead on a block is at least 8 bytes. When free, this space contains
- * a pointer to the next free block, and the bottom two bits must be zero.
- * When in use, the first byte is set to MAGIC, and the second byte is the
- * size index. The remaining bytes are for alignment. If range checking is
- * enabled then a second word holds the size of the requested block, less 1,
- * rounded up to a multiple of sizeof(RMAGIC). The order of elements is
- * critical: ov.magic must overlay the low order bits of ov.next, and ov.magic
- * can not be a valid ov.next bit pattern.
+ * Note: we rely on the optimizer to remove unneeded code, instead of setting
+ * up a maze of #ifdefs all over the code.
+ * We should ensure that debug builds do at least this much optimization, right?
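+ * For instance, in a pure aZIPPY build "allocator" is the compile-time
+ * constant aZIPPY (see the defines below), so branches such as
+ *     if (allocator == aPURIFY) { ... }
+ * used further down become dead code that any optimizing compiler
+ * removes.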
*/
-union overhead {
- union overhead *next; /* when free */
- unsigned char padding[TCL_ALLOCALIGN]; /* align struct to TCL_ALLOCALIGN bytes */
- struct {
- unsigned char magic0; /* magic number */
- unsigned char index; /* bucket # */
- unsigned char unused; /* unused */
- unsigned char magic1; /* other magic number */
-#ifdef RCHECK
- unsigned short rmagic; /* range magic number */
- unsigned long size; /* actual block size */
- unsigned short unused2; /* padding to 8-byte align */
-#endif
- } ovu;
-#define overMagic0 ovu.magic0
-#define overMagic1 ovu.magic1
-#define bucketIndex ovu.index
-#define rangeCheckMagic ovu.rmagic
-#define realBlockSize ovu.size
-};
-
-
-#define MAGIC 0xef /* magic # on accounting info */
-#define RMAGIC 0x5555 /* magic # on range info */
-
-#ifdef RCHECK
-#define RSLOP sizeof(unsigned short)
+#if TCL_ALLOCATOR == aZIPPY
+# define allocator aZIPPY
+# define ALLOCATOR_BASE aZIPPY
+#elif TCL_ALLOCATOR == aNATIVE
+/* Keep the option to switch PURIFY mode on! */
+static int allocator = aNONE;
+# define ALLOCATOR_BASE aNATIVE
+# define RCHECK 0
+# undef ZIPPY_STATS
#else
-#define RSLOP 0
+/* MULTI */
+ static int allocator = aNONE;
+# define ALLOCATOR_BASE aZIPPY
+#endif
+
+#if TCL_ALLOCATOR != aZIPPY
+static void ChooseAllocator();
#endif
-#define OVERHEAD (sizeof(union overhead) + RSLOP)
/*
- * Macro to make it easier to refer to the end-of-block guard magic.
+ * If range checking is enabled, an additional byte will be allocated to store
+ * the magic number at the end of the requested memory.
*/
-#define BLOCK_END(overPtr) \
- (*(unsigned short *)((caddr_t)((overPtr) + 1) + (overPtr)->realBlockSize))
+#ifndef RCHECK
+# ifdef NDEBUG
+# define RCHECK 0
+# else
+# define RCHECK 1
+# endif
+#endif
/*
- * nextf[i] is the pointer to the next free block of size 2^(i+3). The
- * smallest allocatable block is MINBLOCK bytes. The overhead information
- * precedes the data area returned to the user.
+ * The following struct stores accounting information for each block including
+ * two small magic numbers and a bucket number when in use or a next pointer
+ * when free. The original requested size (not including the Block overhead)
+ * is also maintained.
*/
-#define MINBLOCK ((sizeof(union overhead) + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
-#define NBUCKETS (13 - (MINBLOCK >> 4))
-#define MAXMALLOC (1<<(NBUCKETS+2))
-static union overhead *nextf[NBUCKETS];
+typedef struct Block {
+ union {
+ struct Block *next; /* Next in free list. */
+ struct {
+ unsigned char magic1; /* First magic number. */
+ unsigned char bucket; /* Bucket block allocated from. */
+ unsigned char unused; /* Padding. */
+ unsigned char magic2; /* Second magic number. */
+ } s;
+ } u;
+ size_t reqSize; /* Requested allocation size. */
+} Block;
+
+#define ALIGN(x) (((x) + TCL_ALLOCALIGN - 1) & ~(TCL_ALLOCALIGN - 1))
+#define OFFSET ALIGN(sizeof(Block))
+
+#define nextBlock u.next
+#define sourceBucket u.s.bucket
+#define magicNum1 u.s.magic1
+#define magicNum2 u.s.magic2
+#define MAGIC 0xEF
+#define blockReqSize reqSize
/*
- * The following structure is used to keep track of all system memory
- * currently owned by Tcl. When finalizing, all this memory will be returned
- * to the system.
+ * The following defines the minimum and maximum block sizes and the number
+ * of buckets in the bucket cache.
+ * 32b 64b Apple-32b
+ * TCL_ALLOCALIGN 8 16 16
+ * sizeof(Block) 8 16 16
+ * OFFSET 8 16 16
+ * sizeof(Tcl_Obj) 24 48 24
+ * ALLOCBASE 24 48 24
+ * MINALLOC 24 48 24
+ * NBUCKETS 11 10 11
+ * MAXALLOC 24576 24576 24576
+ * small allocs 1024 512 1024
+ * at a time
*/
-struct block {
- struct block *nextPtr; /* Linked list. */
- struct block *prevPtr; /* Linked list for big blocks, ensures 8-byte
- * alignment for suballocated blocks. */
-};
+#if TCL_ALLOCATOR == aNATIVE
+#define MINALLOC MAX(OFFSET, sizeof(Tcl_Obj))
+#else
+#define MINALLOC ALIGN(MAX(OFFSET+8, sizeof(Tcl_Obj)))
+#endif
-static struct block *blockList; /* Tracks the suballocated blocks. */
-static struct block bigBlocks={ /* Big blocks aren't suballocated. */
- &bigBlocks, &bigBlocks
-};
+#define NBUCKETS 10 /* previously (11 - (MINALLOC >> 5)) */
+#define MAXALLOC (MINALLOC << (NBUCKETS - 1))
+
+#if TCL_ALLOCATOR == aNATIVE
+# define NBUCKETS_0 1
+# define nBuckets 1
+#else
+# define NBUCKETS_0 NBUCKETS
+# if TCL_ALLOCATOR == aZIPPY
+# define nBuckets NBUCKETS
+# else
+ static int nBuckets = NBUCKETS;
+# endif
+#endif
/*
- * The allocator is protected by a special mutex that must be explicitly
- * initialized. Futhermore, because Tcl_Alloc may be used before anything else
- * in Tcl, we make this module self-initializing after all with the allocInit
- * variable.
+ * The following structure defines a bucket of blocks, optionally with various
+ * accounting and statistics information.
*/
-#ifdef TCL_THREADS
-static Tcl_Mutex *allocMutexPtr;
+typedef struct Bucket {
+ Block *firstPtr; /* First block available */
+ long numFree; /* Number of blocks available */
+#ifdef ZIPPY_STATS
+ /* All fields below for accounting only */
+
+ long numRemoves; /* Number of removes from bucket */
+ long numInserts; /* Number of inserts into bucket */
+ long numWaits; /* Number of waits to acquire a lock */
+ long numLocks; /* Number of locks acquired */
+ long totalAssigned; /* Total space assigned to bucket */
#endif
-static int allocInit = 0;
-
-#ifdef MSTATS
+} Bucket;
/*
- * numMallocs[i] is the difference between the number of mallocs and frees for
- * a given block size.
+ * The following structure defines a cache of buckets, at most one per
+ * thread.
*/
-static unsigned int numMallocs[NBUCKETS+1];
+typedef struct Cache {
+#if defined(TCL_THREADS)
+ struct Cache *nextPtr; /* Linked list of cache entries */
+#ifdef ZIPPY_STATS
+ Tcl_ThreadId owner; /* Which thread's cache is this? */
#endif
-
-#if defined(DEBUG) || defined(RCHECK)
-#define ASSERT(p) if (!(p)) Tcl_Panic(# p)
-#define RANGE_ASSERT(p) if (!(p)) Tcl_Panic(# p)
-#else
-#define ASSERT(p)
-#define RANGE_ASSERT(p)
#endif
+#ifdef ZIPPY_STATS
+ int totalAssigned; /* Total space assigned to thread */
+#endif
+ Bucket buckets[1]; /* The buckets for this thread */
+} Cache;
+
/*
- * Prototypes for functions used only in this file.
+ * The following array specifies various per-bucket limits and locks. The
+ * values are computed once, when the first cache is initialized.
*/
-static void MoreCore(int bucket);
-
+static struct {
+ size_t blockSize; /* Bucket blocksize. */
+#if defined(TCL_THREADS)
+ int maxBlocks; /* Max blocks before move to share. */
+ int numMove; /* Num blocks to move to share. */
+ Tcl_Mutex *lockPtr; /* Share bucket lock. */
+#endif
+} bucketInfo[NBUCKETS_0];
+
/*
- *-------------------------------------------------------------------------
- *
- * TclInitAlloc --
- *
- * Initialize the memory system.
- *
- * Results:
- * None.
- *
- * Side effects:
- * Initialize the mutex used to serialize allocations.
- *
- *-------------------------------------------------------------------------
+ * Static functions defined in this file.
*/
-void
-TclInitAlloc(void)
-{
- if (!allocInit) {
- allocInit = 1;
-#ifdef TCL_THREADS
- allocMutexPtr = Tcl_GetAllocMutex();
+static Cache * GetCache(void);
+static int GetBlocks(Cache *cachePtr, int bucket);
+static inline Block * Ptr2Block(char *ptr);
+static inline char * Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize);
+
+#if defined(TCL_THREADS)
+
+static Cache *firstCachePtr = NULL;
+static Cache *sharedPtr = NULL;
+
+static Tcl_Mutex *listLockPtr;
+static Tcl_Mutex *objLockPtr;
+
+static void LockBucket(Cache *cachePtr, int bucket);
+static void UnlockBucket(Cache *cachePtr, int bucket);
+static void PutBlocks(Cache *cachePtr, int bucket, int numMove);
+
+#if defined(HAVE_FAST_TSD)
+static __thread Cache *tcachePtr;
+static __thread int allocInitialized = 0;
+
+# define GETCACHE(cachePtr) \
+ do { \
+ if (!allocInitialized) { \
+ allocInitialized = 1; \
+ tcachePtr = GetCache(); \
+ } \
+ (cachePtr) = tcachePtr; \
+ } while (0)
+#else
+# define GETCACHE(cachePtr) \
+ do { \
+ (cachePtr) = TclpGetAllocCache(); \
+ if ((cachePtr) == NULL) { \
+ (cachePtr) = GetCache(); \
+ } \
+ } while (0)
#endif
+#else /* NOT THREADS! */
+
+static int allocInitialized = 0;
+
+#define TclpSetAllocCache()
+#define PutBlocks(cachePtr, bucket, numMove)
+#define firstCachePtr sharedCachePtr
+
+# define GETCACHE(cachePtr) \
+ do { \
+ if (!allocInitialized) { \
+ allocInitialized = 1; \
+ GetCache(); \
+ } \
+ (cachePtr) = sharedPtr; \
+ } while (0)
+
+static void *
+TclpGetAllocCache(void)
+{
+ if (!allocInitialized) {
+ allocInitialized = 1;
+ GetCache();
}
+ return sharedPtr;
}
+#endif
+
/*
- *-------------------------------------------------------------------------
- *
- * TclFinalizeAllocSubsystem --
+ *----------------------------------------------------------------------
*
- * Release all resources being used by this subsystem, including
- * aggressively freeing all memory allocated by TclpAlloc() that has not
- * yet been released with TclpFree().
+ * Block2Ptr, Ptr2Block --
*
- * After this function is called, all memory allocated with TclpAlloc()
- * should be considered unusable.
+ * Convert between internal blocks and user pointers.
*
* Results:
- * None.
+ * User pointer or internal block.
*
* Side effects:
- * This subsystem is self-initializing, since memory can be allocated
- * before Tcl is formally initialized. After this call, this subsystem
- * has been reset to its initial state and is usable again.
+ * Invalid blocks will cause a panic (aborting the process).
*
- *-------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*/
-void
-TclFinalizeAllocSubsystem(void)
+static inline char *
+Block2Ptr(
+ Block *blockPtr,
+ int bucket,
+ unsigned int reqSize)
{
- unsigned int i;
- struct block *blockPtr, *nextPtr;
+ register void *ptr;
+
+ blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
+ blockPtr->sourceBucket = bucket;
+ blockPtr->blockReqSize = reqSize;
+ ptr = (void *) (((char *)blockPtr) + OFFSET);
+#if RCHECK
+ ((unsigned char *)(ptr))[reqSize] = MAGIC;
+#endif
+ return (char *) ptr;
+}
- Tcl_MutexLock(allocMutexPtr);
- for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
- nextPtr = blockPtr->nextPtr;
- TclpSysFree(blockPtr);
- }
- blockList = NULL;
+static inline Block *
+Ptr2Block(
+ char *ptr)
+{
+ register Block *blockPtr;
- for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
- nextPtr = blockPtr->nextPtr;
- TclpSysFree(blockPtr);
- blockPtr = nextPtr;
+ blockPtr = (Block *) (((char *) ptr) - OFFSET);
+ if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
+ Tcl_Panic("alloc: invalid block: %p: %x %x",
+ blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
}
- bigBlocks.nextPtr = &bigBlocks;
- bigBlocks.prevPtr = &bigBlocks;
-
- for (i=0 ; i<NBUCKETS ; i++) {
- nextf[i] = NULL;
-#ifdef MSTATS
- numMallocs[i] = 0;
-#endif
+#if RCHECK
+ if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) {
+ Tcl_Panic("alloc: invalid block: %p: %x %x %x",
+ blockPtr, blockPtr->magicNum1, blockPtr->magicNum2,
+ ((unsigned char *) ptr)[blockPtr->blockReqSize]);
}
-#ifdef MSTATS
- numMallocs[i] = 0;
#endif
- Tcl_MutexUnlock(allocMutexPtr);
+ return blockPtr;
}
/*
*----------------------------------------------------------------------
*
- * TclpAlloc --
+ * GetCache --
*
- * Allocate more memory.
+ * Gets per-thread memory cache, allocating it if necessary.
*
* Results:
- * None.
+ * Pointer to cache.
*
* Side effects:
* None.
@@ -255,183 +430,237 @@ TclFinalizeAllocSubsystem(void)
*----------------------------------------------------------------------
*/
-char *
-TclpAlloc(
- unsigned int numBytes) /* Number of bytes to allocate. */
+static Cache *
+GetCache(void)
{
- register union overhead *overPtr;
- register long bucket;
- register unsigned amount;
- struct block *bigBlockPtr = NULL;
-
- if (!allocInit) {
- /*
- * We have to make the "self initializing" because Tcl_Alloc may be
- * used before any other part of Tcl. E.g., see main() for tclsh!
+ Cache *cachePtr;
+ unsigned int i;
+#if TCL_ALLOCATOR == aZIPPY
+#define allocSize (sizeof(Cache) + (NBUCKETS -1)*sizeof(Bucket))
+#elif TCL_ALLOCATOR == aNATIVE
+#define allocSize sizeof(Cache)
+#else
+ unsigned int allocSize;
+#endif
+
+ /*
+ * Set the params for the correct allocator
+ */
+
+#if TCL_ALLOCATOR != aZIPPY
+ if (allocator == aNONE) {
+ /* This ensures that it is set just once, as any changes after
+ * initialization guarantee a hard crash
*/
+
+ ChooseAllocator();
+ }
- TclInitAlloc();
+#if TCL_ALLOCATOR == aMULTI
+ if (allocator == aZIPPY) {
+ allocSize = (sizeof(Cache) + (NBUCKETS -1)*sizeof(Bucket));
+ nBuckets = NBUCKETS;
+ } else {
+ allocSize = sizeof(Cache);
+ nBuckets = 1;
}
- Tcl_MutexLock(allocMutexPtr);
+#endif
+#endif
/*
- * First the simple case: we simple allocate big blocks directly.
+ * Check for first-time initialization.
*/
- if (numBytes >= MAXMALLOC - OVERHEAD) {
- if (numBytes <= UINT_MAX - OVERHEAD -sizeof(struct block)) {
- bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
- (sizeof(struct block) + OVERHEAD + numBytes), 0);
- }
- if (bigBlockPtr == NULL) {
- Tcl_MutexUnlock(allocMutexPtr);
- return NULL;
- }
- bigBlockPtr->nextPtr = bigBlocks.nextPtr;
- bigBlocks.nextPtr = bigBlockPtr;
- bigBlockPtr->prevPtr = &bigBlocks;
- bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;
-
- overPtr = (union overhead *) (bigBlockPtr + 1);
- overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
- overPtr->bucketIndex = 0xff;
-#ifdef MSTATS
- numMallocs[NBUCKETS]++;
+#if defined(TCL_THREADS)
+ if (listLockPtr == NULL) {
+ Tcl_Mutex *initLockPtr;
+ initLockPtr = Tcl_GetAllocMutex();
+ Tcl_MutexLock(initLockPtr);
+ if (listLockPtr == NULL) {
+ listLockPtr = TclpNewAllocMutex();
+ objLockPtr = TclpNewAllocMutex();
#endif
-
-#ifdef RCHECK
- /*
- * Record allocated size of block and bound space with magic numbers.
- */
-
- overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
- overPtr->rangeCheckMagic = RMAGIC;
- BLOCK_END(overPtr) = RMAGIC;
+ for (i = 0; i < nBuckets; ++i) {
+ bucketInfo[i].blockSize = MINALLOC << i;
+#if defined(TCL_THREADS)
+ /* TODO: clearer logic? Change move to keep? */
+ bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
+ bucketInfo[i].numMove = i < NBUCKETS - 1 ?
+ 1 << (NBUCKETS - 2 - i) : 1;
+ bucketInfo[i].lockPtr = TclpNewAllocMutex();
#endif
-
- Tcl_MutexUnlock(allocMutexPtr);
- return (void *)(overPtr+1);
+ }
+#if defined(TCL_THREADS)
+ sharedPtr = calloc(1, allocSize);
+ firstCachePtr = sharedPtr;
+ }
+ Tcl_MutexUnlock(initLockPtr);
}
+#endif
+ if (allocator == aPURIFY) {
+ bucketInfo[0].maxBlocks = 0;
+ }
+
/*
- * Convert amount of memory requested into closest block size stored in
- * hash buckets which satisfies request. Account for space used per block
- * for accounting.
+ * Get this thread's cache, allocating if necessary.
*/
- amount = MINBLOCK; /* size of first bucket */
- bucket = MINBLOCK >> 4;
-
- while (numBytes + OVERHEAD > amount) {
- amount <<= 1;
- if (amount == 0) {
- Tcl_MutexUnlock(allocMutexPtr);
- return NULL;
+ cachePtr = TclpGetAllocCache();
+ if (cachePtr == NULL) {
+ cachePtr = calloc(1, allocSize);
+ if (cachePtr == NULL) {
+ Tcl_Panic("alloc: could not allocate new cache");
}
- bucket++;
+#if defined(TCL_THREADS)
+ Tcl_MutexLock(listLockPtr);
+ cachePtr->nextPtr = firstCachePtr;
+ firstCachePtr = cachePtr;
+ Tcl_MutexUnlock(listLockPtr);
+#ifdef ZIPPY_STATS
+ cachePtr->owner = Tcl_GetCurrentThread();
+#endif
+ TclpSetAllocCache(cachePtr);
+#endif
}
- ASSERT(bucket < NBUCKETS);
+ return cachePtr;
+}
+
+#if defined(TCL_THREADS)
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclFreeAllocCache --
+ *
+ * Flush and delete a cache, removing from list of caches.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void
+TclFreeAllocCache(
+ void *arg)
+{
+ Cache *cachePtr = arg;
+ Cache **nextPtrPtr;
+ register unsigned int bucket;
/*
- * If nothing in hash bucket right now, request more memory from the
- * system.
+ * Flush blocks.
*/
- if ((overPtr = nextf[bucket]) == NULL) {
- MoreCore(bucket);
- if ((overPtr = nextf[bucket]) == NULL) {
- Tcl_MutexUnlock(allocMutexPtr);
- return NULL;
+ for (bucket = 0; bucket < nBuckets; ++bucket) {
+ if (cachePtr->buckets[bucket].numFree > 0) {
+ PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree);
}
}
/*
- * Remove from linked list
+ * Remove from pool list.
*/
- nextf[bucket] = overPtr->next;
- overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
- overPtr->bucketIndex = (unsigned char) bucket;
-
-#ifdef MSTATS
- numMallocs[bucket]++;
-#endif
-
-#ifdef RCHECK
- /*
- * Record allocated size of block and bound space with magic numbers.
- */
-
- overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
- overPtr->rangeCheckMagic = RMAGIC;
- BLOCK_END(overPtr) = RMAGIC;
-#endif
-
- Tcl_MutexUnlock(allocMutexPtr);
- return ((char *)(overPtr + 1));
+ Tcl_MutexLock(listLockPtr);
+ nextPtrPtr = &firstCachePtr;
+ while (*nextPtrPtr != cachePtr) {
+ nextPtrPtr = &(*nextPtrPtr)->nextPtr;
+ }
+ *nextPtrPtr = cachePtr->nextPtr;
+ cachePtr->nextPtr = NULL;
+ Tcl_MutexUnlock(listLockPtr);
+ free(cachePtr);
}
+#endif
+#if TCL_ALLOCATOR != aNATIVE
/*
*----------------------------------------------------------------------
*
- * MoreCore --
- *
- * Allocate more memory to the indicated bucket.
+ * TclpAlloc --
*
- * Assumes Mutex is already held.
+ * Allocate memory.
*
* Results:
- * None.
+ * Pointer to memory just beyond Block pointer.
*
* Side effects:
- * Attempts to get more memory from the system.
+ * May allocate more blocks for a bucket.
*
*----------------------------------------------------------------------
*/
-static void
-MoreCore(
- int bucket) /* What bucket to allocat to. */
+char *
+TclpAlloc(
+ unsigned int reqSize)
{
- register union overhead *overPtr;
- register long size; /* size of desired block */
- long amount; /* amount to allocate */
- int numBlocks; /* how many blocks we get */
- struct block *blockPtr;
-
- /*
- * sbrk_size <= 0 only for big, FLUFFY, requests (about 2^30 bytes on a
- * VAX, I think) or for a negative arg.
- */
+ Cache *cachePtr;
+ Block *blockPtr;
+ register int bucket;
+ size_t size;
- size = 1 << (bucket + 3);
- ASSERT(size > 0);
+ if (allocator < aNONE) {
+ return (void *) malloc(reqSize);
+ }
+
+ GETCACHE(cachePtr);
- amount = MAXMALLOC;
- numBlocks = amount / size;
- ASSERT(numBlocks*size == amount);
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
- blockPtr = (struct block *) TclpSysAlloc((unsigned)
- (sizeof(struct block) + amount), 1);
- /* no more room! */
- if (blockPtr == NULL) {
- return;
+ if (((size_t) reqSize) > max - OFFSET - RCHECK) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
}
- blockPtr->nextPtr = blockList;
- blockList = blockPtr;
-
- overPtr = (union overhead *) (blockPtr + 1);
+#endif
/*
- * Add new memory allocated to that on free list for this hash bucket.
+ * Increment the requested size to include room for the Block structure.
+ * Call malloc() directly if the required amount is greater than the
+ * largest block, otherwise pop the smallest block large enough,
+ * allocating more blocks if necessary.
*/
- nextf[bucket] = overPtr;
- while (--numBlocks > 0) {
- overPtr->next = (union overhead *)((caddr_t)overPtr + size);
- overPtr = (union overhead *)((caddr_t)overPtr + size);
+ blockPtr = NULL;
+ size = reqSize + OFFSET;
+#if RCHECK
+ size++;
+#endif
+ if (size > MAXALLOC) {
+ bucket = nBuckets;
+ blockPtr = malloc(size);
+#ifdef ZIPPY_STATS
+ if (blockPtr != NULL) {
+ cachePtr->totalAssigned += reqSize;
+ }
+#endif
+ } else {
+ bucket = 0;
+ while (bucketInfo[bucket].blockSize < size) {
+ bucket++;
+ }
+ if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
+ blockPtr = cachePtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
+ cachePtr->buckets[bucket].numFree--;
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].numRemoves++;
+ cachePtr->buckets[bucket].totalAssigned += reqSize;
+#endif
+ }
+ }
+ if (blockPtr == NULL) {
+ return NULL;
}
- overPtr->next = NULL;
+ return Block2Ptr(blockPtr, bucket, reqSize);
}
/*
@@ -439,64 +668,66 @@ MoreCore(
*
* TclpFree --
*
- * Free memory.
+ * Return blocks to the thread block cache.
*
* Results:
* None.
*
* Side effects:
- * None.
+ * May move blocks to shared cache.
*
*----------------------------------------------------------------------
*/
void
TclpFree(
- char *oldPtr) /* Pointer to memory to free. */
+ char *ptr)
{
- register long size;
- register union overhead *overPtr;
- struct block *bigBlockPtr;
+ Cache *cachePtr;
+ Block *blockPtr;
+ int bucket;
- if (oldPtr == NULL) {
- return;
+ if (allocator < aNONE) {
+ return free((char *) ptr);
}
- Tcl_MutexLock(allocMutexPtr);
- overPtr = (union overhead *)((caddr_t)oldPtr - sizeof(union overhead));
+ GETCACHE(cachePtr);
- ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */
- ASSERT(overPtr->overMagic1 == MAGIC);
- if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
- Tcl_MutexUnlock(allocMutexPtr);
+ if (ptr == NULL) {
return;
}
- RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
- RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
- size = overPtr->bucketIndex;
- if (size == 0xff) {
-#ifdef MSTATS
- numMallocs[NBUCKETS]--;
-#endif
-
- bigBlockPtr = (struct block *) overPtr - 1;
- bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
- bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
- TclpSysFree(bigBlockPtr);
+ /*
+ * Get the block back from the user pointer and call system free directly
+ * for large blocks. Otherwise, push the block back on the bucket and move
+ * blocks to the shared cache if there are now too many free.
+ */
- Tcl_MutexUnlock(allocMutexPtr);
+ blockPtr = Ptr2Block(ptr);
+ bucket = blockPtr->sourceBucket;
+ if (bucket == nBuckets) {
+#ifdef ZIPPY_STATS
+ cachePtr->totalAssigned -= blockPtr->blockReqSize;
+#endif
+ free(blockPtr);
return;
}
- ASSERT(size < NBUCKETS);
- overPtr->next = nextf[size]; /* also clobbers overMagic */
- nextf[size] = overPtr;
-#ifdef MSTATS
- numMallocs[size]--;
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
+#endif
+ blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ cachePtr->buckets[bucket].numFree++;
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].numInserts++;
+#endif
+#if defined(TCL_THREADS)
+ if (cachePtr != sharedPtr &&
+ cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
+ PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove);
+ }
#endif
-
- Tcl_MutexUnlock(allocMutexPtr);
}
/*
@@ -504,138 +735,308 @@ TclpFree(
*
* TclpRealloc --
*
- * Reallocate memory.
+ * Re-allocate memory to a larger or smaller size.
*
* Results:
- * None.
+ * Pointer to memory just beyond Block pointer.
*
* Side effects:
- * None.
+ * Previous memory, if any, may be freed.
*
*----------------------------------------------------------------------
*/
char *
-TclpRealloc(
- char *oldPtr, /* Pointer to alloced block. */
- unsigned int numBytes) /* New size of memory. */
+TclpRealloc(
+ char *ptr,
+ unsigned int reqSize)
{
- int i;
- union overhead *overPtr;
- struct block *bigBlockPtr;
- int expensive;
- unsigned long maxSize;
-
- if (oldPtr == NULL) {
- return TclpAlloc(numBytes);
+ Cache *cachePtr;
+ Block *blockPtr;
+ void *newPtr;
+ size_t size, min;
+ int bucket;
+
+ if (allocator < aNONE) {
+ return (void *) realloc((char *) ptr, reqSize);
}
- Tcl_MutexLock(allocMutexPtr);
-
- overPtr = (union overhead *)((caddr_t)oldPtr - sizeof(union overhead));
+ GETCACHE(cachePtr);
- ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */
- ASSERT(overPtr->overMagic1 == MAGIC);
- if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
- Tcl_MutexUnlock(allocMutexPtr);
- return NULL;
+ if (ptr == NULL) {
+ return TclpAlloc(reqSize);
}
- RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
- RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
- i = overPtr->bucketIndex;
+#ifndef __LP64__
+ if (sizeof(int) >= sizeof(size_t)) {
+ /* An unsigned int overflow can also be a size_t overflow */
+ const size_t zero = 0;
+ const size_t max = ~zero;
+
+ if (((size_t) reqSize) > max - OFFSET - RCHECK) {
+ /* Requested allocation exceeds memory */
+ return NULL;
+ }
+ }
+#endif
/*
- * If the block isn't in a bin, just realloc it.
+ * If the block is not a system block and fits in place, simply return the
+ * existing pointer. Otherwise, if the block is a system block and the new
+ * size would also require a system block, call realloc() directly.
*/
- if (i == 0xff) {
- struct block *prevPtr, *nextPtr;
- bigBlockPtr = (struct block *) overPtr - 1;
- prevPtr = bigBlockPtr->prevPtr;
- nextPtr = bigBlockPtr->nextPtr;
- bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
- sizeof(struct block) + OVERHEAD + numBytes);
- if (bigBlockPtr == NULL) {
- Tcl_MutexUnlock(allocMutexPtr);
+ blockPtr = Ptr2Block(ptr);
+ size = reqSize + OFFSET;
+#if RCHECK
+ size++;
+#endif
+ bucket = blockPtr->sourceBucket;
+ if (bucket != nBuckets) {
+ if (bucket > 0) {
+ min = bucketInfo[bucket-1].blockSize;
+ } else {
+ min = 0;
+ }
+ if (size > min && size <= bucketInfo[bucket].blockSize) {
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
+ cachePtr->buckets[bucket].totalAssigned += reqSize;
+#endif
+ return Block2Ptr(blockPtr, bucket, reqSize);
+ }
+ } else if (size > MAXALLOC) {
+#ifdef ZIPPY_STATS
+ cachePtr->totalAssigned -= blockPtr->blockReqSize;
+ cachePtr->totalAssigned += reqSize;
+#endif
+ blockPtr = realloc(blockPtr, size);
+ if (blockPtr == NULL) {
return NULL;
}
+ return Block2Ptr(blockPtr, nBuckets, reqSize);
+ }
- if (prevPtr->nextPtr != bigBlockPtr) {
- /*
- * If the block has moved, splice the new block into the list
- * where the old block used to be.
- */
+ /*
+ * Finally, perform an expensive malloc/copy/free.
+ */
- prevPtr->nextPtr = bigBlockPtr;
- nextPtr->prevPtr = bigBlockPtr;
+ newPtr = TclpAlloc(reqSize);
+ if (newPtr != NULL) {
+ if (reqSize > blockPtr->blockReqSize) {
+ reqSize = blockPtr->blockReqSize;
}
+ memcpy(newPtr, ptr, reqSize);
+ TclpFree(ptr);
+ }
+ return newPtr;
+}
+#ifdef ZIPPY_STATS
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * Tcl_GetMemoryInfo --
+ *
+ * Return a list-of-lists of memory stats.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * List appended to given dstring.
+ *
+ *----------------------------------------------------------------------
+ */
- overPtr = (union overhead *) (bigBlockPtr + 1);
-
-#ifdef MSTATS
- numMallocs[NBUCKETS]++;
+void
+Tcl_GetMemoryInfo(
+ Tcl_DString *dsPtr)
+{
+ Cache *cachePtr;
+ char buf[200];
+ unsigned int n;
+
+ Tcl_MutexLock(listLockPtr);
+ cachePtr = firstCachePtr;
+ while (cachePtr != NULL) {
+ Tcl_DStringStartSublist(dsPtr);
+#if defined(TCL_THREADS)
+ if (cachePtr == sharedPtr) {
+ Tcl_DStringAppendElement(dsPtr, "shared");
+ } else {
+ sprintf(buf, "thread%p", cachePtr->owner);
+ Tcl_DStringAppendElement(dsPtr, buf);
+ }
+#else
+ Tcl_DStringAppendElement(dsPtr, "unthreaded");
#endif
-
-#ifdef RCHECK
- /*
- * Record allocated size of block and update magic number bounds.
- */
-
- overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
- BLOCK_END(overPtr) = RMAGIC;
+ for (n = 0; n < nBuckets; ++n) {
+ sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
+ (unsigned long) bucketInfo[n].blockSize,
+ cachePtr->buckets[n].numFree,
+ cachePtr->buckets[n].numRemoves,
+ cachePtr->buckets[n].numInserts,
+ cachePtr->buckets[n].totalAssigned,
+ cachePtr->buckets[n].numLocks,
+ cachePtr->buckets[n].numWaits);
+ Tcl_DStringAppendElement(dsPtr, buf);
+ }
+ Tcl_DStringEndSublist(dsPtr);
+#if defined(TCL_THREADS)
+ cachePtr = cachePtr->nextPtr;
+#else
+ cachePtr = NULL;
#endif
-
- Tcl_MutexUnlock(allocMutexPtr);
- return (char *)(overPtr+1);
}
- maxSize = 1 << (i+3);
- expensive = 0;
- if (numBytes+OVERHEAD > maxSize) {
- expensive = 1;
- } else if (i>0 && numBytes+OVERHEAD < maxSize/2) {
- expensive = 1;
+ Tcl_MutexUnlock(listLockPtr);
+}
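+
+/*
+ * A minimal usage sketch (assumes a ZIPPY_STATS build; "ds" is a local
+ * variable introduced only for this example):
+ *
+ *     Tcl_DString ds;
+ *
+ *     Tcl_DStringInit(&ds);
+ *     Tcl_GetMemoryInfo(&ds);
+ *     printf("%s\n", Tcl_DStringValue(&ds));
+ *     Tcl_DStringFree(&ds);
+ */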
+#endif /* ZIPPY_STATS */
+#endif /* TCL_ALLOCATOR != aNATIVE */
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclSmallAlloc --
+ *
+ * Allocate a Tcl_Obj sized block from the per-thread cache.
+ *
+ * Results:
+ * Pointer to uninitialized memory.
+ *
+ * Side effects:
+ * May move blocks from the shared cache or allocate new blocks if
+ * list is empty.
+ *
+ *----------------------------------------------------------------------
+ */
+
+void *
+TclSmallAlloc(void)
+{
+ Cache *cachePtr;
+ Block *blockPtr;
+ Bucket *bucketPtr;
+
+ GETCACHE(cachePtr);
+ bucketPtr = &cachePtr->buckets[0];
+
+ blockPtr = bucketPtr->firstPtr;
+ if (bucketPtr->numFree || GetBlocks(cachePtr, 0)) {
+ blockPtr = bucketPtr->firstPtr;
+ bucketPtr->firstPtr = blockPtr->nextBlock;
+ bucketPtr->numFree--;
+#ifdef ZIPPY_STATS
+ bucketPtr->numRemoves++;
+ bucketPtr->totalAssigned += sizeof(Tcl_Obj);
+#endif
}
+ return blockPtr;
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * TclSmallFree --
+ *
+ * Return a free Tcl_Obj-sized block to the per-thread cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * May move free blocks to shared list upon hitting high water mark.
+ *
+ *----------------------------------------------------------------------
+ */
- if (expensive) {
- void *newPtr;
+void
+TclSmallFree(
+ void *ptr)
+{
+ Cache *cachePtr;
+ Block *blockPtr = ptr;
+ Bucket *bucketPtr;
- Tcl_MutexUnlock(allocMutexPtr);
+ GETCACHE(cachePtr);
+ bucketPtr = &cachePtr->buckets[0];
- newPtr = TclpAlloc(numBytes);
- if (newPtr == NULL) {
- return NULL;
- }
- maxSize -= OVERHEAD;
- if (maxSize < numBytes) {
- numBytes = maxSize;
+#ifdef ZIPPY_STATS
+ bucketPtr->totalAssigned -= sizeof(Tcl_Obj);
+#endif
+ blockPtr->nextBlock = bucketPtr->firstPtr;
+ bucketPtr->firstPtr = blockPtr;
+ bucketPtr->numFree++;
+#ifdef ZIPPY_STATS
+ bucketPtr->numInserts++;
+#endif
+
+ if (bucketPtr->numFree > bucketInfo[0].maxBlocks) {
+ if (allocator == aPURIFY) {
+ /* undo */
+ bucketPtr->numFree = 0;
+ bucketPtr->firstPtr = NULL;
+ free((char *) blockPtr);
+ return;
}
- memcpy(newPtr, oldPtr, (size_t) numBytes);
- TclpFree(oldPtr);
- return newPtr;
+#if defined(TCL_THREADS)
+ PutBlocks(cachePtr, 0, bucketInfo[0].numMove);
+#endif
}
+}
+
+#if defined(TCL_THREADS)
+/*
+ *----------------------------------------------------------------------
+ *
+ * LockBucket, UnlockBucket --
+ *
+ * Set/unset the lock to access a bucket in the shared cache.
+ *
+ * Results:
+ * None.
+ *
+ * Side effects:
+ * Lock activity and contention are monitored globally and on a per-cache
+ * basis.
+ *
+ *----------------------------------------------------------------------
+ */
- /*
- * Ok, we don't have to copy, it fits as-is
- */
-
-#ifdef RCHECK
- overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
- BLOCK_END(overPtr) = RMAGIC;
+static void
+LockBucket(
+ Cache *cachePtr,
+ int bucket)
+{
+#if 0
+ if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
+ Tcl_MutexLock(bucketInfo[bucket].lockPtr);
+ cachePtr->buckets[bucket].numWaits++;
+ sharedPtr->buckets[bucket].numWaits++;
+ }
+#else
+ Tcl_MutexLock(bucketInfo[bucket].lockPtr);
#endif
+#ifdef ZIPPY_STATS
+ cachePtr->buckets[bucket].numLocks++;
+ sharedPtr->buckets[bucket].numLocks++;
+#endif
+}
- Tcl_MutexUnlock(allocMutexPtr);
- return(oldPtr);
+static void
+UnlockBucket(
+ Cache *cachePtr,
+ int bucket)
+{
+ Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
}
/*
*----------------------------------------------------------------------
*
- * mstats --
+ * PutBlocks --
*
- * Prints two lines of numbers, one showing the length of the free list
- * for each size category, the second showing the number of mallocs -
- * frees for each size category.
+ * Return unused blocks to the shared cache.
*
* Results:
* None.
@@ -646,95 +1047,203 @@ TclpRealloc(
*----------------------------------------------------------------------
*/
-#ifdef MSTATS
-void
-mstats(
- char *s) /* Where to write info. */
+static void
+PutBlocks(
+ Cache *cachePtr,
+ int bucket,
+ int numMove)
{
- register int i, j;
- register union overhead *overPtr;
- int totalFree = 0, totalUsed = 0;
+ register Block *lastPtr, *firstPtr;
+ register int n = numMove;
- Tcl_MutexLock(allocMutexPtr);
-
- fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
- for (i = 0; i < NBUCKETS; i++) {
- for (j=0, overPtr=nextf[i]; overPtr; overPtr=overPtr->next, j++) {
- fprintf(stderr, " %d", j);
- }
- totalFree += j * (1 << (i + 3));
- }
+ /*
+ * Before acquiring the lock, walk the block list to find the last block
+ * to be moved.
+ */
- fprintf(stderr, "\nused:\t");
- for (i = 0; i < NBUCKETS; i++) {
- fprintf(stderr, " %d", numMallocs[i]);
- totalUsed += numMallocs[i] * (1 << (i + 3));
+ firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
+ while (--n > 0) {
+ lastPtr = lastPtr->nextBlock;
}
+ cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
+ cachePtr->buckets[bucket].numFree -= numMove;
- fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
- totalUsed, totalFree);
- fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n",
- MAXMALLOC, numMallocs[NBUCKETS]);
+ /*
+ * Acquire the lock and place the list of blocks at the front of the shared
+ * cache bucket.
+ */
- Tcl_MutexUnlock(allocMutexPtr);
+ LockBucket(cachePtr, bucket);
+ lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
+ sharedPtr->buckets[bucket].firstPtr = firstPtr;
+ sharedPtr->buckets[bucket].numFree += numMove;
+ UnlockBucket(cachePtr, bucket);
}
#endif
-
-#else /* !USE_TCLALLOC */
/*
*----------------------------------------------------------------------
*
- * TclpAlloc --
+ * GetBlocks --
*
- * Allocate more memory.
+ * Get more blocks for a bucket.
*
* Results:
- * None.
+ * 1 if blocks were allocated, 0 otherwise.
*
* Side effects:
- * None.
+ * Cache may be filled with available blocks.
*
*----------------------------------------------------------------------
*/
-char *
-TclpAlloc(
- unsigned int numBytes) /* Number of bytes to allocate. */
+static int
+GetBlocks(
+ Cache *cachePtr,
+ int bucket)
{
- return (char *) malloc(numBytes);
+ register Block *blockPtr = NULL;
+ register int n;
+
+ if (allocator == aPURIFY) {
+ if (bucket) {
+ Tcl_Panic("purify mode asking for blocks?");
+ }
+ cachePtr->buckets[0].firstPtr = (Block *) calloc(1, MINALLOC);
+ cachePtr->buckets[0].numFree = 1;
+ return 1;
+ }
+
+#if defined(TCL_THREADS)
+ /*
+ * First, attempt to move blocks from the shared cache. Note the
+ * potentially dirty read of numFree before acquiring the lock, which is a
+ * slight performance enhancement. The value is verified after the lock is
+ * actually acquired.
+ */
+
+ if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
+ LockBucket(cachePtr, bucket);
+ if (sharedPtr->buckets[bucket].numFree > 0) {
+
+ /*
+ * Either move the entire list or walk the list to find the last
+ * block to move.
+ */
+
+ n = bucketInfo[bucket].numMove;
+ if (n >= sharedPtr->buckets[bucket].numFree) {
+ cachePtr->buckets[bucket].firstPtr =
+ sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].numFree =
+ sharedPtr->buckets[bucket].numFree;
+ sharedPtr->buckets[bucket].firstPtr = NULL;
+ sharedPtr->buckets[bucket].numFree = 0;
+ } else {
+ blockPtr = sharedPtr->buckets[bucket].firstPtr;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ sharedPtr->buckets[bucket].numFree -= n;
+ cachePtr->buckets[bucket].numFree = n;
+ while (--n > 0) {
+ blockPtr = blockPtr->nextBlock;
+ }
+ sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
+ blockPtr->nextBlock = NULL;
+ }
+ }
+ UnlockBucket(cachePtr, bucket);
+ }
+#endif
+
+ if (cachePtr->buckets[bucket].numFree == 0) {
+ register size_t size;
+
+#if TCL_ALLOCATOR != aNATIVE
+ /*
+ * If no blocks could be moved from shared, first look for a larger
+ * block in this cache OR the shared cache to split up.
+ */
+
+ n = nBuckets;
+ size = 0; /* lint */
+ while (--n > bucket) {
+ size = bucketInfo[n].blockSize;
+ if (cachePtr->buckets[n].numFree > 0) {
+ blockPtr = cachePtr->buckets[n].firstPtr;
+ cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
+ cachePtr->buckets[n].numFree--;
+ break;
+ } else if (sharedPtr->buckets[n].numFree > 0){
+ LockBucket(cachePtr, n);
+ if (sharedPtr->buckets[n].numFree > 0) {
+ blockPtr = sharedPtr->buckets[n].firstPtr;
+ sharedPtr->buckets[n].firstPtr = blockPtr->nextBlock;
+ sharedPtr->buckets[n].numFree--;
+ UnlockBucket(cachePtr, n);
+ break;
+ }
+ UnlockBucket(cachePtr, n);
+ }
+ }
+#endif
+
+ /*
+ * Otherwise, allocate a big new block directly.
+ */
+
+ if (blockPtr == NULL) {
+ size = MAXALLOC;
+ blockPtr = malloc(size);
+ if (blockPtr == NULL) {
+ return 0;
+ }
+ }
+
+ /*
+ * Split the larger block into smaller blocks for this bucket.
+ */
+
+ n = size / bucketInfo[bucket].blockSize;
+ cachePtr->buckets[bucket].numFree = n;
+ cachePtr->buckets[bucket].firstPtr = blockPtr;
+ while (--n > 0) {
+ blockPtr->nextBlock = (Block *)
+ ((char *) blockPtr + bucketInfo[bucket].blockSize);
+ blockPtr = blockPtr->nextBlock;
+ }
+ blockPtr->nextBlock = NULL;
+ }
+ return 1;
}
/*
- *----------------------------------------------------------------------
+ *-------------------------------------------------------------------------
*
- * TclpFree --
+ * TclInitAlloc --
*
- * Free memory.
+ * Initialize the memory system.
*
* Results:
* None.
*
* Side effects:
- * None.
+ * None currently; initialization happens lazily in GetCache().
*
- *----------------------------------------------------------------------
+ *-------------------------------------------------------------------------
*/
void
-TclpFree(
- char *oldPtr) /* Pointer to memory to free. */
+TclInitAlloc(void)
{
- free(oldPtr);
- return;
}
/*
*----------------------------------------------------------------------
*
- * TclpRealloc --
+ * TclFinalizeAlloc --
*
- * Reallocate memory.
+ * This procedure is used to destroy all private resources used in this
+ * file.
*
* Results:
* None.
@@ -745,16 +1254,55 @@ TclpFree(
*----------------------------------------------------------------------
*/
-char *
-TclpRealloc(
- char *oldPtr, /* Pointer to alloced block. */
- unsigned int numBytes) /* New size of memory. */
+void
+TclFinalizeAlloc(void)
{
- return (char *) realloc(oldPtr, numBytes);
+#if defined(TCL_THREADS)
+ unsigned int i;
+
+ for (i = 0; i < nBuckets; ++i) {
+ TclpFreeAllocMutex(bucketInfo[i].lockPtr);
+ bucketInfo[i].lockPtr = NULL;
+ }
+
+ TclpFreeAllocMutex(objLockPtr);
+ objLockPtr = NULL;
+
+ TclpFreeAllocMutex(listLockPtr);
+ listLockPtr = NULL;
+
+ TclpFreeAllocCache(NULL);
+#endif
}
+
+#if TCL_ALLOCATOR != aZIPPY
+static void
+ChooseAllocator()
+{
+ char *choice = getenv("TCL_ALLOCATOR");
+
+ /*
+ * This is only called with ALLOCATOR_BASE aZIPPY (when compiled with
+ * aMULTI) or aNATIVE (when compiled with aNATIVE).
+ */
+
+ allocator = ALLOCATOR_BASE;
+
+ if (choice) {
+ /*
+ * Only override the base when requesting native or purify
+ */
+
+ if (!strcmp(choice, "aNATIVE")) {
+ allocator = aNATIVE;
+ } else if (!strcmp(choice, "aPURIFY")) {
+ allocator = aPURIFY;
+ }
+ }
+}
+#endif
-#endif /* !USE_TCLALLOC */
-#endif /* !TCL_THREADS */
+#endif /* end of !PURIFY */
/*
* Local Variables:
diff --git a/generic/tclAssembly.c b/generic/tclAssembly.c
index 754941f..2562558 100644
--- a/generic/tclAssembly.c
+++ b/generic/tclAssembly.c
@@ -1095,11 +1095,9 @@ NewAssemblyEnv(
* generation*/
int flags) /* Compilation flags (TCL_EVAL_DIRECT) */
{
- Tcl_Interp* interp = (Tcl_Interp*) envPtr->iPtr;
- /* Tcl interpreter */
- AssemblyEnv* assemEnvPtr = TclStackAlloc(interp, sizeof(AssemblyEnv));
+ AssemblyEnv* assemEnvPtr = ckalloc(sizeof(AssemblyEnv));
/* Assembler environment under construction */
- Tcl_Parse* parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse));
+ Tcl_Parse* parsePtr = ckalloc(sizeof(Tcl_Parse));
/* Parse of one line of assembly code */
assemEnvPtr->envPtr = envPtr;
@@ -1144,11 +1142,6 @@ static void
FreeAssemblyEnv(
AssemblyEnv* assemEnvPtr) /* Environment to free */
{
- CompileEnv* envPtr = assemEnvPtr->envPtr;
- /* Compilation environment being used for code
- * generation */
- Tcl_Interp* interp = (Tcl_Interp*) envPtr->iPtr;
- /* Tcl interpreter */
BasicBlock* thisBB; /* Pointer to a basic block being deleted */
BasicBlock* nextBB; /* Pointer to a deleted basic block's
* successor */
@@ -1191,8 +1184,8 @@ FreeAssemblyEnv(
* Dispose what's left.
*/
- TclStackFree(interp, assemEnvPtr->parsePtr);
- TclStackFree(interp, assemEnvPtr);
+ ckfree(assemEnvPtr->parsePtr);
+ ckfree(assemEnvPtr);
}
/*
diff --git a/generic/tclBasic.c b/generic/tclBasic.c
index 5f2b301..5e676ba 100644
--- a/generic/tclBasic.c
+++ b/generic/tclBasic.c
@@ -728,11 +728,6 @@ Tcl_CreateInterp(void)
* cache was already initialised by the call to alloc the interp struct.
*/
-#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
- iPtr->allocCache = TclpGetAllocCache();
-#else
- iPtr->allocCache = NULL;
-#endif
iPtr->pendingObjDataPtr = NULL;
iPtr->asyncReadyPtr = TclGetAsyncReadyPtr();
iPtr->deferredCallbacks = NULL;
@@ -2319,8 +2314,7 @@ TclInvokeStringCommand(
{
Command *cmdPtr = clientData;
int i, result;
- const char **argv =
- TclStackAlloc(interp, (unsigned)(objc + 1) * sizeof(char *));
+ const char **argv = ckalloc((unsigned)(objc + 1) * sizeof(char *));
for (i = 0; i < objc; i++) {
argv[i] = Tcl_GetString(objv[i]);
@@ -2333,7 +2327,7 @@ TclInvokeStringCommand(
result = cmdPtr->proc(cmdPtr->clientData, interp, objc, argv);
- TclStackFree(interp, (void *) argv);
+ ckfree((void *) argv);
return result;
}
@@ -2368,8 +2362,7 @@ TclInvokeObjectCommand(
Command *cmdPtr = clientData;
Tcl_Obj *objPtr;
int i, length, result;
- Tcl_Obj **objv =
- TclStackAlloc(interp, (unsigned)(argc * sizeof(Tcl_Obj *)));
+ Tcl_Obj **objv = ckalloc((unsigned)(argc * sizeof(Tcl_Obj *)));
for (i = 0; i < argc; i++) {
length = strlen(argv[i]);
@@ -2405,7 +2398,7 @@ TclInvokeObjectCommand(
objPtr = objv[i];
Tcl_DecrRefCount(objPtr);
}
- TclStackFree(interp, objv);
+ ckfree(objv);
return result;
}
@@ -4563,7 +4556,7 @@ TEOV_NotFound(
Tcl_ListObjGetElements(NULL, currNsPtr->unknownHandlerPtr,
&handlerObjc, &handlerObjv);
newObjc = objc + handlerObjc;
- newObjv = TclStackAlloc(interp, (int) sizeof(Tcl_Obj *) * newObjc);
+ newObjv = ckalloc((int) sizeof(Tcl_Obj *) * newObjc);
/*
* Copy command prefix from unknown handler and add on the real command's
@@ -4602,7 +4595,7 @@ TEOV_NotFound(
for (i = 0; i < handlerObjc; ++i) {
Tcl_DecrRefCount(newObjv[i]);
}
- TclStackFree(interp, newObjv);
+ ckfree(newObjv);
return TCL_ERROR;
}
@@ -4640,7 +4633,7 @@ TEOV_NotFoundCallback(
for (i = 0; i < objc; ++i) {
Tcl_DecrRefCount(objv[i]);
}
- TclStackFree(interp, objv);
+ ckfree(objv);
return result;
}
@@ -4937,12 +4930,11 @@ TclEvalEx(
* state has been allocated while evaluating
* the script, so that it can be freed
* properly if an error occurs. */
- Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse));
- CmdFrame *eeFramePtr = TclStackAlloc(interp, sizeof(CmdFrame));
- Tcl_Obj **stackObjArray =
- TclStackAlloc(interp, minObjs * sizeof(Tcl_Obj *));
- int *expandStack = TclStackAlloc(interp, minObjs * sizeof(int));
- int *linesStack = TclStackAlloc(interp, minObjs * sizeof(int));
+ Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse));
+ CmdFrame *eeFramePtr = ckalloc(sizeof(CmdFrame));
+ Tcl_Obj **stackObjArray = ckalloc(minObjs * sizeof(Tcl_Obj *));
+ int *expandStack = ckalloc(minObjs * sizeof(int));
+ int *linesStack = ckalloc(minObjs * sizeof(int));
/* TIP #280 Structures for tracking of command
* locations. */
int *clNext = NULL; /* Pointer for the tracking of invisible
@@ -5338,11 +5330,11 @@ TclEvalEx(
if (eeFramePtr->type == TCL_LOCATION_SOURCE) {
Tcl_DecrRefCount(eeFramePtr->data.eval.path);
}
- TclStackFree(interp, linesStack);
- TclStackFree(interp, expandStack);
- TclStackFree(interp, stackObjArray);
- TclStackFree(interp, eeFramePtr);
- TclStackFree(interp, parsePtr);
+ ckfree(linesStack);
+ ckfree(expandStack);
+ ckfree(stackObjArray);
+ ckfree(eeFramePtr);
+ ckfree(parsePtr);
return code;
}
@@ -5980,7 +5972,7 @@ TclNREvalObjEx(
* should be pushed, as needed by alias and ensemble redirections.
*/
- eoFramePtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ eoFramePtr = ckalloc(sizeof(CmdFrame));
eoFramePtr->nline = 0;
eoFramePtr->line = NULL;
@@ -6098,7 +6090,7 @@ TclNREvalObjEx(
*/
int pc = 0;
- CmdFrame *ctxPtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ CmdFrame *ctxPtr = ckalloc(sizeof(CmdFrame));
*ctxPtr = *invoker;
if (invoker->type == TCL_LOCATION_BC) {
@@ -6139,7 +6131,7 @@ TclNREvalObjEx(
Tcl_DecrRefCount(ctxPtr->data.eval.path);
}
- TclStackFree(interp, ctxPtr);
+ ckfree(ctxPtr);
}
/*
@@ -6218,7 +6210,7 @@ TEOEx_ListCallback(
if (eoFramePtr) {
iPtr->cmdFramePtr = eoFramePtr->nextPtr;
- TclStackFree(interp, eoFramePtr);
+ ckfree(eoFramePtr);
}
TclDecrRefCount(listPtr);
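Note (illustration, not part of the diff): the tclBasic.c hunks above all apply
the same mechanical rewrite - scratch memory that used to be carved out of the
per-interp execution stack now comes from the ordinary core allocator, so the
interp argument simply drops out. A minimal sketch of the pattern, using a
hypothetical scratch array:

    /* before: transient array on the Tcl execution stack */
    Tcl_Obj **scratch = TclStackAlloc(interp, n * sizeof(Tcl_Obj *));
    /* ... use scratch ... */
    TclStackFree(interp, scratch);

    /* after: plain heap memory via ckalloc/ckfree; lifetime rules unchanged */
    Tcl_Obj **scratch = ckalloc(n * sizeof(Tcl_Obj *));
    /* ... use scratch ... */
    ckfree(scratch);
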
diff --git a/generic/tclCkalloc.c b/generic/tclCkalloc.c
index 9d63ebf..3b51f68 100644
--- a/generic/tclCkalloc.c
+++ b/generic/tclCkalloc.c
@@ -1296,10 +1296,6 @@ TclFinalizeMemorySubsystem(void)
Tcl_MutexUnlock(ckallocMutexPtr);
#endif
-
-#if USE_TCLALLOC
- TclFinalizeAllocSubsystem();
-#endif
}
/*
diff --git a/generic/tclCmdAH.c b/generic/tclCmdAH.c
index 3edfa54..b4afdef 100644
--- a/generic/tclCmdAH.c
+++ b/generic/tclCmdAH.c
@@ -2348,7 +2348,7 @@ TclNRForObjCmd(
return TCL_ERROR;
}
- TclSmallAllocEx(interp, sizeof(ForIterData), iterPtr);
+ TclCkSmallAlloc(sizeof(ForIterData), iterPtr);
iterPtr->cond = objv[2];
iterPtr->body = objv[4];
iterPtr->next = objv[3];
@@ -2376,7 +2376,7 @@ ForSetupCallback(
if (result == TCL_ERROR) {
Tcl_AddErrorInfo(interp, "\n (\"for\" initial command)");
}
- TclSmallFreeEx(interp, iterPtr);
+ TclSmallFree(iterPtr);
return result;
}
TclNRAddCallback(interp, TclNRForIterCallback, iterPtr, NULL, NULL, NULL);
@@ -2414,7 +2414,7 @@ TclNRForIterCallback(
Tcl_AppendObjToErrorInfo(interp,
Tcl_ObjPrintf(iterPtr->msg, Tcl_GetErrorLine(interp)));
}
- TclSmallFreeEx(interp, iterPtr);
+ TclSmallFree(iterPtr);
return result;
}
@@ -2431,11 +2431,11 @@ ForCondCallback(
if (result != TCL_OK) {
Tcl_DecrRefCount(boolObj);
- TclSmallFreeEx(interp, iterPtr);
+ TclSmallFree(iterPtr);
return result;
} else if (Tcl_GetBooleanFromObj(interp, boolObj, &value) != TCL_OK) {
Tcl_DecrRefCount(boolObj);
- TclSmallFreeEx(interp, iterPtr);
+ TclSmallFree(iterPtr);
return TCL_ERROR;
}
Tcl_DecrRefCount(boolObj);
@@ -2452,7 +2452,7 @@ ForCondCallback(
return TclNREvalObjEx(interp, iterPtr->body, 0, iPtr->cmdFramePtr,
iterPtr->word);
}
- TclSmallFreeEx(interp, iterPtr);
+ TclSmallFree(iterPtr);
return result;
}
@@ -2492,7 +2492,7 @@ ForPostNextCallback(
if ((result != TCL_BREAK) && (result != TCL_OK)) {
if (result == TCL_ERROR) {
Tcl_AddErrorInfo(interp, "\n (\"for\" loop-end command)");
- TclSmallFreeEx(interp, iterPtr);
+ TclSmallFree(iterPtr);
}
return result;
}
@@ -2560,7 +2560,7 @@ TclNRForeachCmd(
* allocation for better performance.
*/
- statePtr = TclStackAlloc(interp,
+ statePtr = ckalloc(
sizeof(struct ForeachState) + 3 * numLists * sizeof(int)
+ 2 * numLists * (sizeof(Tcl_Obj **) + sizeof(Tcl_Obj *)));
memset(statePtr, 0,
@@ -2754,7 +2754,7 @@ ForeachCleanup(
TclDecrRefCount(statePtr->aCopyList[i]);
}
}
- TclStackFree(interp, statePtr);
+ ckfree(statePtr);
}
/*
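Note (illustration, not part of the diff): the ForIterData allocations in
tclCmdAH.c (and again in tclCmdMZ.c further down) move from the interp-bound
TclSmallAllocEx/TclSmallFreeEx pair to TclCkSmallAlloc/TclSmallFree. The macro
definitions are not shown in these hunks; judging only from the call sites, the
new macro still takes a size and an lvalue to fill in:

    /* sketch of the call shape used above; the macro internals are an
     * assumption, only the arguments are taken from the diff */
    ForIterData *iterPtr;

    TclCkSmallAlloc(sizeof(ForIterData), iterPtr);  /* allocate and assign */
    /* ... hand iterPtr through the NR callback chain ... */
    TclSmallFree(iterPtr);                          /* matching release    */
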
diff --git a/generic/tclCmdIL.c b/generic/tclCmdIL.c
index b38ec9f..cd4a72b 100644
--- a/generic/tclCmdIL.c
+++ b/generic/tclCmdIL.c
@@ -1313,7 +1313,7 @@ TclInfoFrame(
* Execution of bytecode. Talk to the BC engine to fill out the frame.
*/
- CmdFrame *fPtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ CmdFrame *fPtr = ckalloc(sizeof(CmdFrame));
*fPtr = *framePtr;
@@ -1347,7 +1347,7 @@ TclInfoFrame(
ADD_PAIR("cmd",
Tcl_NewStringObj(fPtr->cmd.str.cmd, fPtr->cmd.str.len));
- TclStackFree(interp, fPtr);
+ ckfree(fPtr);
break;
}
@@ -3016,7 +3016,7 @@ Tcl_LsearchObjCmd(
int j;
if (sortInfo.indexc > 1) {
- TclStackFree(interp, sortInfo.indexv);
+ ckfree(sortInfo.indexv);
}
if (i > objc-4) {
if (startPtr != NULL) {
@@ -3051,7 +3051,7 @@ Tcl_LsearchObjCmd(
break;
default:
sortInfo.indexv =
- TclStackAlloc(interp, sizeof(int) * sortInfo.indexc);
+ ckalloc(sizeof(int) * sortInfo.indexc);
}
/*
@@ -3158,7 +3158,7 @@ Tcl_LsearchObjCmd(
if (offset > listc-1) {
if (sortInfo.indexc > 1) {
- TclStackFree(interp, sortInfo.indexv);
+ ckfree(sortInfo.indexv);
}
if (allMatches || inlineReturn) {
Tcl_ResetResult(interp);
@@ -3483,7 +3483,7 @@ Tcl_LsearchObjCmd(
done:
if (sortInfo.indexc > 1) {
- TclStackFree(interp, sortInfo.indexv);
+ ckfree(sortInfo.indexv);
}
return result;
}
@@ -3770,7 +3770,7 @@ Tcl_LsortObjCmd(
break;
default:
sortInfo.indexv =
- TclStackAlloc(interp, sizeof(int) * sortInfo.indexc);
+ ckalloc(sizeof(int) * sortInfo.indexc);
allocatedIndexVector = 1; /* Cannot use indexc field, as it
* might be decreased by 1 later. */
}
@@ -3865,6 +3865,7 @@ Tcl_LsortObjCmd(
/*
* Do not shrink the actual memory block used; that doesn't
* work with TclStackAlloc-allocated memory. [Bug 2918962]
+ * FIXME: TclStackAlloc is now retired, we could shrink it.
*/
for (i = 0; i < sortInfo.indexc; i++) {
@@ -3902,7 +3903,7 @@ Tcl_LsortObjCmd(
* begins sorting it into the sublists as it appears.
*/
- elementArray = TclStackAlloc(interp, length * sizeof(SortElement));
+ elementArray = ckalloc(length * sizeof(SortElement));
for (i=0; i < length; i++){
idx = groupSize * i + groupOffset;
@@ -4026,7 +4027,7 @@ Tcl_LsortObjCmd(
}
done1:
- TclStackFree(interp, elementArray);
+ ckfree(elementArray);
done:
if (sortInfo.sortMode == SORTMODE_COMMAND) {
@@ -4036,7 +4037,7 @@ Tcl_LsortObjCmd(
}
done2:
if (allocatedIndexVector) {
- TclStackFree(interp, sortInfo.indexv);
+ ckfree(sortInfo.indexv);
}
return sortInfo.resultCode;
}
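Note (illustration, not part of the diff): the Tcl_LsortObjCmd hunk above adds
a FIXME next to the old [Bug 2918962] comment: the index vector is no longer
TclStackAlloc-backed, so shrinking it becomes legal. A hypothetical follow-up
along the lines the FIXME suggests, once the final number of index entries is
known:

    /* hypothetical: trim the over-allocated index vector, now that it is
     * ordinary ckalloc() memory and may be resized */
    sortInfo.indexv = ckrealloc(sortInfo.indexv,
            sortInfo.indexc * sizeof(int));
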
diff --git a/generic/tclCmdMZ.c b/generic/tclCmdMZ.c
index 05f2e5d..d85cd83 100644
--- a/generic/tclCmdMZ.c
+++ b/generic/tclCmdMZ.c
@@ -1835,7 +1835,7 @@ StringMapCmd(
* adapt this code...
*/
- mapElemv = TclStackAlloc(interp, sizeof(Tcl_Obj *) * mapElemc);
+ mapElemv = ckalloc(sizeof(Tcl_Obj *) * mapElemc);
Tcl_DictObjFirst(interp, objv[objc-2], &search, mapElemv+0,
mapElemv+1, &done);
for (i=2 ; i<mapElemc ; i+=2) {
@@ -1944,10 +1944,10 @@ StringMapCmd(
* case.
*/
- mapStrings = TclStackAlloc(interp, mapElemc*2*sizeof(Tcl_UniChar *));
- mapLens = TclStackAlloc(interp, mapElemc * 2 * sizeof(int));
+ mapStrings = ckalloc(mapElemc*2*sizeof(Tcl_UniChar *));
+ mapLens = ckalloc(mapElemc * 2 * sizeof(int));
if (nocase) {
- u2lc = TclStackAlloc(interp, mapElemc * sizeof(Tcl_UniChar));
+ u2lc = ckalloc(mapElemc * sizeof(Tcl_UniChar));
}
for (index = 0; index < mapElemc; index++) {
mapStrings[index] = Tcl_GetUnicodeFromObj(mapElemv[index],
@@ -1997,10 +1997,10 @@ StringMapCmd(
}
}
if (nocase) {
- TclStackFree(interp, u2lc);
+ ckfree(u2lc);
}
- TclStackFree(interp, mapLens);
- TclStackFree(interp, mapStrings);
+ ckfree(mapLens);
+ ckfree(mapStrings);
}
if (p != ustring1) {
/*
@@ -2012,7 +2012,7 @@ StringMapCmd(
Tcl_SetObjResult(interp, resultPtr);
done:
if (mapWithDict) {
- TclStackFree(interp, mapElemv);
+ ckfree(mapElemv);
}
if (copySource) {
Tcl_DecrRefCount(sourceObj);
@@ -3849,7 +3849,7 @@ TclNRSwitchObjCmd(
*/
matchFound:
- ctxPtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ ctxPtr = ckalloc(sizeof(CmdFrame));
*ctxPtr = *iPtr->cmdFramePtr;
if (splitObjs) {
@@ -3966,7 +3966,7 @@ SwitchPostProc(
(overflow ? limit : patternLength), pattern,
(overflow ? "..." : ""), Tcl_GetErrorLine(interp)));
}
- TclStackFree(interp, ctxPtr);
+ ckfree(ctxPtr);
return result;
}
@@ -4729,7 +4729,7 @@ TclNRWhileObjCmd(
* We reuse [for]'s callback, passing a NULL for the 'next' script.
*/
- TclSmallAllocEx(interp, sizeof(ForIterData), iterPtr);
+ TclCkSmallAlloc(sizeof(ForIterData), iterPtr);
iterPtr->cond = objv[1];
iterPtr->body = objv[2];
iterPtr->next = NULL;
diff --git a/generic/tclCompCmds.c b/generic/tclCompCmds.c
index 083f530..2fda2b9 100644
--- a/generic/tclCompCmds.c
+++ b/generic/tclCompCmds.c
@@ -1021,8 +1021,7 @@ TclCompileDictUpdateCmd(
duiPtr = ckalloc(sizeof(DictUpdateInfo) + sizeof(int) * (numVars - 1));
duiPtr->length = numVars;
- keyTokenPtrs = TclStackAlloc(interp,
- sizeof(Tcl_Token *) * numVars);
+ keyTokenPtrs = ckalloc(sizeof(Tcl_Token *) * numVars);
tokenPtr = TokenAfter(dictVarTokenPtr);
for (i=0 ; i<numVars ; i++) {
@@ -1060,7 +1059,7 @@ TclCompileDictUpdateCmd(
if (tokenPtr->type != TCL_TOKEN_SIMPLE_WORD) {
failedUpdateInfoAssembly:
ckfree(duiPtr);
- TclStackFree(interp, keyTokenPtrs);
+ ckfree(keyTokenPtrs);
return TCL_ERROR;
}
bodyTokenPtr = tokenPtr;
@@ -1124,7 +1123,7 @@ TclCompileDictUpdateCmd(
Tcl_Panic("TclCompileDictCmd(update): bad jump distance %d",
(int) (CurrentOffset(envPtr) - jumpFixup.codeOffset));
}
- TclStackFree(interp, keyTokenPtrs);
+ ckfree(keyTokenPtrs);
return TCL_OK;
}
@@ -1637,10 +1636,9 @@ TclCompileForeachCmd(
*/
numLists = (numWords - 2)/2;
- varcList = TclStackAlloc(interp, numLists * sizeof(int));
+ varcList = ckalloc(numLists * sizeof(int));
memset(varcList, 0, numLists * sizeof(int));
- varvList = (const char ***) TclStackAlloc(interp,
- numLists * sizeof(const char **));
+ varvList = (const char ***) ckalloc(numLists * sizeof(const char **));
memset((char*) varvList, 0, numLists * sizeof(const char **));
/*
@@ -1867,8 +1865,8 @@ TclCompileForeachCmd(
ckfree(varvList[loopIndex]);
}
}
- TclStackFree(interp, (void *)varvList);
- TclStackFree(interp, varcList);
+ ckfree((void *)varvList);
+ ckfree(varcList);
return code;
}
@@ -3516,7 +3514,7 @@ TclCompileReturnCmd(
* Allocate some working space.
*/
- objv = TclStackAlloc(interp, numOptionWords * sizeof(Tcl_Obj *));
+ objv = ckalloc(numOptionWords * sizeof(Tcl_Obj *));
/*
* Scan through the return options. If any are unknown at compile time,
@@ -3540,7 +3538,7 @@ TclCompileReturnCmd(
while (--objc >= 0) {
TclDecrRefCount(objv[objc]);
}
- TclStackFree(interp, objv);
+ ckfree(objv);
if (TCL_ERROR == status) {
/*
* Something was bogus in the return options. Clear the error message,
@@ -4028,7 +4026,7 @@ PushVarName(
* assemble the corresponding token.
*/
- elemTokenPtr = TclStackAlloc(interp, sizeof(Tcl_Token));
+ elemTokenPtr = ckalloc(sizeof(Tcl_Token));
allocedTokens = 1;
elemTokenPtr->type = TCL_TOKEN_TEXT;
elemTokenPtr->start = elName;
@@ -4081,7 +4079,7 @@ PushVarName(
* token.
*/
- elemTokenPtr = TclStackAlloc(interp, n * sizeof(Tcl_Token));
+ elemTokenPtr = ckalloc(n * sizeof(Tcl_Token));
allocedTokens = 1;
elemTokenPtr->type = TCL_TOKEN_TEXT;
elemTokenPtr->start = elName;
@@ -4169,7 +4167,7 @@ PushVarName(
varTokenPtr[removedParen].size++;
}
if (allocedTokens) {
- TclStackFree(interp, elemTokenPtr);
+ ckfree(elemTokenPtr);
}
*localIndexPtr = localIndex;
*simpleVarNamePtr = simpleVarName;
diff --git a/generic/tclCompCmdsSZ.c b/generic/tclCompCmdsSZ.c
index d956819..ff494f2 100644
--- a/generic/tclCompCmdsSZ.c
+++ b/generic/tclCompCmdsSZ.c
@@ -595,7 +595,7 @@ TclCompileSubstCmd(
return TCL_ERROR;
}
- objv = TclStackAlloc(interp, /*numArgs*/ numOpts * sizeof(Tcl_Obj *));
+ objv = ckalloc(/*numArgs*/ numOpts * sizeof(Tcl_Obj *));
for (objc = 0; objc < /*numArgs*/ numOpts; objc++) {
objv[objc] = Tcl_NewObj();
@@ -628,7 +628,7 @@ TclCompileSubstCmd(
while (--objc >= 0) {
TclDecrRefCount(objv[objc]);
}
- TclStackFree(interp, objv);
+ ckfree(objv);
if (/*toSubst == NULL*/ code != TCL_OK) {
return TCL_ERROR;
}
@@ -1320,8 +1320,8 @@ IssueSwitchChainedTests(
contFixIndex = -1;
contFixCount = 0;
- fixupArray = TclStackAlloc(interp, sizeof(JumpFixup) * numBodyTokens);
- fixupTargetArray = TclStackAlloc(interp, sizeof(int) * numBodyTokens);
+ fixupArray = ckalloc(sizeof(JumpFixup) * numBodyTokens);
+ fixupTargetArray = ckalloc(sizeof(int) * numBodyTokens);
memset(fixupTargetArray, 0, numBodyTokens * sizeof(int));
fixupCount = 0;
foundDefault = 0;
@@ -1520,8 +1520,8 @@ IssueSwitchChainedTests(
}
}
}
- TclStackFree(interp, fixupTargetArray);
- TclStackFree(interp, fixupArray);
+ ckfree(fixupTargetArray);
+ ckfree(fixupArray);
envPtr->currStackDepth = savedStackDepth + 1;
}
@@ -1582,7 +1582,7 @@ IssueSwitchJumpTable(
jtPtr = ckalloc(sizeof(JumptableInfo));
Tcl_InitHashTable(&jtPtr->hashTable, TCL_STRING_KEYS);
infoIndex = TclCreateAuxData(jtPtr, &tclJumptableInfoType, envPtr);
- finalFixups = TclStackAlloc(interp, sizeof(int) * (numBodyTokens/2));
+ finalFixups = ckalloc(sizeof(int) * (numBodyTokens/2));
foundDefault = 0;
mustGenerate = 1;
@@ -1720,7 +1720,7 @@ IssueSwitchJumpTable(
* Clean up all our temporary space and return.
*/
- TclStackFree(interp, finalFixups);
+ ckfree(finalFixups);
}
/*
@@ -1975,12 +1975,12 @@ TclCompileTryCmd(
numHandlers = numWords >> 2;
numWords -= numHandlers * 4;
if (numHandlers > 0) {
- handlerTokens = TclStackAlloc(interp, sizeof(Tcl_Token*)*numHandlers);
- matchClauses = TclStackAlloc(interp, sizeof(Tcl_Obj *) * numHandlers);
+ handlerTokens = ckalloc(sizeof(Tcl_Token*)*numHandlers);
+ matchClauses = ckalloc(sizeof(Tcl_Obj *) * numHandlers);
memset(matchClauses, 0, sizeof(Tcl_Obj *) * numHandlers);
- matchCodes = TclStackAlloc(interp, sizeof(int) * numHandlers);
- resultVarIndices = TclStackAlloc(interp, sizeof(int) * numHandlers);
- optionVarIndices = TclStackAlloc(interp, sizeof(int) * numHandlers);
+ matchCodes = ckalloc(sizeof(int) * numHandlers);
+ resultVarIndices = ckalloc(sizeof(int) * numHandlers);
+ optionVarIndices = ckalloc(sizeof(int) * numHandlers);
for (i=0 ; i<numHandlers ; i++) {
Tcl_Obj *tmpObj, **objv;
@@ -2139,11 +2139,11 @@ TclCompileTryCmd(
TclDecrRefCount(matchClauses[i]);
}
}
- TclStackFree(interp, optionVarIndices);
- TclStackFree(interp, resultVarIndices);
- TclStackFree(interp, matchCodes);
- TclStackFree(interp, matchClauses);
- TclStackFree(interp, handlerTokens);
+ ckfree(optionVarIndices);
+ ckfree(resultVarIndices);
+ ckfree(matchCodes);
+ ckfree(matchClauses);
+ ckfree(handlerTokens);
}
return result;
}
@@ -2219,8 +2219,8 @@ IssueTryInstructions(
* Slight overallocation, but reduces size of this function.
*/
- addrsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers);
- forwardsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers);
+ addrsToFix = ckalloc(sizeof(int)*numHandlers);
+ forwardsToFix = ckalloc(sizeof(int)*numHandlers);
for (i=0 ; i<numHandlers ; i++) {
sprintf(buf, "%d", matchCodes[i]);
@@ -2307,8 +2307,8 @@ IssueTryInstructions(
for (i=0 ; i<numHandlers ; i++) {
FIXJUMP(addrsToFix[i]);
}
- TclStackFree(interp, forwardsToFix);
- TclStackFree(interp, addrsToFix);
+ ckfree(forwardsToFix);
+ ckfree(addrsToFix);
return TCL_OK;
}
@@ -2370,8 +2370,8 @@ IssueTryFinallyInstructions(
* Slight overallocation, but reduces size of this function.
*/
- addrsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers);
- forwardsToFix = TclStackAlloc(interp, sizeof(int)*numHandlers);
+ addrsToFix = ckalloc(sizeof(int)*numHandlers);
+ forwardsToFix = ckalloc(sizeof(int)*numHandlers);
for (i=0 ; i<numHandlers ; i++) {
sprintf(buf, "%d", matchCodes[i]);
@@ -2503,8 +2503,8 @@ IssueTryFinallyInstructions(
for (i=0 ; i<numHandlers-1 ; i++) {
FIXJUMP(addrsToFix[i]);
}
- TclStackFree(interp, forwardsToFix);
- TclStackFree(interp, addrsToFix);
+ ckfree(forwardsToFix);
+ ckfree(addrsToFix);
}
/*
@@ -2900,7 +2900,7 @@ PushVarName(
* assemble the corresponding token.
*/
- elemTokenPtr = TclStackAlloc(interp, sizeof(Tcl_Token));
+ elemTokenPtr = ckalloc(sizeof(Tcl_Token));
allocedTokens = 1;
elemTokenPtr->type = TCL_TOKEN_TEXT;
elemTokenPtr->start = elName;
@@ -2953,7 +2953,7 @@ PushVarName(
* token.
*/
- elemTokenPtr = TclStackAlloc(interp, n * sizeof(Tcl_Token));
+ elemTokenPtr = ckalloc(n * sizeof(Tcl_Token));
allocedTokens = 1;
elemTokenPtr->type = TCL_TOKEN_TEXT;
elemTokenPtr->start = elName;
@@ -3041,7 +3041,7 @@ PushVarName(
varTokenPtr[removedParen].size++;
}
if (allocedTokens) {
- TclStackFree(interp, elemTokenPtr);
+ ckfree(elemTokenPtr);
}
*localIndexPtr = localIndex;
*simpleVarNamePtr = simpleVarName;
diff --git a/generic/tclCompExpr.c b/generic/tclCompExpr.c
index a07d6df..396448b 100644
--- a/generic/tclCompExpr.c
+++ b/generic/tclCompExpr.c
@@ -920,7 +920,7 @@ ParseExpr(
case SCRIPT: {
Tcl_Parse *nestedPtr =
- TclStackAlloc(interp, sizeof(Tcl_Parse));
+ ckalloc(sizeof(Tcl_Parse));
tokenPtr = parsePtr->tokenPtr + parsePtr->numTokens;
tokenPtr->type = TCL_TOKEN_COMMAND;
@@ -955,7 +955,7 @@ ParseExpr(
break;
}
}
- TclStackFree(interp, nestedPtr);
+ ckfree(nestedPtr);
end = start;
start = tokenPtr->start;
scanned = end - start;
@@ -1821,7 +1821,7 @@ Tcl_ParseExpr(
OpNode *opTree = NULL; /* Will point to the tree of operators. */
Tcl_Obj *litList = Tcl_NewObj(); /* List to hold the literals. */
Tcl_Obj *funcList = Tcl_NewObj(); /* List to hold the functon names. */
- Tcl_Parse *exprParsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse));
+ Tcl_Parse *exprParsePtr = ckalloc(sizeof(Tcl_Parse));
/* Holds the Tcl_Tokens of substitutions. */
if (numBytes < 0) {
@@ -1843,7 +1843,7 @@ Tcl_ParseExpr(
}
Tcl_FreeParse(exprParsePtr);
- TclStackFree(interp, exprParsePtr);
+ ckfree(exprParsePtr);
ckfree(opTree);
return code;
}
@@ -2072,7 +2072,7 @@ TclCompileExpr(
OpNode *opTree = NULL; /* Will point to the tree of operators */
Tcl_Obj *litList = Tcl_NewObj(); /* List to hold the literals */
Tcl_Obj *funcList = Tcl_NewObj(); /* List to hold the functon names*/
- Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse));
+ Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse));
/* Holds the Tcl_Tokens of substitutions */
int code = ParseExpr(interp, script, numBytes, &opTree, litList,
@@ -2100,7 +2100,7 @@ TclCompileExpr(
}
Tcl_FreeParse(parsePtr);
- TclStackFree(interp, parsePtr);
+ ckfree(parsePtr);
Tcl_DecrRefCount(funcList);
Tcl_DecrRefCount(litList);
ckfree(opTree);
@@ -2143,7 +2143,7 @@ ExecConstantExprTree(
* bytecode, so there's no need to tend to TIP 280 issues.
*/
- envPtr = TclStackAlloc(interp, sizeof(CompileEnv));
+ envPtr = ckalloc(sizeof(CompileEnv));
TclInitCompileEnv(interp, envPtr, NULL, 0, NULL, 0);
CompileExprTree(interp, nodes, index, litObjvPtr, NULL, NULL, envPtr,
0 /* optimize */);
@@ -2151,7 +2151,7 @@ ExecConstantExprTree(
Tcl_IncrRefCount(byteCodeObj);
TclInitByteCodeObj(byteCodeObj, envPtr);
TclFreeCompileEnv(envPtr);
- TclStackFree(interp, envPtr);
+ ckfree(envPtr);
byteCodePtr = (ByteCode *) byteCodeObj->internalRep.otherValuePtr;
TclNRExecuteByteCode(interp, byteCodePtr);
code = TclNRRunCallbacks(interp, TCL_OK, rootPtr);
@@ -2208,10 +2208,10 @@ CompileExprTree(
switch (nodePtr->lexeme) {
case QUESTION:
- newJump = TclStackAlloc(interp, sizeof(JumpList));
+ newJump = ckalloc(sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
- newJump = TclStackAlloc(interp, sizeof(JumpList));
+ newJump = ckalloc(sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
jumpPtr->depth = envPtr->currStackDepth;
@@ -2219,13 +2219,13 @@ CompileExprTree(
break;
case AND:
case OR:
- newJump = TclStackAlloc(interp, sizeof(JumpList));
+ newJump = ckalloc(sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
- newJump = TclStackAlloc(interp, sizeof(JumpList));
+ newJump = ckalloc(sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
- newJump = TclStackAlloc(interp, sizeof(JumpList));
+ newJump = ckalloc(sizeof(JumpList));
newJump->next = jumpPtr;
jumpPtr = newJump;
jumpPtr->depth = envPtr->currStackDepth;
@@ -2331,10 +2331,10 @@ CompileExprTree(
envPtr->currStackDepth = jumpPtr->depth + 1;
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
- TclStackFree(interp, freePtr);
+ ckfree(freePtr);
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
- TclStackFree(interp, freePtr);
+ ckfree(freePtr);
break;
case AND:
case OR:
@@ -2358,13 +2358,13 @@ CompileExprTree(
envPtr->currStackDepth = jumpPtr->depth + 1;
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
- TclStackFree(interp, freePtr);
+ ckfree(freePtr);
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
- TclStackFree(interp, freePtr);
+ ckfree(freePtr);
freePtr = jumpPtr;
jumpPtr = jumpPtr->next;
- TclStackFree(interp, freePtr);
+ ckfree(freePtr);
break;
default:
TclEmitOpcode(instruction[nodePtr->lexeme], envPtr);
@@ -2541,9 +2541,8 @@ TclSortingOpCmd(
Tcl_SetObjResult(interp, Tcl_NewBooleanObj(1));
} else {
TclOpCmdClientData *occdPtr = clientData;
- Tcl_Obj **litObjv = TclStackAlloc(interp,
- 2 * (objc-2) * sizeof(Tcl_Obj *));
- OpNode *nodes = TclStackAlloc(interp, 2 * (objc-2) * sizeof(OpNode));
+ Tcl_Obj **litObjv = ckalloc(2 * (objc-2) * sizeof(Tcl_Obj *));
+ OpNode *nodes = ckalloc(2 * (objc-2) * sizeof(OpNode));
unsigned char lexeme;
int i, lastAnd = 1;
Tcl_Obj *const *litObjPtrPtr = litObjv;
@@ -2583,8 +2582,8 @@ TclSortingOpCmd(
code = ExecConstantExprTree(interp, nodes, 0, &litObjPtrPtr);
- TclStackFree(interp, nodes);
- TclStackFree(interp, litObjv);
+ ckfree(nodes);
+ ckfree(litObjv);
}
return code;
}
@@ -2670,7 +2669,7 @@ TclVariadicOpCmd(
return code;
} else {
Tcl_Obj *const *litObjv = objv + 1;
- OpNode *nodes = TclStackAlloc(interp, (objc-1) * sizeof(OpNode));
+ OpNode *nodes = ckalloc((objc-1) * sizeof(OpNode));
int i, lastOp = OT_LITERAL;
nodes[0].lexeme = START;
@@ -2703,7 +2702,7 @@ TclVariadicOpCmd(
code = ExecConstantExprTree(interp, nodes, 0, &litObjv);
- TclStackFree(interp, nodes);
+ ckfree(nodes);
return code;
}
}
diff --git a/generic/tclCompile.c b/generic/tclCompile.c
index aed9e3b..4d6bf33 100644
--- a/generic/tclCompile.c
+++ b/generic/tclCompile.c
@@ -1202,7 +1202,7 @@ TclInitCompileEnv(
* ...) which may make change the type as well.
*/
- CmdFrame *ctxPtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ CmdFrame *ctxPtr = ckalloc(sizeof(CmdFrame));
int pc = 0;
*ctxPtr = *invoker;
@@ -1255,7 +1255,7 @@ TclInitCompileEnv(
}
}
- TclStackFree(interp, ctxPtr);
+ ckfree(ctxPtr);
}
envPtr->extCmdMapPtr->start = envPtr->line;
@@ -1461,7 +1461,7 @@ TclCompileScript(
/* TIP #280 */
ExtCmdLoc *eclPtr = envPtr->extCmdMapPtr;
int *wlines, wlineat, cmdLine, *clNext;
- Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse));
+ Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse));
Tcl_DStringInit(&ds);
@@ -1877,7 +1877,7 @@ TclCompileScript(
}
envPtr->numSrcBytes = p - script;
- TclStackFree(interp, parsePtr);
+ ckfree(parsePtr);
Tcl_DStringFree(&ds);
}
diff --git a/generic/tclDictObj.c b/generic/tclDictObj.c
index 3da91a3..4ed3fe6 100644
--- a/generic/tclDictObj.c
+++ b/generic/tclDictObj.c
@@ -2425,14 +2425,14 @@ DictForNRCmd(
TCL_STATIC);
return TCL_ERROR;
}
- searchPtr = TclStackAlloc(interp, sizeof(Tcl_DictSearch));
+ searchPtr = ckalloc(sizeof(Tcl_DictSearch));
if (Tcl_DictObjFirst(interp, objv[2], searchPtr, &keyObj, &valueObj,
&done) != TCL_OK) {
- TclStackFree(interp, searchPtr);
+ ckfree(searchPtr);
return TCL_ERROR;
}
if (done) {
- TclStackFree(interp, searchPtr);
+ ckfree(searchPtr);
return TCL_OK;
}
TclListObjGetElements(NULL, objv[1], &varc, &varv);
@@ -2488,7 +2488,7 @@ DictForNRCmd(
TclDecrRefCount(valueVarObj);
TclDecrRefCount(scriptObj);
Tcl_DictObjDone(searchPtr);
- TclStackFree(interp, searchPtr);
+ ckfree(searchPtr);
return TCL_ERROR;
}
@@ -2574,7 +2574,7 @@ DictForLoopCallback(
TclDecrRefCount(valueVarObj);
TclDecrRefCount(scriptObj);
Tcl_DictObjDone(searchPtr);
- TclStackFree(interp, searchPtr);
+ ckfree(searchPtr);
return result;
}
diff --git a/generic/tclEvent.c b/generic/tclEvent.c
index 78bd7b8..49e8137 100644
--- a/generic/tclEvent.c
+++ b/generic/tclEvent.c
@@ -1032,9 +1032,7 @@ TclInitSubsystems(void)
TclInitThreadStorage(); /* Creates master hash table for
* thread local storage */
-#if USE_TCLALLOC
TclInitAlloc(); /* Process wide mutex init */
-#endif
#ifdef TCL_MEM_DEBUG
TclInitDbCkalloc(); /* Process wide mutex init */
#endif
@@ -1211,9 +1209,7 @@ Tcl_Finalize(void)
* Close down the thread-specific object allocator.
*/
-#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
- TclFinalizeThreadAlloc();
-#endif
+ TclFinalizeAlloc();
/*
* We defer unloading of packages until very late to avoid memory access
diff --git a/generic/tclExecute.c b/generic/tclExecute.c
index 26d3e04..b340144 100644
--- a/generic/tclExecute.c
+++ b/generic/tclExecute.c
@@ -171,19 +171,21 @@ static BuiltinFunc const tclBuiltinFuncTable[] = {
typedef struct TEBCdata {
ByteCode *codePtr; /* Constant until the BC returns */
/* -----------------------------------------*/
+ Tcl_Obj **tosPtr;
const unsigned char *pc; /* These fields are used on return TO this */
- ptrdiff_t *catchTop; /* this level: they record the state when a */
+ int catchDepth; /* this level: they record the state when a */
int cleanup; /* new codePtr was received for NR */
Tcl_Obj *auxObjList; /* execution. */
int checkInterp;
CmdFrame cmdFrame;
+ unsigned int capacity;
void * stack[1]; /* Start of the actual combined catch and obj
* stacks; the struct will be expanded as
* necessary */
} TEBCdata;
#define TEBC_YIELD() \
- esPtr->tosPtr = tosPtr; \
+ TD->tosPtr = tosPtr; \
TD->pc = pc; \
TD->cleanup = cleanup; \
TclNRAddCallback(interp, TEBCresume, TD, \
@@ -192,7 +194,7 @@ typedef struct TEBCdata {
#define TEBC_DATA_DIG() \
pc = TD->pc; \
cleanup = TD->cleanup; \
- tosPtr = esPtr->tosPtr
+ tosPtr = TD->tosPtr
#define PUSH_TAUX_OBJ(objPtr) \
@@ -296,20 +298,6 @@ VarHashCreateVar(
} while (0)
/*
- * Macros used to cache often-referenced Tcl evaluation stack information
- * in local variables. Note that a DECACHE_STACK_INFO()-CACHE_STACK_INFO()
- * pair must surround any call inside TclNRExecuteByteCode (and a few other
- * procedures that use this scheme) that could result in a recursive call
- * to TclNRExecuteByteCode.
- */
-
-#define CACHE_STACK_INFO() \
- checkInterp = 1
-
-#define DECACHE_STACK_INFO() \
- esPtr->tosPtr = tosPtr
-
-/*
* Macros used to access items on the Tcl evaluation stack. PUSH_OBJECT
* increments the object's ref count since it makes the stack have another
* reference pointing to the object. However, POP_OBJECT does not decrement
@@ -683,7 +671,6 @@ static void ValidatePcAndStackTop(ByteCode *codePtr,
int stackLowerBound, int checkStack);
#endif /* TCL_COMPILE_DEBUG */
static ByteCode * CompileExprObj(Tcl_Interp *interp, Tcl_Obj *objPtr);
-static void DeleteExecStack(ExecStack *esPtr);
static void DupExprCodeInternalRep(Tcl_Obj *srcPtr,
Tcl_Obj *copyPtr);
MODULE_SCOPE int TclCompareTwoNumbers(Tcl_Obj *valuePtr,
@@ -699,16 +686,10 @@ static ExceptionRange * GetExceptRangeForPc(const unsigned char *pc,
static const char * GetSrcInfoForPc(const unsigned char *pc,
ByteCode *codePtr, int *lengthPtr,
const unsigned char **pcBeg);
-static Tcl_Obj ** GrowEvaluationStack(ExecEnv *eePtr, int growth,
- int move);
static void IllegalExprOperandType(Tcl_Interp *interp,
const unsigned char *pc, Tcl_Obj *opndPtr);
static void InitByteCodeExecution(Tcl_Interp *interp);
-static inline int OFFSET(void *ptr);
static void ReleaseDictIterator(Tcl_Obj *objPtr);
-/* Useful elsewhere, make available in tclInt.h or stubs? */
-static Tcl_Obj ** StackAllocWords(Tcl_Interp *interp, int numWords);
-static Tcl_Obj ** StackReallocWords(Tcl_Interp *interp, int numWords);
static Tcl_NRPostProc CopyCallback;
static Tcl_NRPostProc ExprObjCallback;
@@ -845,10 +826,7 @@ TclCreateExecEnv(
* [sizeof(Tcl_Obj*)] */
{
ExecEnv *eePtr = ckalloc(sizeof(ExecEnv));
- ExecStack *esPtr = ckalloc(sizeof(ExecStack)
- + (size_t) (size-1) * sizeof(Tcl_Obj *));
- eePtr->execStackPtr = esPtr;
TclNewBooleanObj(eePtr->constants[0], 0);
Tcl_IncrRefCount(eePtr->constants[0]);
TclNewBooleanObj(eePtr->constants[1], 1);
@@ -858,12 +836,6 @@ TclCreateExecEnv(
eePtr->corPtr = NULL;
eePtr->rewind = 0;
- esPtr->prevPtr = NULL;
- esPtr->nextPtr = NULL;
- esPtr->markerPtr = NULL;
- esPtr->endPtr = &esPtr->stackWords[size-1];
- esPtr->tosPtr = &esPtr->stackWords[-1];
-
Tcl_MutexLock(&execMutex);
if (!execInitialized) {
TclInitAuxDataTypeTable();
@@ -892,42 +864,14 @@ TclCreateExecEnv(
*----------------------------------------------------------------------
*/
-static void
-DeleteExecStack(
- ExecStack *esPtr)
-{
- if (esPtr->markerPtr) {
- Tcl_Panic("freeing an execStack which is still in use");
- }
-
- if (esPtr->prevPtr) {
- esPtr->prevPtr->nextPtr = esPtr->nextPtr;
- }
- if (esPtr->nextPtr) {
- esPtr->nextPtr->prevPtr = esPtr->prevPtr;
- }
- ckfree(esPtr);
-}
-
void
TclDeleteExecEnv(
ExecEnv *eePtr) /* Execution environment to free. */
{
- ExecStack *esPtr = eePtr->execStackPtr, *tmpPtr;
-
/*
* Delete all stacks in this exec env.
*/
- while (esPtr->nextPtr) {
- esPtr = esPtr->nextPtr;
- }
- while (esPtr) {
- tmpPtr = esPtr;
- esPtr = tmpPtr->prevPtr;
- DeleteExecStack(tmpPtr);
- }
-
TclDecrRefCount(eePtr->constants[0]);
TclDecrRefCount(eePtr->constants[1]);
if (eePtr->callbackPtr) {
@@ -967,339 +911,6 @@ TclFinalizeExecution(void)
}
/*
- * Auxiliary code to insure that GrowEvaluationStack always returns correctly
- * aligned memory.
- *
- * WALLOCALIGN represents the alignment reqs in words, just as TCL_ALLOCALIGN
- * represents the reqs in bytes. This assumes that TCL_ALLOCALIGN is a
- * multiple of the wordsize 'sizeof(Tcl_Obj *)'.
- */
-
-#define WALLOCALIGN \
- (TCL_ALLOCALIGN/sizeof(Tcl_Obj *))
-
-/*
- * OFFSET computes how many words have to be skipped until the next aligned
- * word. Note that we are only interested in the low order bits of ptr, so
- * that any possible information loss in PTR2INT is of no consequence.
- */
-
-static inline int
-OFFSET(
- void *ptr)
-{
- int mask = TCL_ALLOCALIGN-1;
- int base = PTR2INT(ptr) & mask;
- return (TCL_ALLOCALIGN - base)/sizeof(Tcl_Obj *);
-}
-
-/*
- * Given a marker, compute where the following aligned memory starts.
- */
-
-#define MEMSTART(markerPtr) \
- ((markerPtr) + OFFSET(markerPtr))
-
-/*
- *----------------------------------------------------------------------
- *
- * GrowEvaluationStack --
- *
- * This procedure grows a Tcl evaluation stack stored in an ExecEnv,
- * copying over the words since the last mark if so requested. A mark is
- * set at the beginning of the new area when no copying is requested.
- *
- * Results:
- * Returns a pointer to the first usable word in the (possibly) grown
- * stack.
- *
- * Side effects:
- * The size of the evaluation stack may be grown, a marker is set
- *
- *----------------------------------------------------------------------
- */
-
-static Tcl_Obj **
-GrowEvaluationStack(
- ExecEnv *eePtr, /* Points to the ExecEnv with an evaluation
- * stack to enlarge. */
- int growth, /* How much larger than the current used
- * size. */
- int move) /* 1 if move words since last marker. */
-{
- ExecStack *esPtr = eePtr->execStackPtr, *oldPtr = NULL;
- int newBytes, newElems, currElems;
- int needed = growth - (esPtr->endPtr - esPtr->tosPtr);
- Tcl_Obj **markerPtr = esPtr->markerPtr, **memStart;
- int moveWords = 0;
-
- if (move) {
- if (!markerPtr) {
- Tcl_Panic("STACK: Reallocating with no previous alloc");
- }
- if (needed <= 0) {
- return MEMSTART(markerPtr);
- }
- } else {
- Tcl_Obj **tmpMarkerPtr = esPtr->tosPtr + 1;
- int offset = OFFSET(tmpMarkerPtr);
-
- if (needed + offset < 0) {
- /*
- * Put a marker pointing to the previous marker in this stack, and
- * store it in esPtr as the current marker. Return a pointer to
- * the start of aligned memory.
- */
-
- esPtr->markerPtr = tmpMarkerPtr;
- memStart = tmpMarkerPtr + offset;
- esPtr->tosPtr = memStart - 1;
- *esPtr->markerPtr = (Tcl_Obj *) markerPtr;
- return memStart;
- }
- }
-
- /*
- * Reset move to hold the number of words to be moved to new stack (if
- * any) and growth to hold the complete stack requirements: add one for
- * the marker, (WALLOCALIGN-1) for the maximal possible offset.
- */
-
- if (move) {
- moveWords = esPtr->tosPtr - MEMSTART(markerPtr) + 1;
- }
- needed = growth + moveWords + WALLOCALIGN;
-
- /*
- * Check if there is enough room in the next stack (if there is one, it
- * should be both empty and the last one!)
- */
-
- if (esPtr->nextPtr) {
- oldPtr = esPtr;
- esPtr = oldPtr->nextPtr;
- currElems = esPtr->endPtr - &esPtr->stackWords[-1];
- if (esPtr->markerPtr || (esPtr->tosPtr != &esPtr->stackWords[-1])) {
- Tcl_Panic("STACK: Stack after current is in use");
- }
- if (esPtr->nextPtr) {
- Tcl_Panic("STACK: Stack after current is not last");
- }
- if (needed <= currElems) {
- goto newStackReady;
- }
- DeleteExecStack(esPtr);
- esPtr = oldPtr;
- } else {
- currElems = esPtr->endPtr - &esPtr->stackWords[-1];
- }
-
- /*
- * We need to allocate a new stack! It needs to store 'growth' words,
- * including the elements to be copied over and the new marker.
- */
-
- newElems = 2*currElems;
- while (needed > newElems) {
- newElems *= 2;
- }
- newBytes = sizeof(ExecStack) + (newElems-1) * sizeof(Tcl_Obj *);
-
- oldPtr = esPtr;
- esPtr = ckalloc(newBytes);
-
- oldPtr->nextPtr = esPtr;
- esPtr->prevPtr = oldPtr;
- esPtr->nextPtr = NULL;
- esPtr->endPtr = &esPtr->stackWords[newElems-1];
-
- newStackReady:
- eePtr->execStackPtr = esPtr;
-
- /*
- * Store a NULL marker at the beginning of the stack, to indicate that
- * this is the first marker in this stack and that rewinding to here
- * should actually be a return to the previous stack.
- */
-
- esPtr->stackWords[0] = NULL;
- esPtr->markerPtr = &esPtr->stackWords[0];
- memStart = MEMSTART(esPtr->markerPtr);
- esPtr->tosPtr = memStart - 1;
-
- if (move) {
- memcpy(memStart, MEMSTART(markerPtr), moveWords*sizeof(Tcl_Obj *));
- esPtr->tosPtr += moveWords;
- oldPtr->markerPtr = (Tcl_Obj **) *markerPtr;
- oldPtr->tosPtr = markerPtr-1;
- }
-
- /*
- * Free the old stack if it is now unused.
- */
-
- if (!oldPtr->markerPtr) {
- DeleteExecStack(oldPtr);
- }
-
- return memStart;
-}
-
-/*
- *--------------------------------------------------------------
- *
- * TclStackAlloc, TclStackRealloc, TclStackFree --
- *
- * Allocate memory from the execution stack; it has to be returned later
- * with a call to TclStackFree.
- *
- * Results:
- * A pointer to the first byte allocated, or panics if the allocation did
- * not succeed.
- *
- * Side effects:
- * The execution stack may be grown.
- *
- *--------------------------------------------------------------
- */
-
-static Tcl_Obj **
-StackAllocWords(
- Tcl_Interp *interp,
- int numWords)
-{
- /*
- * Note that GrowEvaluationStack sets a marker in the stack. This marker
- * is read when rewinding, e.g., by TclStackFree.
- */
-
- Interp *iPtr = (Interp *) interp;
- ExecEnv *eePtr = iPtr->execEnvPtr;
- Tcl_Obj **resPtr = GrowEvaluationStack(eePtr, numWords, 0);
-
- eePtr->execStackPtr->tosPtr += numWords;
- return resPtr;
-}
-
-static Tcl_Obj **
-StackReallocWords(
- Tcl_Interp *interp,
- int numWords)
-{
- Interp *iPtr = (Interp *) interp;
- ExecEnv *eePtr = iPtr->execEnvPtr;
- Tcl_Obj **resPtr = GrowEvaluationStack(eePtr, numWords, 1);
-
- eePtr->execStackPtr->tosPtr += numWords;
- return resPtr;
-}
-
-void
-TclStackFree(
- Tcl_Interp *interp,
- void *freePtr)
-{
- Interp *iPtr = (Interp *) interp;
- ExecEnv *eePtr;
- ExecStack *esPtr;
- Tcl_Obj **markerPtr, *marker;
-
- if (iPtr == NULL || iPtr->execEnvPtr == NULL) {
- Tcl_Free((char *) freePtr);
- return;
- }
-
- /*
- * Rewind the stack to the previous marker position. The current marker,
- * as set in the last call to GrowEvaluationStack, contains a pointer to
- * the previous marker.
- */
-
- eePtr = iPtr->execEnvPtr;
- esPtr = eePtr->execStackPtr;
- markerPtr = esPtr->markerPtr;
- marker = *markerPtr;
-
- if ((freePtr != NULL) && (MEMSTART(markerPtr) != (Tcl_Obj **)freePtr)) {
- Tcl_Panic("TclStackFree: incorrect freePtr (%p != %p). Call out of sequence?",
- freePtr, MEMSTART(markerPtr));
- }
-
- esPtr->tosPtr = markerPtr - 1;
- esPtr->markerPtr = (Tcl_Obj **) marker;
- if (marker) {
- return;
- }
-
- /*
- * Return to previous active stack. Note that repeated expansions or
- * reallocs could have generated several unused intervening stacks: free
- * them too.
- */
-
- while (esPtr->nextPtr) {
- esPtr = esPtr->nextPtr;
- }
- esPtr->tosPtr = &esPtr->stackWords[-1];
- while (esPtr->prevPtr) {
- ExecStack *tmpPtr = esPtr->prevPtr;
- if (tmpPtr->tosPtr == &tmpPtr->stackWords[-1]) {
- DeleteExecStack(tmpPtr);
- } else {
- break;
- }
- }
- if (esPtr->prevPtr) {
- eePtr->execStackPtr = esPtr->prevPtr;
- } else {
- eePtr->execStackPtr = esPtr;
- }
-}
-
-void *
-TclStackAlloc(
- Tcl_Interp *interp,
- int numBytes)
-{
- Interp *iPtr = (Interp *) interp;
- int numWords = (numBytes + (sizeof(Tcl_Obj *) - 1))/sizeof(Tcl_Obj *);
-
- if (iPtr == NULL || iPtr->execEnvPtr == NULL) {
- return (void *) Tcl_Alloc(numBytes);
- }
-
- return (void *) StackAllocWords(interp, numWords);
-}
-
-void *
-TclStackRealloc(
- Tcl_Interp *interp,
- void *ptr,
- int numBytes)
-{
- Interp *iPtr = (Interp *) interp;
- ExecEnv *eePtr;
- ExecStack *esPtr;
- Tcl_Obj **markerPtr;
- int numWords;
-
- if (iPtr == NULL || iPtr->execEnvPtr == NULL) {
- return (void *) Tcl_Realloc((char *) ptr, numBytes);
- }
-
- eePtr = iPtr->execEnvPtr;
- esPtr = eePtr->execStackPtr;
- markerPtr = esPtr->markerPtr;
-
- if (MEMSTART(markerPtr) != (Tcl_Obj **)ptr) {
- Tcl_Panic("TclStackRealloc: incorrect ptr. Call out of sequence?");
- }
-
- numWords = (numBytes + (sizeof(Tcl_Obj *) - 1))/sizeof(Tcl_Obj *);
- return (void *) StackReallocWords(interp, numWords);
-}
-
-/*
*--------------------------------------------------------------
*
* Tcl_ExprObj --
@@ -1697,7 +1308,7 @@ TclCompileObj(
int redo = 0;
if (invoker) {
- CmdFrame *ctxPtr = TclStackAlloc(interp,sizeof(CmdFrame));
+ CmdFrame *ctxPtr = ckalloc(sizeof(CmdFrame));
*ctxPtr = *invoker;
if (invoker->type == TCL_LOCATION_BC) {
@@ -1736,7 +1347,7 @@ TclCompileObj(
&& (ctxPtr->type == TCL_LOCATION_SOURCE));
}
- TclStackFree(interp, ctxPtr);
+ ckfree(ctxPtr);
}
if (redo) {
@@ -1921,9 +1532,8 @@ TclIncrObj(
*----------------------------------------------------------------------
*/
#define bcFramePtr (&TD->cmdFrame)
-#define initCatchTop ((ptrdiff_t *) (&TD->stack[-1]))
-#define initTosPtr ((Tcl_Obj **) (initCatchTop+codePtr->maxExceptDepth))
-#define esPtr (iPtr->execEnvPtr->execStackPtr)
+#define catchStack (TD->stack)
+#define initTosPtr ((Tcl_Obj **) &TD->stack[codePtr->maxExceptDepth - 1])
int
TclNRExecuteByteCode(
@@ -1932,10 +1542,8 @@ TclNRExecuteByteCode(
{
Interp *iPtr = (Interp *) interp;
TEBCdata *TD;
- int size = sizeof(TEBCdata) -1 +
- + (codePtr->maxStackDepth + codePtr->maxExceptDepth)
- *(sizeof(void *));
- int numWords = (size + sizeof(Tcl_Obj *) - 1)/sizeof(Tcl_Obj *);
+ unsigned int size = sizeof(TEBCdata) + sizeof(void *) *
+ (codePtr->maxStackDepth + codePtr->maxExceptDepth - 1);
if (iPtr->execEnvPtr->rewind) {
return TCL_ERROR;
@@ -1955,15 +1563,16 @@ TclNRExecuteByteCode(
* execution stack is large enough to execute this ByteCode.
*/
- TD = (TEBCdata *) GrowEvaluationStack(iPtr->execEnvPtr, numWords, 0);
- esPtr->tosPtr = initTosPtr;
+ TD = ckalloc(size);
+ TD->tosPtr = initTosPtr;
TD->codePtr = codePtr;
TD->pc = codePtr->codeStart;
- TD->catchTop = initCatchTop;
+ TD->catchDepth = -1;
TD->cleanup = 0;
TD->auxObjList = NULL;
TD->checkInterp = 0;
+ TD->capacity = codePtr->maxStackDepth;
/*
* TIP #280: Initialize the frame. Do not push it yet: it will be pushed
@@ -2048,11 +1657,11 @@ TEBCresume(
TEBCdata *TD = data[0];
#define auxObjList (TD->auxObjList)
-#define catchTop (TD->catchTop)
+#define catchDepth (TD->catchDepth)
#define codePtr (TD->codePtr)
#define checkInterp (TD->checkInterp)
/* Indicates when a check of interp readyness
- * is necessary. Set by CACHE_STACK_INFO() */
+ * is necessary. Set by checkInterp = 1 */
/*
* Globals: variables that store state, must remain valid at all times.
@@ -2113,7 +1722,7 @@ TEBCresume(
codePtr->flags &= ~TCL_BYTECODE_RECOMPILE;
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
if (result == TCL_OK) {
#ifndef TCL_COMPILE_DEBUG
if (*pc == INST_POP) {
@@ -2253,29 +1862,28 @@ TEBCresume(
*/
if ((instructionCount++ & ASYNC_CHECK_COUNT_MASK) == 0) {
- DECACHE_STACK_INFO();
if (TclAsyncReady(iPtr)) {
result = Tcl_AsyncInvoke(interp, result);
if (result == TCL_ERROR) {
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
}
if (TclCanceled(iPtr)) {
if (Tcl_Canceled(interp, TCL_LEAVE_ERR_MSG) == TCL_ERROR) {
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
}
if (TclLimitReady(iPtr->limit)) {
if (Tcl_LimitCheck(interp) == TCL_ERROR) {
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
TCL_DTRACE_INST_NEXT();
@@ -2643,7 +2251,7 @@ TEBCresume(
case INST_EXPAND_STKTOP: {
int i;
- ptrdiff_t moved;
+ unsigned int reqWords;
/*
* Make sure that the element at stackTop is a list; if not, just
@@ -2657,7 +2265,6 @@ TEBCresume(
Tcl_GetObjResult(interp));
goto gotError;
}
- (void) POP_OBJECT();
/*
* Make sure there is enough room in the stack to expand this list
@@ -2666,24 +2273,26 @@ TEBCresume(
* stack depth, as seen by the compiler.
*/
- length = objc + (codePtr->maxStackDepth - TclGetInt4AtPtr(pc+1));
- DECACHE_STACK_INFO();
- moved = GrowEvaluationStack(iPtr->execEnvPtr, length, 1)
- - (Tcl_Obj **) TD;
- if (moved) {
- /*
- * Change the global data to point to the new stack: move the
- * TEBCdataPtr TD, recompute the position of every other
- * stack-allocated parameter, update the stack pointers.
- */
-
- esPtr = iPtr->execEnvPtr->execStackPtr;
- TD = (TEBCdata *) (((Tcl_Obj **)TD) + moved);
+ reqWords =
+ /* how many were needed originally */
+ codePtr->maxStackDepth
+ /* plus how many we already consumed in previous expansions */
+ + (CURR_DEPTH - TclGetInt4AtPtr(pc+1))
+ /* plus how many are needed for this expansion */
+ + objc - 1;
- catchTop += moved;
- tosPtr += moved;
+ (void) POP_OBJECT();
+ if (reqWords > TD->capacity) {
+ ptrdiff_t depth;
+ unsigned int size = sizeof(TEBCdata) + sizeof(void *) *
+ + (reqWords + codePtr->maxExceptDepth - 1);
+
+ depth = tosPtr - initTosPtr;
+ TD = ckrealloc(TD, size);
+ tosPtr = initTosPtr + depth;
+ TD->capacity = reqWords;
}
-
+
/*
* Expand the list at stacktop onto the stack; free the list. Knowing
* that it has a freeIntRepProc we use Tcl_DecrRefCount().
@@ -2702,9 +2311,8 @@ TEBCresume(
bcFramePtr->data.tebc.pc = (char *) pc;
iPtr->cmdFramePtr = bcFramePtr;
- DECACHE_STACK_INFO();
newCodePtr = CompileExprObj(interp, OBJ_AT_TOS);
- CACHE_STACK_INFO();
+ checkInterp = 1;
cleanup = 1;
pc++;
TEBC_YIELD();
@@ -2790,8 +2398,6 @@ TEBCresume(
codePtr, bcFramePtr, pc - codePtr->codeStart);
}
- DECACHE_STACK_INFO();
-
pc += pcAdjustment;
TEBC_YIELD();
return TclNREvalObjv(interp, objc, objv,
@@ -3016,10 +2622,9 @@ TEBCresume(
* TclPtrGetVar to process fully.
*/
- DECACHE_STACK_INFO();
objResultPtr = TclPtrGetVar(interp, varPtr, arrayPtr,
part1Ptr, part2Ptr, TCL_LEAVE_ERR_MSG, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
if (!objResultPtr) {
TRACE_APPEND(("ERROR: %.30s\n", O2S(Tcl_GetObjResult(interp))));
goto gotError;
@@ -3263,10 +2868,9 @@ TEBCresume(
part1Ptr = part2Ptr = NULL;
doCallPtrSetVar:
- DECACHE_STACK_INFO();
objResultPtr = TclPtrSetVar(interp, varPtr, arrayPtr,
part1Ptr, part2Ptr, valuePtr, storeFlags, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
if (!objResultPtr) {
TRACE_APPEND(("ERROR: %.30s\n", O2S(Tcl_GetObjResult(interp))));
goto gotError;
@@ -3527,10 +3131,9 @@ TEBCresume(
}
Tcl_DecrRefCount(incrPtr);
} else {
- DECACHE_STACK_INFO();
objResultPtr = TclPtrIncrObjVar(interp, varPtr, arrayPtr,
part1Ptr, part2Ptr, incrPtr, TCL_LEAVE_ERR_MSG, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
Tcl_DecrRefCount(incrPtr);
if (objResultPtr == NULL) {
TRACE_APPEND(("ERROR: %.30s\n",
@@ -3562,10 +3165,9 @@ TEBCresume(
}
TRACE(("%u => ", opnd));
if (ReadTraced(varPtr)) {
- DECACHE_STACK_INFO();
TclObjCallVarTraces(iPtr, NULL, varPtr, NULL, NULL,
TCL_TRACE_READS, 0, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
if (TclIsVarUndefined(varPtr)) {
TclCleanupVar(varPtr, NULL);
varPtr = NULL;
@@ -3598,10 +3200,9 @@ TEBCresume(
0, 1, arrayPtr, opnd);
if (varPtr) {
if (ReadTraced(varPtr) || (arrayPtr && ReadTraced(arrayPtr))) {
- DECACHE_STACK_INFO();
TclObjCallVarTraces(iPtr, arrayPtr, varPtr, NULL, part2Ptr,
TCL_TRACE_READS, 0, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
if (TclIsVarUndefined(varPtr)) {
TclCleanupVar(varPtr, arrayPtr);
@@ -3631,10 +3232,9 @@ TEBCresume(
/*createPart1*/0, /*createPart2*/1, &arrayPtr);
if (varPtr) {
if (ReadTraced(varPtr) || (arrayPtr && ReadTraced(arrayPtr))) {
- DECACHE_STACK_INFO();
TclObjCallVarTraces(iPtr, arrayPtr, varPtr, part1Ptr,part2Ptr,
TCL_TRACE_READS, 0, -1);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
if (TclIsVarUndefined(varPtr)) {
TclCleanupVar(varPtr, arrayPtr);
@@ -3678,12 +3278,11 @@ TEBCresume(
}
slowUnsetScalar:
- DECACHE_STACK_INFO();
if (TclPtrUnsetVar(interp, varPtr, NULL, NULL, NULL, flags,
opnd) != TCL_OK && flags) {
goto errorInUnset;
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
NEXT_INST_F(6, 0, 0);
case INST_UNSET_ARRAY:
@@ -3720,7 +3319,6 @@ TEBCresume(
}
}
slowUnsetArray:
- DECACHE_STACK_INFO();
varPtr = TclLookupArrayElement(interp, NULL, part2Ptr, flags, "unset",
0, 0, arrayPtr, opnd);
if (!varPtr) {
@@ -3731,7 +3329,7 @@ TEBCresume(
flags, opnd) != TCL_OK && (flags & TCL_LEAVE_ERR_MSG)) {
goto errorInUnset;
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
NEXT_INST_F(6, 1, 0);
case INST_UNSET_ARRAY_STK:
@@ -3751,16 +3349,15 @@ TEBCresume(
TRACE(("%s \"%.30s\"\n", (flags?"normal":"noerr"), O2S(part1Ptr)));
doUnsetStk:
- DECACHE_STACK_INFO();
if (TclObjUnsetVar2(interp, part1Ptr, part2Ptr, flags) != TCL_OK
&& (flags & TCL_LEAVE_ERR_MSG)) {
goto errorInUnset;
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
NEXT_INST_V(2, cleanup, 0);
errorInUnset:
- CACHE_STACK_INFO();
+ checkInterp = 1;
TRACE_APPEND(("ERROR: %.30s\n", O2S(Tcl_GetObjResult(interp))));
goto gotError;
@@ -3781,9 +3378,8 @@ TEBCresume(
}
varPtr->value.objPtr = NULL;
} else {
- DECACHE_STACK_INFO();
TclPtrUnsetVar(interp, varPtr, NULL, NULL, NULL, 0, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
NEXT_INST_F(5, 0, 0);
}
@@ -4024,18 +3620,16 @@ TEBCresume(
if (TclGetBooleanFromObj(NULL, valuePtr, &i1) != TCL_OK) {
TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
if (TclGetBooleanFromObj(NULL, value2Ptr, &i2) != TCL_OK) {
TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(value2Ptr),
(value2Ptr->typePtr? value2Ptr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, value2Ptr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
@@ -4812,9 +4406,8 @@ TEBCresume(
TRACE(("%.20s %.20s => ILLEGAL 1st TYPE %s\n", O2S(valuePtr),
O2S(value2Ptr), (valuePtr->typePtr?
valuePtr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
@@ -4823,9 +4416,8 @@ TEBCresume(
TRACE(("%.20s %.20s => ILLEGAL 2nd TYPE %s\n", O2S(valuePtr),
O2S(value2Ptr), (value2Ptr->typePtr?
value2Ptr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, value2Ptr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
@@ -4883,11 +4475,10 @@ TEBCresume(
Tcl_SetResult(interp, "negative shift argument",
TCL_STATIC);
#if 0
- DECACHE_STACK_INFO();
Tcl_SetErrorCode(interp, "ARITH", "DOMAIN",
"domain error: argument not in valid range",
NULL);
- CACHE_STACK_INFO();
+ checkInterp = 1;
#endif
goto gotError;
} else if (l1 == 0) {
@@ -4931,11 +4522,10 @@ TEBCresume(
Tcl_SetResult(interp, "negative shift argument",
TCL_STATIC);
#if 0
- DECACHE_STACK_INFO();
Tcl_SetErrorCode(interp, "ARITH", "DOMAIN",
"domain error: argument not in valid range",
NULL);
- CACHE_STACK_INFO();
+ checkInterp = 1;
#endif
goto gotError;
} else if (l1 == 0) {
@@ -4955,10 +4545,9 @@ TEBCresume(
"integer value too large to represent",
TCL_STATIC);
#if 0
- DECACHE_STACK_INFO();
Tcl_SetErrorCode(interp, "ARITH", "IOVERFLOW",
"integer value too large to represent", NULL);
- CACHE_STACK_INFO();
+ checkInterp = 1;
#endif
goto gotError;
} else {
@@ -5041,9 +4630,8 @@ TEBCresume(
TRACE(("%.20s %.20s => ILLEGAL 1st TYPE %s\n",
O2S(value2Ptr), O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name: "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
@@ -5062,9 +4650,8 @@ TEBCresume(
TRACE(("%.20s %.20s => ILLEGAL 2nd TYPE %s\n",
O2S(value2Ptr), O2S(valuePtr),
(value2Ptr->typePtr? value2Ptr->typePtr->name: "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, value2Ptr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
@@ -5211,9 +4798,8 @@ TEBCresume(
if (TclGetBooleanFromObj(NULL, valuePtr, &b) != TCL_OK) {
TRACE(("\"%.20s\" => ILLEGAL TYPE %s\n", O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
/* TODO: Consider peephole opt. */
@@ -5231,9 +4817,8 @@ TEBCresume(
TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
if (type1 == TCL_NUMBER_LONG) {
@@ -5258,9 +4843,8 @@ TEBCresume(
|| IsErroringNaNType(type1)) {
TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name : "null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
switch (type1) {
@@ -5304,9 +4888,8 @@ TEBCresume(
TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name:"null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
@@ -5322,9 +4905,8 @@ TEBCresume(
TRACE(("\"%.20s\" => ILLEGAL TYPE %s \n", O2S(valuePtr),
(valuePtr->typePtr? valuePtr->typePtr->name:"null")));
- DECACHE_STACK_INFO();
IllegalExprOperandType(interp, pc, valuePtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
} else {
/*
* Numeric conversion of NaN -> error.
@@ -5332,9 +4914,8 @@ TEBCresume(
TRACE(("\"%.20s\" => IEEE FLOATING PT ERROR\n",
O2S(objResultPtr)));
- DECACHE_STACK_INFO();
TclExprFloatError(interp, *((const double *) ptr1));
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
goto gotError;
}
@@ -5379,9 +4960,8 @@ TEBCresume(
case INST_BREAK:
/*
- DECACHE_STACK_INFO();
Tcl_ResetResult(interp);
- CACHE_STACK_INFO();
+ checkInterp = 1;
*/
result = TCL_BREAK;
cleanup = 0;
@@ -5389,9 +4969,8 @@ TEBCresume(
case INST_CONTINUE:
/*
- DECACHE_STACK_INFO();
Tcl_ResetResult(interp);
- CACHE_STACK_INFO();
+ checkInterp = 1;
*/
result = TCL_CONTINUE;
cleanup = 0;
@@ -5524,17 +5103,16 @@ TEBCresume(
Tcl_IncrRefCount(valuePtr);
}
} else {
- DECACHE_STACK_INFO();
if (TclPtrSetVar(interp, varPtr, NULL, NULL, NULL,
valuePtr, TCL_LEAVE_ERR_MSG, varIndex)==NULL){
- CACHE_STACK_INFO();
+ checkInterp = 1;
TRACE_WITH_OBJ((
"%u => ERROR init. index temp %d: ",
opnd,varIndex), Tcl_GetObjResult(interp));
TclDecrRefCount(listPtr);
goto gotError;
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
valIndex++;
}
@@ -5566,19 +5144,18 @@ TEBCresume(
* stack.
*/
- *(++catchTop) = CURR_DEPTH;
- TRACE(("%u => catchTop=%d, stackTop=%d\n",
- TclGetUInt4AtPtr(pc+1), (int) (catchTop - initCatchTop - 1),
+ catchStack[++catchDepth] = INT2PTR(CURR_DEPTH);
+ TRACE(("%u => catchDepth=%d, stackTop=%d\n",
+ TclGetUInt4AtPtr(pc+1), (int) (catchDepth),
(int) CURR_DEPTH));
NEXT_INST_F(5, 0, 0);
case INST_END_CATCH:
- catchTop--;
- DECACHE_STACK_INFO();
+ catchDepth--;
Tcl_ResetResult(interp);
- CACHE_STACK_INFO();
+ checkInterp = 1;
result = TCL_OK;
- TRACE(("=> catchTop=%d\n", (int) (catchTop - initCatchTop - 1)));
+ TRACE(("=> catchDepth=%d\n", (int) (catchDepth)));
NEXT_INST_F(1, 0, 0);
case INST_PUSH_RESULT:
@@ -5600,9 +5177,8 @@ TEBCresume(
NEXT_INST_F(1, 0, 1);
case INST_PUSH_RETURN_OPTIONS:
- DECACHE_STACK_INFO();
objResultPtr = Tcl_GetReturnOptions(interp, result);
- CACHE_STACK_INFO();
+ checkInterp = 1;
TRACE_WITH_OBJ(("=> "), objResultPtr);
NEXT_INST_F(1, 0, 1);
@@ -5654,13 +5230,12 @@ TEBCresume(
TRACE_APPEND(("%.30s\n", O2S(objResultPtr)));
NEXT_INST_V(5, opnd+1, 1);
}
- DECACHE_STACK_INFO();
Tcl_ResetResult(interp);
Tcl_AppendResult(interp, "key \"", TclGetString(OBJ_AT_TOS),
"\" not known in dictionary", NULL);
Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "DICT",
TclGetString(OBJ_AT_TOS), NULL);
- CACHE_STACK_INFO();
+ checkInterp = 1;
TRACE_WITH_OBJ(("%u => ERROR ", opnd), Tcl_GetObjResult(interp));
} else {
TRACE_WITH_OBJ((
@@ -5683,9 +5258,8 @@ TEBCresume(
if (TclIsVarDirectReadable(varPtr)) {
dictPtr = varPtr->value.objPtr;
} else {
- DECACHE_STACK_INFO();
dictPtr = TclPtrGetVar(interp, varPtr, NULL,NULL,NULL, 0, opnd2);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
if (dictPtr == NULL) {
TclNewObj(dictPtr);
@@ -5757,10 +5331,9 @@ TEBCresume(
objResultPtr = dictPtr;
} else {
Tcl_IncrRefCount(dictPtr);
- DECACHE_STACK_INFO();
objResultPtr = TclPtrSetVar(interp, varPtr, NULL, NULL, NULL,
dictPtr, TCL_LEAVE_ERR_MSG, opnd2);
- CACHE_STACK_INFO();
+ checkInterp = 1;
TclDecrRefCount(dictPtr);
if (objResultPtr == NULL) {
TRACE_APPEND(("ERROR: %.30s\n",
@@ -5787,9 +5360,8 @@ TEBCresume(
if (TclIsVarDirectReadable(varPtr)) {
dictPtr = varPtr->value.objPtr;
} else {
- DECACHE_STACK_INFO();
dictPtr = TclPtrGetVar(interp, varPtr, NULL, NULL, NULL, 0, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
if (dictPtr == NULL) {
TclNewObj(dictPtr);
@@ -5893,10 +5465,9 @@ TEBCresume(
objResultPtr = dictPtr;
} else {
Tcl_IncrRefCount(dictPtr);
- DECACHE_STACK_INFO();
objResultPtr = TclPtrSetVar(interp, varPtr, NULL, NULL, NULL,
dictPtr, TCL_LEAVE_ERR_MSG, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
TclDecrRefCount(dictPtr);
if (objResultPtr == NULL) {
TRACE_APPEND(("ERROR: %.30s\n",
@@ -5998,10 +5569,9 @@ TEBCresume(
if (TclIsVarDirectReadable(varPtr)) {
dictPtr = varPtr->value.objPtr;
} else {
- DECACHE_STACK_INFO();
dictPtr = TclPtrGetVar(interp, varPtr, NULL, NULL, NULL,
TCL_LEAVE_ERR_MSG, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
if (dictPtr == NULL) {
goto gotError;
}
@@ -6022,7 +5592,6 @@ TEBCresume(
while (TclIsVarLink(varPtr)) {
varPtr = varPtr->value.linkPtr;
}
- DECACHE_STACK_INFO();
if (valuePtr == NULL) {
TclObjUnsetVar2(interp,
localName(iPtr->varFramePtr, duiPtr->varIndices[i]),
@@ -6030,10 +5599,10 @@ TEBCresume(
} else if (TclPtrSetVar(interp, varPtr, NULL, NULL, NULL,
valuePtr, TCL_LEAVE_ERR_MSG,
duiPtr->varIndices[i]) == NULL) {
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
}
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
NEXT_INST_F(9, 0, 0);
@@ -6049,9 +5618,8 @@ TEBCresume(
if (TclIsVarDirectReadable(varPtr)) {
dictPtr = varPtr->value.objPtr;
} else {
- DECACHE_STACK_INFO();
dictPtr = TclPtrGetVar(interp, varPtr, NULL, NULL, NULL, 0, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
if (dictPtr == NULL) {
NEXT_INST_F(9, 1, 0);
@@ -6077,10 +5645,9 @@ TEBCresume(
if (TclIsVarDirectReadable(var2Ptr)) {
valuePtr = var2Ptr->value.objPtr;
} else {
- DECACHE_STACK_INFO();
valuePtr = TclPtrGetVar(interp, var2Ptr, NULL, NULL, NULL, 0,
duiPtr->varIndices[i]);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
if (valuePtr == NULL) {
Tcl_DictObjRemove(interp, dictPtr, keyPtrPtr[i]);
@@ -6096,10 +5663,9 @@ TEBCresume(
TclDecrRefCount(varPtr->value.objPtr);
varPtr->value.objPtr = dictPtr;
} else {
- DECACHE_STACK_INFO();
objResultPtr = TclPtrSetVar(interp, varPtr, NULL, NULL, NULL,
dictPtr, TCL_LEAVE_ERR_MSG, opnd);
- CACHE_STACK_INFO();
+ checkInterp = 1;
if (objResultPtr == NULL) {
if (allocdict) {
TclDecrRefCount(dictPtr);
@@ -6215,10 +5781,9 @@ TEBCresume(
*/
divideByZero:
- DECACHE_STACK_INFO();
Tcl_SetResult(interp, "divide by zero", TCL_STATIC);
Tcl_SetErrorCode(interp, "ARITH", "DIVZERO", "divide by zero", NULL);
- CACHE_STACK_INFO();
+ checkInterp = 1;
goto gotError;
/*
@@ -6227,12 +5792,11 @@ TEBCresume(
*/
exponOfZero:
- DECACHE_STACK_INFO();
Tcl_SetResult(interp, "exponentiation of zero by negative power",
TCL_STATIC);
Tcl_SetErrorCode(interp, "ARITH", "DOMAIN",
"exponentiation of zero by negative power", NULL);
- CACHE_STACK_INFO();
+ checkInterp = 1;
/*
* Almost all error paths feed through here rather than assigning to
@@ -6258,9 +5822,8 @@ TEBCresume(
const unsigned char *pcBeg;
bytes = GetSrcInfoForPc(pc, codePtr, &length, &pcBeg);
- DECACHE_STACK_INFO();
TclLogCommandInfo(interp, codePtr->source, bytes, bytes ? length : 0, pcBeg, tosPtr);
- CACHE_STACK_INFO();
+ checkInterp = 1;
}
iPtr->flags &= ~ERR_ALREADY_LOGGED;
@@ -6270,8 +5833,8 @@ TEBCresume(
*/
while (auxObjList) {
- if ((catchTop != initCatchTop) && (*catchTop >
- (ptrdiff_t) auxObjList->internalRep.twoPtrValue.ptr1)) {
+ if ((catchDepth >=0) && (PTR2INT(catchStack[catchDepth]) >
+ PTR2INT(auxObjList->internalRep.twoPtrValue.ptr1))) {
break;
}
POP_TAUX_OBJ();
@@ -6311,7 +5874,7 @@ TEBCresume(
#endif
goto abnormalReturn;
}
- if (catchTop == initCatchTop) {
+ if (catchDepth == -1) {
#ifdef TCL_COMPILE_DEBUG
if (traceInstructions) {
fprintf(stdout, " ... no enclosing catch, returning %s\n",
@@ -6346,16 +5909,16 @@ TEBCresume(
*/
processCatch:
- while (CURR_DEPTH > *catchTop) {
+ while (CURR_DEPTH > PTR2INT(catchStack[catchDepth])) {
valuePtr = POP_OBJECT();
TclDecrRefCount(valuePtr);
}
#ifdef TCL_COMPILE_DEBUG
if (traceInstructions) {
- fprintf(stdout, " ... found catch at %d, catchTop=%d, "
+ fprintf(stdout, " ... found catch at %d, catchDepth=%d, "
"unwound to %ld, new pc %u\n",
- rangePtr->codeOffset, (int) (catchTop - initCatchTop - 1),
- (long) *catchTop, (unsigned) rangePtr->catchOffset);
+ rangePtr->codeOffset, (int) catchDepth,
+ PTR2INT(catchStack[catchDepth]), (unsigned) rangePtr->catchOffset);
}
#endif
pc = (codePtr->codeStart + rangePtr->catchOffset);
@@ -6404,7 +5967,7 @@ TEBCresume(
if (--codePtr->refCount <= 0) {
TclCleanupByteCode(codePtr);
}
- TclStackFree(interp, TD); /* free my stack */
+ ckfree(TD); /* free my stack */
return result;
}
@@ -6412,10 +5975,9 @@ TEBCresume(
#undef codePtr
#undef iPtr
#undef bcFramePtr
-#undef initCatchTop
#undef initTosPtr
#undef auxObjList
-#undef catchTop
+#undef catchDepth
#undef TCONST
/*
diff --git a/generic/tclFCmd.c b/generic/tclFCmd.c
index 6d3c013..52ad278 100644
--- a/generic/tclFCmd.c
+++ b/generic/tclFCmd.c
@@ -999,7 +999,7 @@ TclFileAttrsCmd(
goto end;
}
attributeStringsAllocated = (const char **)
- TclStackAlloc(interp, (1+numObjStrings) * sizeof(char *));
+ ckalloc((1+numObjStrings) * sizeof(char *));
for (index = 0; index < numObjStrings; index++) {
Tcl_ListObjIndex(interp, objStrings, index, &objPtr);
attributeStringsAllocated[index] = TclGetString(objPtr);
@@ -1110,7 +1110,7 @@ TclFileAttrsCmd(
* Free up the array we allocated.
*/
- TclStackFree(interp, (void *) attributeStringsAllocated);
+ ckfree((void *) attributeStringsAllocated);
/*
* We don't need this object that was passed to us any more.
diff --git a/generic/tclFileName.c b/generic/tclFileName.c
index d53c271..eff1010 100644
--- a/generic/tclFileName.c
+++ b/generic/tclFileName.c
@@ -1422,7 +1422,7 @@ Tcl_GlobObjCmd(
if (length <= 0) {
goto skipTypes;
}
- globTypes = TclStackAlloc(interp, sizeof(Tcl_GlobTypeData));
+ globTypes = ckalloc(sizeof(Tcl_GlobTypeData));
globTypes->type = 0;
globTypes->perm = 0;
globTypes->macType = NULL;
@@ -1638,7 +1638,7 @@ Tcl_GlobObjCmd(
if (globTypes->macCreator != NULL) {
Tcl_DecrRefCount(globTypes->macCreator);
}
- TclStackFree(interp, globTypes);
+ ckfree(globTypes);
}
return result;
}
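The same mechanical substitution recurs in every file below: scratch memory that used to come from the interpreter's execution stack via TclStackAlloc/TclStackRealloc/TclStackFree now comes straight from ckalloc/ckrealloc/ckfree, which need no interp argument. A minimal sketch of the pattern; Tcl_GlobTypeData and the ck* macros are real, the helper function itself is hypothetical.

#include "tclInt.h"

static Tcl_GlobTypeData *
NewGlobTypes(void)                  /* hypothetical helper, mirrors Tcl_GlobObjCmd above */
{
    /* Formerly: TclStackAlloc(interp, sizeof(Tcl_GlobTypeData)),
     * released with TclStackFree(interp, globTypes). */
    Tcl_GlobTypeData *globTypes = ckalloc(sizeof(Tcl_GlobTypeData));

    globTypes->type = 0;
    globTypes->perm = 0;
    globTypes->macType = NULL;
    globTypes->macCreator = NULL;
    return globTypes;               /* caller releases with ckfree(globTypes) */
}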
diff --git a/generic/tclIOCmd.c b/generic/tclIOCmd.c
index 1f0e4a9..ffa172a 100644
--- a/generic/tclIOCmd.c
+++ b/generic/tclIOCmd.c
@@ -929,7 +929,7 @@ Tcl_ExecObjCmd(
*/
argc = objc - skip;
- argv = TclStackAlloc(interp, (unsigned)(argc + 1) * sizeof(char *));
+ argv = ckalloc((unsigned)(argc + 1) * sizeof(char *));
/*
* Copy the string conversions of each (post option) object into the
@@ -947,7 +947,7 @@ Tcl_ExecObjCmd(
* Free the argv array.
*/
- TclStackFree(interp, (void *) argv);
+ ckfree((void *) argv);
if (chan == NULL) {
return TCL_ERROR;
diff --git a/generic/tclIndexObj.c b/generic/tclIndexObj.c
index d98842e..f9511af 100644
--- a/generic/tclIndexObj.c
+++ b/generic/tclIndexObj.c
@@ -952,12 +952,12 @@ Tcl_WrongNumArgs(
len = Tcl_ScanCountedElement(elementStr, elemLen, &flags);
if (MAY_QUOTE_WORD && len != elemLen) {
- char *quotedElementStr = TclStackAlloc(interp, (unsigned)len);
+ char *quotedElementStr = ckalloc((unsigned)len);
len = Tcl_ConvertCountedElement(elementStr, elemLen,
quotedElementStr, flags);
Tcl_AppendToObj(objPtr, quotedElementStr, len);
- TclStackFree(interp, quotedElementStr);
+ ckfree(quotedElementStr);
} else {
Tcl_AppendToObj(objPtr, elementStr, elemLen);
}
@@ -1006,12 +1006,12 @@ Tcl_WrongNumArgs(
len = Tcl_ScanCountedElement(elementStr, elemLen, &flags);
if (MAY_QUOTE_WORD && len != elemLen) {
- char *quotedElementStr = TclStackAlloc(interp,(unsigned) len);
+ char *quotedElementStr = ckalloc((unsigned) len);
len = Tcl_ConvertCountedElement(elementStr, elemLen,
quotedElementStr, flags);
Tcl_AppendToObj(objPtr, quotedElementStr, len);
- TclStackFree(interp, quotedElementStr);
+ ckfree(quotedElementStr);
} else {
Tcl_AppendToObj(objPtr, elementStr, elemLen);
}
diff --git a/generic/tclInt.decls b/generic/tclInt.decls
index df60dae..6330836 100644
--- a/generic/tclInt.decls
+++ b/generic/tclInt.decls
@@ -35,9 +35,9 @@ scspec EXTERN
#declare 2 {
# int TclAccessInsertProc(TclAccessProc_ *proc)
#}
-declare 3 {
- void TclAllocateFreeObjects(void)
-}
+#declare 3 {
+# void TclAllocateFreeObjects(void)
+#}
# Replaced by TclpChdir in 8.1:
# declare 4 {
# int TclChdir(Tcl_Interp *interp, char *dirName)
@@ -867,12 +867,12 @@ declare 213 {
declare 214 {
void TclSetObjNameOfExecutable(Tcl_Obj *name, Tcl_Encoding encoding)
}
-declare 215 {
- void *TclStackAlloc(Tcl_Interp *interp, int numBytes)
-}
-declare 216 {
- void TclStackFree(Tcl_Interp *interp, void *freePtr)
-}
+#declare 215 {
+# void *TclStackAlloc(Tcl_Interp *interp, unsigned int numBytes)
+#}
+#declare 216 {
+# void TclStackFree(Tcl_Interp *interp, void *freePtr)
+#}
declare 217 {
int TclPushStackFrame(Tcl_Interp *interp, Tcl_CallFrame **framePtrPtr,
Tcl_Namespace *namespacePtr, int isProcCallFrame)
diff --git a/generic/tclInt.h b/generic/tclInt.h
index 42e2212..45eaf7e 100644
--- a/generic/tclInt.h
+++ b/generic/tclInt.h
@@ -10,7 +10,7 @@
* Copyright (c) 2001, 2002 by Kevin B. Kenny. All rights reserved.
* Copyright (c) 2007 Daniel A. Steffen <das@users.sourceforge.net>
* Copyright (c) 2006-2008 by Joe Mistachkin. All rights reserved.
- * Copyright (c) 2008 by Miguel Sofer. All rights reserved.
+ * Copyright (c) 2008-2011 by Miguel Sofer. All rights reserved.
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
@@ -1390,13 +1390,6 @@ MODULE_SCOPE void TclThreadDataKeySet(Tcl_ThreadDataKey *keyPtr,
(ThreadSpecificData *)Tcl_GetThreadData((keyPtr), sizeof(ThreadSpecificData))
/*
- *----------------------------------------------------------------
- * Data structures related to bytecode compilation and execution. These are
- * used primarily in tclCompile.c, tclExecute.c, and tclBasic.c.
- *----------------------------------------------------------------
- */
-
-/*
* Forward declaration to prevent errors when the forward references to
* Tcl_Parse and CompileEnv are encountered in the procedure type CompileProc
* declared below.
@@ -1438,19 +1431,6 @@ typedef int (CompileHookProc)(Tcl_Interp *interp,
struct CompileEnv *compEnvPtr, ClientData clientData);
/*
- * The data structure for a (linked list of) execution stacks.
- */
-
-typedef struct ExecStack {
- struct ExecStack *prevPtr;
- struct ExecStack *nextPtr;
- Tcl_Obj **markerPtr;
- Tcl_Obj **endPtr;
- Tcl_Obj **tosPtr;
- Tcl_Obj *stackWords[1];
-} ExecStack;
-
-/*
* The data structure defining the execution environment for ByteCode's.
* There is one ExecEnv structure per Tcl interpreter. It holds the evaluation
* stack that holds command operands and results. The stack grows towards
@@ -1487,8 +1467,6 @@ typedef struct CoroutineData {
} CoroutineData;
typedef struct ExecEnv {
- ExecStack *execStackPtr; /* Points to the first item in the evaluation
- * stack on the heap. */
Tcl_Obj *constants[2]; /* Pointers to constant "0" and "1" objs. */
struct Tcl_Interp *interp;
struct NRE_callback *callbackPtr;
@@ -1769,24 +1747,6 @@ enum PkgPreferOptions {
/*
*----------------------------------------------------------------
- * This structure shadows the first few fields of the memory cache for the
- * allocator defined in tclThreadAlloc.c; it has to be kept in sync with the
- * definition there.
- * Some macros require knowledge of some fields in the struct in order to
- * avoid hitting the TSD unnecessarily. In order to facilitate this, a pointer
- * to the relevant fields is kept in the objCache field in struct Interp.
- *----------------------------------------------------------------
- */
-
-typedef struct AllocCache {
- struct Cache *nextPtr; /* Linked list of cache entries. */
- Tcl_ThreadId owner; /* Which thread's cache is this? */
- Tcl_Obj *firstObjPtr; /* List of free objects for thread. */
- int numObjects; /* Number of objects for thread. */
-} AllocCache;
-
-/*
- *----------------------------------------------------------------
* This structure defines an interpreter, which is a collection of commands
* plus other state information related to interpreting commands, such as
* variable storage. Primary responsibility for this data structure is in
@@ -2118,7 +2078,6 @@ typedef struct Interp {
* They are used by the macros defined below.
*/
- AllocCache *allocCache;
void *pendingObjDataPtr; /* Pointer to the Cache and PendingObjData
* structs for this interp's thread; see
* tclObj.c and tclThreadAlloc.c */
@@ -2351,17 +2310,6 @@ struct LimitHandler {
#define UCHAR(c) ((unsigned char) (c))
/*
- * This macro is used to properly align the memory allocated by Tcl, giving
- * the same alignment as the native malloc.
- */
-
-#if defined(__APPLE__)
-#define TCL_ALLOCALIGN 16
-#else
-#define TCL_ALLOCALIGN (2*sizeof(void *))
-#endif
-
-/*
* This macro is used to determine the offset needed to safely allocate any
* data structure in memory. Given a starting offset or size, it "rounds up"
* or "aligns" the offset to the next 8-byte boundary so that any data
@@ -2902,7 +2850,6 @@ MODULE_SCOPE void TclCreateLateExitHandler(Tcl_ExitProc *proc,
ClientData clientData);
MODULE_SCOPE void TclDeleteLateExitHandler(Tcl_ExitProc *proc,
ClientData clientData);
-MODULE_SCOPE void TclFinalizeAllocSubsystem(void);
MODULE_SCOPE void TclFinalizeAsync(void);
MODULE_SCOPE void TclFinalizeDoubleConversion(void);
MODULE_SCOPE void TclFinalizeEncodingSubsystem(void);
@@ -2919,7 +2866,6 @@ MODULE_SCOPE void TclFinalizeNotifier(void);
MODULE_SCOPE void TclFinalizeObjects(void);
MODULE_SCOPE void TclFinalizePreserve(void);
MODULE_SCOPE void TclFinalizeSynchronization(void);
-MODULE_SCOPE void TclFinalizeThreadAlloc(void);
MODULE_SCOPE void TclFinalizeThreadData(void);
MODULE_SCOPE void TclFinalizeThreadObjects(void);
MODULE_SCOPE double TclFloor(const mp_int *a);
@@ -3097,8 +3043,6 @@ MODULE_SCOPE void TclSetDuplicateObj(Tcl_Obj *dupPtr, Tcl_Obj *objPtr);
MODULE_SCOPE void TclSetProcessGlobalValue(ProcessGlobalValue *pgvPtr,
Tcl_Obj *newValue, Tcl_Encoding encoding);
MODULE_SCOPE void TclSignalExitThread(Tcl_ThreadId id, int result);
-MODULE_SCOPE void * TclStackRealloc(Tcl_Interp *interp, void *ptr,
- int numBytes);
MODULE_SCOPE int TclStringMatch(const char *str, int strLen,
const char *pattern, int ptnLen, int flags);
MODULE_SCOPE int TclStringMatchObj(Tcl_Obj *stringObj,
@@ -3808,10 +3752,10 @@ typedef const char *TclDTraceStr;
#endif /* TCL_COMPILE_STATS */
# define TclAllocObjStorage(objPtr) \
- TclAllocObjStorageEx(NULL, (objPtr))
+ (objPtr) = TclSmallAlloc()
# define TclFreeObjStorage(objPtr) \
- TclFreeObjStorageEx(NULL, (objPtr))
+ TclSmallFree(objPtr)
#ifndef TCL_MEM_DEBUG
# define TclNewObj(objPtr) \
@@ -3846,128 +3790,122 @@ typedef const char *TclDTraceStr;
} \
}
-#if defined(PURIFY)
+#else /* TCL_MEM_DEBUG */
+MODULE_SCOPE void TclDbInitNewObj(Tcl_Obj *objPtr, const char *file,
+ int line);
-/*
- * The PURIFY mode is like the regular mode, but instead of doing block
- * Tcl_Obj allocation and keeping a freed list for efficiency, it always
- * allocates and frees a single Tcl_Obj so that tools like Purify can better
- * track memory leaks.
- */
+# define TclDbNewObj(objPtr, file, line) \
+ do { \
+ TclIncrObjsAllocated(); \
+ (objPtr) = (Tcl_Obj *) \
+ Tcl_DbCkalloc(sizeof(Tcl_Obj), (file), (line)); \
+ TclDbInitNewObj((objPtr), (file), (line)); \
+ TCL_DTRACE_OBJ_CREATE(objPtr); \
+ } while (0)
-# define TclAllocObjStorageEx(interp, objPtr) \
- (objPtr) = (Tcl_Obj *) Tcl_Alloc(sizeof(Tcl_Obj))
+# define TclNewObj(objPtr) \
+ TclDbNewObj(objPtr, __FILE__, __LINE__);
-# define TclFreeObjStorageEx(interp, objPtr) \
- ckfree((char *) (objPtr))
+# define TclDecrRefCount(objPtr) \
+ Tcl_DbDecrRefCount(objPtr, __FILE__, __LINE__)
-#undef USE_THREAD_ALLOC
-#elif defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
+# define TclNewListObjDirect(objc, objv) \
+ TclDbNewListObjDirect(objc, objv, __FILE__, __LINE__)
+
+#endif /* TCL_MEM_DEBUG */
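A short usage sketch of the debug path just defined: under TCL_MEM_DEBUG, TclNewObj routes through TclDbNewObj, so every Tcl_Obj records the file and line that allocated it, and TclDecrRefCount routes through Tcl_DbDecrRefCount. The helper function below is hypothetical; the macros are the ones above.

static Tcl_Obj *
NewTaggedObj(void)                  /* hypothetical helper */
{
    Tcl_Obj *objPtr;

    TclNewObj(objPtr);              /* debug builds: Tcl_DbCkalloc + TclDbInitNewObj
                                     * tagged with this __FILE__/__LINE__ */
    Tcl_IncrRefCount(objPtr);
    return objPtr;                  /* drop later with TclDecrRefCount(objPtr) */
}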
/*
+ * Macros that drive the allocator behaviour
+ */
+
+#if defined(TCL_THREADS)
+/*
* The TCL_THREADS mode is like the regular mode but allocates Tcl_Obj's from
* per-thread caches.
*/
-
-MODULE_SCOPE Tcl_Obj * TclThreadAllocObj(void);
-MODULE_SCOPE void TclThreadFreeObj(Tcl_Obj *);
-MODULE_SCOPE Tcl_Mutex *TclpNewAllocMutex(void);
-MODULE_SCOPE void TclFreeAllocCache(void *);
+MODULE_SCOPE void TclpFreeAllocCache(void *);
MODULE_SCOPE void * TclpGetAllocCache(void);
MODULE_SCOPE void TclpSetAllocCache(void *);
+MODULE_SCOPE void TclFreeAllocCache(void *);
MODULE_SCOPE void TclpFreeAllocMutex(Tcl_Mutex *mutex);
-MODULE_SCOPE void TclpFreeAllocCache(void *);
+MODULE_SCOPE Tcl_Mutex *TclpNewAllocMutex(void);
+#endif
/*
- * These macros need to be kept in sync with the code of TclThreadAllocObj()
- * and TclThreadFreeObj().
- *
- * Note that the optimiser should resolve the case (interp==NULL) at compile
- * time.
+ * List of valid allocators. Have to respect the following convention:
+ * - allocators that shunt TclpAlloc to malloc are below aNONE
+ * - allocators that use zippy are above aNONE
*/
-# define ALLOC_NOBJHIGH 1200
+#define aNATIVE 0
+#define aPURIFY 1
+#define aNONE 2
+#define aZIPPY 3
+#define aMULTI 4
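A hedged sketch of how the ordering convention is meant to be consumed; the helper below is hypothetical, while the #if ladder that follows in this hunk applies the same rule to TclpAlloc at compile time.

/* Allocators below aNONE shunt TclpAlloc to the system malloc via ck*();
 * allocators above it go through zippy's bucket caches.  Hypothetical helper. */
static int
AllocatorUsesZippy(int allocator)       /* one of aNATIVE .. aMULTI */
{
    return allocator > aNONE;
}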
-# define TclAllocObjStorageEx(interp, objPtr) \
- do { \
- AllocCache *cachePtr; \
- if (((interp) == NULL) || \
- ((cachePtr = ((Interp *)(interp))->allocCache), \
- (cachePtr->numObjects == 0))) { \
- (objPtr) = TclThreadAllocObj(); \
- } else { \
- (objPtr) = cachePtr->firstObjPtr; \
- cachePtr->firstObjPtr = (objPtr)->internalRep.otherValuePtr; \
- --cachePtr->numObjects; \
- } \
- } while (0)
-
-# define TclFreeObjStorageEx(interp, objPtr) \
- do { \
- AllocCache *cachePtr; \
- if (((interp) == NULL) || \
- ((cachePtr = ((Interp *)(interp))->allocCache), \
- (cachePtr->numObjects >= ALLOC_NOBJHIGH))) { \
- TclThreadFreeObj(objPtr); \
- } else { \
- (objPtr)->internalRep.otherValuePtr = cachePtr->firstObjPtr; \
- cachePtr->firstObjPtr = objPtr; \
- ++cachePtr->numObjects; \
- } \
- } while (0)
-
-#else /* not PURIFY or USE_THREAD_ALLOC */
+#if defined(TCL_ALLOCATOR) && ((TCL_ALLOCATOR < 0) || (TCL_ALLOCATOR > aMULTI))
+#undef TCL_ALLOCATOR
+#endif
-#ifdef TCL_THREADS
-/* declared in tclObj.c */
-MODULE_SCOPE Tcl_Mutex tclObjMutex;
+#ifdef PURIFY
+# undef TCL_ALLOCATOR
+# define TCL_ALLOCATOR aPURIFY
#endif
-# define TclAllocObjStorageEx(interp, objPtr) \
- do { \
- Tcl_MutexLock(&tclObjMutex); \
- if (tclFreeObjList == NULL) { \
- TclAllocateFreeObjects(); \
- } \
- (objPtr) = tclFreeObjList; \
- tclFreeObjList = (Tcl_Obj *) \
- tclFreeObjList->internalRep.otherValuePtr; \
- Tcl_MutexUnlock(&tclObjMutex); \
- } while (0)
+#if !defined(TCL_ALLOCATOR)
+# if defined(USE_THREAD_ALLOC) || defined(USE_TCLALLOC)
+# define TCL_ALLOCATOR aZIPPY
+# else
+# define TCL_ALLOCATOR aNATIVE
+# endif
+#endif
-# define TclFreeObjStorageEx(interp, objPtr) \
- do { \
- Tcl_MutexLock(&tclObjMutex); \
- (objPtr)->internalRep.otherValuePtr = (void *) tclFreeObjList; \
- tclFreeObjList = (objPtr); \
- Tcl_MutexUnlock(&tclObjMutex); \
- } while (0)
+#if TCL_ALLOCATOR < aNONE /* native or purify */
+# define TclpAlloc(size) ckalloc(size)
+# define TclpRealloc(ptr, size) ckrealloc((ptr),(size))
+# define TclpFree(size) ckfree(size)
+#else
+ MODULE_SCOPE char * TclpAlloc(unsigned int size);
+ MODULE_SCOPE char * TclpRealloc(char * ptr, unsigned int size);
+ MODULE_SCOPE void TclpFree(char * ptr);
#endif
-#else /* TCL_MEM_DEBUG */
-MODULE_SCOPE void TclDbInitNewObj(Tcl_Obj *objPtr, const char *file,
- int line);
+#if TCL_ALLOCATOR == aPURIFY
+# define TclSmallAlloc() ckalloc(sizeof(Tcl_Obj))
+# define TclSmallFree(ptr) ckfree(ptr)
+# define TclInitAlloc()
+# define TclFinalizeAlloc()
+#else
+ MODULE_SCOPE void * TclSmallAlloc();
+ MODULE_SCOPE void TclSmallFree(void *ptr);
+ MODULE_SCOPE void TclInitAlloc(void);
+ MODULE_SCOPE void TclFinalizeAlloc(void);
+#endif
-# define TclDbNewObj(objPtr, file, line) \
- do { \
- TclIncrObjsAllocated(); \
- (objPtr) = (Tcl_Obj *) \
- Tcl_DbCkalloc(sizeof(Tcl_Obj), (file), (line)); \
- TclDbInitNewObj((objPtr), (file), (line)); \
- TCL_DTRACE_OBJ_CREATE(objPtr); \
+#define TclCkSmallAlloc(nbytes, memPtr) \
+ do { \
+ TCL_CT_ASSERT((nbytes)<=sizeof(Tcl_Obj)); \
+ memPtr = TclSmallAlloc(); \
} while (0)
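A usage sketch for the small-object entry points declared above; the struct and helper are hypothetical, TclCkSmallAlloc/TclSmallFree are real, and the NRE_callback allocation further down in this file is the actual in-tree user.

typedef struct TinyRecord {         /* hypothetical; must fit in a Tcl_Obj */
    void *clientData;
    int   flags;
} TinyRecord;

static TinyRecord *
NewTinyRecord(void)
{
    TinyRecord *recPtr;

    /* Compile-time check that the request fits in the Tcl_Obj pool,
     * then take a slot from it. */
    TclCkSmallAlloc(sizeof(TinyRecord), recPtr);
    recPtr->clientData = NULL;
    recPtr->flags = 0;
    return recPtr;                  /* release with TclSmallFree(recPtr) */
}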
-# define TclNewObj(objPtr) \
- TclDbNewObj(objPtr, __FILE__, __LINE__);
+/*
+ * Support for Clang Static Analyzer <http://clang-analyzer.llvm.org>
+ */
-# define TclDecrRefCount(objPtr) \
- Tcl_DbDecrRefCount(objPtr, __FILE__, __LINE__)
+#if (TCL_ALLOCATOR == aPURIFY) && defined(__clang__)
+#if __has_feature(attribute_analyzer_noreturn) && \
+ !defined(Tcl_Panic) && defined(Tcl_Panic_TCL_DECLARED)
+void Tcl_Panic(const char *, ...) __attribute__((analyzer_noreturn));
+#endif
+#if !defined(CLANG_ASSERT)
+#include <assert.h>
+#define CLANG_ASSERT(x) assert(x)
+#endif
+#elif !defined(CLANG_ASSERT)
+ #define CLANG_ASSERT(x)
+#endif /* PURIFY && __clang__ */
-# define TclNewListObjDirect(objc, objv) \
- TclDbNewListObjDirect(objc, objv, __FILE__, __LINE__)
-#undef USE_THREAD_ALLOC
-#endif /* TCL_MEM_DEBUG */
/*
*----------------------------------------------------------------
@@ -4471,73 +4409,11 @@ MODULE_SCOPE Tcl_PackageInitProc Procbodytest_SafeInit;
{enum { ct_assert_value = 1/(!!(e)) };}
/*
- *----------------------------------------------------------------
- * Allocator for small structs (<=sizeof(Tcl_Obj)) using the Tcl_Obj pool.
- * Only checked at compile time.
- *
- * ONLY USE FOR CONSTANT nBytes.
- *
- * DO NOT LET THEM CROSS THREAD BOUNDARIES
- *----------------------------------------------------------------
- */
-
-#define TclSmallAlloc(nbytes, memPtr) \
- TclSmallAllocEx(NULL, (nbytes), (memPtr))
-
-#define TclSmallFree(memPtr) \
- TclSmallFreeEx(NULL, (memPtr))
-
-#ifndef TCL_MEM_DEBUG
-#define TclSmallAllocEx(interp, nbytes, memPtr) \
- do { \
- Tcl_Obj *objPtr; \
- TCL_CT_ASSERT((nbytes)<=sizeof(Tcl_Obj)); \
- TclIncrObjsAllocated(); \
- TclAllocObjStorageEx((interp), (objPtr)); \
- memPtr = (ClientData) (objPtr); \
- } while (0)
-
-#define TclSmallFreeEx(interp, memPtr) \
- do { \
- TclFreeObjStorageEx((interp), (Tcl_Obj *) (memPtr)); \
- TclIncrObjsFreed(); \
- } while (0)
-
-#else /* TCL_MEM_DEBUG */
-#define TclSmallAllocEx(interp, nbytes, memPtr) \
- do { \
- Tcl_Obj *objPtr; \
- TCL_CT_ASSERT((nbytes)<=sizeof(Tcl_Obj)); \
- TclNewObj(objPtr); \
- memPtr = (ClientData) objPtr; \
- } while (0)
-
-#define TclSmallFreeEx(interp, memPtr) \
- do { \
- Tcl_Obj *objPtr = (Tcl_Obj *) memPtr; \
- objPtr->bytes = NULL; \
- objPtr->typePtr = NULL; \
- objPtr->refCount = 1; \
- TclDecrRefCount(objPtr); \
- } while (0)
-#endif /* TCL_MEM_DEBUG */
-
-/*
* Support for Clang Static Analyzer <http://clang-analyzer.llvm.org>
*/
-#if defined(PURIFY) && defined(__clang__)
-#if __has_feature(attribute_analyzer_noreturn) && \
- !defined(Tcl_Panic) && defined(Tcl_Panic_TCL_DECLARED)
-void Tcl_Panic(const char *, ...) __attribute__((analyzer_noreturn));
-#endif
-#if !defined(CLANG_ASSERT)
-#include <assert.h>
-#define CLANG_ASSERT(x) assert(x)
-#endif
-#elif !defined(CLANG_ASSERT)
#define CLANG_ASSERT(x)
-#endif /* PURIFY && __clang__ */
+
/*
*----------------------------------------------------------------
@@ -4610,8 +4486,8 @@ typedef struct NRE_callback {
#if NRE_USE_SMALL_ALLOC
#define TCLNR_ALLOC(interp, ptr) \
- TclSmallAllocEx(interp, sizeof(NRE_callback), (ptr))
-#define TCLNR_FREE(interp, ptr) TclSmallFreeEx((interp), (ptr))
+ TclCkSmallAlloc(sizeof(NRE_callback), (ptr))
+#define TCLNR_FREE(interp, ptr) TclSmallFree(ptr)
#else
#define TCLNR_ALLOC(interp, ptr) \
(ptr = ((ClientData) ckalloc(sizeof(NRE_callback))))
diff --git a/generic/tclIntDecls.h b/generic/tclIntDecls.h
index b294e4f..0966d32 100644
--- a/generic/tclIntDecls.h
+++ b/generic/tclIntDecls.h
@@ -58,8 +58,7 @@
/* Slot 0 is reserved */
/* Slot 1 is reserved */
/* Slot 2 is reserved */
-/* 3 */
-EXTERN void TclAllocateFreeObjects(void);
+/* Slot 3 is reserved */
/* Slot 4 is reserved */
/* 5 */
EXTERN int TclCleanupChildren(Tcl_Interp *interp, int numPids,
@@ -506,10 +505,8 @@ EXTERN Tcl_Obj * TclGetObjNameOfExecutable(void);
/* 214 */
EXTERN void TclSetObjNameOfExecutable(Tcl_Obj *name,
Tcl_Encoding encoding);
-/* 215 */
-EXTERN void * TclStackAlloc(Tcl_Interp *interp, int numBytes);
-/* 216 */
-EXTERN void TclStackFree(Tcl_Interp *interp, void *freePtr);
+/* Slot 215 is reserved */
+/* Slot 216 is reserved */
/* 217 */
EXTERN int TclPushStackFrame(Tcl_Interp *interp,
Tcl_CallFrame **framePtrPtr,
@@ -609,7 +606,7 @@ typedef struct TclIntStubs {
void (*reserved0)(void);
void (*reserved1)(void);
void (*reserved2)(void);
- void (*tclAllocateFreeObjects) (void); /* 3 */
+ void (*reserved3)(void);
void (*reserved4)(void);
int (*tclCleanupChildren) (Tcl_Interp *interp, int numPids, Tcl_Pid *pidPtr, Tcl_Channel errorChan); /* 5 */
void (*tclCleanupCommand) (Command *cmdPtr); /* 6 */
@@ -821,8 +818,8 @@ typedef struct TclIntStubs {
void (*tclpFindExecutable) (const char *argv0); /* 212 */
Tcl_Obj * (*tclGetObjNameOfExecutable) (void); /* 213 */
void (*tclSetObjNameOfExecutable) (Tcl_Obj *name, Tcl_Encoding encoding); /* 214 */
- void * (*tclStackAlloc) (Tcl_Interp *interp, int numBytes); /* 215 */
- void (*tclStackFree) (Tcl_Interp *interp, void *freePtr); /* 216 */
+ void (*reserved215)(void);
+ void (*reserved216)(void);
int (*tclPushStackFrame) (Tcl_Interp *interp, Tcl_CallFrame **framePtrPtr, Tcl_Namespace *namespacePtr, int isProcCallFrame); /* 217 */
void (*tclPopStackFrame) (Tcl_Interp *interp); /* 218 */
void (*reserved219)(void);
@@ -876,8 +873,7 @@ extern const TclIntStubs *tclIntStubsPtr;
/* Slot 0 is reserved */
/* Slot 1 is reserved */
/* Slot 2 is reserved */
-#define TclAllocateFreeObjects \
- (tclIntStubsPtr->tclAllocateFreeObjects) /* 3 */
+/* Slot 3 is reserved */
/* Slot 4 is reserved */
#define TclCleanupChildren \
(tclIntStubsPtr->tclCleanupChildren) /* 5 */
@@ -1216,10 +1212,8 @@ extern const TclIntStubs *tclIntStubsPtr;
(tclIntStubsPtr->tclGetObjNameOfExecutable) /* 213 */
#define TclSetObjNameOfExecutable \
(tclIntStubsPtr->tclSetObjNameOfExecutable) /* 214 */
-#define TclStackAlloc \
- (tclIntStubsPtr->tclStackAlloc) /* 215 */
-#define TclStackFree \
- (tclIntStubsPtr->tclStackFree) /* 216 */
+/* Slot 215 is reserved */
+/* Slot 216 is reserved */
#define TclPushStackFrame \
(tclIntStubsPtr->tclPushStackFrame) /* 217 */
#define TclPopStackFrame \
diff --git a/generic/tclInterp.c b/generic/tclInterp.c
index 67761ed..46a5f42 100644
--- a/generic/tclInterp.c
+++ b/generic/tclInterp.c
@@ -1169,7 +1169,7 @@ Tcl_CreateAlias(
int i;
int result;
- objv = TclStackAlloc(slaveInterp, (unsigned) sizeof(Tcl_Obj *) * argc);
+ objv = ckalloc((unsigned) sizeof(Tcl_Obj *) * argc);
for (i = 0; i < argc; i++) {
objv[i] = Tcl_NewStringObj(argv[i], -1);
Tcl_IncrRefCount(objv[i]);
@@ -1187,7 +1187,7 @@ Tcl_CreateAlias(
for (i = 0; i < argc; i++) {
Tcl_DecrRefCount(objv[i]);
}
- TclStackFree(slaveInterp, objv);
+ ckfree(objv);
Tcl_DecrRefCount(targetObjPtr);
Tcl_DecrRefCount(slaveObjPtr);
@@ -1863,7 +1863,7 @@ AliasObjCmd(
if (cmdc <= ALIAS_CMDV_PREALLOC) {
cmdv = cmdArr;
} else {
- cmdv = TclStackAlloc(interp, cmdc * sizeof(Tcl_Obj *));
+ cmdv = ckalloc(cmdc * sizeof(Tcl_Obj *));
}
prefv = &aliasPtr->objPtr;
@@ -1930,7 +1930,7 @@ AliasObjCmd(
Tcl_DecrRefCount(cmdv[i]);
}
if (cmdv != cmdArr) {
- TclStackFree(interp, cmdv);
+ ckfree(cmdv);
}
return result;
#undef ALIAS_CMDV_PREALLOC
diff --git a/generic/tclNamesp.c b/generic/tclNamesp.c
index ad233b9..08a9443 100644
--- a/generic/tclNamesp.c
+++ b/generic/tclNamesp.c
@@ -465,7 +465,7 @@ TclPushStackFrame(
* treated as references to namespace
* variables. */
{
- *framePtrPtr = TclStackAlloc(interp, sizeof(CallFrame));
+ *framePtrPtr = ckalloc(sizeof(CallFrame));
return Tcl_PushCallFrame(interp, *framePtrPtr, namespacePtr,
isProcCallFrame);
}
@@ -477,7 +477,7 @@ TclPopStackFrame(
CallFrame *freePtr = ((Interp *) interp)->framePtr;
Tcl_PopCallFrame(interp);
- TclStackFree(interp, freePtr);
+ ckfree(freePtr);
}
/*
@@ -2632,8 +2632,7 @@ TclResetShadowedCmdRefs(
int found, i;
int trailFront = -1;
int trailSize = 5; /* Formerly NUM_TRAIL_ELEMS. */
- Namespace **trailPtr = TclStackAlloc(interp,
- trailSize * sizeof(Namespace *));
+ Namespace **trailPtr = ckalloc(trailSize * sizeof(Namespace *));
/*
* Start at the namespace containing the new command, and work up through
@@ -2722,13 +2721,12 @@ TclResetShadowedCmdRefs(
if (trailFront == trailSize) {
int newSize = 2 * trailSize;
- trailPtr = TclStackRealloc(interp, trailPtr,
- newSize * sizeof(Namespace *));
+ trailPtr = ckrealloc(trailPtr, newSize * sizeof(Namespace *));
trailSize = newSize;
}
trailPtr[trailFront] = nsPtr;
}
- TclStackFree(interp, trailPtr);
+ ckfree(trailPtr);
}
/*
@@ -3970,8 +3968,7 @@ NamespacePathCmd(
goto badNamespace;
}
if (nsObjc != 0) {
- namespaceList = TclStackAlloc(interp,
- sizeof(Tcl_Namespace *) * nsObjc);
+ namespaceList = ckalloc(sizeof(Tcl_Namespace *) * nsObjc);
for (i=0 ; i<nsObjc ; i++) {
if (TclGetNamespaceFromObj(interp, nsObjv[i],
@@ -3990,7 +3987,7 @@ NamespacePathCmd(
result = TCL_OK;
badNamespace:
if (namespaceList != NULL) {
- TclStackFree(interp, namespaceList);
+ ckfree(namespaceList);
}
return result;
}
diff --git a/generic/tclOOCall.c b/generic/tclOOCall.c
index 1e8d1a3..8814819 100644
--- a/generic/tclOOCall.c
+++ b/generic/tclOOCall.c
@@ -104,7 +104,7 @@ TclOODeleteContext(
register Object *oPtr = contextPtr->oPtr;
TclOODeleteChain(contextPtr->callPtr);
- TclStackFree(oPtr->fPtr->interp, contextPtr);
+ ckfree(contextPtr);
DelRef(oPtr);
}
@@ -1087,7 +1087,7 @@ TclOOGetCallContext(
}
returnContext:
- contextPtr = TclStackAlloc(oPtr->fPtr->interp, sizeof(CallContext));
+ contextPtr = ckalloc(sizeof(CallContext));
contextPtr->oPtr = oPtr;
AddRef(oPtr);
contextPtr->callPtr = callPtr;
diff --git a/generic/tclOODefineCmds.c b/generic/tclOODefineCmds.c
index 8d8eb85..cc3a0ad 100644
--- a/generic/tclOODefineCmds.c
+++ b/generic/tclOODefineCmds.c
@@ -455,7 +455,7 @@ TclOOUnknownDefinition(
* Got one match, and only one match!
*/
- Tcl_Obj **newObjv = TclStackAlloc(interp, sizeof(Tcl_Obj*)*(objc-1));
+ Tcl_Obj **newObjv = ckalloc(sizeof(Tcl_Obj*)*(objc-1));
int result;
newObjv[0] = Tcl_NewStringObj(matchedStr, -1);
@@ -465,7 +465,7 @@ TclOOUnknownDefinition(
}
result = Tcl_EvalObjv(interp, objc-1, newObjv, 0);
Tcl_DecrRefCount(newObjv[0]);
- TclStackFree(interp, newObjv);
+ ckfree(newObjv);
return result;
}
@@ -1546,7 +1546,7 @@ TclOODefineMixinObjCmd(
Tcl_AppendResult(interp, "attempt to misuse API", NULL);
return TCL_ERROR;
}
- mixins = TclStackAlloc(interp, sizeof(Class *) * (objc-1));
+ mixins = ckalloc(sizeof(Class *) * (objc-1));
for (i=1 ; i<objc ; i++) {
Class *clsPtr = GetClassInOuterContext(interp, objv[i],
@@ -1568,11 +1568,11 @@ TclOODefineMixinObjCmd(
TclOOClassSetMixins(interp, oPtr->classPtr, objc-1, mixins);
}
- TclStackFree(interp, mixins);
+ ckfree(mixins);
return TCL_OK;
freeAndError:
- TclStackFree(interp, mixins);
+ ckfree(mixins);
return TCL_ERROR;
}
diff --git a/generic/tclOOMethod.c b/generic/tclOOMethod.c
index 112d663..0996eab 100644
--- a/generic/tclOOMethod.c
+++ b/generic/tclOOMethod.c
@@ -686,7 +686,7 @@ InvokeProcedureMethod(
* Allocate the special frame data.
*/
- fdPtr = TclStackAlloc(interp, sizeof(PMFrameData));
+ fdPtr = ckalloc(sizeof(PMFrameData));
/*
* Create a call frame for this method.
@@ -695,7 +695,7 @@ InvokeProcedureMethod(
result = PushMethodCallFrame(interp, (CallContext *) context, pmPtr,
objc, objv, fdPtr);
if (result != TCL_OK) {
- TclStackFree(interp, fdPtr);
+ ckfree(fdPtr);
return result;
}
pmPtr->refCount++;
@@ -719,11 +719,11 @@ InvokeProcedureMethod(
pmPtr->procPtr->cmdPtr = fdPtr->oldCmdPtr;
Tcl_PopCallFrame(interp);
- TclStackFree(interp, fdPtr->framePtr);
+ ckfree(fdPtr->framePtr);
if (--pmPtr->refCount < 1) {
DeleteProcedureMethodRecord(pmPtr);
}
- TclStackFree(interp, fdPtr);
+ ckfree(fdPtr);
return result;
}
}
@@ -774,7 +774,7 @@ FinalizePMCall(
if (--pmPtr->refCount < 1) {
DeleteProcedureMethodRecord(pmPtr);
}
- TclStackFree(interp, fdPtr);
+ ckfree(fdPtr);
return result;
}
@@ -1447,7 +1447,7 @@ FinalizeForwardCall(
{
Tcl_Obj **argObjs = data[0];
- TclStackFree(interp, argObjs);
+ ckfree(argObjs);
return result;
}
@@ -1576,7 +1576,7 @@ InitEnsembleRewrite(
Tcl_Obj **argObjs;
unsigned len = rewriteLength + objc - toRewrite;
- argObjs = TclStackAlloc(interp, sizeof(Tcl_Obj *) * len);
+ argObjs = ckalloc(sizeof(Tcl_Obj *) * len);
memcpy(argObjs, rewriteObjs, rewriteLength * sizeof(Tcl_Obj *));
memcpy(argObjs + rewriteLength, objv + toRewrite,
sizeof(Tcl_Obj *) * (objc - toRewrite));
diff --git a/generic/tclObj.c b/generic/tclObj.c
index 3bc6f12..5056c1c 100644
--- a/generic/tclObj.c
+++ b/generic/tclObj.c
@@ -27,12 +27,6 @@ static int typeTableInitialized = 0; /* 0 means not yet initialized. */
TCL_DECLARE_MUTEX(tableMutex)
/*
- * Head of the list of free Tcl_Obj structs we maintain.
- */
-
-Tcl_Obj *tclFreeObjList = NULL;
-
-/*
* The object allocator is single threaded. This mutex is referenced by the
* TclNewObj macro, however, so must be visible.
*/
@@ -475,7 +469,7 @@ TclFinalizeThreadObjects(void)
* TclFinalizeObjects --
*
* This function is called by Tcl_Finalize to clean up all registered
- * Tcl_ObjType's and to reset the tclFreeObjList.
+ * Tcl_ObjType's.
*
* Results:
* None.
@@ -495,15 +489,6 @@ TclFinalizeObjects(void)
typeTableInitialized = 0;
}
Tcl_MutexUnlock(&tableMutex);
-
- /*
- * All we do here is reset the head pointer of the linked list of free
- * Tcl_Obj's to NULL; the memory finalization will take care of releasing
- * memory for us.
- */
- Tcl_MutexLock(&tclObjMutex);
- tclFreeObjList = NULL;
- Tcl_MutexUnlock(&tclObjMutex);
}
/*
@@ -1238,59 +1223,6 @@ Tcl_DbNewObj(
/*
*----------------------------------------------------------------------
*
- * TclAllocateFreeObjects --
- *
- * Function to allocate a number of free Tcl_Objs. This is done using a
- * single ckalloc to reduce the overhead for Tcl_Obj allocation.
- *
- * Assumes mutex is held.
- *
- * Results:
- * None.
- *
- * Side effects:
- * tclFreeObjList, the head of the list of free Tcl_Objs, is set to the
- * first of a number of free Tcl_Obj's linked together by their
- * internalRep.otherValuePtrs.
- *
- *----------------------------------------------------------------------
- */
-
-#define OBJS_TO_ALLOC_EACH_TIME 100
-
-void
-TclAllocateFreeObjects(void)
-{
- size_t bytesToAlloc = (OBJS_TO_ALLOC_EACH_TIME * sizeof(Tcl_Obj));
- char *basePtr;
- register Tcl_Obj *prevPtr, *objPtr;
- register int i;
-
- /*
- * This has been noted by Purify to be a potential leak. The problem is
- * that Tcl, when not TCL_MEM_DEBUG compiled, keeps around all allocated
- * Tcl_Obj's, pointed to by tclFreeObjList, when freed instead of actually
- * freeing the memory. TclFinalizeObjects() does not ckfree() this memory,
- * but leaves it to Tcl's memory subsystem finalization to release it.
- * Purify apparently can't figure that out, and fires a false alarm.
- */
-
- basePtr = ckalloc(bytesToAlloc);
-
- prevPtr = NULL;
- objPtr = (Tcl_Obj *) basePtr;
- for (i = 0; i < OBJS_TO_ALLOC_EACH_TIME; i++) {
- objPtr->internalRep.otherValuePtr = prevPtr;
- prevPtr = objPtr;
- objPtr++;
- }
- tclFreeObjList = prevPtr;
-}
-#undef OBJS_TO_ALLOC_EACH_TIME
-
-/*
- *----------------------------------------------------------------------
- *
* TclFreeObj --
*
* This function frees the memory associated with the argument object.
@@ -1404,7 +1336,6 @@ TclFreeObj(
*/
TclInvalidateStringRep(objPtr);
- objPtr->length = -1;
if (!objPtr->typePtr || !objPtr->typePtr->freeIntRepProc) {
/*
diff --git a/generic/tclParse.c b/generic/tclParse.c
index 9bfe608..afd4c0b 100644
--- a/generic/tclParse.c
+++ b/generic/tclParse.c
@@ -1129,14 +1129,14 @@ ParseTokens(
src++;
numBytes--;
- nestedPtr = TclStackAlloc(parsePtr->interp, sizeof(Tcl_Parse));
+ nestedPtr = ckalloc(sizeof(Tcl_Parse));
while (1) {
if (Tcl_ParseCommand(parsePtr->interp, src, numBytes, 1,
nestedPtr) != TCL_OK) {
parsePtr->errorType = nestedPtr->errorType;
parsePtr->term = nestedPtr->term;
parsePtr->incomplete = nestedPtr->incomplete;
- TclStackFree(parsePtr->interp, nestedPtr);
+ ckfree(nestedPtr);
return TCL_ERROR;
}
src = nestedPtr->commandStart + nestedPtr->commandSize;
@@ -1162,11 +1162,11 @@ ParseTokens(
parsePtr->errorType = TCL_PARSE_MISSING_BRACKET;
parsePtr->term = tokenPtr->start;
parsePtr->incomplete = 1;
- TclStackFree(parsePtr->interp, nestedPtr);
+ ckfree(nestedPtr);
return TCL_ERROR;
}
}
- TclStackFree(parsePtr->interp, nestedPtr);
+ ckfree(nestedPtr);
tokenPtr->type = TCL_TOKEN_COMMAND;
tokenPtr->size = src - tokenPtr->start;
parsePtr->numTokens++;
@@ -1526,10 +1526,10 @@ Tcl_ParseVar(
{
register Tcl_Obj *objPtr;
int code;
- Tcl_Parse *parsePtr = TclStackAlloc(interp, sizeof(Tcl_Parse));
+ Tcl_Parse *parsePtr = ckalloc(sizeof(Tcl_Parse));
if (Tcl_ParseVarName(interp, start, -1, parsePtr, 0) != TCL_OK) {
- TclStackFree(interp, parsePtr);
+ ckfree(parsePtr);
return NULL;
}
@@ -1541,13 +1541,13 @@ Tcl_ParseVar(
* There isn't a variable name after all: the $ is just a $.
*/
- TclStackFree(interp, parsePtr);
+ ckfree(parsePtr);
return "$";
}
code = TclSubstTokens(interp, parsePtr->tokenPtr, parsePtr->numTokens,
NULL, 1, NULL, NULL);
- TclStackFree(interp, parsePtr);
+ ckfree(parsePtr);
if (code != TCL_OK) {
return NULL;
}
@@ -2008,7 +2008,7 @@ TclSubstParse(
Tcl_Token *tokenPtr;
const char *lastTerm = parsePtr->term;
Tcl_Parse *nestedPtr =
- TclStackAlloc(interp, sizeof(Tcl_Parse));
+ ckalloc(sizeof(Tcl_Parse));
while (TCL_OK ==
Tcl_ParseCommand(NULL, p, length, 0, nestedPtr)) {
@@ -2026,7 +2026,7 @@ TclSubstParse(
}
lastTerm = nestedPtr->term;
}
- TclStackFree(interp, nestedPtr);
+ ckfree(nestedPtr);
if (lastTerm == parsePtr->term) {
/*
diff --git a/generic/tclProc.c b/generic/tclProc.c
index 6cd5bb2..63dd61d 100644
--- a/generic/tclProc.c
+++ b/generic/tclProc.c
@@ -222,7 +222,7 @@ Tcl_ProcObjCmd(
*/
if (iPtr->cmdFramePtr) {
- CmdFrame *contextPtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ CmdFrame *contextPtr = ckalloc(sizeof(CmdFrame));
*contextPtr = *iPtr->cmdFramePtr;
if (contextPtr->type == TCL_LOCATION_BC) {
@@ -300,7 +300,7 @@ Tcl_ProcObjCmd(
Tcl_DecrRefCount(contextPtr->data.eval.path);
contextPtr->data.eval.path = NULL;
}
- TclStackFree(interp, contextPtr);
+ ckfree(contextPtr);
}
/*
@@ -1096,8 +1096,7 @@ ProcWrongNumArgs(
*/
numArgs = framePtr->procPtr->numArgs;
- desiredObjs = TclStackAlloc(interp,
- (int) sizeof(Tcl_Obj *) * (numArgs+1));
+ desiredObjs = ckalloc((int) sizeof(Tcl_Obj *) * (numArgs+1));
if (framePtr->isProcCallFrame & FRAME_IS_LAMBDA) {
desiredObjs[0] = Tcl_NewStringObj("lambdaExpr", -1);
@@ -1135,7 +1134,7 @@ ProcWrongNumArgs(
for (i=0 ; i<=numArgs ; i++) {
Tcl_DecrRefCount(desiredObjs[i]);
}
- TclStackFree(interp, desiredObjs);
+ ckfree(desiredObjs);
return TCL_ERROR;
}
@@ -1449,7 +1448,7 @@ InitArgsAndLocals(
* parameters.
*/
- varPtr = TclStackAlloc(interp, (int)(localCt * sizeof(Var)));
+ varPtr = ckalloc((int)(localCt * sizeof(Var)));
framePtr->compiledLocals = varPtr;
framePtr->numCompiledLocals = localCt;
@@ -1740,9 +1739,9 @@ TclNRInterpProcCore(
if (result != TCL_OK) {
freePtr = iPtr->framePtr;
Tcl_PopCallFrame(interp); /* Pop but do not free. */
- TclStackFree(interp, freePtr->compiledLocals);
+ ckfree(freePtr->compiledLocals);
/* Free compiledLocals. */
- TclStackFree(interp, freePtr); /* Free CallFrame. */
+ ckfree(freePtr); /* Free CallFrame. */
return TCL_ERROR;
}
@@ -1912,9 +1911,9 @@ InterpProcNR2(
freePtr = iPtr->framePtr;
Tcl_PopCallFrame(interp); /* Pop but do not free. */
- TclStackFree(interp, freePtr->compiledLocals);
+ ckfree(freePtr->compiledLocals);
/* Free compiledLocals. */
- TclStackFree(interp, freePtr); /* Free CallFrame. */
+ ckfree(freePtr); /* Free CallFrame. */
return result;
}
@@ -2516,7 +2515,7 @@ SetLambdaFromAny(
*/
if (iPtr->cmdFramePtr) {
- CmdFrame *contextPtr = TclStackAlloc(interp, sizeof(CmdFrame));
+ CmdFrame *contextPtr = ckalloc(sizeof(CmdFrame));
*contextPtr = *iPtr->cmdFramePtr;
if (contextPtr->type == TCL_LOCATION_BC) {
@@ -2580,7 +2579,7 @@ SetLambdaFromAny(
Tcl_DecrRefCount(contextPtr->data.eval.path);
}
- TclStackFree(interp, contextPtr);
+ ckfree(contextPtr);
}
/*
@@ -2717,7 +2716,7 @@ TclNRApplyObjCmd(
return TCL_ERROR;
}
- extraPtr = TclStackAlloc(interp, sizeof(ApplyExtraData));
+ extraPtr = ckalloc(sizeof(ApplyExtraData));
memset(&extraPtr->cmd, 0, sizeof(Command));
procPtr->cmdPtr = &extraPtr->cmd;
extraPtr->cmd.nsPtr = (Namespace *) nsPtr;
@@ -2768,7 +2767,7 @@ ApplyNR2(
((Interp *) interp)->ensembleRewrite.sourceObjs = NULL;
}
- TclStackFree(interp, extraPtr);
+ ckfree(extraPtr);
return result;
}
diff --git a/generic/tclScan.c b/generic/tclScan.c
index c862be4..45f970d 100644
--- a/generic/tclScan.c
+++ b/generic/tclScan.c
@@ -259,7 +259,7 @@ ValidateFormat(
char *end;
Tcl_UniChar ch;
int objIndex, xpgSize, nspace = numVars;
- int *nassign = TclStackAlloc(interp, nspace * sizeof(int));
+ int *nassign = ckalloc(nspace * sizeof(int));
char buf[TCL_UTF_MAX+1];
/*
@@ -465,8 +465,7 @@ ValidateFormat(
} else {
nspace += 16; /* formerly STATIC_LIST_SIZE */
}
- nassign = TclStackRealloc(interp, nassign,
- nspace * sizeof(int));
+ nassign = ckrealloc(nassign, nspace * sizeof(int));
for (i = value; i < nspace; i++) {
nassign[i] = 0;
}
@@ -509,7 +508,7 @@ ValidateFormat(
}
}
- TclStackFree(interp, nassign);
+ ckfree(nassign);
return TCL_OK;
badIndex:
@@ -523,7 +522,7 @@ ValidateFormat(
}
error:
- TclStackFree(interp, nassign);
+ ckfree(nassign);
return TCL_ERROR;
}
diff --git a/generic/tclStubInit.c b/generic/tclStubInit.c
index eb9a9be..84c1ea9 100644
--- a/generic/tclStubInit.c
+++ b/generic/tclStubInit.c
@@ -57,7 +57,7 @@ static const TclIntStubs tclIntStubs = {
0, /* 0 */
0, /* 1 */
0, /* 2 */
- TclAllocateFreeObjects, /* 3 */
+ 0, /* 3 */
0, /* 4 */
TclCleanupChildren, /* 5 */
TclCleanupCommand, /* 6 */
@@ -269,8 +269,8 @@ static const TclIntStubs tclIntStubs = {
TclpFindExecutable, /* 212 */
TclGetObjNameOfExecutable, /* 213 */
TclSetObjNameOfExecutable, /* 214 */
- TclStackAlloc, /* 215 */
- TclStackFree, /* 216 */
+ 0, /* 215 */
+ 0, /* 216 */
TclPushStackFrame, /* 217 */
TclPopStackFrame, /* 218 */
0, /* 219 */
diff --git a/generic/tclTest.c b/generic/tclTest.c
index b757185..2878c8d 100644
--- a/generic/tclTest.c
+++ b/generic/tclTest.c
@@ -6720,7 +6720,7 @@ TestNRELevels(
Interp *iPtr = (Interp *) interp;
static ptrdiff_t *refDepth = NULL;
ptrdiff_t depth;
- Tcl_Obj *levels[6];
+ Tcl_Obj *levels[5];
int i = 0;
NRE_callback *cbPtr = iPtr->execEnvPtr->callbackPtr;
@@ -6734,16 +6734,14 @@ TestNRELevels(
levels[1] = Tcl_NewIntObj(iPtr->numLevels);
levels[2] = Tcl_NewIntObj(iPtr->cmdFramePtr->level);
levels[3] = Tcl_NewIntObj(iPtr->varFramePtr->level);
- levels[4] = Tcl_NewIntObj(iPtr->execEnvPtr->execStackPtr->tosPtr
- - iPtr->execEnvPtr->execStackPtr->stackWords);
while (cbPtr) {
i++;
cbPtr = cbPtr->nextPtr;
}
- levels[5] = Tcl_NewIntObj(i);
+ levels[4] = Tcl_NewIntObj(i);
- Tcl_SetObjResult(interp, Tcl_NewListObj(6, levels));
+ Tcl_SetObjResult(interp, Tcl_NewListObj(5, levels));
return TCL_OK;
}
diff --git a/generic/tclThreadAlloc.c b/generic/tclThreadAlloc.c
deleted file mode 100755
index c3acb2a..0000000
--- a/generic/tclThreadAlloc.c
+++ /dev/null
@@ -1,1081 +0,0 @@
-/*
- * tclThreadAlloc.c --
- *
- * This is a very fast storage allocator for use with threads (designed to
- * avoid lock contention). The basic strategy is to allocate memory in
- * fixed size blocks from block caches.
- *
- * The Initial Developer of the Original Code is America Online, Inc.
- * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
- *
- * See the file "license.terms" for information on usage and redistribution of
- * this file, and for a DISCLAIMER OF ALL WARRANTIES.
- */
-
-#include "tclInt.h"
-#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)
-
-/*
- * If range checking is enabled, an additional byte will be allocated to store
- * the magic number at the end of the requested memory.
- */
-
-#ifndef RCHECK
-#ifdef NDEBUG
-#define RCHECK 0
-#else
-#define RCHECK 1
-#endif
-#endif
-
-/*
- * The following define the number of Tcl_Obj's to allocate/move at a time and
- * the high water mark to prune a per-thread cache. On a 32 bit system,
- * sizeof(Tcl_Obj) = 24 so 800 * 24 = ~16k.
- */
-
-#define NOBJALLOC 800
-
-/* Actual definition moved to tclInt.h */
-#define NOBJHIGH ALLOC_NOBJHIGH
-
-/*
- * The following union stores accounting information for each block including
- * two small magic numbers and a bucket number when in use or a next pointer
- * when free. The original requested size (not including the Block overhead)
- * is also maintained.
- */
-
-typedef union Block {
- struct {
- union {
- union Block *next; /* Next in free list. */
- struct {
- unsigned char magic1; /* First magic number. */
- unsigned char bucket; /* Bucket block allocated from. */
- unsigned char unused; /* Padding. */
- unsigned char magic2; /* Second magic number. */
- } s;
- } u;
- size_t reqSize; /* Requested allocation size. */
- } b;
- unsigned char padding[TCL_ALLOCALIGN];
-} Block;
-#define nextBlock b.u.next
-#define sourceBucket b.u.s.bucket
-#define magicNum1 b.u.s.magic1
-#define magicNum2 b.u.s.magic2
-#define MAGIC 0xEF
-#define blockReqSize b.reqSize
-
-/*
- * The following defines the minimum and maximum block sizes and the number
- * of buckets in the bucket cache.
- */
-
-#define MINALLOC ((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
-#define NBUCKETS (11 - (MINALLOC >> 5))
-#define MAXALLOC (MINALLOC << (NBUCKETS - 1))
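For reference, a worked example of how these (now removed) formulas size the buckets, assuming a typical 64-bit build where TCL_ALLOCALIGN is 16 and sizeof(Block) is therefore 16; the arithmetic is illustrative, not part of the patch.

/* Assuming TCL_ALLOCALIGN == 16 and sizeof(Block) == 16:
 *   MINALLOC = (16 + 8 + 15) & ~15   = 32     (smallest bucket, bytes)
 *   NBUCKETS = 11 - (32 >> 5)        = 10     (number of buckets)
 *   MAXALLOC = 32 << (10 - 1)        = 16384  (largest bucket, bytes)
 * so the bucket block sizes run 32, 64, 128, ..., 16384; anything larger is
 * handed straight to the system malloc() by TclpAlloc() below.
 */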
-
-/*
- * The following structure defines a bucket of blocks with various accounting
- * and statistics information.
- */
-
-typedef struct Bucket {
- Block *firstPtr; /* First block available */
- long numFree; /* Number of blocks available */
-
- /* All fields below for accounting only */
-
- long numRemoves; /* Number of removes from bucket */
- long numInserts; /* Number of inserts into bucket */
- long numWaits; /* Number of waits to acquire a lock */
- long numLocks; /* Number of locks acquired */
- long totalAssigned; /* Total space assigned to bucket */
-} Bucket;
-
-/*
- * The following structure defines a cache of buckets and objs, of which there
- * will be (at most) one per thread. Any changes need to be reflected in the
- * struct AllocCache defined in tclInt.h, possibly also in the initialisation
- * code in Tcl_CreateInterp().
- */
-
-typedef struct Cache {
- struct Cache *nextPtr; /* Linked list of cache entries */
- Tcl_ThreadId owner; /* Which thread's cache is this? */
- Tcl_Obj *firstObjPtr; /* List of free objects for thread */
- int numObjects; /* Number of objects for thread */
- int totalAssigned; /* Total space assigned to thread */
- Bucket buckets[NBUCKETS]; /* The buckets for this thread */
-} Cache;
-
-/*
- * The following array specifies various per-bucket limits and locks. The
- * values are statically initialized to avoid calculating them repeatedly.
- */
-
-static struct {
- size_t blockSize; /* Bucket blocksize. */
- int maxBlocks; /* Max blocks before move to share. */
- int numMove; /* Num blocks to move to share. */
- Tcl_Mutex *lockPtr; /* Share bucket lock. */
-} bucketInfo[NBUCKETS];
-
-/*
- * Static functions defined in this file.
- */
-
-static Cache * GetCache(void);
-static void LockBucket(Cache *cachePtr, int bucket);
-static void UnlockBucket(Cache *cachePtr, int bucket);
-static void PutBlocks(Cache *cachePtr, int bucket, int numMove);
-static int GetBlocks(Cache *cachePtr, int bucket);
-static Block * Ptr2Block(char *ptr);
-static char * Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize);
-static void MoveObjs(Cache *fromPtr, Cache *toPtr, int numMove);
-
-/*
- * Local variables defined in this file and initialized at startup.
- */
-
-static Tcl_Mutex *listLockPtr;
-static Tcl_Mutex *objLockPtr;
-static Cache sharedCache;
-static Cache *sharedPtr = &sharedCache;
-static Cache *firstCachePtr = &sharedCache;
-
-/*
- *----------------------------------------------------------------------
- *
- * GetCache --
- *
- * Gets per-thread memory cache, allocating it if necessary.
- *
- * Results:
- * Pointer to cache.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static Cache *
-GetCache(void)
-{
- Cache *cachePtr;
-
- /*
- * Check for first-time initialization.
- */
-
- if (listLockPtr == NULL) {
- Tcl_Mutex *initLockPtr;
- unsigned int i;
-
- initLockPtr = Tcl_GetAllocMutex();
- Tcl_MutexLock(initLockPtr);
- if (listLockPtr == NULL) {
- listLockPtr = TclpNewAllocMutex();
- objLockPtr = TclpNewAllocMutex();
- for (i = 0; i < NBUCKETS; ++i) {
- bucketInfo[i].blockSize = MINALLOC << i;
- bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
- bucketInfo[i].numMove = i < NBUCKETS - 1 ?
- 1 << (NBUCKETS - 2 - i) : 1;
- bucketInfo[i].lockPtr = TclpNewAllocMutex();
- }
- }
- Tcl_MutexUnlock(initLockPtr);
- }
-
- /*
- * Get this thread's cache, allocating if necessary.
- */
-
- cachePtr = TclpGetAllocCache();
- if (cachePtr == NULL) {
- cachePtr = calloc(1, sizeof(Cache));
- if (cachePtr == NULL) {
- Tcl_Panic("alloc: could not allocate new cache");
- }
- Tcl_MutexLock(listLockPtr);
- cachePtr->nextPtr = firstCachePtr;
- firstCachePtr = cachePtr;
- Tcl_MutexUnlock(listLockPtr);
- cachePtr->owner = Tcl_GetCurrentThread();
- TclpSetAllocCache(cachePtr);
- }
- return cachePtr;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclFreeAllocCache --
- *
- * Flush and delete a cache, removing from list of caches.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-void
-TclFreeAllocCache(
- void *arg)
-{
- Cache *cachePtr = arg;
- Cache **nextPtrPtr;
- register unsigned int bucket;
-
- /*
- * Flush blocks.
- */
-
- for (bucket = 0; bucket < NBUCKETS; ++bucket) {
- if (cachePtr->buckets[bucket].numFree > 0) {
- PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree);
- }
- }
-
- /*
- * Flush objs.
- */
-
- if (cachePtr->numObjects > 0) {
- Tcl_MutexLock(objLockPtr);
- MoveObjs(cachePtr, sharedPtr, cachePtr->numObjects);
- Tcl_MutexUnlock(objLockPtr);
- }
-
- /*
- * Remove from pool list.
- */
-
- Tcl_MutexLock(listLockPtr);
- nextPtrPtr = &firstCachePtr;
- while (*nextPtrPtr != cachePtr) {
- nextPtrPtr = &(*nextPtrPtr)->nextPtr;
- }
- *nextPtrPtr = cachePtr->nextPtr;
- cachePtr->nextPtr = NULL;
- Tcl_MutexUnlock(listLockPtr);
- free(cachePtr);
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclpAlloc --
- *
- * Allocate memory.
- *
- * Results:
- * Pointer to memory just beyond Block pointer.
- *
- * Side effects:
- * May allocate more blocks for a bucket.
- *
- *----------------------------------------------------------------------
- */
-
-char *
-TclpAlloc(
- unsigned int reqSize)
-{
- Cache *cachePtr;
- Block *blockPtr;
- register int bucket;
- size_t size;
-
-#ifndef __LP64__
- if (sizeof(int) >= sizeof(size_t)) {
- /* An unsigned int overflow can also be a size_t overflow */
- const size_t zero = 0;
- const size_t max = ~zero;
-
- if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
- /* Requested allocation exceeds memory */
- return NULL;
- }
- }
-#endif
-
- cachePtr = TclpGetAllocCache();
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
-
- /*
- * Increment the requested size to include room for the Block structure.
- * Call malloc() directly if the required amount is greater than the
- * largest block, otherwise pop the smallest block large enough,
- * allocating more blocks if necessary.
- */
-
- blockPtr = NULL;
- size = reqSize + sizeof(Block);
-#if RCHECK
- size++;
-#endif
- if (size > MAXALLOC) {
- bucket = NBUCKETS;
- blockPtr = malloc(size);
- if (blockPtr != NULL) {
- cachePtr->totalAssigned += reqSize;
- }
- } else {
- bucket = 0;
- while (bucketInfo[bucket].blockSize < size) {
- bucket++;
- }
- if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
- blockPtr = cachePtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
- cachePtr->buckets[bucket].numFree--;
- cachePtr->buckets[bucket].numRemoves++;
- cachePtr->buckets[bucket].totalAssigned += reqSize;
- }
- }
- if (blockPtr == NULL) {
- return NULL;
- }
- return Block2Ptr(blockPtr, bucket, reqSize);
-}
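A worked request against the sizing above (illustrative arithmetic, same 64-bit assumptions): a 100-byte allocation becomes size = 100 + sizeof(Block) + 1 = 117 with RCHECK enabled, and the bucket loop stops at the first bucket whose blockSize is at least that.

/* reqSize = 100  ->  size = 100 + 16 + 1 = 117
 * buckets: 32, 64, 128, ...  ->  first blockSize >= 117 is 128 (bucket 2)
 * Requests with size > MAXALLOC (16384) bypass the buckets and call malloc().
 */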
-
-/*
- *----------------------------------------------------------------------
- *
- * TclpFree --
- *
- * Return blocks to the thread block cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * May move blocks to shared cache.
- *
- *----------------------------------------------------------------------
- */
-
-void
-TclpFree(
- char *ptr)
-{
- Cache *cachePtr;
- Block *blockPtr;
- int bucket;
-
- if (ptr == NULL) {
- return;
- }
-
- cachePtr = TclpGetAllocCache();
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
-
- /*
- * Get the block back from the user pointer and call system free directly
- * for large blocks. Otherwise, push the block back on the bucket and move
- * blocks to the shared cache if there are now too many free.
- */
-
- blockPtr = Ptr2Block(ptr);
- bucket = blockPtr->sourceBucket;
- if (bucket == NBUCKETS) {
- cachePtr->totalAssigned -= blockPtr->blockReqSize;
- free(blockPtr);
- return;
- }
-
- cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
- blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].firstPtr = blockPtr;
- cachePtr->buckets[bucket].numFree++;
- cachePtr->buckets[bucket].numInserts++;
-
- if (cachePtr != sharedPtr &&
- cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
- PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove);
- }
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclpRealloc --
- *
- * Re-allocate memory to a larger or smaller size.
- *
- * Results:
- * Pointer to memory just beyond Block pointer.
- *
- * Side effects:
- * Previous memory, if any, may be freed.
- *
- *----------------------------------------------------------------------
- */
-
-char *
-TclpRealloc(
- char *ptr,
- unsigned int reqSize)
-{
- Cache *cachePtr;
- Block *blockPtr;
- void *newPtr;
- size_t size, min;
- int bucket;
-
- if (ptr == NULL) {
- return TclpAlloc(reqSize);
- }
-
-#ifndef __LP64__
- if (sizeof(int) >= sizeof(size_t)) {
- /* An unsigned int overflow can also be a size_t overflow */
- const size_t zero = 0;
- const size_t max = ~zero;
-
- if (((size_t) reqSize) > max - sizeof(Block) - RCHECK) {
- /* Requested allocation exceeds memory */
- return NULL;
- }
- }
-#endif
-
- cachePtr = TclpGetAllocCache();
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
-
- /*
- * If the block is not a system block and fits in place, simply return the
- * existing pointer. Otherwise, if the block is a system block and the new
- * size would also require a system block, call realloc() directly.
- */
-
- blockPtr = Ptr2Block(ptr);
- size = reqSize + sizeof(Block);
-#if RCHECK
- size++;
-#endif
- bucket = blockPtr->sourceBucket;
- if (bucket != NBUCKETS) {
- if (bucket > 0) {
- min = bucketInfo[bucket-1].blockSize;
- } else {
- min = 0;
- }
- if (size > min && size <= bucketInfo[bucket].blockSize) {
- cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
- cachePtr->buckets[bucket].totalAssigned += reqSize;
- return Block2Ptr(blockPtr, bucket, reqSize);
- }
- } else if (size > MAXALLOC) {
- cachePtr->totalAssigned -= blockPtr->blockReqSize;
- cachePtr->totalAssigned += reqSize;
- blockPtr = realloc(blockPtr, size);
- if (blockPtr == NULL) {
- return NULL;
- }
- return Block2Ptr(blockPtr, NBUCKETS, reqSize);
- }
-
- /*
- * Finally, perform an expensive malloc/copy/free.
- */
-
- newPtr = TclpAlloc(reqSize);
- if (newPtr != NULL) {
- if (reqSize > blockPtr->blockReqSize) {
- reqSize = blockPtr->blockReqSize;
- }
- memcpy(newPtr, ptr, reqSize);
- TclpFree(ptr);
- }
- return newPtr;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclThreadAllocObj --
- *
- * Allocate a Tcl_Obj from the per-thread cache.
- *
- * Results:
- * Pointer to uninitialized Tcl_Obj.
- *
- * Side effects:
- * May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's if
- * list is empty.
- *
- * Note:
- * If this code is updated, the changes need to be reflected in the macro
- * TclAllocObjStorageEx() defined in tclInt.h
- *
- *----------------------------------------------------------------------
- */
-
-Tcl_Obj *
-TclThreadAllocObj(void)
-{
- register Cache *cachePtr = TclpGetAllocCache();
- register Tcl_Obj *objPtr;
-
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
-
- /*
- * Get this thread's obj list structure and move or allocate new objs if
- * necessary.
- */
-
- if (cachePtr->numObjects == 0) {
- register int numMove;
-
- Tcl_MutexLock(objLockPtr);
- numMove = sharedPtr->numObjects;
- if (numMove > 0) {
- if (numMove > NOBJALLOC) {
- numMove = NOBJALLOC;
- }
- MoveObjs(sharedPtr, cachePtr, numMove);
- }
- Tcl_MutexUnlock(objLockPtr);
- if (cachePtr->numObjects == 0) {
- Tcl_Obj *newObjsPtr;
-
- cachePtr->numObjects = numMove = NOBJALLOC;
- newObjsPtr = malloc(sizeof(Tcl_Obj) * numMove);
- if (newObjsPtr == NULL) {
- Tcl_Panic("alloc: could not allocate %d new objects", numMove);
- }
- while (--numMove >= 0) {
- objPtr = &newObjsPtr[numMove];
- objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
- cachePtr->firstObjPtr = objPtr;
- }
- }
- }
-
- /*
- * Pop the first object.
- */
-
- objPtr = cachePtr->firstObjPtr;
- cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
- cachePtr->numObjects--;
- return objPtr;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclThreadFreeObj --
- *
- * Return a free Tcl_Obj to the per-thread cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * May move free Tcl_Obj's to shared list upon hitting high water mark.
- *
- * Note:
- * If this code is updated, the changes need to be reflected in the macro
- * TclAllocObjStorageEx() defined in tclInt.h
- *
- *----------------------------------------------------------------------
- */
-
-void
-TclThreadFreeObj(
- Tcl_Obj *objPtr)
-{
- Cache *cachePtr = TclpGetAllocCache();
-
- if (cachePtr == NULL) {
- cachePtr = GetCache();
- }
-
- /*
- * Get this thread's list and push on the free Tcl_Obj.
- */
-
- objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
- cachePtr->firstObjPtr = objPtr;
- cachePtr->numObjects++;
-
- /*
- * If the number of free objects has exceeded the high water mark, move
- * some blocks to the shared list.
- */
-
- if (cachePtr->numObjects > NOBJHIGH) {
- Tcl_MutexLock(objLockPtr);
- MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
- Tcl_MutexUnlock(objLockPtr);
- }
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * Tcl_GetMemoryInfo --
- *
- * Return a list-of-lists of memory stats.
- *
- * Results:
- * None.
- *
- * Side effects:
- * List appended to given dstring.
- *
- *----------------------------------------------------------------------
- */
-
-void
-Tcl_GetMemoryInfo(
- Tcl_DString *dsPtr)
-{
- Cache *cachePtr;
- char buf[200];
- unsigned int n;
-
- Tcl_MutexLock(listLockPtr);
- cachePtr = firstCachePtr;
- while (cachePtr != NULL) {
- Tcl_DStringStartSublist(dsPtr);
- if (cachePtr == sharedPtr) {
- Tcl_DStringAppendElement(dsPtr, "shared");
- } else {
- sprintf(buf, "thread%p", cachePtr->owner);
- Tcl_DStringAppendElement(dsPtr, buf);
- }
- for (n = 0; n < NBUCKETS; ++n) {
- sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
- (unsigned long) bucketInfo[n].blockSize,
- cachePtr->buckets[n].numFree,
- cachePtr->buckets[n].numRemoves,
- cachePtr->buckets[n].numInserts,
- cachePtr->buckets[n].totalAssigned,
- cachePtr->buckets[n].numLocks,
- cachePtr->buckets[n].numWaits);
- Tcl_DStringAppendElement(dsPtr, buf);
- }
- Tcl_DStringEndSublist(dsPtr);
- cachePtr = cachePtr->nextPtr;
- }
- Tcl_MutexUnlock(listLockPtr);
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * MoveObjs --
- *
- * Move Tcl_Obj's between caches.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static void
-MoveObjs(
- Cache *fromPtr,
- Cache *toPtr,
- int numMove)
-{
- register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
- Tcl_Obj *fromFirstObjPtr = objPtr;
-
- toPtr->numObjects += numMove;
- fromPtr->numObjects -= numMove;
-
- /*
- * Find the last object to be moved; set the next one (the first one not
- * to be moved) as the first object in the 'from' cache.
- */
-
- while (--numMove) {
- objPtr = objPtr->internalRep.otherValuePtr;
- }
- fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
-
- /*
- * Move all objects as a block - they are already linked to each other, we
- * just have to update the first and last.
- */
-
- objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
- toPtr->firstObjPtr = fromFirstObjPtr;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * Block2Ptr, Ptr2Block --
- *
- * Convert between internal blocks and user pointers.
- *
- * Results:
- * User pointer or internal block.
- *
- * Side effects:
- * Invalid blocks cause a panic that aborts the process.
- *
- *----------------------------------------------------------------------
- */
-
-static char *
-Block2Ptr(
- Block *blockPtr,
- int bucket,
- unsigned int reqSize)
-{
- register void *ptr;
-
- blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
- blockPtr->sourceBucket = bucket;
- blockPtr->blockReqSize = reqSize;
- ptr = ((void *) (blockPtr + 1));
-#if RCHECK
- ((unsigned char *)(ptr))[reqSize] = MAGIC;
-#endif
- return (char *) ptr;
-}
-
-static Block *
-Ptr2Block(
- char *ptr)
-{
- register Block *blockPtr;
-
- blockPtr = (((Block *) ptr) - 1);
- if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x",
- blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
- }
-#if RCHECK
- if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) {
- Tcl_Panic("alloc: invalid block: %p: %x %x %x",
- blockPtr, blockPtr->magicNum1, blockPtr->magicNum2,
- ((unsigned char *) ptr)[blockPtr->blockReqSize]);
- }
-#endif
- return blockPtr;
-}
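
Every allocation is preceded by a small header: the two magic words detect corruption in front of the user data, and with RCHECK an extra guard byte written just past the requested size catches overruns when the block is checked again. A self-contained sketch of the same idea, with simplified field names, an illustrative 0xEF guard value, and abort() standing in for Tcl_Panic:

    #include <stdio.h>
    #include <stdlib.h>

    #define GUARD 0xEF                  /* illustrative magic value */

    typedef struct Header {
        unsigned char magic1;           /* guards against corruption */
        int bucket;                     /* size class the block came from */
        size_t reqSize;                 /* size the caller asked for */
        unsigned char magic2;
    } Header;

    /*
     * Hand out the memory just past the header.  Assumes the underlying
     * allocation is sizeof(Header) + reqSize + 1 bytes, leaving room for
     * the trailing guard byte.
     */

    static void *
    HeaderToPtr(Header *hdrPtr, int bucket, size_t reqSize)
    {
        unsigned char *ptr = (unsigned char *) (hdrPtr + 1);

        hdrPtr->magic1 = hdrPtr->magic2 = GUARD;
        hdrPtr->bucket = bucket;
        hdrPtr->reqSize = reqSize;
        ptr[reqSize] = GUARD;           /* trailing guard (the RCHECK idea) */
        return ptr;
    }

    /*
     * Recover the header from a user pointer, verifying the guards.
     */

    static Header *
    PtrToHeader(void *ptr)
    {
        Header *hdrPtr = ((Header *) ptr) - 1;
        unsigned char *bytes = ptr;

        if (hdrPtr->magic1 != GUARD || hdrPtr->magic2 != GUARD
                || bytes[hdrPtr->reqSize] != GUARD) {
            fprintf(stderr, "invalid block %p\n", (void *) hdrPtr);
            abort();                    /* the real code calls Tcl_Panic */
        }
        return hdrPtr;
    }
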
-
-/*
- *----------------------------------------------------------------------
- *
- * LockBucket, UnlockBucket --
- *
- * Set/unset the lock to access a bucket in the shared cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * Lock activity and contention are monitored globally and on a per-cache
- * basis.
- *
- *----------------------------------------------------------------------
- */
-
-static void
-LockBucket(
- Cache *cachePtr,
- int bucket)
-{
-#if 0
- if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
- Tcl_MutexLock(bucketInfo[bucket].lockPtr);
- cachePtr->buckets[bucket].numWaits++;
- sharedPtr->buckets[bucket].numWaits++;
- }
-#else
- Tcl_MutexLock(bucketInfo[bucket].lockPtr);
-#endif
- cachePtr->buckets[bucket].numLocks++;
- sharedPtr->buckets[bucket].numLocks++;
-}
-
-static void
-UnlockBucket(
- Cache *cachePtr,
- int bucket)
-{
- Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
-}
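
The disabled #if 0 branch shows what the numWaits counter is meant to measure: try the lock without blocking first, and count a wait only when that fails and a blocking lock is needed, while numLocks counts every acquisition. A sketch of the same pattern with plain pthreads; the struct and function names here are invented for illustration.

    #include <pthread.h>

    typedef struct LockStats {
        long numLocks;                  /* every acquisition */
        long numWaits;                  /* acquisitions that had to block */
    } LockStats;

    /*
     * Acquire lockPtr, recording whether we actually had to wait for it.
     */

    static void
    LockCounted(pthread_mutex_t *lockPtr, LockStats *statsPtr)
    {
        if (pthread_mutex_trylock(lockPtr) != 0) {
            pthread_mutex_lock(lockPtr);    /* block until available */
            statsPtr->numWaits++;           /* there was real contention */
        }
        statsPtr->numLocks++;
    }
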
-
-/*
- *----------------------------------------------------------------------
- *
- * PutBlocks --
- *
- * Return unused blocks to the shared cache.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static void
-PutBlocks(
- Cache *cachePtr,
- int bucket,
- int numMove)
-{
- register Block *lastPtr, *firstPtr;
- register int n = numMove;
-
- /*
- * Before acquiring the lock, walk the block list to find the last block
- * to be moved.
- */
-
- firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
- while (--n > 0) {
- lastPtr = lastPtr->nextBlock;
- }
- cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
- cachePtr->buckets[bucket].numFree -= numMove;
-
- /*
- * Acquire the lock and place the list of blocks at the front of the shared
- * cache bucket.
- */
-
- LockBucket(cachePtr, bucket);
- lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
- sharedPtr->buckets[bucket].firstPtr = firstPtr;
- sharedPtr->buckets[bucket].numFree += numMove;
- UnlockBucket(cachePtr, bucket);
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * GetBlocks --
- *
- * Get more blocks for a bucket.
- *
- * Results:
- * 1 if blocks were allocated, 0 otherwise.
- *
- * Side effects:
- * Cache may be filled with available blocks.
- *
- *----------------------------------------------------------------------
- */
-
-static int
-GetBlocks(
- Cache *cachePtr,
- int bucket)
-{
- register Block *blockPtr;
- register int n;
-
- /*
- * First, attempt to move blocks from the shared cache. Note the
- * potentially dirty read of numFree before acquiring the lock, which is a
- * slight performance enhancement. The value is verified after the lock is
- * actually acquired.
- */
-
- if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
- LockBucket(cachePtr, bucket);
- if (sharedPtr->buckets[bucket].numFree > 0) {
-
- /*
- * Either move the entire list or walk the list to find the last
- * block to move.
- */
-
- n = bucketInfo[bucket].numMove;
- if (n >= sharedPtr->buckets[bucket].numFree) {
- cachePtr->buckets[bucket].firstPtr =
- sharedPtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].numFree =
- sharedPtr->buckets[bucket].numFree;
- sharedPtr->buckets[bucket].firstPtr = NULL;
- sharedPtr->buckets[bucket].numFree = 0;
- } else {
- blockPtr = sharedPtr->buckets[bucket].firstPtr;
- cachePtr->buckets[bucket].firstPtr = blockPtr;
- sharedPtr->buckets[bucket].numFree -= n;
- cachePtr->buckets[bucket].numFree = n;
- while (--n > 0) {
- blockPtr = blockPtr->nextBlock;
- }
- sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
- blockPtr->nextBlock = NULL;
- }
- }
- UnlockBucket(cachePtr, bucket);
- }
-
- if (cachePtr->buckets[bucket].numFree == 0) {
- register size_t size;
-
- /*
- * If no blocks could be moved from shared, first look for a larger
- * block in this cache to split up.
- */
-
- blockPtr = NULL;
- n = NBUCKETS;
- size = 0; /* lint */
- while (--n > bucket) {
- if (cachePtr->buckets[n].numFree > 0) {
- size = bucketInfo[n].blockSize;
- blockPtr = cachePtr->buckets[n].firstPtr;
- cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
- cachePtr->buckets[n].numFree--;
- break;
- }
- }
-
- /*
- * Otherwise, allocate a big new block directly.
- */
-
- if (blockPtr == NULL) {
- size = MAXALLOC;
- blockPtr = malloc(size);
- if (blockPtr == NULL) {
- return 0;
- }
- }
-
- /*
- * Split the larger block into smaller blocks for this bucket.
- */
-
- n = size / bucketInfo[bucket].blockSize;
- cachePtr->buckets[bucket].numFree = n;
- cachePtr->buckets[bucket].firstPtr = blockPtr;
- while (--n > 0) {
- blockPtr->nextBlock = (Block *)
- ((char *) blockPtr + bucketInfo[bucket].blockSize);
- blockPtr = blockPtr->nextBlock;
- }
- blockPtr->nextBlock = NULL;
- }
- return 1;
-}
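
The unlocked test of sharedPtr->buckets[bucket].numFree is only a hint; it is re-checked once the bucket lock is held, so a stale read costs at most an unnecessary lock round trip. When nothing can be moved or split locally, one large region is carved into equally sized blocks and threaded into a free list, as in this standalone sketch (the helper and its parameters are illustrative, not the GetBlocks interface):

    #include <stdlib.h>

    typedef struct Block { struct Block *nextBlock; } Block;

    /*
     * Carve one malloc()ed region of regionSize bytes into blocks of
     * blockSize bytes and return them as a singly linked free list.
     * Requires sizeof(Block) <= blockSize <= regionSize; returns NULL
     * if the region cannot be allocated, leaving *numFreePtr untouched.
     */

    static Block *
    CarveRegion(size_t regionSize, size_t blockSize, long *numFreePtr)
    {
        Block *firstPtr, *blockPtr;
        long n = (long) (regionSize / blockSize);

        firstPtr = blockPtr = malloc(regionSize);
        if (firstPtr == NULL) {
            return NULL;
        }
        *numFreePtr = n;
        while (--n > 0) {
            blockPtr->nextBlock = (Block *)
                    ((char *) blockPtr + blockSize);
            blockPtr = blockPtr->nextBlock;
        }
        blockPtr->nextBlock = NULL;         /* terminate the free list */
        return firstPtr;
    }
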
-
-/*
- *----------------------------------------------------------------------
- *
- * TclFinalizeThreadAlloc --
- *
- * This procedure is used to destroy all private resources used in this
- * file.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-void
-TclFinalizeThreadAlloc(void)
-{
- unsigned int i;
-
- for (i = 0; i < NBUCKETS; ++i) {
- TclpFreeAllocMutex(bucketInfo[i].lockPtr);
- bucketInfo[i].lockPtr = NULL;
- }
-
- TclpFreeAllocMutex(objLockPtr);
- objLockPtr = NULL;
-
- TclpFreeAllocMutex(listLockPtr);
- listLockPtr = NULL;
-
- TclpFreeAllocCache(NULL);
-}
-
-#else /* !(TCL_THREADS && USE_THREAD_ALLOC) */
-/*
- *----------------------------------------------------------------------
- *
- * Tcl_GetMemoryInfo --
- *
- * Return a list-of-lists of memory stats.
- *
- * Results:
- * None.
- *
- * Side effects:
- * List appended to given dstring.
- *
- *----------------------------------------------------------------------
- */
-
-void
-Tcl_GetMemoryInfo(
- Tcl_DString *dsPtr)
-{
- Tcl_Panic("Tcl_GetMemoryInfo called when threaded memory allocator not in use");
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * TclFinalizeThreadAlloc --
- *
- * This procedure is used to destroy all private resources used in this
- * file.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-void
-TclFinalizeThreadAlloc(void)
-{
- Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use");
-}
-#endif /* TCL_THREADS && USE_THREAD_ALLOC */
-
-/*
- * Local Variables:
- * mode: c
- * c-basic-offset: 4
- * fill-column: 78
- * End:
- */
diff --git a/generic/tclTrace.c b/generic/tclTrace.c
index d5fb6f6..ffbaa17 100644
--- a/generic/tclTrace.c
+++ b/generic/tclTrace.c
@@ -1650,7 +1650,7 @@ CallTraceFunction(
* Copy the command characters into a new string.
*/
- commandCopy = TclStackAlloc(interp, (unsigned) numChars + 1);
+ commandCopy = ckalloc((unsigned) numChars + 1);
memcpy(commandCopy, command, (size_t) numChars);
commandCopy[numChars] = '\0';
@@ -1661,7 +1661,7 @@ CallTraceFunction(
traceCode = tracePtr->proc(tracePtr->clientData, (Tcl_Interp *) iPtr,
iPtr->numLevels, commandCopy, (Tcl_Command) cmdPtr, objc, objv);
- TclStackFree(interp, commandCopy);
+ ckfree(commandCopy);
return traceCode;
}
@@ -2237,7 +2237,7 @@ StringTraceProc(
* which uses strings for everything.
*/
- argv = (const char **) TclStackAlloc(interp,
+ argv = (const char **) ckalloc(
(unsigned) ((objc + 1) * sizeof(const char *)));
for (i = 0; i < objc; i++) {
argv[i] = Tcl_GetString(objv[i]);
@@ -2252,7 +2252,7 @@ StringTraceProc(
data->proc(data->clientData, interp, level, (char *) command,
cmdPtr->proc, cmdPtr->clientData, objc, argv);
- TclStackFree(interp, (void *) argv);
+ ckfree((void *) argv);
return TCL_OK;
}
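
With the execution stack gone, these sites switch from TclStackAlloc()/TclStackFree() to plain ckalloc()/ckfree(); ckalloc() panics on allocation failure rather than returning NULL, so the converted call sites need no extra error handling. The converted shape, pulled out into a hypothetical helper for illustration:

    #include <string.h>
    #include "tcl.h"                    /* ckalloc() / ckfree() */

    /*
     * Hypothetical helper showing the converted pattern: heap-allocate a
     * NUL-terminated copy of the first numChars bytes of command.  The
     * caller releases the copy with ckfree() when the trace returns.
     */

    static char *
    CopyCommand(const char *command, int numChars)
    {
        char *commandCopy = ckalloc((unsigned) numChars + 1);

        memcpy(commandCopy, command, (size_t) numChars);
        commandCopy[numChars] = '\0';
        return commandCopy;
    }
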
diff --git a/tests/nre.test b/tests/nre.test
index 295f02e..17f9a51 100644
--- a/tests/nre.test
+++ b/tests/nre.test
@@ -25,8 +25,8 @@ if {[testConstraint testnrelevels]} {
namespace eval testnre {
namespace path ::tcl::mathop
#
- # [testnrelevels] returns a 6-list with: C-stack depth, iPtr->numlevels,
- # cmdFrame level, callFrame level, tosPtr and callback depth
+ # [testnrelevels] returns a 5-list with: C-stack depth, iPtr->numlevels,
+ # cmdFrame level, callFrame level and callback depth
#
variable last [testnrelevels]
proc depthDiff {} {
diff --git a/tests/tailcall.test b/tests/tailcall.test
index e9ec188..af496fc 100644
--- a/tests/tailcall.test
+++ b/tests/tailcall.test
@@ -24,8 +24,8 @@ testConstraint testnrelevels [llength [info commands testnrelevels]]
if {[testConstraint testnrelevels]} {
namespace eval testnre {
#
- # [testnrelevels] returns a 6-list with: C-stack depth, iPtr->numlevels,
- # cmdFrame level, callFrame level, tosPtr and callback depth
+ # [testnrelevels] returns a 5-list with: C-stack depth, iPtr->numlevels,
+ # cmdFrame level, callFrame level and callback depth
#
variable last [testnrelevels]
proc depthDiff {} {
@@ -66,7 +66,7 @@ test tailcall-0.1 {tailcall is constant space} -constraints testnrelevels -setup
a 0
} -cleanup {
rename a {}
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-0.2 {tailcall is constant space} -constraints testnrelevels -setup {
set a { i {
@@ -83,7 +83,7 @@ test tailcall-0.2 {tailcall is constant space} -constraints testnrelevels -setup
apply $a 0
} -cleanup {
unset a
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-0.3 {tailcall is constant space} -constraints testnrelevels -setup {
proc a i {
@@ -101,7 +101,7 @@ test tailcall-0.3 {tailcall is constant space} -constraints testnrelevels -setup
} -cleanup {
rename a {}
rename b {}
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-0.4 {tailcall is constant space} -constraints testnrelevels -setup {
namespace eval ::ns {
@@ -124,7 +124,7 @@ test tailcall-0.4 {tailcall is constant space} -constraints testnrelevels -setup
} -cleanup {
rename b {}
namespace delete ::ns
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-0.5 {tailcall is constant space} -constraints testnrelevels -setup {
proc b i {
@@ -142,7 +142,7 @@ test tailcall-0.5 {tailcall is constant space} -constraints testnrelevels -setup
} -cleanup {
rename a {}
rename b {}
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-0.6 {tailcall is constant space} -constraints {testnrelevels knownBug} -setup {
#
@@ -167,7 +167,7 @@ test tailcall-0.6 {tailcall is constant space} -constraints {testnrelevels known
rename a {}
rename c {}
rename d {}
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-0.7 {tailcall is constant space} -constraints testnrelevels -setup {
catch {rename foo {}}
@@ -188,7 +188,7 @@ test tailcall-0.7 {tailcall is constant space} -constraints testnrelevels -setup
} -cleanup {
rename a {}
rename foo {}
-} -result {0 0 0 0 0 0}
+} -result {0 0 0 0 0}
test tailcall-1 {tailcall} -body {
namespace eval a {
diff --git a/unix/Makefile.in b/unix/Makefile.in
index 20ba896..2b5f867 100644
--- a/unix/Makefile.in
+++ b/unix/Makefile.in
@@ -304,7 +304,7 @@ GENERIC_OBJS = regcomp.o regexec.o regfree.o regerror.o tclAlloc.o \
tclPreserve.o tclProc.o tclRegexp.o \
tclResolve.o tclResult.o tclScan.o tclStringObj.o \
tclStrToD.o tclThread.o \
- tclThreadAlloc.o tclThreadJoin.o tclThreadStorage.o tclStubInit.o \
+ tclThreadJoin.o tclThreadStorage.o tclStubInit.o \
tclTimer.o tclTrace.o tclUtf.o tclUtil.o tclVar.o tclZlib.o \
tclTomMathInterface.o \
tclAssembly.o
@@ -445,7 +445,6 @@ GENERIC_SRCS = \
$(GENERIC_DIR)/tclTestObj.c \
$(GENERIC_DIR)/tclTestProcBodyObj.c \
$(GENERIC_DIR)/tclThread.c \
- $(GENERIC_DIR)/tclThreadAlloc.c \
$(GENERIC_DIR)/tclThreadJoin.c \
$(GENERIC_DIR)/tclThreadStorage.c \
$(GENERIC_DIR)/tclTimer.c \
@@ -1007,11 +1006,8 @@ regerror.o: $(REGHDRS) $(GENERIC_DIR)/regerrs.h $(GENERIC_DIR)/regerror.c
tclAppInit.o: $(UNIX_DIR)/tclAppInit.c
$(CC) -c $(APP_CC_SWITCHES) $(UNIX_DIR)/tclAppInit.c
-# On Unix we want to use the normal malloc/free implementation, so we
-# specifically set the USE_TCLALLOC flag.
-
tclAlloc.o: $(GENERIC_DIR)/tclAlloc.c
- $(CC) -c $(CC_SWITCHES) -DUSE_TCLALLOC=0 $(GENERIC_DIR)/tclAlloc.c
+ $(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclAlloc.c
tclAssembly.o: $(GENERIC_DIR)/tclAssembly.c $(COMPILEHDR)
$(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclAssembly.c
@@ -1286,9 +1282,6 @@ tclTimer.o: $(GENERIC_DIR)/tclTimer.c
tclThread.o: $(GENERIC_DIR)/tclThread.c
$(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclThread.c
-tclThreadAlloc.o: $(GENERIC_DIR)/tclThreadAlloc.c
- $(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclThreadAlloc.c
-
tclThreadJoin.o: $(GENERIC_DIR)/tclThreadJoin.c
$(CC) -c $(CC_SWITCHES) $(GENERIC_DIR)/tclThreadJoin.c
diff --git a/unix/tclUnixPipe.c b/unix/tclUnixPipe.c
index d01624c..f6645fd 100644
--- a/unix/tclUnixPipe.c
+++ b/unix/tclUnixPipe.c
@@ -452,8 +452,8 @@ TclpCreateProcess(
* deallocated later
*/
- dsArray = TclStackAlloc(interp, argc * sizeof(Tcl_DString));
- newArgv = TclStackAlloc(interp, (argc+1) * sizeof(char *));
+ dsArray = ckalloc(argc * sizeof(Tcl_DString));
+ newArgv = ckalloc((argc+1) * sizeof(char *));
newArgv[argc] = NULL;
for (i = 0; i < argc; i++) {
newArgv[i] = Tcl_UtfToExternalDString(NULL, argv[i], -1, &dsArray[i]);
@@ -524,8 +524,8 @@ TclpCreateProcess(
for (i = 0; i < argc; i++) {
Tcl_DStringFree(&dsArray[i]);
}
- TclStackFree(interp, newArgv);
- TclStackFree(interp, dsArray);
+ ckfree(newArgv);
+ ckfree(dsArray);
if (pid == -1) {
Tcl_AppendResult(interp, "couldn't fork child process: ",
diff --git a/unix/tclUnixThrd.c b/unix/tclUnixThrd.c
index 0469d7a..a4db0df 100644
--- a/unix/tclUnixThrd.c
+++ b/unix/tclUnixThrd.c
@@ -676,12 +676,11 @@ TclpInetNtoa(
#endif
}
-#ifdef TCL_THREADS
+#if defined(TCL_THREADS)
/*
* Additions by AOL for specialized thread memory allocator.
*/
-#ifdef USE_THREAD_ALLOC
static volatile int initialized = 0;
static pthread_key_t key;
@@ -718,6 +717,7 @@ TclpFreeAllocMutex(
free(lockPtr);
}
+
void
TclpFreeAllocCache(
void *ptr)
@@ -760,8 +760,9 @@ TclpSetAllocCache(
{
pthread_setspecific(key, arg);
}
-#endif /* USE_THREAD_ALLOC */
+#endif
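
On Unix the per-thread allocator cache pointer lives in a pthread thread-specific-data slot: a key is created once, TclpSetAllocCache() stores the pointer with pthread_setspecific(), and the allocation fast path reads it back with pthread_getspecific(). A reduced sketch of that mechanism outside Tcl; the names are stand-ins, initialization is simplified with pthread_once, and the destructor assumes the cache itself came from malloc().

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t cacheKey;
    static pthread_once_t cacheKeyOnce = PTHREAD_ONCE_INIT;

    static void
    FreeCache(void *ptr)                /* runs automatically at thread exit */
    {
        free(ptr);
    }

    static void
    InitCacheKey(void)
    {
        pthread_key_create(&cacheKey, FreeCache);
    }

    /*
     * Remember this thread's allocator cache pointer.
     */

    static void
    SetAllocCache(void *cachePtr)
    {
        pthread_once(&cacheKeyOnce, InitCacheKey);
        pthread_setspecific(cacheKey, cachePtr);
    }

    /*
     * Fetch it again on the allocation fast path; NULL if never set.
     */

    static void *
    GetAllocCache(void)
    {
        pthread_once(&cacheKeyOnce, InitCacheKey);
        return pthread_getspecific(cacheKey);
    }
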
+#ifdef TCL_THREADS
void *
TclpThreadCreateKey(void)
{