author     Tim Peters <tim.peters@gmail.com>    2002-03-30 21:36:04 (GMT)
committer  Tim Peters <tim.peters@gmail.com>    2002-03-30 21:36:04 (GMT)
commit     c2ce91af5fbe71c5e12d78b0021701233aacde31 (patch)
tree       fc60478f7157c074eff770db5164bd24919893c7 /Objects
parent     7b85b4aa7fa4e1535dc3eda7d2387a6ea7c29012 (diff)
It's once again thought safe to call the pymalloc free/realloc with an
address obtained from system malloc/realloc without holding the GIL.

When the vector of arena base addresses has to grow, the old vector is
deliberately leaked. This makes "stale" cross-thread references safe.
arenas and narenas are also declared volatile, and changed in an order
that prevents a thread from picking up a value of narenas too large for
the value of arenas it sees.

Added more asserts. Fixed an old inaccurate comment. Added a comment
explaining why it's safe to call pymalloc free/realloc with an address
obtained from system malloc/realloc even when arenas is still NULL (this
is obscure, since the ADDRESS_IN_RANGE macro appears <wink> to index into
arenas).
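A minimal standalone sketch of the growth protocol the message describes, with hypothetical names (append_base, base_vec, nbases, maxbases) standing in for new_arena, arenas, narenas, and maxarenas; note this mirrors the 2002-era reasoning, in which volatile was assumed to order stores (it is not a portable memory fence by modern standards):

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long uptr;
    typedef unsigned int uint;

    static uptr *volatile base_vec = NULL;  /* the pointer itself is volatile */
    static volatile uint nbases = 0;
    static uint maxbases = 0;

    /* Append newbase while preserving the invariant a lock-free reader
     * needs: any nbases value it sees is never larger than the length of
     * the vector reachable through the base_vec value it sees. */
    static int
    append_base(uptr newbase)
    {
        if (base_vec == NULL) {
            uptr *p = (uptr *)malloc(16 * sizeof(*p));
            if (p == NULL)
                return -1;
            base_vec = p;
            maxbases = 16;
        }
        else if (nbases == maxbases) {
            uint newmax = maxbases << 1;
            uptr *p;
            if (newmax <= maxbases)          /* overflow */
                return -1;
            p = (uptr *)malloc(newmax * sizeof(*p));
            if (p == NULL)
                return -1;
            memcpy(p, base_vec, nbases * sizeof(*p));
            base_vec = p;    /* publish the vector first; old one leaks */
            maxbases = newmax;
        }
        base_vec[nbases] = newbase;
        ++nbases;            /* publish the larger count only afterwards */
        return 0;
    }

A reader that loads nbases before base_vec can therefore never see a count larger than the vector it ends up indexing, whichever of the two values it happens to pick up stale.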
Diffstat (limited to 'Objects')
-rw-r--r--    Objects/obmalloc.c    41
1 file changed, 27 insertions, 14 deletions
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 23bf0a9..6b7b05e 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -171,7 +171,7 @@
/*
* Size of the pools used for small blocks. Should be a power of 2,
- * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k, eventually 8k.
+ * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
*/
#define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */
#define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK
@@ -308,8 +308,8 @@ static poolp freepools = NULL; /* free list for cached pools */
* and only grows by appending at the end. For now we never return an arena
* to the OS.
*/
-static uptr *arenas = NULL;
-static uint narenas = 0;
+static uptr *volatile arenas = NULL; /* the pointer itself is volatile */
+static volatile uint narenas = 0;
static uint maxarenas = 0;
/* Number of pools still available to be allocated in the current arena. */
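The placement of the volatile qualifier in that declaration is easy to misread; a two-line contrast (not_this is a hypothetical name for illustration only):

    typedef unsigned long uptr;   /* stand-in for obmalloc's uptr */

    uptr *volatile arenas;    /* volatile POINTER: every use of 'arenas'
                                 refetches the pointer from memory */
    volatile uptr *not_this;  /* pointer to volatile data: the pointee is
                                 volatile, but the pointer itself may be
                                 cached in a register -- not what the
                                 patch needs */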
@@ -354,6 +354,7 @@ new_arena(void)
nfreepools <- number of whole pools that fit after alignment */
arenabase = bp;
nfreepools = ARENA_SIZE / POOL_SIZE;
+ assert(POOL_SIZE * nfreepools == ARENA_SIZE);
excess = (uint)bp & POOL_SIZE_MASK;
if (excess != 0) {
--nfreepools;
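The alignment arithmetic around the new assert, as a runnable sketch; POOL_SIZE and ARENA_SIZE here are assumed example values (the real POOL_SIZE is SYSTEM_PAGE_SIZE):

    #include <assert.h>
    #include <stdio.h>

    #define POOL_SIZE       4096u             /* must be 2^N */
    #define POOL_SIZE_MASK  (POOL_SIZE - 1)
    #define ARENA_SIZE      (256u * 1024u)    /* a multiple of POOL_SIZE */

    int main(void)
    {
        unsigned long bp = 0x10000800UL;      /* a misaligned arena base */
        unsigned int nfreepools = ARENA_SIZE / POOL_SIZE;
        unsigned int excess = (unsigned int)bp & POOL_SIZE_MASK;

        /* the added assert: the arena divides evenly into whole pools */
        assert(POOL_SIZE * nfreepools == ARENA_SIZE);
        if (excess != 0) {
            --nfreepools;                     /* first partial pool is lost */
            bp += POOL_SIZE - excess;         /* round up to a pool boundary */
        }
        printf("base=%#lx pools=%u\n", bp, nfreepools);
        /* prints: base=0x10001000 pools=63 */
        return 0;
    }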
@@ -362,15 +363,16 @@ new_arena(void)
/* Make room for a new entry in the arenas vector. */
if (arenas == NULL) {
+ assert(narenas == 0 && maxarenas == 0);
arenas = (uptr *)PyMem_MALLOC(16 * sizeof(*arenas));
if (arenas == NULL)
goto error;
maxarenas = 16;
- narenas = 0;
}
else if (narenas == maxarenas) {
/* Grow arenas. Don't use realloc: if this fails, we
* don't want to lose the base addresses we already have.
+ *
* Exceedingly subtle: Someone may be calling the pymalloc
* free via PyMem_{DEL, Del, FREE, Free} without holding the
* GIL. Someone else may simultaneously be calling the
@@ -392,26 +394,30 @@ new_arena(void)
* is supposed to fail. Having an incomplete vector can't
* make a supposed-to-fail case succeed by mistake (it could
* only make a supposed-to-succeed case fail by mistake).
+ *
+ * In addition, without a lock we can't know for sure when
+ * an old vector is no longer referenced, so we simply let
+ * old vectors leak.
+ *
+ * And on top of that, since narenas and arenas can't be
+ * changed as-a-pair atomically without a lock, we're also
+ * careful to declare them volatile and ensure that we change
+ * arenas first. This prevents another thread from picking
+ * up an narenas value too large for the arenas value it
+ * reads (arenas never shrinks).
+ *
* Read the above 50 times before changing anything in this
* block.
- * XXX Fudge. This is still vulnerable: there's nothing
- * XXX to stop the bad-guy thread from picking up the
- * XXX current value of arenas, but not indexing off of it
- * XXX until after the PyMem_FREE(oldarenas) below completes.
*/
- uptr *oldarenas;
uptr *p;
- uint newmax = maxarenas + (maxarenas >> 1);
-
+ uint newmax = maxarenas << 1;
if (newmax <= maxarenas) /* overflow */
goto error;
p = (uptr *)PyMem_MALLOC(newmax * sizeof(*arenas));
if (p == NULL)
goto error;
memcpy(p, arenas, narenas * sizeof(*arenas));
- oldarenas = arenas;
- arenas = p;
- PyMem_FREE(oldarenas);
+ arenas = p; /* old arenas deliberately leaked */
maxarenas = newmax;
}
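To make the ordering argument concrete, here is a hypothetical reader in the shape of ADDRESS_IN_RANGE (the extern declarations stand in for the real definitions), annotated with the snapshots the store order allows:

    typedef unsigned long uptr;
    typedef unsigned int uint;

    extern uptr *volatile arenas;   /* writer stores this first ...  */
    extern volatile uint narenas;   /* ... and grows this afterwards */

    /* Reads narenas BEFORE dereferencing arenas, so the possible
     * (count, vector) snapshots are:
     *   old count, old vector -> consistent
     *   old count, new vector -> safe; a grown vector keeps all old entries
     *   new count, new vector -> consistent
     * "new count, old vector", the only pair that could index past the
     * end, is ruled out by the writer's store order (assuming, as code
     * of this era does, that volatile stores are not reordered).
     * A stale vector stays readable because old vectors are never freed. */
    static int
    address_in_range(void *p, uint i, uptr arena_size)
    {
        return i < narenas && (uptr)p - arenas[i] < arena_size;
    }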
@@ -431,12 +437,19 @@ error:
/* Return true if and only if P is an address that was allocated by
* pymalloc. I must be the index into arenas that the address claims
* to come from.
+ *
* Tricky: Letting B be the arena base address in arenas[I], P belongs to the
* arena if and only if
* B <= P < B + ARENA_SIZE
* Subtracting B throughout, this is true iff
* 0 <= P-B < ARENA_SIZE
* By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
+ *
+ * Obscure: A PyMem "free memory" function can call the pymalloc free or
+ * realloc before the first arena has been allocated. arenas is still
+ * NULL in that case. We rely on narenas also being 0 then, so the
+ * (I) < narenas test must fail, saving us from trying to index into
+ * a NULL arenas.
*/
#define ADDRESS_IN_RANGE(P, I) \
((I) < narenas && (uptr)(P) - arenas[I] < (uptr)ARENA_SIZE)
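A runnable illustration of the unsigned-arithmetic trick; the base address and ARENA_SIZE are assumed example values:

    #include <assert.h>

    typedef unsigned long uptr;          /* stand-in for obmalloc's uptr */
    #define ARENA_SIZE (256u * 1024u)

    int main(void)
    {
        uptr base   = 0x40000000UL;      /* hypothetical arena base B */
        uptr inside = base + 100;        /* B <= P < B + ARENA_SIZE   */
        uptr below  = base - 100;        /* P < B                     */
        uptr above  = base + ARENA_SIZE; /* P >= B + ARENA_SIZE       */

        /* One unsigned compare does both halves of B <= P < B+ARENA_SIZE:
         * when P < B, P - B wraps around to a huge unsigned value, so
         * the "< ARENA_SIZE" test rejects it too. */
        assert(inside - base < ARENA_SIZE);
        assert(!(below - base < ARENA_SIZE));   /* wrapped: ~2**N - 100 */
        assert(!(above - base < ARENA_SIZE));
        return 0;
    }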