author Benjamin Peterson <benjamin@python.org> 2018-02-24 19:59:10 (GMT)
committer GitHub <noreply@github.com> 2018-02-24 19:59:10 (GMT)
commit b18f8bc1a77193c372d79afa79b284028a2842d7 (patch)
tree 64e9493cd07303b3f78f454c7d3aef6ac1faa42f /Objects/obmalloc.c
parent acd7163c0a0674b2fb6cc0178d52cf90c953fbae (diff)
remove vestigial locking from obmalloc (GH-5805)
obmalloc has (empty) macros for locking in the allocator. These aren't needed in CPython; we rely on the GIL.
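
For context, a minimal sketch of why the deletion is behavior-preserving (reconstructed from the hunks below, not an exact copy of the tree): every SIMPLELOCK_* macro had an empty body, so the LOCK()/UNLOCK() calls removed by this commit already expanded to nothing under the preprocessor.

/* Sketch: the macros removed below all expand to nothing. */
#define SIMPLELOCK_DECL(lock)    /* no lock object is ever declared */
#define SIMPLELOCK_LOCK(lock)    /* acquiring is a no-op */
#define SIMPLELOCK_UNLOCK(lock)  /* releasing is a no-op */

SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()   SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)

/* A call site such as
 *
 *     LOCK();
 *     ... carve a block out of a pool ...
 *     UNLOCK();
 *
 * therefore preprocesses to exactly the unlocked body; the GIL is
 * what actually serializes entry into pymalloc_alloc/pymalloc_free.
 */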
Diffstat (limited to 'Objects/obmalloc.c')
-rw-r--r--  Objects/obmalloc.c  39
1 file changed, 0 insertions, 39 deletions
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 0e485d6..eb7cbfc 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -849,30 +849,6 @@ static int running_on_valgrind = -1;
/*==========================================================================*/
-/*
- * Locking
- *
- * To reduce lock contention, it would probably be better to refine the
- * crude function locking with per size class locking. I'm not positive
- * however, whether it's worth switching to such locking policy because
- * of the performance penalty it might introduce.
- *
- * The following macros describe the simplest (should also be the fastest)
- * lock object on a particular platform and the init/fini/lock/unlock
- * operations on it. The locks defined here are not expected to be recursive
- * because it is assumed that they will always be called in the order:
- * INIT, [LOCK, UNLOCK]*, FINI.
- */
-
-/*
- * Python's threads are serialized, so object malloc locking is disabled.
- */
-#define SIMPLELOCK_DECL(lock) /* simple lock declaration */
-#define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */
-#define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */
-#define SIMPLELOCK_LOCK(lock) /* acquire released lock */
-#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */
-
/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;
@@ -945,15 +921,6 @@ struct arena_object {
/*==========================================================================*/
/*
- * This malloc lock
- */
-SIMPLELOCK_DECL(_malloc_lock)
-#define LOCK() SIMPLELOCK_LOCK(_malloc_lock)
-#define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)
-#define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)
-#define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)
-
-/*
* Pool table -- headed, circular, doubly-linked lists of partially used pools.
This is involved. For an index i, usedpools[i+i] is the header for a list of
@@ -1381,7 +1348,6 @@ pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
return 0;
}
- LOCK();
/*
* Most frequent paths first
*/
@@ -1537,13 +1503,11 @@ pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
goto init_pool;
success:
- UNLOCK();
assert(bp != NULL);
*ptr_p = (void *)bp;
return 1;
failed:
- UNLOCK();
return 0;
}
@@ -1612,8 +1576,6 @@ pymalloc_free(void *ctx, void *p)
}
/* We allocated this address. */
- LOCK();
-
/* Link p to the start of the pool's freeblock list. Since
* the pool had at least the p block outstanding, the pool
* wasn't empty (so it's already in a usedpools[] list, or
@@ -1798,7 +1760,6 @@ pymalloc_free(void *ctx, void *p)
goto success;
success:
- UNLOCK();
return 1;
}
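
Purely as an illustration of the design space the deleted comment alludes to, and not part of this change: if obmalloc ever had to be serialized without relying on the GIL, the no-op macros could be backed by a real lock from CPython's pythread.h, for example:

#include "pythread.h"

/* Hypothetical sketch only -- this is NOT what CPython does; the GIL
 * already guarantees that only one thread is inside the allocator. */
static PyThread_type_lock _malloc_lock;

#define LOCK_INIT()  (_malloc_lock = PyThread_allocate_lock())
#define LOCK_FINI()  PyThread_free_lock(_malloc_lock)
#define LOCK()       PyThread_acquire_lock(_malloc_lock, WAIT_LOCK)
#define UNLOCK()     PyThread_release_lock(_malloc_lock)

/* The comment removed above notes that a single allocator-wide lock
 * like this would be crude; per-size-class locks would reduce
 * contention at the cost of extra complexity. */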