author     Antoine Pitrou <solipsis@pitrou.net>   2010-12-03 19:21:49 (GMT)
committer  Antoine Pitrou <solipsis@pitrou.net>   2010-12-03 19:21:49 (GMT)
commit     976157f9f3f33923b702cbcd44e4c68be2748496 (patch)
tree       e22ee93a6cf098ec85e14a8558aaa4287d789653 /Modules
parent     a818394053c1a6361448fa8f18a182435fbabb53 (diff)
Merged revisions 86981,86984 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/py3k

........
  r86981 | antoine.pitrou | 2010-12-03 19:41:39 +0100 (Fri, 03 Dec 2010) | 5 lines

  Issue #10478: Reentrant calls inside buffered IO objects (for example by
  way of a signal handler) now raise a RuntimeError instead of freezing the
  current process.
........
  r86984 | antoine.pitrou | 2010-12-03 20:14:17 +0100 (Fri, 03 Dec 2010) | 3 lines

  Add an "advanced topics" section to the io doc.
........
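For context, the r86981 change turns a potential same-thread deadlock into an exception: if a signal handler re-enters a buffered object that the current thread is already operating on, ENTER_BUFFERED now detects that the lock is held by the same thread and raises RuntimeError instead of blocking forever. The Python sketch below is illustrative only (it is not part of this diff, is Unix-only because it relies on SIGALRM, and whether the reentrant case is actually hit depends on when the signal is delivered relative to the buffered write):

    # Illustrative sketch of the issue #10478 scenario (hypothetical demo
    # script, not taken from this commit).  A SIGALRM handler writes to the
    # same BufferedWriter that the main thread is writing to.
    import io
    import os
    import signal

    class _Stop(Exception):
        """Raised by the handler to end the demo when no reentrancy occurred."""

    rfd, wfd = os.pipe()
    wio = io.open(wfd, "wb")          # a BufferedWriter around a raw FileIO

    def on_alarm(signum, frame):
        # Runs on the same thread.  If the main thread is currently inside
        # wio.write()/wio.flush() and holds the buffer lock, this reentrant
        # write now raises RuntimeError ("reentrant call inside ...")
        # instead of deadlocking on the lock.  Otherwise it simply succeeds.
        wio.write(b"from the signal handler\n")
        raise _Stop

    signal.signal(signal.SIGALRM, on_alarm)
    signal.alarm(1)
    try:
        while True:
            wio.write(b"spam" * 1024)
            wio.flush()
            os.read(rfd, 8192)        # drain the pipe so raw writes never block
    except RuntimeError as exc:
        print("reentrant call detected:", exc)
    except _Stop:
        print("signal arrived outside the locked region; no reentrancy this run")
    finally:
        signal.alarm(0)
        wio.close()
        os.close(rfd)

Note that only same-thread reentrancy is rejected; a different thread entering the buffered object still blocks on the lock as before.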
Diffstat (limited to 'Modules')
-rw-r--r--  Modules/_io/bufferedio.c  68
1 file changed, 51 insertions, 17 deletions
diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c
index 611dc8c..11bc0b6 100644
--- a/Modules/_io/bufferedio.c
+++ b/Modules/_io/bufferedio.c
@@ -224,6 +224,7 @@ typedef struct {
 
 #ifdef WITH_THREAD
     PyThread_type_lock lock;
+    volatile long owner;
 #endif
 
     Py_ssize_t buffer_size;
@@ -259,15 +260,33 @@ typedef struct {
 /* These macros protect the buffered object against concurrent operations. */
 
 #ifdef WITH_THREAD
-#define ENTER_BUFFERED(self) \
-    Py_BEGIN_ALLOW_THREADS \
-    PyThread_acquire_lock(self->lock, 1); \
+static int
+_enter_buffered_busy(buffered *self)
+{
+    if (self->owner == PyThread_get_thread_ident()) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "reentrant call inside %R", self);
+        return 0;
+    }
+    Py_BEGIN_ALLOW_THREADS
+    PyThread_acquire_lock(self->lock, 1);
     Py_END_ALLOW_THREADS
+    return 1;
+}
+
+#define ENTER_BUFFERED(self) \
+    ( (PyThread_acquire_lock(self->lock, 0) ? \
+       1 : _enter_buffered_busy(self)) \
+     && (self->owner = PyThread_get_thread_ident(), 1) )
 
 #define LEAVE_BUFFERED(self) \
-    PyThread_release_lock(self->lock);
+    do { \
+        self->owner = 0; \
+        PyThread_release_lock(self->lock); \
+    } while(0);
+
 #else
-#define ENTER_BUFFERED(self)
+#define ENTER_BUFFERED(self) 1
 #define LEAVE_BUFFERED(self)
 #endif
 
@@ -423,7 +442,8 @@ buffered_close(buffered *self, PyObject *args)
     int r;
 
     CHECK_INITIALIZED(self)
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
 
     r = buffered_closed(self);
     if (r < 0)
@@ -436,7 +456,8 @@ buffered_close(buffered *self, PyObject *args)
     /* flush() will most probably re-take the lock, so drop it first */
     LEAVE_BUFFERED(self)
     res = PyObject_CallMethodObjArgs((PyObject *)self, _PyIO_str_flush, NULL);
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
     if (res == NULL) {
         goto end;
     }
@@ -639,6 +660,7 @@ _buffered_init(buffered *self)
         PyErr_SetString(PyExc_RuntimeError, "can't allocate read lock");
         return -1;
     }
+    self->owner = 0;
 #endif
     /* Find out whether buffer_size is a power of 2 */
     /* XXX is this optimization useful? */
@@ -665,7 +687,8 @@ buffered_flush(buffered *self, PyObject *args)
     CHECK_INITIALIZED(self)
     CHECK_CLOSED(self, "flush of closed file")
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
     res = _bufferedwriter_flush_unlocked(self, 0);
     if (res != NULL && self->readable) {
         /* Rewind the raw stream so that its position corresponds to
@@ -692,7 +715,8 @@ buffered_peek(buffered *self, PyObject *args)
         return NULL;
     }
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
 
     if (self->writable) {
         res = _bufferedwriter_flush_unlocked(self, 1);
@@ -727,7 +751,8 @@ buffered_read(buffered *self, PyObject *args)
 
     if (n == -1) {
         /* The number of bytes is unspecified, read until the end of stream */
-        ENTER_BUFFERED(self)
+        if (!ENTER_BUFFERED(self))
+            return NULL;
         res = _bufferedreader_read_all(self);
         LEAVE_BUFFERED(self)
     }
@@ -735,7 +760,8 @@ buffered_read(buffered *self, PyObject *args)
         res = _bufferedreader_read_fast(self, n);
         if (res == Py_None) {
             Py_DECREF(res);
-            ENTER_BUFFERED(self)
+            if (!ENTER_BUFFERED(self))
+                return NULL;
             res = _bufferedreader_read_generic(self, n);
             LEAVE_BUFFERED(self)
         }
@@ -763,7 +789,8 @@ buffered_read1(buffered *self, PyObject *args)
     if (n == 0)
         return PyBytes_FromStringAndSize(NULL, 0);
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
 
     if (self->writable) {
         res = _bufferedwriter_flush_unlocked(self, 1);
@@ -819,7 +846,8 @@ buffered_readinto(buffered *self, PyObject *args)
 
     /* TODO: use raw.readinto() instead! */
     if (self->writable) {
-        ENTER_BUFFERED(self)
+        if (!ENTER_BUFFERED(self))
+            return NULL;
         res = _bufferedwriter_flush_unlocked(self, 0);
         LEAVE_BUFFERED(self)
         if (res == NULL)
@@ -863,7 +891,8 @@ _buffered_readline(buffered *self, Py_ssize_t limit)
         goto end_unlocked;
     }
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        goto end_unlocked;
 
     /* Now we try to get some more from the raw stream */
     if (self->writable) {
@@ -1013,7 +1042,8 @@ buffered_seek(buffered *self, PyObject *args)
         }
     }
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
 
     /* Fallback: invoke raw seek() method and clear buffer */
     if (self->writable) {
@@ -1051,7 +1081,8 @@ buffered_truncate(buffered *self, PyObject *args)
         return NULL;
     }
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self))
+        return NULL;
 
     if (self->writable) {
         res = _bufferedwriter_flush_unlocked(self, 0);
@@ -1705,7 +1736,10 @@ bufferedwriter_write(buffered *self, PyObject *args)
         return NULL;
     }
 
-    ENTER_BUFFERED(self)
+    if (!ENTER_BUFFERED(self)) {
+        PyBuffer_Release(&buf);
+        return NULL;
+    }
 
     /* Fast path: the data to write can be fully buffered. */
     if (!VALID_READ_BUFFER(self) && !VALID_WRITE_BUFFER(self)) {