author | Serhiy Storchaka <storchaka@gmail.com> | 2017-01-12 17:42:20 (GMT)
committer | Serhiy Storchaka <storchaka@gmail.com> | 2017-01-12 17:42:20 (GMT)
commit | 617c7753ce010617b45ae3bfa53a90c07b51510a (patch)
tree | 974d15dcc99f3504be149746e9b4069e5a2db6cc /Modules/_functoolsmodule.c
parent | 798ad2742b6523250e5477557e3cc2588d6e6475 (diff)
parent | 42e1ea9a10aa6c3df40af3b7561d6251c8e2ac9c (diff)
Issue #28969: Fixed race condition in C implementation of functools.lru_cache.
KeyError could be raised when a cached function with a full cache was
simultaneously called from different threads with the same uncached arguments.
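
The failure mode above depends on thread timing, so it cannot be reproduced on demand, but the usage pattern it concerns is easy to sketch in pure Python. In the sketch below the function name `compute`, the `maxsize` of 2 and the thread count are illustrative choices, not taken from the commit or its tests:

```python
# Minimal sketch of the scenario in the commit message: a bounded lru_cache
# whose cache is already full, hit from several threads with the same
# not-yet-cached argument.  Illustrative only; as a timing-dependent race it
# will not fail deterministically.
import threading
from functools import lru_cache

@lru_cache(maxsize=2)
def compute(x):
    return x * x

def fill_then_race():
    compute(0)
    compute(1)   # the cache is now full (maxsize=2)
    # Same uncached argument from several threads at once.
    threads = [threading.Thread(target=compute, args=(2,)) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

if __name__ == "__main__":
    fill_then_race()
```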
Diffstat (limited to 'Modules/_functoolsmodule.c')
-rw-r--r-- | Modules/_functoolsmodule.c | 58
1 file changed, 36 insertions, 22 deletions
diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c
index 62b45e9..4170883 100644
--- a/Modules/_functoolsmodule.c
+++ b/Modules/_functoolsmodule.c
@@ -863,42 +863,56 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds
     }
     if (self->full && self->root.next != &self->root) {
         /* Use the oldest item to store the new key and result. */
-        PyObject *oldkey, *oldresult;
+        PyObject *oldkey, *oldresult, *popresult;
         /* Extricate the oldest item. */
         link = self->root.next;
         lru_cache_extricate_link(link);
         /* Remove it from the cache.  The cache dict holds one reference to
            the link, and the linked list holds yet one reference to it. */
-        if (_PyDict_DelItem_KnownHash(self->cache, link->key,
-                                      link->hash) < 0) {
+        popresult = _PyDict_Pop_KnownHash(self->cache,
+                                          link->key, link->hash,
+                                          Py_None);
+        if (popresult == Py_None) {
+            /* Getting here means that this same key was added to the
+               cache while the lock was released.  Since the link
+               update is already done, we need only return the
+               computed result and update the count of misses. */
+            Py_DECREF(popresult);
+            Py_DECREF(link);
+            Py_DECREF(key);
+        }
+        else if (popresult == NULL) {
             lru_cache_append_link(self, link);
             Py_DECREF(key);
             Py_DECREF(result);
             return NULL;
         }
-        /* Keep a reference to the old key and old result to
-           prevent their ref counts from going to zero during the
-           update. That will prevent potentially arbitrary object
-           clean-up code (i.e. __del__) from running while we're
-           still adjusting the links. */
-        oldkey = link->key;
-        oldresult = link->result;
-
-        link->hash = hash;
-        link->key = key;
-        link->result = result;
-        if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link,
-                                      hash) < 0) {
-            Py_DECREF(link);
+        else {
+            Py_DECREF(popresult);
+            /* Keep a reference to the old key and old result to
+               prevent their ref counts from going to zero during the
+               update. That will prevent potentially arbitrary object
+               clean-up code (i.e. __del__) from running while we're
+               still adjusting the links. */
+            oldkey = link->key;
+            oldresult = link->result;
+
+            link->hash = hash;
+            link->key = key;
+            link->result = result;
+            if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link,
+                                          hash) < 0) {
+                Py_DECREF(link);
+                Py_DECREF(oldkey);
+                Py_DECREF(oldresult);
+                return NULL;
+            }
+            lru_cache_append_link(self, link);
+            Py_INCREF(result); /* for return */
             Py_DECREF(oldkey);
             Py_DECREF(oldresult);
-            return NULL;
         }
-        lru_cache_append_link(self, link);
-        Py_INCREF(result); /* for return */
-        Py_DECREF(oldkey);
-        Py_DECREF(oldresult);
     }
     else {
         /* Put result in a new link at the front of the queue. */
         link = (lru_list_elem *)PyObject_GC_New(lru_list_elem,
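
Read at a higher level, the patch swaps a delete-that-raises (`_PyDict_DelItem_KnownHash`) for a pop-with-default (`_PyDict_Pop_KnownHash(..., Py_None)`) on the oldest key, so a cache that was updated while the lock was released is detected and handled instead of surfacing as KeyError. The sketch below is a rough pure-Python rendering of that pattern only; `evict_oldest_and_store`, `order` and `_MISSING` are hypothetical stand-ins, and the real C code above additionally manages the lru_list_elem linked list and its reference counts:

```python
# Rough, hypothetical rendering of the fixed eviction path.  dict.pop() with
# a sentinel default plays the role of _PyDict_Pop_KnownHash(..., Py_None).
_MISSING = object()

def evict_oldest_and_store(cache, order, new_key, new_result):
    """cache: dict of cached results; order: list of keys, oldest first."""
    oldest_key = order.pop(0)                   # "extricate the oldest link"
    if cache.pop(oldest_key, _MISSING) is _MISSING:
        # The oldest entry is already gone: the cache was updated while the
        # lock was released, so there is nothing to evict or relink; just
        # hand back the freshly computed result.
        return new_result
    cache[new_key] = new_result                 # reuse the freed slot
    order.append(new_key)                       # new entry becomes most recent
    return new_result
```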