author    Larry Hastings <larry@hastings.org>  2015-05-24 23:41:42 (GMT)
committer Larry Hastings <larry@hastings.org>  2015-05-24 23:41:42 (GMT)
commit    aca575cb2563dae20aa1ffa6107f2d75b2a5cc5b (patch)
tree      bceee349738a6e2df461b4ff74ca65bb8525a04e /Lib
parent    f46aa8e2d17ccb2ff92b803229808e3d330a3eab (diff)
parent    46c56119480b18f8583889b22d3a9b524e16e0ec (diff)
Merge.
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/functools.py            207
-rw-r--r--  Lib/test/test_functools.py  109
2 files changed, 200 insertions, 116 deletions
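
The patch below hoists the body of decorating_function out of lru_cache into a
module-level _lru_cache_wrapper, then attempts to replace that name with a
C-accelerated version imported from _functools. A minimal sketch of the same
override idiom, using a hypothetical module (_spam) and helper (_count_vowels)
purely for illustration:

    # spam.py -- hypothetical module, illustration only
    def _count_vowels(s):
        """Pure-Python reference implementation."""
        return sum(ch in 'aeiou' for ch in s)

    try:
        # A compiled accelerator, if importable, shadows the pure-Python
        # definition above; otherwise the ImportError leaves it in place.
        from _spam import _count_vowels  # hypothetical C extension
    except ImportError:
        pass
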
diff --git a/Lib/functools.py b/Lib/functools.py
index 19c25e1..09df068 100644
--- a/Lib/functools.py
+++ b/Lib/functools.py
@@ -419,120 +419,129 @@ def lru_cache(maxsize=128, typed=False):
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
+ def decorating_function(user_function):
+ wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+ return update_wrapper(wrapper, user_function)
+
+ return decorating_function
+
+def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
- def decorating_function(user_function):
- cache = {}
- hits = misses = 0
- full = False
- cache_get = cache.get # bound method to lookup a key or return None
- lock = RLock() # because linkedlist updates aren't threadsafe
- root = [] # root of the circular doubly linked list
- root[:] = [root, root, None, None] # initialize by pointing to self
-
- if maxsize == 0:
-
- def wrapper(*args, **kwds):
- # No caching -- just a statistics update after a successful call
- nonlocal misses
- result = user_function(*args, **kwds)
- misses += 1
+ cache = {}
+ hits = misses = 0
+ full = False
+ cache_get = cache.get # bound method to lookup a key or return None
+ lock = RLock() # because linkedlist updates aren't threadsafe
+ root = [] # root of the circular doubly linked list
+ root[:] = [root, root, None, None] # initialize by pointing to self
+
+ if maxsize == 0:
+
+ def wrapper(*args, **kwds):
+ # No caching -- just a statistics update after a successful call
+ nonlocal misses
+ result = user_function(*args, **kwds)
+ misses += 1
+ return result
+
+ elif maxsize is None:
+
+ def wrapper(*args, **kwds):
+ # Simple caching without ordering or size limit
+ nonlocal hits, misses
+ key = make_key(args, kwds, typed)
+ result = cache_get(key, sentinel)
+ if result is not sentinel:
+ hits += 1
return result
+ result = user_function(*args, **kwds)
+ cache[key] = result
+ misses += 1
+ return result
- elif maxsize is None:
+ else:
- def wrapper(*args, **kwds):
- # Simple caching without ordering or size limit
- nonlocal hits, misses
- key = make_key(args, kwds, typed)
- result = cache_get(key, sentinel)
- if result is not sentinel:
+ def wrapper(*args, **kwds):
+ # Size limited caching that tracks accesses by recency
+ nonlocal root, hits, misses, full
+ key = make_key(args, kwds, typed)
+ with lock:
+ link = cache_get(key)
+ if link is not None:
+ # Move the link to the front of the circular queue
+ link_prev, link_next, _key, result = link
+ link_prev[NEXT] = link_next
+ link_next[PREV] = link_prev
+ last = root[PREV]
+ last[NEXT] = root[PREV] = link
+ link[PREV] = last
+ link[NEXT] = root
hits += 1
return result
- result = user_function(*args, **kwds)
- cache[key] = result
+ result = user_function(*args, **kwds)
+ with lock:
+ if key in cache:
+ # Getting here means that this same key was added to the
+ # cache while the lock was released. Since the link
+ # update is already done, we need only return the
+ # computed result and update the count of misses.
+ pass
+ elif full:
+ # Use the old root to store the new key and result.
+ oldroot = root
+ oldroot[KEY] = key
+ oldroot[RESULT] = result
+ # Empty the oldest link and make it the new root.
+ # Keep a reference to the old key and old result to
+ # prevent their ref counts from going to zero during the
+ # update. That will prevent potentially arbitrary object
+ # clean-up code (i.e. __del__) from running while we're
+ # still adjusting the links.
+ root = oldroot[NEXT]
+ oldkey = root[KEY]
+ oldresult = root[RESULT]
+ root[KEY] = root[RESULT] = None
+ # Now update the cache dictionary.
+ del cache[oldkey]
+ # Save the potentially reentrant cache[key] assignment
+ # for last, after the root and links have been put in
+ # a consistent state.
+ cache[key] = oldroot
+ else:
+ # Put result in a new link at the front of the queue.
+ last = root[PREV]
+ link = [last, root, key, result]
+ last[NEXT] = root[PREV] = cache[key] = link
+ full = (len(cache) >= maxsize)
misses += 1
- return result
-
- else:
-
- def wrapper(*args, **kwds):
- # Size limited caching that tracks accesses by recency
- nonlocal root, hits, misses, full
- key = make_key(args, kwds, typed)
- with lock:
- link = cache_get(key)
- if link is not None:
- # Move the link to the front of the circular queue
- link_prev, link_next, _key, result = link
- link_prev[NEXT] = link_next
- link_next[PREV] = link_prev
- last = root[PREV]
- last[NEXT] = root[PREV] = link
- link[PREV] = last
- link[NEXT] = root
- hits += 1
- return result
- result = user_function(*args, **kwds)
- with lock:
- if key in cache:
- # Getting here means that this same key was added to the
- # cache while the lock was released. Since the link
- # update is already done, we need only return the
- # computed result and update the count of misses.
- pass
- elif full:
- # Use the old root to store the new key and result.
- oldroot = root
- oldroot[KEY] = key
- oldroot[RESULT] = result
- # Empty the oldest link and make it the new root.
- # Keep a reference to the old key and old result to
- # prevent their ref counts from going to zero during the
- # update. That will prevent potentially arbitrary object
- # clean-up code (i.e. __del__) from running while we're
- # still adjusting the links.
- root = oldroot[NEXT]
- oldkey = root[KEY]
- oldresult = root[RESULT]
- root[KEY] = root[RESULT] = None
- # Now update the cache dictionary.
- del cache[oldkey]
- # Save the potentially reentrant cache[key] assignment
- # for last, after the root and links have been put in
- # a consistent state.
- cache[key] = oldroot
- else:
- # Put result in a new link at the front of the queue.
- last = root[PREV]
- link = [last, root, key, result]
- last[NEXT] = root[PREV] = cache[key] = link
- full = (len(cache) >= maxsize)
- misses += 1
- return result
+ return result
- def cache_info():
- """Report cache statistics"""
- with lock:
- return _CacheInfo(hits, misses, maxsize, len(cache))
+ def cache_info():
+ """Report cache statistics"""
+ with lock:
+ return _CacheInfo(hits, misses, maxsize, len(cache))
- def cache_clear():
- """Clear the cache and cache statistics"""
- nonlocal hits, misses, full
- with lock:
- cache.clear()
- root[:] = [root, root, None, None]
- hits = misses = 0
- full = False
+ def cache_clear():
+ """Clear the cache and cache statistics"""
+ nonlocal hits, misses, full
+ with lock:
+ cache.clear()
+ root[:] = [root, root, None, None]
+ hits = misses = 0
+ full = False
- wrapper.cache_info = cache_info
- wrapper.cache_clear = cache_clear
- return update_wrapper(wrapper, user_function)
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ return update_wrapper(wrapper, user_function)
- return decorating_function
+try:
+ from _functools import _lru_cache_wrapper
+except ImportError:
+ pass
################################################################################
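
Whichever _lru_cache_wrapper ends up bound, the decorator's observable
behavior is the same. A small usage sketch of the semantics the linked-list
code above implements (move-to-front on a hit, eviction of the least recently
used entry once full is set):

    from functools import lru_cache

    @lru_cache(maxsize=2)
    def square(x):
        return x * x

    square(1)   # miss: new link added at the front of the queue
    square(2)   # miss: cache now full (len(cache) >= maxsize)
    square(1)   # hit:  the link for key 1 is moved to the front
    square(3)   # miss: the least recently used entry (key 2) is evicted
    print(square.cache_info())
    # CacheInfo(hits=1, misses=3, maxsize=2, currsize=2)
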
diff --git a/Lib/test/test_functools.py b/Lib/test/test_functools.py
index 03dd545..271d655 100644
--- a/Lib/test/test_functools.py
+++ b/Lib/test/test_functools.py
@@ -7,6 +7,10 @@ import sys
from test import support
import unittest
from weakref import proxy
+try:
+ import threading
+except ImportError:
+ threading = None
import functools
@@ -912,12 +916,12 @@ class Orderable_LT:
return self.value == other.value
-class TestLRU(unittest.TestCase):
+class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
- f = functools.lru_cache(maxsize=20)(orig)
+ f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
@@ -955,7 +959,7 @@ class TestLRU(unittest.TestCase):
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
- @functools.lru_cache(0)
+ @self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
@@ -971,7 +975,7 @@ class TestLRU(unittest.TestCase):
self.assertEqual(currsize, 0)
# test size one
- @functools.lru_cache(1)
+ @self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
@@ -987,7 +991,7 @@ class TestLRU(unittest.TestCase):
self.assertEqual(currsize, 1)
# test size two
- @functools.lru_cache(2)
+ @self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
@@ -1004,7 +1008,7 @@ class TestLRU(unittest.TestCase):
self.assertEqual(currsize, 2)
def test_lru_with_maxsize_none(self):
- @functools.lru_cache(maxsize=None)
+ @self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
@@ -1012,17 +1016,26 @@ class TestLRU(unittest.TestCase):
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
- functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
+ self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
- functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
+ self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
+
+ def test_lru_with_maxsize_negative(self):
+ @self.module.lru_cache(maxsize=-10)
+ def eq(n):
+ return n
+ for i in (0, 1):
+ self.assertEqual([eq(n) for n in range(150)], list(range(150)))
+ self.assertEqual(eq.cache_info(),
+ self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
- @functools.lru_cache(maxsize)
+ @self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
@@ -1035,7 +1048,7 @@ class TestLRU(unittest.TestCase):
def test_lru_with_types(self):
for maxsize in (None, 128):
- @functools.lru_cache(maxsize=maxsize, typed=True)
+ @self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
@@ -1050,7 +1063,7 @@ class TestLRU(unittest.TestCase):
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
- @functools.lru_cache()
+ @self.module.lru_cache()
def fib(n):
if n < 2:
return n
@@ -1060,13 +1073,13 @@ class TestLRU(unittest.TestCase):
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
- functools._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
+ self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
- functools._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
+ self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
- @functools.lru_cache(maxsize=None)
+ @self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
@@ -1074,15 +1087,71 @@ class TestLRU(unittest.TestCase):
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
- functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
+ self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
- functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
+ self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
+
+ def test_lru_cache_decoration(self):
+ def f(zomg: 'zomg_annotation'):
+ """f doc string"""
+ return 42
+ g = self.module.lru_cache()(f)
+ for attr in self.module.WRAPPER_ASSIGNMENTS:
+ self.assertEqual(getattr(g, attr), getattr(f, attr))
+
+ @unittest.skipUnless(threading, 'This test requires threading.')
+ def test_lru_cache_threaded(self):
+ def orig(x, y):
+ return 3 * x + y
+ f = self.module.lru_cache(maxsize=20)(orig)
+ hits, misses, maxsize, currsize = f.cache_info()
+ self.assertEqual(currsize, 0)
+
+ def full(f, *args):
+ for _ in range(10):
+ f(*args)
+
+ def clear(f):
+ for _ in range(10):
+ f.cache_clear()
+
+ orig_si = sys.getswitchinterval()
+ sys.setswitchinterval(1e-6)
+ try:
+ # create 5 threads in order to fill cache
+ threads = []
+ for k in range(5):
+ t = threading.Thread(target=full, args=[f, k, k])
+ t.start()
+ threads.append(t)
+
+ for t in threads:
+ t.join()
+
+ hits, misses, maxsize, currsize = f.cache_info()
+ self.assertEqual(hits, 45)
+ self.assertEqual(misses, 5)
+ self.assertEqual(currsize, 5)
+
+ # create 5 threads in order to fill cache and 1 to clear it
+ cleaner = threading.Thread(target=clear, args=[f])
+ cleaner.start()
+ threads = [cleaner]
+ for k in range(5):
+ t = threading.Thread(target=full, args=[f, k, k])
+ t.start()
+ threads.append(t)
+
+ for t in threads:
+ t.join()
+ finally:
+ sys.setswitchinterval(orig_si)
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
- @functools.lru_cache(maxsize=10)
+ @self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
@@ -1110,6 +1179,12 @@ class TestLRU(unittest.TestCase):
def f():
pass
+class TestLRUC(TestLRU, unittest.TestCase):
+ module = c_functools
+
+class TestLRUPy(TestLRU, unittest.TestCase):
+ module = py_functools
+
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
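
For context: the TestLRUC/TestLRUPy split relies on c_functools and
py_functools, which are defined outside these hunks. One plausible way the
test file obtains them (an assumption here, not shown in this diff) is
test.support.import_fresh_module, so each subclass exercises exactly one
implementation:

    from test import support

    # Assumed definitions -- not part of this diff.
    # blocked=['_functools'] hides the C accelerator, so py_functools gets
    # the pure-Python _lru_cache_wrapper; fresh=['_functools'] re-imports
    # functools together with the accelerator.
    py_functools = support.import_fresh_module('functools',
                                               blocked=['_functools'])
    c_functools = support.import_fresh_module('functools',
                                              fresh=['_functools'])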