author    | Nick Coghlan <ncoghlan@gmail.com> | 2010-11-30 06:19:46 (GMT)
committer | Nick Coghlan <ncoghlan@gmail.com> | 2010-11-30 06:19:46 (GMT)
commit    | 234515afe594e5f9ca1b3f460735c68b04c031e2 (patch)
tree      | 30efe3349cc7414e4f0f6d45f50a984558caea19 /Lib/functools.py
parent    | ff27ee0b400030419cfd3c9966f275bfbcb569f8 (diff)
Issue 10586: change the new functools.lru_cache implementation to expose the maximum and current cache sizes through the public statistics API. This API is now a single function that returns a named tuple.
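A rough usage sketch of the reworked statistics API described in the commit message; the `fib` function below is only an illustration, not part of the patch, and the tuple's field order follows the `_CacheInfo` definition introduced here (maxsize, size, hits, misses):

```python
import functools

@functools.lru_cache(maxsize=100)
def fib(n):
    """Naive recursive Fibonacci, used only to exercise the cache."""
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(10)
print(fib.cache_info())   # one named tuple instead of separate cache_hits/cache_misses attributes
fib.cache_clear()         # resets the cache and the statistics together
```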
Diffstat (limited to 'Lib/functools.py')
-rw-r--r-- | Lib/functools.py | 25
1 file changed, 18 insertions, 7 deletions
```diff
diff --git a/Lib/functools.py b/Lib/functools.py
index c1fa170..c223a62 100644
--- a/Lib/functools.py
+++ b/Lib/functools.py
@@ -12,7 +12,7 @@ __all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
            'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
 
 from _functools import partial, reduce
-from collections import OrderedDict
+from collections import OrderedDict, namedtuple
 try:
     from _thread import allocate_lock as Lock
 except:
@@ -114,12 +114,15 @@ def cmp_to_key(mycmp):
             raise TypeError('hash not implemented')
     return K
 
+_CacheInfo = namedtuple("CacheInfo", "maxsize, size, hits, misses")
+
 def lru_cache(maxsize=100):
     """Least-recently-used cache decorator.
 
     Arguments to the cached function must be hashable.
 
-    Performance statistics stored in f.cache_hits and f.cache_misses.
+    Significant statistics (maxsize, size, hits, misses) are
+    available through the f.cache_info() named tuple.
     Clear the cache and statistics using f.cache_clear().
     The underlying function is stored in f.__wrapped__.
 
@@ -127,7 +130,7 @@ def lru_cache(maxsize=100):
 
     """
     # Users should only access the lru_cache through its public API:
-    #       cache_hits, cache_misses, cache_clear(), and __wrapped__
+    #       cache_info, cache_clear, and f.__wrapped__
     # The internals of the lru_cache are encapsulated for thread safety and
     # to allow the implementation to change (including a possible C version).
 
@@ -137,11 +140,13 @@ def lru_cache(maxsize=100):
         cache = OrderedDict()           # ordered least recent to most recent
         cache_popitem = cache.popitem
         cache_renew = cache.move_to_end
+        hits = misses = 0
         kwd_mark = object()             # separate positional and keyword args
         lock = Lock()
 
         @wraps(user_function)
         def wrapper(*args, **kwds):
+            nonlocal hits, misses
             key = args
             if kwds:
                 key += (kwd_mark,) + tuple(sorted(kwds.items()))
@@ -149,23 +154,29 @@ def lru_cache(maxsize=100):
                 with lock:
                     result = cache[key]
                     cache_renew(key)            # record recent use of this key
-                    wrapper.cache_hits += 1
+                    hits += 1
             except KeyError:
                 result = user_function(*args, **kwds)
                 with lock:
                     cache[key] = result         # record recent use of this key
-                    wrapper.cache_misses += 1
+                    misses += 1
                     if len(cache) > maxsize:
                         cache_popitem(0)        # purge least recently used cache entry
             return result
 
+        def cache_info():
+            """Report significant cache statistics"""
+            with lock:
+                return _CacheInfo(maxsize, len(cache), hits, misses)
+
         def cache_clear():
             """Clear the cache and cache statistics"""
+            nonlocal hits, misses
             with lock:
                 cache.clear()
-                wrapper.cache_hits = wrapper.cache_misses = 0
+                hits = misses = 0
 
-        wrapper.cache_hits = wrapper.cache_misses = 0
+        wrapper.cache_info = cache_info
         wrapper.cache_clear = cache_clear
         return wrapper
 
```
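The central design shift in this patch is that the hit/miss counters move off the wrapper's public attributes into closure variables that are updated under the lock and read only through a snapshot function returning a named tuple. The pattern can be sketched in isolation; this is a simplified illustration (no LRU eviction, a single positional argument, hypothetical names), not the stdlib implementation:

```python
from collections import namedtuple
from threading import Lock

_Stats = namedtuple("Stats", "hits, misses")

def counted(func):
    """Illustrative decorator: closure-local counters guarded by a lock,
    exposed only via a snapshot call rather than mutable attributes."""
    hits = misses = 0          # nonlocal state instead of wrapper.cache_hits/-misses
    cache = {}
    lock = Lock()

    def wrapper(arg):
        nonlocal hits, misses
        with lock:
            if arg in cache:
                hits += 1
                return cache[arg]
        result = func(arg)
        with lock:
            misses += 1
            cache[arg] = result
        return result

    def stats():
        with lock:             # consistent snapshot of both counters
            return _Stats(hits, misses)

    wrapper.stats = stats
    return wrapper

@counted
def square(x):
    return x * x

square(3); square(3)
print(square.stats())          # Stats(hits=1, misses=1)
```

Reading both counters inside the lock is what lets cache_info() report a consistent pair even while other threads are updating the cache.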