Memoizing decorator. Has the same API as the functools.lru_cache() in Py3.2 but without the LRU feature, so it takes less memory, runs faster, and doesn't need locks to keep the dictionary in a consistent state.
from collections import namedtuple
from functools import wraps
_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
def cache():
    """Memoizing cache decorator.
    Arguments to the cached function must be hashable.
    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info().  Clear the cache and statistics with f.cache_clear().
    """
    def decorating_function(user_function,
                tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
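        # The default-argument bindings above make the builtins local
        # names, which are faster to look up inside the hot wrapper.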
        cache = dict()
        hits = misses = 0
        kwd_mark = object()             # separates positional and keyword args
        @wraps(user_function)
        def wrapper(*args, **kwds):
            nonlocal hits, misses
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))
            try:
                result = cache[key]
                hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                misses += 1
            return result
        def cache_info():
            """Report cache statistics"""
            return _CacheInfo(hits, misses, None, len(cache))
        def cache_clear():
            """Clear the cache and cache statistics"""
            nonlocal hits, misses
            cache.clear()
            hits = misses = 0
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return wrapper
    return decorating_function
# ----- Example ----------------------------------------------------------------
if __name__ == '__main__':
    @cache()
    def fib(n):
        if n < 2:
            return 1
        return fib(n-1) + fib(n-2)
    from random import shuffle
    inputs = list(range(30))
    shuffle(inputs)
    results = sorted(fib(n) for n in inputs)
    print(results)
    print(fib.cache_info())
        
    expected_output = '''[1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 
         233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 
         46368, 75025, 121393, 196418, 317811, 514229, 832040]
         CacheInfo(hits=56, misses=30, maxsize=None, currsize=30)
    '''
Fast, lightweight alternative to the LRU cache in Py3.2. Use the LRU version for long-running processes that need to free up memory. Use this one whenever cumulative cache growth isn't an issue.
The @cache() syntax is used instead of @cache to keep the API as close as possible to the LRU cache.
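For illustration, a sketch of the trade-off (the maxsize bound of 128 and the squares functions below are arbitrary choices for the example; cache() is the recipe above):

from functools import lru_cache

# Bounded: entries beyond maxsize are evicted, so memory stays flat
# in a long-running process (128 here is an arbitrary example bound).
@lru_cache(maxsize=128)
def squares_bounded(n):
    return n * n

# Unbounded (this recipe): faster and lighter, but every distinct
# argument stays cached for the life of the process.
@cache()
def squares_unbounded(n):
    return n * n

Both expose the same cache_info()/cache_clear() introspection API, so switching between the two is a one-line change at the decoration site.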
Isn't that the same thing as "functools.lru_cache(maxsize=None)"?
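Functionally, close to it; the difference is that this recipe also drops the LRU bookkeeping and locking, per the notes above. A minimal sketch of the equivalence (assuming Py3.2+, where lru_cache accepts maxsize=None, and with cache() from the recipe in scope):

from functools import lru_cache

@lru_cache(maxsize=None)        # unbounded, stock library version
def fib_lru(n):
    return 1 if n < 2 else fib_lru(n-1) + fib_lru(n-2)

@cache()                        # unbounded, this recipe
def fib_plain(n):
    return 1 if n < 2 else fib_plain(n-1) + fib_plain(n-2)

assert fib_lru(30) == fib_plain(30)
print(fib_lru.cache_info())     # CacheInfo(hits=28, misses=31, maxsize=None, currsize=31)
print(fib_plain.cache_info())   # same fields and counts; the APIs line up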