summaryrefslogtreecommitdiffstats
path: root/Lib/importlib/test/benchmark.py
blob: a8cd90eb4cd7947aa80bbad95fe52647f1107874 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
"""Benchmark some basic import use-cases."""
# XXX
#    - Bench from source (turn off bytecode generation)
#    - Bench from bytecode (remove existence of source)
#    - Bench bytecode generation
#    - Bench extensions
from . import util
from .source import util as source_util
import imp
import importlib
import sys
import timeit


def bench(name, cleanup=lambda: None, *, seconds=1, repeat=3):
    """Benchmark importing *name*, yielding one execution count per round.

    For each of *repeat* rounds, execute ``__import__(name)`` repeatedly
    until the cumulative time reaches *seconds*, calling *cleanup* after
    every execution (even a failing one), and yield how many executions
    fit within the budget.
    """
    stmt = "__import__({!r})".format(name)
    timer = timeit.Timer(stmt)
    for _ in range(repeat):
        total_time = 0
        count = 0
        while total_time < seconds:
            try:
                total_time += timer.timeit(1)
            finally:
                # Run cleanup even if the import raised, so each timing
                # iteration starts from the same state.
                cleanup()
            count += 1
        # The loop overshoots by one execution unless the final run landed
        # exactly on the budget; don't count the execution that went over.
        # (The original used ``while ... else`` here, but with no ``break``
        # the ``else`` suite always runs — a plain statement is clearer.)
        if total_time > seconds:
            count -= 1
        yield count

def from_cache(repeat):
    """sys.modules"""
    name = '<benchmark import>'
    module = imp.new_module(name)
    module.__file__ = '<test>'
    module.__package__ = ''
    with util.uncache(name):
        sys.modules[name] = module
        for result in bench(name, repeat=repeat):
            yield result


def builtin_mod(repeat):
    """Built-in module"""
    # Benchmark importing a built-in module (no filesystem access); the
    # docstring above is printed verbatim by main() as the label.
    name = 'errno'
    if name in sys.modules:
        del sys.modules[name]
    # Pop with a default: bench() calls this cleanup in a ``finally``, so a
    # bare pop would raise KeyError when an import attempt failed and never
    # populated sys.modules, masking the real exception.
    for result in bench(name, lambda: sys.modules.pop(name, None),
                        repeat=repeat):
        yield result


def main(import_, repeat=3):
    """Install *import_* as the global __import__ and run every benchmark.

    Each benchmark's docstring is used as its printed label; per-round
    counts are printed as they arrive and the best (highest) count wins.
    """
    import builtins
    # Assign through the builtins module, not __builtins__: the latter is
    # only the module object when running as __main__ — in an imported
    # module it is a plain dict, and attribute assignment on it would not
    # actually install the import hook.
    builtins.__import__ = import_
    benchmarks = from_cache, builtin_mod
    for benchmark in benchmarks:
        print(benchmark.__doc__, "[", end=' ')
        sys.stdout.flush()
        results = []
        for result in benchmark(repeat):
            results.append(result)
            print(result, end=' ')
            sys.stdout.flush()  # Show progress immediately; rounds are slow.
        print("]", "best is", max(results))


if __name__ == '__main__':
    import optparse

    # Command-line entry point: by default the pure-Python importlib
    # __import__ is benchmarked; -b/--builtin selects the C implementation.
    parser = optparse.OptionParser()
    parser.add_option('-b', '--builtin', dest='builtin', action='store_true',
                      default=False, help="use the built-in __import__")
    options, args = parser.parse_args()
    if args:
        raise RuntimeError("unrecognized args: {0}".format(args))
    import_ = __import__ if options.builtin else importlib.__import__

    main(import_)