path: root/Tools/peg_generator/scripts/benchmark.py
#!/usr/bin/env python3
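"""Reproduce the various pegen benchmarks.

Usage sketch (assumed invocation; the relative paths used below expect the
script to be run from Tools/peg_generator, with the `make venv` environment
providing memory_profiler):

    python scripts/benchmark.py parse
    python scripts/benchmark.py --target=stdlib compile
"""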

import argparse
import ast
import sys
import os
from time import time

try:
    import memory_profiler
except ModuleNotFoundError:
    print(
        "Please run `make venv` to create a virtual environment and install"
        " all the dependencies, before running this script."
    )
    sys.exit(1)

sys.path.insert(0, os.getcwd())
from scripts.test_parse_directory import parse_directory

argparser = argparse.ArgumentParser(
    prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
    "--target",
    action="store",
    choices=["xxl", "stdlib"],
    default="xxl",
    help="Which target to use for the benchmark (default is xxl.py)",
)

subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
    "compile", help="Benchmark parsing and compiling to bytecode"
)
command_parse = subcommands.add_parser("parse", help="Benchmark parsing and generating an ast.AST")


def benchmark(func):
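    """Decorator that runs ``func`` three times and reports the average
    wall-clock time, then profiles one extra run with memory_profiler and
    reports its peak memory usage."""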
    def wrapper(*args):
        times = []
        for _ in range(3):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
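        # memory_usage((func, args)) runs func(*args) once more under the
        # profiler and returns a list of memory samples in MiB.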
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times)/3:.3f} seconds on an average of 3 runs")
        print(f"\tMemory: {max(memory)} MiB on an average of 3 runs")
        return result

    return wrapper


@benchmark
def time_compile(source):
    return compile(source, "<string>", "exec")


@benchmark
def time_parse(source):
    return ast.parse(source)


def run_benchmark_xxl(subcommand, source):
    if subcommand == "compile":
        time_compile(source)
    elif subcommand == "parse":
        time_parse(source)


def run_benchmark_stdlib(subcommand):
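    """Parse or compile every file under ../../Lib three times.

    The mode values are those expected by parse_directory: 1 parses to an
    ast.AST, 2 compiles to bytecode (mirroring the ``modes`` mapping below).
    """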
    modes = {"compile": 2, "parse": 1}
    for _ in range(3):
        parse_directory(
            "../../Lib",
            verbose=False,
            excluded_files=[
                "*/bad*",
                "*/lib2to3/tests/data/*",
            ],
            short=True,
            mode=modes[subcommand],
        )


def main():
    args = argparser.parse_args()
    subcommand = args.subcommand
    target = args.target

    if subcommand is None:
        argparser.error("A benchmark to run is required")

    if target == "xxl":
        with open(os.path.join("data", "xxl.py"), "r") as f:
            source = f.read()
            run_benchmark_xxl(subcommand, source)
    elif target == "stdlib":
        run_benchmark_stdlib(subcommand)


if __name__ == "__main__":
    main()