diff options
author | Evan Martin <martine@danga.com> | 2011-12-29 19:57:02 (GMT) |
---|---|---|
committer | Evan Martin <martine@danga.com> | 2011-12-29 19:57:02 (GMT) |
commit | eaf1ff190423b1cf41eb1b905192be07aeb6b22e (patch) | |
tree | 81cf0c13c93fbddfc8129c10043096e738e74001 | |
parent | 9cf5918cc59dd5d431a4b4cc68aaf71b74efdc1d (diff) | |
download | Ninja-eaf1ff190423b1cf41eb1b905192be07aeb6b22e.zip Ninja-eaf1ff190423b1cf41eb1b905192be07aeb6b22e.tar.gz Ninja-eaf1ff190423b1cf41eb1b905192be07aeb6b22e.tar.bz2 |
add script for measuring build performance
-rw-r--r-- | HACKING | 10 | ||||
-rwxr-xr-x | misc/measure.py | 54 |
2 files changed, 60 insertions, 4 deletions
@@ -13,10 +13,12 @@ Testing performance impact of changes: If you have a Chrome build handy, it's a good test case. Otherwise, https://github.com/martine/ninja/downloads has a copy of the Chrome build files (and depfiles). You can untar that, then run - "ninja chrome". I often do something like: - (for i in `seq 5`; do time -p ninja chrome) 2>&1 | grep real > old - (for i in `seq 5`; do time -p ninja-new chrome) 2>&1 | grep real > new - and then compare those two lists of timings either by eye or with R. + path/to/my/ninja chrome + and compare that against a baseline Ninja. + + There's a script at misc/measure.py that repeatedly runs a command like + the above (to address variance) and summarizes its runtime. E.g. + path/to/misc/measure.py path/to/my/ninja chrome For changing the depfile parser, you can also build 'parser_perftest' and run that directly on some representative input files. diff --git a/misc/measure.py b/misc/measure.py new file mode 100755 index 0000000..1323fc6 --- /dev/null +++ b/misc/measure.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright 2011 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""measure the runtime of a command by repeatedly running it. 
"""Measure the runtime of a command by repeatedly running it."""

import subprocess
import sys
import time


def run(cmd, repeat=10):
    """Run *cmd* `repeat` times and print a summary of its runtime.

    Args:
      cmd: command argv list, passed straight to subprocess.call.
      repeat: number of timing samples to collect.

    Prints each sample in milliseconds as it is taken, then an estimate:
    the minimum sample (the best guess at the command's 'pure' runtime)
    and the mean deviation from that minimum as an error indicator.
    """
    print('sampling:', end=' ', flush=True)

    samples = []
    for _ in range(repeat):
        start = time.time()
        # Discard the command's output; only the wall-clock time matters.
        # subprocess.DEVNULL is portable and needs no cleanup, unlike a
        # hand-opened /dev/null handle.
        subprocess.call(cmd, stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL)
        end = time.time()
        dt = (end - start) * 1000
        print('%dms' % int(dt), end=' ', flush=True)
        samples.append(dt)
    print()

    # We're interested in the 'pure' runtime of the code, which is
    # conceptually the smallest time we'd see if we ran it enough times
    # such that it got the perfect time slices / disk cache hits.
    best = min(samples)
    # Also print how varied the outputs were in an attempt to make it
    # more obvious if something has gone terribly wrong.
    err = sum(s - best for s in samples) / float(len(samples))
    print('estimate: %dms (mean err %.1fms)' % (best, err))


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('usage: measure.py command args...')
        sys.exit(1)
    run(cmd=sys.argv[1:])