Diffstat (limited to 'test/option/debug-time.py')
-rw-r--r--  test/option/debug-time.py | 146
1 file changed, 89 insertions(+), 57 deletions(-)
diff --git a/test/option/debug-time.py b/test/option/debug-time.py
index b1471ba..51cfacf 100644
--- a/test/option/debug-time.py
+++ b/test/option/debug-time.py
@@ -30,49 +30,51 @@ import string
import re
import time
+_python_ = TestSCons._python_
+
test = TestSCons.TestSCons()
-test.write('SConstruct', """
-env = Environment(OBJSUFFIX = '.ooo', PROGSUFFIX = '.xxx')
-env.Program('foo', Split('foo.c bar.c'))
+test.write('sleep_cat.py', """\
+import sys
+import time
+time.sleep(int(sys.argv[1]))
+fp = open(sys.argv[2], 'wb')
+for arg in sys.argv[3:]:
+ fp.write(open(arg, 'rb').read())
+fp.close()
+sys.exit(0)
""")
-test.write('foo.c', r"""
-#include <stdio.h>
-#include <stdlib.h>
-#include "foo.h"
-int main(int argc, char *argv[])
-{
- argv[argc++] = "--";
- printf("f1.c\n");
- exit (0);
-}
-""")
+test.write('SConstruct', """
+env = Environment(PYTHON = r'%(_python_)s',
+ SLEEP_CAT = r'sleep_cat.py',
+ CATCOM = '$PYTHON $SLEEP_CAT $SECONDS $TARGET $SOURCES',
+ SECONDS = ARGUMENTS.get('SLEEP', '0'))
+f1 = env.Command('f1.out', 'f1.in', '$CATCOM')
+f2 = env.Command('f2.out', 'f2.in', '$CATCOM')
+f3 = env.Command('f3.out', 'f3.in', '$CATCOM')
+f4 = env.Command('f4.out', 'f4.in', '$CATCOM')
+env.Command('output', [f1, f2, f3, f4], '$CATCOM')
+""" % locals())
-test.write('bar.c', """
-#include "bar.h"
-""")
+test.write('f1.in', "f1.in\n")
+test.write('f2.in', "f2.in\n")
+test.write('f3.in', "f3.in\n")
+test.write('f4.in', "f4.in\n")
-test.write('foo.h', """
-#ifndef FOO_H
-#define FOO_H
-#include "bar.h"
-#endif
-""")
-test.write('bar.h', """
-#ifndef BAR_H
-#define BAR_H
-#include "foo.h"
-#endif
-""")
-def num(match, line):
- return float(re.search(match, line).group(1))
+def num(s, match):
+ return float(re.search(match, s).group(1))
+
+def within_tolerance(expected, actual, tolerance):
+ return abs((expected-actual)/actual) <= tolerance
+
+
-# Try to make things a little more equal by measuring Python overhead
-# executing a minimal file, and reading the scons.py script itself from
-# disk so that it's already been cached.
+# Try to make our results a little more accurate and repeatable by
+# measuring Python overhead executing a minimal file, and reading the
+# scons.py script itself from disk so that it's already been cached.
test.write('pass.py', "pass\n")
test.read(test.program)
@@ -80,55 +82,85 @@ start_time = time.time()
test.run(program=TestSCons.python, arguments=test.workpath('pass.py'))
overhead = time.time() - start_time
+
+
start_time = time.time()
-test.run(arguments = "--debug=time .")
+test.run(arguments = "-j1 --debug=time . SLEEP=0")
complete_time = time.time() - start_time
+
+
expected_total_time = complete_time - overhead
-lines = string.split(test.stdout(), '\n')
-expected_command_time = 0.0
-for cmdline in filter(lambda x: x[:23] == "Command execution time:", lines):
- n = num(r'Command execution time: (\d+\.\d+) seconds', cmdline)
- expected_command_time = expected_command_time + n
+pattern = r'Command execution time: (\d+\.\d+) seconds'
+times = map(float, re.findall(pattern, test.stdout()))
+expected_command_time = reduce(lambda x, y: x + y, times, 0.0)
-stdout = test.stdout()
-total_time = num(r'Total build time: (\d+\.\d+) seconds', stdout)
-sconscript_time = num(r'Total SConscript file execution time: (\d+\.\d+) seconds', stdout)
-scons_time = num(r'Total SCons execution time: (\d+\.\d+) seconds', stdout)
-command_time = num(r'Total command execution time: (\d+\.\d+) seconds', stdout)
+stdout = test.stdout()
-def within_tolerance(expected, actual, tolerance):
- return abs((expected-actual)/actual) <= tolerance
+total_time = num(stdout, r'Total build time: (\d+\.\d+) seconds')
+sconscript_time = num(stdout, r'Total SConscript file execution time: (\d+\.\d+) seconds')
+scons_time = num(stdout, r'Total SCons execution time: (\d+\.\d+) seconds')
+command_time = num(stdout, r'Total command execution time: (\d+\.\d+) seconds')
failures = []
if not within_tolerance(expected_command_time, command_time, 0.01):
failures.append("""\
-SCons reported a total command execution time of %s,
-but command execution times really totalled %s,
+SCons -j1 reported a total command execution time of %(command_time)s,
+but command execution times really totalled %(expected_command_time)s,
outside of the 1%% tolerance.
-""" % (command_time, expected_command_time))
+""" % locals())
added_times = sconscript_time+scons_time+command_time
if not within_tolerance(total_time, added_times, 0.01):
failures.append("""\
-SCons reported a total build time of %s,
-but the various execution times actually totalled %s,
+SCons -j1 reported a total build time of %(total_time)s,
+but the various execution times actually totalled %(added_times)s,
outside of the 1%% tolerance.
-""" % (total_time, added_times))
+""" % locals())
if not within_tolerance(total_time, expected_total_time, 0.15):
failures.append("""\
-SCons reported total build time of %s,
-but the actual measured build time was %s
-(end-to-end time of %s less Python overhead of %s),
+SCons -j1 reported total build time of %(total_time)s,
+but the actual measured build time was %(expected_total_time)s
+(end-to-end time of %(complete_time)s less Python overhead of %(overhead)s),
outside of the 15%% tolerance.
-""" % (total_time, expected_total_time, complete_time, overhead))
+""" % locals())
if failures:
print string.join([test.stdout()] + failures, '\n')
test.fail_test(1)
+
+
+test.run(arguments = "-c")
+
+test.run(arguments = "-j4 --debug=time . SLEEP=1")
+
+
+
+stdout = test.stdout()
+
+total_time = num(stdout, r'Total build time: (\d+\.\d+) seconds')
+sconscript_time = num(stdout, r'Total SConscript file execution time: (\d+\.\d+) seconds')
+scons_time = num(stdout, r'Total SCons execution time: (\d+\.\d+) seconds')
+command_time = num(stdout, r'Total command execution time: (\d+\.\d+) seconds')
+
+failures = []
+
+added_times = sconscript_time+scons_time+command_time
+if not within_tolerance(total_time, added_times, 0.01):
+ failures.append("""\
+SCons -j4 reported a total build time of %(total_time)s,
+but the various execution times actually totalled %(added_times)s,
+outside of the 1%% tolerance.
+""" % locals())
+
+if failures:
+ print string.join([test.stdout()] + failures, '\n')
+ test.fail_test(1)
+
+
test.pass_test()
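
For reference, the core technique this revision adds to the test is: scrape the timing lines that "scons --debug=time" prints, sum the per-command figures, and compare them against the reported totals within a relative tolerance. Below is a minimal standalone sketch of that parse-and-compare logic in modern Python 3; the sample output is invented for illustration and stands in for the stdout a real SCons run would produce.

    # Minimal sketch (Python 3) of the parse-and-compare logic used by the test.
    # The sample output below is invented; a real run would use the stdout
    # captured from "scons --debug=time".
    import re

    sample_stdout = """\
    Command execution time: 1.002 seconds
    Command execution time: 0.998 seconds
    Total build time: 2.150 seconds
    Total SConscript file execution time: 0.050 seconds
    Total SCons execution time: 0.100 seconds
    Total command execution time: 2.000 seconds
    """

    def num(s, pattern):
        # Pull a single float out of the output, e.g. the "Total build time" line.
        return float(re.search(pattern, s).group(1))

    def within_tolerance(expected, actual, tolerance):
        # Relative difference, so a 1% tolerance scales with the magnitude.
        return abs((expected - actual) / actual) <= tolerance

    # Sum the individual per-command times reported for each target.
    command_times = [float(t) for t in
                     re.findall(r'Command execution time: (\d+\.\d+) seconds',
                                sample_stdout)]
    expected_command_time = sum(command_times)

    command_time = num(sample_stdout,
                       r'Total command execution time: (\d+\.\d+) seconds')
    total_time = num(sample_stdout, r'Total build time: (\d+\.\d+) seconds')

    # With -j1 the per-command times should add up to the reported total
    # (within 1%); with -jN they overlap in wall-clock time, so the test
    # above only checks the grand total for the parallel run.
    assert within_tolerance(expected_command_time, command_time, 0.01)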