author | Guido van Rossum <guido@python.org> | 2007-06-30 05:01:58 (GMT) |
committer | Guido van Rossum <guido@python.org> | 2007-06-30 05:01:58 (GMT) |
commit | 486364b821ad25bc33e7247539d2c48a9e3b7051 (patch) | |
tree | 72b5efdf5cb3947fe5ead2849075dfdf7de28a7d /Tools/pybench/pybench.py | |
parent | 8ddff70822d4d6d739a659138801e690a78939d7 (diff) | |
Merged revisions 56020-56124 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/p3yk
................
r56037 | georg.brandl | 2007-06-19 05:33:20 -0700 (Tue, 19 Jun 2007) | 2 lines
Patch #1739659: don't slice dict.keys() in pydoc.
................
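For context, this fix is needed because dict.keys() returns a view object in the py3k branch, and views cannot be sliced. A minimal sketch of the general idiom (not the actual pydoc code):

    d = {'spam': 1, 'eggs': 2, 'ham': 3}

    # keys = d.keys()[:2]    # works on the Python 2 list, TypeError on a py3k view
    keys = sorted(d)[:2]     # build a real (sorted) list first, then slice
    print(keys)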
r56060 | martin.v.loewis | 2007-06-21 13:00:02 -0700 (Thu, 21 Jun 2007) | 2 lines
Regenerate to add True, False, None.
................
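The regenerated file is presumably Lib/keyword.py; once None, True and False are real keywords in the py3k grammar, the keyword module reports them:

    import keyword

    # All three names are now rejected as identifiers by the compiler
    # and listed by the regenerated keyword module.
    print(keyword.iskeyword('None'))    # True
    print('True' in keyword.kwlist)     # True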
r56069 | neal.norwitz | 2007-06-21 22:31:56 -0700 (Thu, 21 Jun 2007) | 1 line
Get the doctest working again after adding None, True, and False as keywords.
................
r56070 | neal.norwitz | 2007-06-21 23:25:33 -0700 (Thu, 21 Jun 2007) | 1 line
Add space to error message.
................
r56071 | neal.norwitz | 2007-06-21 23:40:04 -0700 (Thu, 21 Jun 2007) | 6 lines
Get pybench working, primarily:
* Use print function
* Stop using string module
* Use sorted instead of assuming dict methods return lists
* Convert range result to a list
................
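These four points summarize the 2-to-3 idioms applied throughout the diff below; a hypothetical before/after sketch (not lifted verbatim from pybench.py):

    names = ['calls', 'lookups']

    # Python 2 style replaced by the patch:
    #   print 'Results:', string.join(names, ', ')
    #   keys = d.keys(); keys.sort()
    #   indices = range(10)

    # Python 3 style used instead:
    print('Results:', ', '.join(names))   # print() function, str.join() method
    d = {'b': 2, 'a': 1}
    keys = sorted(d)                       # sorted() instead of sorting d.keys() in place
    indices = list(range(10))              # range() no longer returns a list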
r56089 | collin.winter | 2007-06-26 10:31:48 -0700 (Tue, 26 Jun 2007) | 1 line
Fix AttributeError in distutils/dir_util.py.
................
r56124 | guido.van.rossum | 2007-06-29 18:04:31 -0700 (Fri, 29 Jun 2007) | 30 lines
Merged revisions 56014-56123 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r56019 | lars.gustaebel | 2007-06-18 04:42:11 -0700 (Mon, 18 Jun 2007) | 2 lines
Added exclude keyword argument to the TarFile.add() method.
........
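A small usage sketch of the new keyword argument (paths here are made up): exclude is a callable that receives each candidate filename and returns True for files to leave out of the archive.

    import tarfile

    def exclude_pyc(name):
        # True means: do not add this file.
        return name.endswith('.pyc')

    tar = tarfile.open('pybench-src.tar.gz', 'w:gz')
    tar.add('Tools/pybench', exclude=exclude_pyc)
    tar.close()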
r56023 | lars.gustaebel | 2007-06-18 13:05:55 -0700 (Mon, 18 Jun 2007) | 3 lines
Added missing \versionchanged tag for the new exclude
parameter.
........
r56038 | georg.brandl | 2007-06-19 05:36:00 -0700 (Tue, 19 Jun 2007) | 2 lines
Bug #1737864: allow empty message in logging format routines.
........
r56040 | georg.brandl | 2007-06-19 05:38:20 -0700 (Tue, 19 Jun 2007) | 2 lines
Bug #1739115: make shutil.rmtree docs clear wrt. file deletion.
........
r56084 | georg.brandl | 2007-06-25 08:21:23 -0700 (Mon, 25 Jun 2007) | 2 lines
Bug #1742901: document None behavior of shlex.split.
........
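The behavior being documented: shlex.split() builds a shlex instance internally, so passing None makes it read the string to split from standard input rather than returning an empty list. A short sketch:

    import shlex

    print(shlex.split('gcc -o "my prog" main.c'))
    # -> ['gcc', '-o', 'my prog', 'main.c']

    # With s=None the data to split is read from sys.stdin:
    # tokens = shlex.split(None)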
r56091 | georg.brandl | 2007-06-27 07:09:56 -0700 (Wed, 27 Jun 2007) | 2 lines
Fix a variable name in winreg docs.
........
................
Diffstat (limited to 'Tools/pybench/pybench.py')
-rwxr-xr-x | Tools/pybench/pybench.py | 230 |
1 file changed, 113 insertions, 117 deletions
diff --git a/Tools/pybench/pybench.py b/Tools/pybench/pybench.py
index 6c9d445..17b4704 100755
--- a/Tools/pybench/pybench.py
+++ b/Tools/pybench/pybench.py
@@ -34,7 +34,7 @@ NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
 """

-import sys, time, operator, string, platform
+import sys, time, operator, platform
 from CommandLine import *

 try:
@@ -103,7 +103,7 @@ def get_timer(timertype):
 def get_machine_details():

     if _debug:
-        print 'Getting machine details...'
+        print('Getting machine details...')
     buildno, builddate = platform.python_build()
     python = platform.python_version()
     try:
@@ -146,7 +146,8 @@ def print_machine_details(d, indent=''):
                      d.get('buildno', 'n/a')),
         ' Unicode: %s' % d.get('unicode', 'n/a'),
         ]
-    print indent + string.join(l, '\n' + indent) + '\n'
+    joiner = '\n' + indent
+    print(indent + joiner.join(l) + '\n')

 ### Test baseclass

@@ -280,9 +281,9 @@ class Test:
             prep_times.append(t)
         min_prep_time = min(prep_times)
         if _debug:
-            print
-            print 'Calib. prep time = %.6fms' % (
-                min_prep_time * MILLI_SECONDS)
+            print()
+            print('Calib. prep time = %.6fms' % (
+                min_prep_time * MILLI_SECONDS))

         # Time the calibration runs (doing CALIBRATION_LOOPS loops of
         # .calibrate() method calls each)
@@ -298,8 +299,8 @@ class Test:
         min_overhead = min(self.overhead_times)
         max_overhead = max(self.overhead_times)
         if _debug:
-            print 'Calib. overhead time = %.6fms' % (
-                min_overhead * MILLI_SECONDS)
+            print('Calib. overhead time = %.6fms' % (
+                min_overhead * MILLI_SECONDS))
         if min_overhead < 0.0:
             raise ValueError('calibration setup did not work')
         if max_overhead - min_overhead > 0.1:
@@ -436,7 +437,7 @@ class Benchmark:
         # Init vars
         self.tests = {}
         if _debug:
-            print 'Getting machine details...'
+            print('Getting machine details...')
         self.machine_details = get_machine_details()

         # Make .version an instance attribute to have it saved in the
@@ -473,8 +474,8 @@ class Benchmark:

         # Add tests
         if self.verbose:
-            print 'Searching for tests ...'
-            print '--------------------------------------'
+            print('Searching for tests ...')
+            print('--------------------------------------')
         for testclass in setupmod.__dict__.values():
             if not hasattr(testclass, 'is_a_test'):
                 continue
@@ -488,77 +489,74 @@ class Benchmark:
                              warp=self.warp,
                              calibration_runs=self.calibration_runs,
                              timer=self.timer)
-        l = self.tests.keys()
-        l.sort()
+        l = sorted(self.tests)
         if self.verbose:
             for name in l:
-                print ' %s' % name
-            print '--------------------------------------'
-            print ' %i tests found' % len(l)
-            print
+                print(' %s' % name)
+            print('--------------------------------------')
+            print(' %i tests found' % len(l))
+            print()

     def calibrate(self):

-        print 'Calibrating tests. Please wait...',
+        print('Calibrating tests. Please wait...', end=' ')
         if self.verbose:
-            print
-            print
-            print 'Test min max'
-            print '-' * LINE
-        tests = self.tests.items()
-        tests.sort()
+            print()
+            print()
+            print('Test min max')
+            print('-' * LINE)
+        tests = sorted(self.tests.items())
         for i in range(len(tests)):
             name, test = tests[i]
             test.calibrate_test()
             if self.verbose:
-                print '%30s: %6.3fms %6.3fms' % \
+                print('%30s: %6.3fms %6.3fms' % \
                       (name,
                        min(test.overhead_times) * MILLI_SECONDS,
-                       max(test.overhead_times) * MILLI_SECONDS)
+                       max(test.overhead_times) * MILLI_SECONDS))
         if self.verbose:
-            print
-            print 'Done with the calibration.'
+            print()
+            print('Done with the calibration.')
         else:
-            print 'done.'
-        print
+            print('done.')
+        print()

     def run(self):

-        tests = self.tests.items()
-        tests.sort()
+        tests = sorted(self.tests.items())
         timer = self.get_timer()
-        print 'Running %i round(s) of the suite at warp factor %i:' % \
-              (self.rounds, self.warp)
-        print
+        print('Running %i round(s) of the suite at warp factor %i:' % \
+              (self.rounds, self.warp))
+        print()
         self.roundtimes = []
         for i in range(self.rounds):
             if self.verbose:
-                print ' Round %-25i effective absolute overhead' % (i+1)
+                print(' Round %-25i effective absolute overhead' % (i+1))
             total_eff_time = 0.0
             for j in range(len(tests)):
                 name, test = tests[j]
                 if self.verbose:
-                    print '%30s:' % name,
+                    print('%30s:' % name, end=' ')
                 test.run()
                 (eff_time, abs_time, min_overhead) = test.last_timing
                 total_eff_time = total_eff_time + eff_time
                 if self.verbose:
-                    print ' %5.0fms %5.0fms %7.3fms' % \
+                    print(' %5.0fms %5.0fms %7.3fms' % \
                           (eff_time * MILLI_SECONDS,
                            abs_time * MILLI_SECONDS,
-                           min_overhead * MILLI_SECONDS)
+                           min_overhead * MILLI_SECONDS))
             self.roundtimes.append(total_eff_time)
             if self.verbose:
-                print (' '
-                       ' ------------------------------')
-                print (' '
+                print((' '
+                       ' ------------------------------'))
+                print((' '
                        ' Totals: %6.0fms' %
-                       (total_eff_time * MILLI_SECONDS))
-                print
+                       (total_eff_time * MILLI_SECONDS)))
+                print()
             else:
-                print '* Round %i done in %.3f seconds.' % (i+1,
-                                                            total_eff_time)
-                print
+                print('* Round %i done in %.3f seconds.' % (i+1,
+                                                            total_eff_time))
+                print()

     def stat(self):
@@ -583,25 +581,24 @@ class Benchmark:

     def print_header(self, title='Benchmark'):

-        print '-' * LINE
-        print '%s: %s' % (title, self.name)
-        print '-' * LINE
-        print
-        print ' Rounds: %s' % self.rounds
-        print ' Warp: %s' % self.warp
-        print ' Timer: %s' % self.timer
-        print
+        print('-' * LINE)
+        print('%s: %s' % (title, self.name))
+        print('-' * LINE)
+        print()
+        print(' Rounds: %s' % self.rounds)
+        print(' Warp: %s' % self.warp)
+        print(' Timer: %s' % self.timer)
+        print()
         if self.machine_details:
             print_machine_details(self.machine_details, indent=' ')
-            print
+            print()

     def print_benchmark(self, hidenoise=0, limitnames=None):

-        print ('Test '
-               ' minimum average operation overhead')
-        print '-' * LINE
-        tests = self.tests.items()
-        tests.sort()
+        print(('Test '
+               ' minimum average operation overhead'))
+        print('-' * LINE)
+        tests = sorted(self.tests.items())
         total_min_time = 0.0
         total_avg_time = 0.0
         for name, test in tests:
@@ -615,43 +612,42 @@ class Benchmark:
              min_overhead) = test.stat()
             total_min_time = total_min_time + min_time
             total_avg_time = total_avg_time + avg_time
-            print '%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
+            print('%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
                   (name,
                    min_time * MILLI_SECONDS,
                    avg_time * MILLI_SECONDS,
                    op_avg * MICRO_SECONDS,
-                   min_overhead *MILLI_SECONDS)
-        print '-' * LINE
-        print ('Totals: '
+                   min_overhead *MILLI_SECONDS))
+        print('-' * LINE)
+        print(('Totals: '
                ' %6.0fms %6.0fms' %
               (total_min_time * MILLI_SECONDS,
                total_avg_time * MILLI_SECONDS,
-               ))
-        print
+               )))
+        print()

     def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

         # Check benchmark versions
         if compare_to.version != self.version:
-            print ('* Benchmark versions differ: '
+            print(('* Benchmark versions differ: '
                    'cannot compare this benchmark to "%s" !' %
-                   compare_to.name)
-            print
+                   compare_to.name))
+            print()
             self.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)
             return

         # Print header
         compare_to.print_header('Comparing with')
-        print ('Test '
-               ' minimum run-time average run-time')
-        print (' '
-               ' this other diff this other diff')
-        print '-' * LINE
+        print(('Test '
+               ' minimum run-time average run-time'))
+        print((' '
+               ' this other diff this other diff'))
+        print('-' * LINE)

         # Print test comparisons
-        tests = self.tests.items()
-        tests.sort()
+        tests = sorted(self.tests.items())
         total_min_time = other_total_min_time = 0.0
         total_avg_time = other_total_avg_time = 0.0
         benchmarks_compatible = self.compatible(compare_to)
@@ -704,15 +700,15 @@ class Benchmark:
                 # Benchmark or tests are not comparible
                 min_diff, avg_diff = 'n/a', 'n/a'
                 tests_compatible = 0
-            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
+            print('%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                   (name,
                    min_time * MILLI_SECONDS,
                    other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                    min_diff,
                    avg_time * MILLI_SECONDS,
                    other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
-                   avg_diff)
-        print '-' * LINE
+                   avg_diff))
+        print('-' * LINE)

         # Summarise test results
         if not benchmarks_compatible or not tests_compatible:
@@ -730,7 +726,7 @@ class Benchmark:
                          (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
         else:
             avg_diff = 'n/a'
-        print ('Totals: '
+        print(('Totals: '
                ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
                (total_min_time * MILLI_SECONDS,
                 (other_total_min_time * compare_to.warp/self.warp
@@ -740,11 +736,11 @@ class Benchmark:
                 (other_total_avg_time * compare_to.warp/self.warp
                  * MILLI_SECONDS),
                 avg_diff
-                ))
-        print
-        print '(this=%s, other=%s)' % (self.name,
-                                       compare_to.name)
-        print
+                )))
+        print()
+        print('(this=%s, other=%s)' % (self.name,
+                                       compare_to.name))
+        print()

 class PyBenchCmdline(Application):
@@ -823,8 +819,8 @@ python pybench.py -s p25.pybench -c p21.pybench
         limitnames = self.values['-t']
         if limitnames:
             if _debug:
-                print '* limiting test names to one with substring "%s"' % \
-                      limitnames
+                print('* limiting test names to one with substring "%s"' % \
+                      limitnames)
             limitnames = re.compile(limitnames, re.I)
         else:
             limitnames = None
@@ -833,26 +829,26 @@ python pybench.py -s p25.pybench -c p21.pybench
         calibration_runs = self.values['-C']
         timer = self.values['--timer']

-        print '-' * LINE
-        print 'PYBENCH %s' % __version__
-        print '-' * LINE
-        print '* using %s %s' % (
+        print('-' * LINE)
+        print('PYBENCH %s' % __version__)
+        print('-' * LINE)
+        print('* using %s %s' % (
             platform.python_implementation(),
-            string.join(string.split(sys.version), ' '))
+            ' '.join(sys.version.split())))

         # Switch off garbage collection
         if not withgc:
             try:
                 import gc
             except ImportError:
-                print '* Python version doesn\'t support garbage collection'
+                print('* Python version doesn\'t support garbage collection')
             else:
                 try:
                     gc.disable()
                 except NotImplementedError:
-                    print '* Python version doesn\'t support gc.disable'
+                    print('* Python version doesn\'t support gc.disable')
                 else:
-                    print '* disabled garbage collection'
+                    print('* disabled garbage collection')

         # "Disable" sys check interval
         if not withsyscheck:
@@ -861,18 +857,18 @@ python pybench.py -s p25.pybench -c p21.pybench
             try:
                 sys.setcheckinterval(value)
             except (AttributeError, NotImplementedError):
-                print '* Python version doesn\'t support sys.setcheckinterval'
+                print('* Python version doesn\'t support sys.setcheckinterval')
             else:
-                print '* system check interval set to maximum: %s' % value
+                print('* system check interval set to maximum: %s' % value)

         if timer == TIMER_SYSTIMES_PROCESSTIME:
             import systimes
-            print '* using timer: systimes.processtime (%s)' % \
-                  systimes.SYSTIMES_IMPLEMENTATION
+            print('* using timer: systimes.processtime (%s)' % \
+                  systimes.SYSTIMES_IMPLEMENTATION)
         else:
-            print '* using timer: %s' % timer
+            print('* using timer: %s' % timer)

-        print
+        print()

         if compare_to:
@@ -882,9 +878,9 @@ python pybench.py -s p25.pybench -c p21.pybench
                 f.close()
                 compare_to = bench
             except IOError as reason:
-                print '* Error opening/reading file %s: %s' % (
+                print('* Error opening/reading file %s: %s' % (
                     repr(compare_to),
-                    reason)
+                    reason))
                 compare_to = None

         if show_bench:
@@ -902,16 +898,16 @@ python pybench.py -s p25.pybench -c p21.pybench
                 bench.print_benchmark(hidenoise=hidenoise,
                                       limitnames=limitnames)
             except IOError as reason:
-                print '* Error opening/reading file %s: %s' % (
+                print('* Error opening/reading file %s: %s' % (
                     repr(show_bench),
-                    reason)
-            print
+                    reason))
+            print()
             return

         if reportfile:
-            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
-                  (reportfile, rounds, warp)
-            print
+            print('Creating benchmark: %s (rounds=%i, warp=%i)' % \
+                  (reportfile, rounds, warp))
+            print()

         # Create benchmark object
         bench = Benchmark(reportfile,
@@ -925,9 +921,9 @@ python pybench.py -s p25.pybench -c p21.pybench
             bench.calibrate()
             bench.run()
         except KeyboardInterrupt:
-            print
-            print '*** KeyboardInterrupt -- Aborting'
-            print
+            print()
+            print('*** KeyboardInterrupt -- Aborting')
+            print()
             return
         bench.print_header()
         if compare_to:
@@ -948,12 +944,12 @@ python pybench.py -s p25.pybench -c p21.pybench
                 pickle.dump(bench,f)
                 f.close()
             except IOError as reason:
-                print '* Error opening/writing reportfile'
+                print('* Error opening/writing reportfile')
             except IOError as reason:
-                print '* Error opening/writing reportfile %s: %s' % (
+                print('* Error opening/writing reportfile %s: %s' % (
                     reportfile,
-                    reason)
-            print
+                    reason))
+            print()

 if __name__ == '__main__':
     PyBenchCmdline()