path: root/Tools/pybench
author    Marc-André Lemburg <mal@egenix.com>  2006-06-13 18:56:56 (GMT)
committer Marc-André Lemburg <mal@egenix.com>  2006-06-13 18:56:56 (GMT)
commit    7d9743dd6aebe3da1118ed7f0abb7b9cdc6302ff (patch)
tree      5fd8c25b53d192efbd69cbd42e6ae4016a215db0 /Tools/pybench
parent    ef7fe5f228292733965c87b99d50a21d64c6d196 (diff)
Updated to pybench 2.0.
See svn.python.org/external/pybench-2.0 for the original import of that version. Note that platform.py was not copied over from pybench-2.0 since it is already part of Python 2.5.
Diffstat (limited to 'Tools/pybench')
-rw-r--r--                Tools/pybench/Arithmetic.py   |  16
-rw-r--r--                Tools/pybench/Calls.py        | 109
-rw-r--r--                Tools/pybench/CommandLine.py  |   2
-rw-r--r--                Tools/pybench/Constructs.py   |  10
-rw-r--r--                Tools/pybench/Dict.py         |  78
-rw-r--r--                Tools/pybench/Exceptions.py   |  30
-rw-r--r--                Tools/pybench/Imports.py      |  12
-rw-r--r--                Tools/pybench/Instances.py    |   4
-rw-r--r--                Tools/pybench/Lists.py        | 146
-rw-r--r--                Tools/pybench/Lookups.py      |  10
-rw-r--r-- [-rwxr-xr-x]   Tools/pybench/NewInstances.py |  11
-rw-r--r--                Tools/pybench/Numbers.py      |  12
-rw-r--r--                Tools/pybench/README          | 371
-rw-r--r--                Tools/pybench/Setup.py        |   6
-rw-r--r--                Tools/pybench/Strings.py      |  24
-rw-r--r--                Tools/pybench/Tuples.py       |  16
-rw-r--r--                Tools/pybench/Unicode.py      |  18
-rw-r--r--                Tools/pybench/clockres.py     |  44
-rwxr-xr-x                Tools/pybench/pybench.py      | 934
-rw-r--r--                Tools/pybench/systimes.py     |  44
20 files changed, 1244 insertions, 653 deletions
diff --git a/Tools/pybench/Arithmetic.py b/Tools/pybench/Arithmetic.py
index 4ed6219..6923b4b 100644
--- a/Tools/pybench/Arithmetic.py
+++ b/Tools/pybench/Arithmetic.py
@@ -2,7 +2,7 @@ from pybench import Test
class SimpleIntegerArithmetic(Test):
- version = 0.3
+ version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
@@ -157,9 +157,9 @@ class SimpleIntegerArithmetic(Test):
class SimpleFloatArithmetic(Test):
- version = 0.3
+ version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
- rounds = 100000
+ rounds = 120000
def test(self):
@@ -312,7 +312,7 @@ class SimpleFloatArithmetic(Test):
class SimpleIntFloatArithmetic(Test):
- version = 0.3
+ version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000
@@ -468,9 +468,9 @@ class SimpleIntFloatArithmetic(Test):
class SimpleLongArithmetic(Test):
- version = 0.3
+ version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
- rounds = 30000
+ rounds = 60000
def test(self):
@@ -623,9 +623,9 @@ class SimpleLongArithmetic(Test):
class SimpleComplexArithmetic(Test):
- version = 0.3
+ version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
- rounds = 40000
+ rounds = 80000
def test(self):
diff --git a/Tools/pybench/Calls.py b/Tools/pybench/Calls.py
index e295243..fa18314 100644
--- a/Tools/pybench/Calls.py
+++ b/Tools/pybench/Calls.py
@@ -2,7 +2,7 @@ from pybench import Test
class PythonFunctionCalls(Test):
- version = 0.3
+ version = 2.0
operations = 5*(1+4+4+2)
rounds = 60000
@@ -111,9 +111,9 @@ class PythonFunctionCalls(Test):
class BuiltinFunctionCalls(Test):
- version = 0.4
+ version = 2.0
operations = 5*(2+5+5+5)
- rounds = 30000
+ rounds = 60000
def test(self):
@@ -232,9 +232,9 @@ class BuiltinFunctionCalls(Test):
class PythonMethodCalls(Test):
- version = 0.3
+ version = 2.0
operations = 5*(6 + 5 + 4)
- rounds = 20000
+ rounds = 30000
def test(self):
@@ -374,9 +374,9 @@ class PythonMethodCalls(Test):
class Recursion(Test):
- version = 0.3
+ version = 2.0
operations = 5
- rounds = 50000
+ rounds = 100000
def test(self):
@@ -407,3 +407,98 @@ class Recursion(Test):
for i in xrange(self.rounds):
pass
+
+
+### Test to make Fredrik happy...
+
+if __name__ == '__main__':
+ import timeit
+ if 0:
+ timeit.TestClass = PythonFunctionCalls
+ timeit.main(['-s', 'test = TestClass(); test.rounds = 1000',
+ 'test.test()'])
+ else:
+ setup = """\
+global f,f1,g,h
+
+# define functions
+def f():
+ pass
+
+def f1(x):
+ pass
+
+def g(a,b,c):
+ return a,b,c
+
+def h(a,b,c,d=1,e=2,f=3):
+ return d,e,f
+
+i = 1
+"""
+ test = """\
+f()
+f1(i)
+f1(i)
+f1(i)
+f1(i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+h(i,i,3,i,i)
+h(i,i,i,2,i,3)
+
+f()
+f1(i)
+f1(i)
+f1(i)
+f1(i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+h(i,i,3,i,i)
+h(i,i,i,2,i,3)
+
+f()
+f1(i)
+f1(i)
+f1(i)
+f1(i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+h(i,i,3,i,i)
+h(i,i,i,2,i,3)
+
+f()
+f1(i)
+f1(i)
+f1(i)
+f1(i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+h(i,i,3,i,i)
+h(i,i,i,2,i,3)
+
+f()
+f1(i)
+f1(i)
+f1(i)
+f1(i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+g(i,i,i)
+h(i,i,3,i,i)
+h(i,i,i,2,i,3)
+"""
+
+ timeit.main(['-s', setup,
+ test])
+
+
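For reference, the same cross-check can be scripted against timeit's
programmatic API instead of timeit.main (a sketch, assuming the `setup`
and `test` strings defined in the harness above; the variable names
below are illustrative, Python 2 syntax as in the rest of the suite):

    import timeit

    # Build a Timer from the same statement/setup strings as above and
    # take the best of three runs of 1000 loops each.
    timer = timeit.Timer(stmt=test, setup=setup)
    best = min(timer.repeat(repeat=3, number=1000))
    print 'best of 3: %.2f us per statement block' % (best / 1000 * 1e6)

Taking min() of the repeats mirrors pybench 2.0's own choice of minimum
time as the estimator least disturbed by other system activity.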
diff --git a/Tools/pybench/CommandLine.py b/Tools/pybench/CommandLine.py
index 13e4f9b..6601be5 100644
--- a/Tools/pybench/CommandLine.py
+++ b/Tools/pybench/CommandLine.py
@@ -358,7 +358,7 @@ class Application:
except self.InternalError:
print
- print '* Internal Error'
+ print '* Internal Error (use --debug to display the traceback)'
if self.debug:
print
traceback.print_exc(20, sys.stdout)
diff --git a/Tools/pybench/Constructs.py b/Tools/pybench/Constructs.py
index 00045bd..5105461 100644
--- a/Tools/pybench/Constructs.py
+++ b/Tools/pybench/Constructs.py
@@ -2,7 +2,7 @@ from pybench import Test
class IfThenElse(Test):
- version = 0.31
+ version = 2.0
operations = 30*3 # hard to say...
rounds = 150000
@@ -469,9 +469,9 @@ class IfThenElse(Test):
class NestedForLoops(Test):
- version = 0.3
+ version = 2.0
operations = 1000*10*5
- rounds = 150
+ rounds = 300
def test(self):
@@ -494,9 +494,9 @@ class NestedForLoops(Test):
class ForLoops(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 5
- rounds = 8000
+ rounds = 10000
def test(self):
diff --git a/Tools/pybench/Dict.py b/Tools/pybench/Dict.py
index 54aeae7..9cdd682 100644
--- a/Tools/pybench/Dict.py
+++ b/Tools/pybench/Dict.py
@@ -2,9 +2,9 @@ from pybench import Test
class DictCreation(Test):
- version = 0.3
+ version = 2.0
operations = 5*(5 + 5)
- rounds = 60000
+ rounds = 80000
def test(self):
@@ -77,7 +77,7 @@ class DictCreation(Test):
class DictWithStringKeys(Test):
- version = 0.1
+ version = 2.0
operations = 5*(6 + 6)
rounds = 200000
@@ -166,9 +166,9 @@ class DictWithStringKeys(Test):
class DictWithFloatKeys(Test):
- version = 0.1
+ version = 2.0
operations = 5*(6 + 6)
- rounds = 200000
+ rounds = 150000
def test(self):
@@ -255,7 +255,7 @@ class DictWithFloatKeys(Test):
class DictWithIntegerKeys(Test):
- version = 0.1
+ version = 2.0
operations = 5*(6 + 6)
rounds = 200000
@@ -344,13 +344,14 @@ class DictWithIntegerKeys(Test):
class SimpleDictManipulation(Test):
- version = 0.3
+ version = 2.0
operations = 5*(6 + 6 + 6 + 6)
- rounds = 50000
+ rounds = 100000
def test(self):
d = {}
+ has_key = d.has_key
for i in xrange(self.rounds):
@@ -368,12 +369,12 @@ class SimpleDictManipulation(Test):
x = d[4]
x = d[5]
- d.has_key(0)
- d.has_key(2)
- d.has_key(4)
- d.has_key(6)
- d.has_key(8)
- d.has_key(10)
+ has_key(0)
+ has_key(2)
+ has_key(4)
+ has_key(6)
+ has_key(8)
+ has_key(10)
del d[0]
del d[1]
@@ -396,12 +397,12 @@ class SimpleDictManipulation(Test):
x = d[4]
x = d[5]
- d.has_key(0)
- d.has_key(2)
- d.has_key(4)
- d.has_key(6)
- d.has_key(8)
- d.has_key(10)
+ has_key(0)
+ has_key(2)
+ has_key(4)
+ has_key(6)
+ has_key(8)
+ has_key(10)
del d[0]
del d[1]
@@ -424,12 +425,12 @@ class SimpleDictManipulation(Test):
x = d[4]
x = d[5]
- d.has_key(0)
- d.has_key(2)
- d.has_key(4)
- d.has_key(6)
- d.has_key(8)
- d.has_key(10)
+ has_key(0)
+ has_key(2)
+ has_key(4)
+ has_key(6)
+ has_key(8)
+ has_key(10)
del d[0]
del d[1]
@@ -452,12 +453,12 @@ class SimpleDictManipulation(Test):
x = d[4]
x = d[5]
- d.has_key(0)
- d.has_key(2)
- d.has_key(4)
- d.has_key(6)
- d.has_key(8)
- d.has_key(10)
+ has_key(0)
+ has_key(2)
+ has_key(4)
+ has_key(6)
+ has_key(8)
+ has_key(10)
del d[0]
del d[1]
@@ -480,12 +481,12 @@ class SimpleDictManipulation(Test):
x = d[4]
x = d[5]
- d.has_key(0)
- d.has_key(2)
- d.has_key(4)
- d.has_key(6)
- d.has_key(8)
- d.has_key(10)
+ has_key(0)
+ has_key(2)
+ has_key(4)
+ has_key(6)
+ has_key(8)
+ has_key(10)
del d[0]
del d[1]
@@ -497,6 +498,7 @@ class SimpleDictManipulation(Test):
def calibrate(self):
d = {}
+ has_key = d.has_key
for i in xrange(self.rounds):
pass
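The SimpleDictManipulation change above is a classic Python 2
micro-optimization: binding the bound method to a local name once skips
a per-call attribute lookup inside the timed loop. A minimal sketch of
the pattern (illustrative; d.has_key() no longer exists in Python 3):

    d = {}
    has_key = d.has_key          # attribute lookup done exactly once
    for i in xrange(100000):
        d[0] = 1
        has_key(0)               # local-name call, no LOAD_ATTR per iteration

Note that calibrate() rebinds has_key the same way, so the cost of the
binding itself is part of the measured overhead and gets subtracted.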
diff --git a/Tools/pybench/Exceptions.py b/Tools/pybench/Exceptions.py
index 7e55708..eff69c7 100644
--- a/Tools/pybench/Exceptions.py
+++ b/Tools/pybench/Exceptions.py
@@ -2,9 +2,9 @@ from pybench import Test
class TryRaiseExcept(Test):
- version = 0.1
- operations = 2 + 3
- rounds = 60000
+ version = 2.0
+ operations = 2 + 3 + 3
+ rounds = 80000
def test(self):
@@ -31,6 +31,18 @@ class TryRaiseExcept(Test):
raise error,"something"
except:
pass
+ try:
+ raise error("something")
+ except:
+ pass
+ try:
+ raise error("something")
+ except:
+ pass
+ try:
+ raise error("something")
+ except:
+ pass
def calibrate(self):
@@ -42,9 +54,9 @@ class TryRaiseExcept(Test):
class TryExcept(Test):
- version = 0.1
+ version = 2.0
operations = 15 * 10
- rounds = 200000
+ rounds = 150000
def test(self):
@@ -677,3 +689,11 @@ class TryExcept(Test):
for i in xrange(self.rounds):
pass
+
+### Test to make Fredrik happy...
+
+if __name__ == '__main__':
+ import timeit
+ timeit.TestClass = TryRaiseExcept
+ timeit.main(['-s', 'test = TestClass(); test.rounds = 1000',
+ 'test.test()'])
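The three added operations exercise the instance form of raise alongside
the older two-argument form already in the test. Both Python 2 spellings
side by side (a sketch; `error` is a plain exception class here):

    error = ValueError
    try:
        raise error, "something"      # classic two-argument form (Python 2 only)
    except error:
        pass
    try:
        raise error("something")      # instance form, also valid in Python 3
    except error:
        pass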
diff --git a/Tools/pybench/Imports.py b/Tools/pybench/Imports.py
index 85eb604..afc728b 100644
--- a/Tools/pybench/Imports.py
+++ b/Tools/pybench/Imports.py
@@ -6,9 +6,9 @@ import package.submodule
class SecondImport(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 5
- rounds = 20000
+ rounds = 40000
def test(self):
@@ -51,9 +51,9 @@ class SecondImport(Test):
class SecondPackageImport(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 5
- rounds = 20000
+ rounds = 40000
def test(self):
@@ -95,9 +95,9 @@ class SecondPackageImport(Test):
class SecondSubmoduleImport(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 5
- rounds = 20000
+ rounds = 40000
def test(self):
diff --git a/Tools/pybench/Instances.py b/Tools/pybench/Instances.py
index 9b1929d..1dfc82f 100644
--- a/Tools/pybench/Instances.py
+++ b/Tools/pybench/Instances.py
@@ -2,9 +2,9 @@ from pybench import Test
class CreateInstances(Test):
- version = 0.2
+ version = 2.0
operations = 3 + 7 + 4
- rounds = 60000
+ rounds = 80000
def test(self):
diff --git a/Tools/pybench/Lists.py b/Tools/pybench/Lists.py
index 4c18e99..67760db 100644
--- a/Tools/pybench/Lists.py
+++ b/Tools/pybench/Lists.py
@@ -2,22 +2,23 @@ from pybench import Test
class SimpleListManipulation(Test):
- version = 0.3
+ version = 2.0
operations = 5* (6 + 6 + 6)
- rounds = 60000
+ rounds = 130000
def test(self):
l = []
+ append = l.append
for i in xrange(self.rounds):
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -33,12 +34,12 @@ class SimpleListManipulation(Test):
x = l[4]
x = l[5]
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -54,12 +55,12 @@ class SimpleListManipulation(Test):
x = l[4]
x = l[5]
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -75,12 +76,12 @@ class SimpleListManipulation(Test):
x = l[4]
x = l[5]
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -96,12 +97,12 @@ class SimpleListManipulation(Test):
x = l[4]
x = l[5]
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -124,15 +125,16 @@ class SimpleListManipulation(Test):
def calibrate(self):
l = []
+ append = l.append
for i in xrange(self.rounds):
pass
class ListSlicing(Test):
- version = 0.4
+ version = 2.0
operations = 25*(3+1+2+1)
- rounds = 400
+ rounds = 800
def test(self):
@@ -141,7 +143,7 @@ class ListSlicing(Test):
for i in xrange(self.rounds):
- l = range(100)
+ l = n[:]
for j in r:
@@ -159,17 +161,14 @@ class ListSlicing(Test):
r = range(25)
for i in xrange(self.rounds):
-
- l = range(100)
-
for j in r:
pass
class SmallLists(Test):
- version = 0.3
+ version = 2.0
operations = 5*(1+ 6 + 6 + 3 + 1)
- rounds = 60000
+ rounds = 80000
def test(self):
@@ -177,12 +176,13 @@ class SmallLists(Test):
l = []
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append = l.append
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -199,12 +199,13 @@ class SmallLists(Test):
l = []
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append = l.append
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -221,12 +222,13 @@ class SmallLists(Test):
l = []
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append = l.append
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -243,12 +245,13 @@ class SmallLists(Test):
l = []
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append = l.append
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -265,12 +268,13 @@ class SmallLists(Test):
l = []
- l.append(2)
- l.append(3)
- l.append(4)
- l.append(2)
- l.append(3)
- l.append(4)
+ append = l.append
+ append(2)
+ append(3)
+ append(4)
+ append(2)
+ append(3)
+ append(4)
l[0] = 3
l[1] = 4
@@ -288,4 +292,4 @@ class SmallLists(Test):
def calibrate(self):
for i in xrange(self.rounds):
- l = []
+ pass
diff --git a/Tools/pybench/Lookups.py b/Tools/pybench/Lookups.py
index e5529cd..f20e7da 100644
--- a/Tools/pybench/Lookups.py
+++ b/Tools/pybench/Lookups.py
@@ -2,7 +2,7 @@ from pybench import Test
class SpecialClassAttribute(Test):
- version = 0.3
+ version = 2.0
operations = 5*(12 + 12)
rounds = 100000
@@ -183,7 +183,7 @@ class SpecialClassAttribute(Test):
class NormalClassAttribute(Test):
- version = 0.3
+ version = 2.0
operations = 5*(12 + 12)
rounds = 100000
@@ -369,7 +369,7 @@ class NormalClassAttribute(Test):
class SpecialInstanceAttribute(Test):
- version = 0.3
+ version = 2.0
operations = 5*(12 + 12)
rounds = 100000
@@ -557,7 +557,7 @@ class SpecialInstanceAttribute(Test):
class NormalInstanceAttribute(Test):
- version = 0.3
+ version = 2.0
operations = 5*(12 + 12)
rounds = 100000
@@ -745,7 +745,7 @@ class NormalInstanceAttribute(Test):
class BuiltinMethodLookup(Test):
- version = 0.3
+ version = 2.0
operations = 5*(3*5 + 3*5)
rounds = 70000
diff --git a/Tools/pybench/NewInstances.py b/Tools/pybench/NewInstances.py
index a352638..258beba 100755..100644
--- a/Tools/pybench/NewInstances.py
+++ b/Tools/pybench/NewInstances.py
@@ -1,8 +1,17 @@
from pybench import Test
+# Check for new-style class support:
+try:
+ class c(object):
+ pass
+except NameError:
+ raise ImportError
+
+###
+
class CreateNewInstances(Test):
- version = 0.1
+ version = 2.0
operations = 3 + 7 + 4
rounds = 60000
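The import-time probe above converts a missing `object` builtin (as on
pre-2.2 interpreters) into an ImportError, which the narrowed
`except ImportError: pass` added to Setup.py further below can swallow
cleanly. The pattern in isolation (a sketch):

    # Feature detection at import time: if new-style classes are not
    # available, defining the probe class raises NameError, which is
    # translated into ImportError for the importing module to handle.
    try:
        class _probe(object):
            pass
    except NameError:
        raise ImportError('new-style classes not supported')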
diff --git a/Tools/pybench/Numbers.py b/Tools/pybench/Numbers.py
index a6aea33..10c8940 100644
--- a/Tools/pybench/Numbers.py
+++ b/Tools/pybench/Numbers.py
@@ -2,7 +2,7 @@ from pybench import Test
class CompareIntegers(Test):
- version = 0.1
+ version = 2.0
operations = 30 * 5
rounds = 120000
@@ -198,9 +198,9 @@ class CompareIntegers(Test):
class CompareFloats(Test):
- version = 0.1
+ version = 2.0
operations = 30 * 5
- rounds = 60000
+ rounds = 80000
def test(self):
@@ -394,7 +394,7 @@ class CompareFloats(Test):
class CompareFloatsIntegers(Test):
- version = 0.1
+ version = 2.0
operations = 30 * 5
rounds = 60000
@@ -590,9 +590,9 @@ class CompareFloatsIntegers(Test):
class CompareLongs(Test):
- version = 0.1
+ version = 2.0
operations = 30 * 5
- rounds = 60000
+ rounds = 70000
def test(self):
diff --git a/Tools/pybench/README b/Tools/pybench/README
index 95ae392..022c8de 100644
--- a/Tools/pybench/README
+++ b/Tools/pybench/README
@@ -28,12 +28,37 @@ and then print out a report to stdout.
Micro-Manual
------------
-Run 'pybench.py -h' to see the help screen.
-Run 'pybench.py' to just let the benchmark suite do it's thing and
-'pybench.py -f <file>' to have it store the results in a file too.
+Run 'pybench.py -h' to see the help screen. Run 'pybench.py' to run
+the benchmark suite using default settings and 'pybench.py -f <file>'
+to have it store the results in a file too.
+
+It is usually a good idea to run pybench.py multiple times to see
+whether the environment, timers and benchmark run-times are suitable
+for doing benchmark tests.
+
+You can use the comparison feature of pybench.py ('pybench.py -c
+<file>') to check how well the system behaves in comparison to a
+reference run.
+
+If the differences are well below 10% for each test, then you have a
+system that is good for doing benchmark testing. If you get random
+differences of more than 10% or significant differences between the
+values for minimum and average time, then you likely have some
+background processes running which cause the readings to become
+inconsistent. Examples include: web-browsers, email clients, RSS
+readers, music players, backup programs, etc.
+
+If you are only interested in a few tests of the whole suite, you can
+use the filtering option, e.g. 'pybench.py -t string' will only
+run/show the tests that have 'string' in their name.
This is the current output of pybench.py --help:
+"""
+------------------------------------------------------------------------
+PYBENCH - a benchmark test suite for Python interpreters/compilers.
+------------------------------------------------------------------------
+
Synopsis:
pybench.py [option] files...
@@ -42,14 +67,14 @@ Options and default settings:
-f arg save benchmark to file arg ()
-c arg compare benchmark with the one in file arg ()
-s arg show benchmark in file arg, then exit ()
- -S show statistics of benchmarks (0)
- -w arg set warp factor to arg (20)
- -d hide noise in compares (0)
- --no-gc disable garbage collection (0)
- --no-syscheck "disable" sys check interval (set to sys.maxint) (0)
- -t arg tests containing substring ()
- -C arg number of calibration runs (20)
- -v generate verbose output
+ -w arg set warp factor to arg (10)
+ -t arg run only tests with names matching arg ()
+ -C arg set the number of calibration runs to arg (20)
+ -d hide noise in comparisons (0)
+ -v verbose output (not recommended) (0)
+ --with-gc enable garbage collection (0)
+ --with-syscheck use default sys check interval (0)
+ --timer arg use given timer (time.time)
-h show this help text
--help show this help text
--debug enable debugging
@@ -57,17 +82,23 @@ Options and default settings:
--examples show examples of usage
Version:
- 1.3
+ 2.0
The normal operation is to run the suite and display the
-results. Use -f to save them for later reuse or comparisms.
+results. Use -f to save them for later reuse or comparisons.
-Examples:
+Available timers:
-python1.5 pybench.py -w 100 -f p15
-python1.4 pybench.py -w 100 -f p14
-python pybench.py -s p15 -c p14
+ time.time
+ time.clock
+ systimes.processtime
+Examples:
+
+python2.1 pybench.py -f p21.pybench
+python2.5 pybench.py -f p25.pybench
+python pybench.py -s p25.pybench -c p21.pybench
+"""
License
-------
@@ -78,184 +109,103 @@ See LICENSE file.
Sample output
-------------
-PYBENCH 1.3
-
-Machine Details:
- Platform ID: Linux-2.6.8-24.19-default-x86_64-with-SuSE-9.2-x86-64
- Executable: /home/lemburg/projects/Python/Installation/bin/python
- Python: 2.5a1.0
- Compiler: GCC 3.3.4 (pre 3.3.5 20040809)
- Build: Apr 9 2006 01:50:57 (#trunk)
-
-Searching for tests...
- BuiltinFunctionCalls
- BuiltinMethodLookup
- CompareFloats
- CompareFloatsIntegers
- CompareIntegers
- CompareInternedStrings
- CompareLongs
- CompareStrings
- CompareUnicode
- ConcatStrings
- ConcatUnicode
- CreateInstances
- CreateStringsWithConcat
- CreateUnicodeWithConcat
- DictCreation
- DictWithFloatKeys
- DictWithIntegerKeys
- DictWithStringKeys
- ForLoops
- IfThenElse
- ListSlicing
- NestedForLoops
- NormalClassAttribute
- NormalInstanceAttribute
- PythonFunctionCalls
- PythonMethodCalls
- Recursion
- SecondImport
- SecondPackageImport
- SecondSubmoduleImport
- SimpleComplexArithmetic
- SimpleDictManipulation
- SimpleFloatArithmetic
- SimpleIntFloatArithmetic
- SimpleIntegerArithmetic
- SimpleListManipulation
- SimpleLongArithmetic
- SmallLists
- SmallTuples
- SpecialClassAttribute
- SpecialInstanceAttribute
- StringMappings
- StringPredicates
- StringSlicing
- TryExcept
- TryRaiseExcept
- TupleSlicing
- UnicodeMappings
- UnicodePredicates
- UnicodeProperties
- UnicodeSlicing
-
-Running 10 round(s) of the suite:
+"""
+-------------------------------------------------------------------------------
+PYBENCH 2.0
+-------------------------------------------------------------------------------
+* using Python 2.4.2
+* disabled garbage collection
+* system check interval set to maximum: 2147483647
+* using timer: time.time
-...
+Calibrating tests. Please wait...
- Round 10 real abs overhead
- BuiltinFunctionCalls: 0.030r 0.030a 0.000o
- BuiltinMethodLookup: 0.059r 0.060a 0.001o
- CompareFloats: 0.050r 0.050a 0.000o
- CompareFloatsIntegers: 0.050r 0.050a 0.000o
- CompareIntegers: 0.070r 0.070a 0.000o
- CompareInternedStrings: 0.039r 0.040a 0.001o
- CompareLongs: 0.050r 0.050a 0.000o
- CompareStrings: 0.060r 0.060a 0.000o
- CompareUnicode: 0.060r 0.060a 0.000o
- ConcatStrings: 0.040r 0.040a 0.000o
- ConcatUnicode: 0.050r 0.050a 0.000o
- CreateInstances: 0.050r 0.050a 0.000o
- CreateStringsWithConcat: 0.029r 0.030a 0.001o
- CreateUnicodeWithConcat: 0.060r 0.060a 0.000o
- DictCreation: 0.040r 0.040a 0.000o
- DictWithFloatKeys: 0.089r 0.090a 0.000o
- DictWithIntegerKeys: 0.059r 0.060a 0.001o
- DictWithStringKeys: 0.070r 0.070a 0.001o
- ForLoops: 0.050r 0.050a 0.000o
- IfThenElse: 0.070r 0.070a 0.000o
- ListSlicing: 0.030r 0.030a 0.000o
- NestedForLoops: 0.030r 0.030a 0.000o
- NormalClassAttribute: 0.060r 0.060a 0.000o
- NormalInstanceAttribute: 0.060r 0.060a 0.000o
- PythonFunctionCalls: 0.060r 0.060a 0.000o
- PythonMethodCalls: 0.050r 0.050a 0.000o
- Recursion: 0.050r 0.050a 0.000o
- SecondImport: 0.030r 0.030a 0.000o
- SecondPackageImport: 0.030r 0.030a 0.000o
- SecondSubmoduleImport: 0.040r 0.040a 0.000o
- SimpleComplexArithmetic: 0.030r 0.030a 0.000o
- SimpleDictManipulation: 0.040r 0.040a 0.000o
- SimpleFloatArithmetic: 0.050r 0.050a 0.001o
- SimpleIntFloatArithmetic: 0.060r 0.060a 0.000o
- SimpleIntegerArithmetic: 0.060r 0.060a 0.000o
- SimpleListManipulation: 0.030r 0.030a 0.000o
- SimpleLongArithmetic: 0.030r 0.030a 0.000o
- SmallLists: 0.050r 0.050a 0.000o
- SmallTuples: 0.050r 0.050a 0.000o
- SpecialClassAttribute: 0.060r 0.060a 0.000o
- SpecialInstanceAttribute: 0.079r 0.080a 0.001o
- StringMappings: 0.060r 0.060a 0.000o
- StringPredicates: 0.049r 0.050a 0.001o
- StringSlicing: 0.039r 0.040a 0.000o
- TryExcept: 0.079r 0.080a 0.001o
- TryRaiseExcept: 0.059r 0.060a 0.001o
- TupleSlicing: 0.050r 0.050a 0.000o
- UnicodeMappings: 0.070r 0.070a 0.001o
- UnicodePredicates: 0.059r 0.060a 0.001o
- UnicodeProperties: 0.059r 0.060a 0.001o
- UnicodeSlicing: 0.050r 0.050a 0.000o
- ----------------------
- Average round time: 2.937 seconds
-
-
-Tests: per run per oper. overhead
-------------------------------------------------------------------------
- BuiltinFunctionCalls: 29.85 ms 0.23 us 0.00 ms
- BuiltinMethodLookup: 66.85 ms 0.13 us 0.50 ms
- CompareFloats: 43.00 ms 0.10 us 0.00 ms
- CompareFloatsIntegers: 51.80 ms 0.12 us 0.00 ms
- CompareIntegers: 70.70 ms 0.08 us 0.50 ms
- CompareInternedStrings: 41.40 ms 0.08 us 0.50 ms
- CompareLongs: 47.90 ms 0.11 us 0.00 ms
- CompareStrings: 58.50 ms 0.12 us 0.50 ms
- CompareUnicode: 56.55 ms 0.15 us 0.50 ms
- ConcatStrings: 44.75 ms 0.30 us 0.00 ms
- ConcatUnicode: 54.55 ms 0.36 us 0.50 ms
- CreateInstances: 50.95 ms 1.21 us 0.00 ms
- CreateStringsWithConcat: 28.85 ms 0.14 us 0.50 ms
- CreateUnicodeWithConcat: 53.75 ms 0.27 us 0.00 ms
- DictCreation: 41.90 ms 0.28 us 0.00 ms
- DictWithFloatKeys: 88.50 ms 0.15 us 0.50 ms
- DictWithIntegerKeys: 62.55 ms 0.10 us 0.50 ms
- DictWithStringKeys: 60.50 ms 0.10 us 0.50 ms
- ForLoops: 46.90 ms 4.69 us 0.00 ms
- IfThenElse: 60.55 ms 0.09 us 0.00 ms
- ListSlicing: 29.90 ms 8.54 us 0.00 ms
- NestedForLoops: 33.95 ms 0.10 us 0.00 ms
- NormalClassAttribute: 62.75 ms 0.10 us 0.50 ms
- NormalInstanceAttribute: 61.80 ms 0.10 us 0.50 ms
- PythonFunctionCalls: 60.00 ms 0.36 us 0.00 ms
- PythonMethodCalls: 50.00 ms 0.67 us 0.00 ms
- Recursion: 46.85 ms 3.75 us 0.00 ms
- SecondImport: 35.00 ms 1.40 us 0.00 ms
- SecondPackageImport: 32.00 ms 1.28 us 0.00 ms
- SecondSubmoduleImport: 38.00 ms 1.52 us 0.00 ms
- SimpleComplexArithmetic: 26.85 ms 0.12 us 0.00 ms
- SimpleDictManipulation: 40.85 ms 0.14 us 0.00 ms
- SimpleFloatArithmetic: 48.70 ms 0.09 us 0.50 ms
- SimpleIntFloatArithmetic: 57.70 ms 0.09 us 0.00 ms
- SimpleIntegerArithmetic: 58.75 ms 0.09 us 0.50 ms
- SimpleListManipulation: 34.80 ms 0.13 us 0.00 ms
- SimpleLongArithmetic: 30.95 ms 0.19 us 0.50 ms
- SmallLists: 47.60 ms 0.19 us 0.00 ms
- SmallTuples: 48.80 ms 0.20 us 0.50 ms
- SpecialClassAttribute: 61.70 ms 0.10 us 0.00 ms
- SpecialInstanceAttribute: 76.70 ms 0.13 us 0.50 ms
- StringMappings: 58.70 ms 0.47 us 0.00 ms
- StringPredicates: 50.00 ms 0.18 us 1.00 ms
- StringSlicing: 39.65 ms 0.23 us 0.50 ms
- TryExcept: 84.45 ms 0.06 us 0.50 ms
- TryRaiseExcept: 61.75 ms 4.12 us 0.50 ms
- TupleSlicing: 48.95 ms 0.47 us 0.00 ms
- UnicodeMappings: 71.50 ms 3.97 us 0.50 ms
- UnicodePredicates: 52.75 ms 0.23 us 1.00 ms
- UnicodeProperties: 61.90 ms 0.31 us 1.00 ms
- UnicodeSlicing: 53.75 ms 0.31 us 0.50 ms
-------------------------------------------------------------------------
- Average round time: 2937.00 ms
+Running 10 round(s) of the suite at warp factor 10:
+* Round 1 done in 6.388 seconds.
+* Round 2 done in 6.485 seconds.
+* Round 3 done in 6.786 seconds.
+...
+* Round 10 done in 6.546 seconds.
+
+-------------------------------------------------------------------------------
+Benchmark: 2006-06-12 12:09:25
+-------------------------------------------------------------------------------
+
+ Rounds: 10
+ Warp: 10
+ Timer: time.time
+
+ Machine Details:
+ Platform ID: Linux-2.6.8-24.19-default-x86_64-with-SuSE-9.2-x86-64
+ Processor: x86_64
+
+ Python:
+ Executable: /usr/local/bin/python
+ Version: 2.4.2
+ Compiler: GCC 3.3.4 (pre 3.3.5 20040809)
+ Bits: 64bit
+ Build: Oct 1 2005 15:24:35 (#1)
+ Unicode: UCS2
+
+
+Test minimum average operation overhead
+-------------------------------------------------------------------------------
+ BuiltinFunctionCalls: 126ms 145ms 0.28us 0.274ms
+ BuiltinMethodLookup: 124ms 130ms 0.12us 0.316ms
+ CompareFloats: 109ms 110ms 0.09us 0.361ms
+ CompareFloatsIntegers: 100ms 104ms 0.12us 0.271ms
+ CompareIntegers: 137ms 138ms 0.08us 0.542ms
+ CompareInternedStrings: 124ms 127ms 0.08us 1.367ms
+ CompareLongs: 100ms 104ms 0.10us 0.316ms
+ CompareStrings: 111ms 115ms 0.12us 0.929ms
+ CompareUnicode: 108ms 128ms 0.17us 0.693ms
+ ConcatStrings: 142ms 155ms 0.31us 0.562ms
+ ConcatUnicode: 119ms 127ms 0.42us 0.384ms
+ CreateInstances: 123ms 128ms 1.14us 0.367ms
+ CreateNewInstances: 121ms 126ms 1.49us 0.335ms
+ CreateStringsWithConcat: 130ms 135ms 0.14us 0.916ms
+ CreateUnicodeWithConcat: 130ms 135ms 0.34us 0.361ms
+ DictCreation: 108ms 109ms 0.27us 0.361ms
+ DictWithFloatKeys: 149ms 153ms 0.17us 0.678ms
+ DictWithIntegerKeys: 124ms 126ms 0.11us 0.915ms
+ DictWithStringKeys: 114ms 117ms 0.10us 0.905ms
+ ForLoops: 110ms 111ms 4.46us 0.063ms
+ IfThenElse: 118ms 119ms 0.09us 0.685ms
+ ListSlicing: 116ms 120ms 8.59us 0.103ms
+ NestedForLoops: 125ms 137ms 0.09us 0.019ms
+ NormalClassAttribute: 124ms 136ms 0.11us 0.457ms
+ NormalInstanceAttribute: 110ms 117ms 0.10us 0.454ms
+ PythonFunctionCalls: 107ms 113ms 0.34us 0.271ms
+ PythonMethodCalls: 140ms 149ms 0.66us 0.141ms
+ Recursion: 156ms 166ms 3.32us 0.452ms
+ SecondImport: 112ms 118ms 1.18us 0.180ms
+ SecondPackageImport: 118ms 127ms 1.27us 0.180ms
+ SecondSubmoduleImport: 140ms 151ms 1.51us 0.180ms
+ SimpleComplexArithmetic: 128ms 139ms 0.16us 0.361ms
+ SimpleDictManipulation: 134ms 136ms 0.11us 0.452ms
+ SimpleFloatArithmetic: 110ms 113ms 0.09us 0.571ms
+ SimpleIntFloatArithmetic: 106ms 111ms 0.08us 0.548ms
+ SimpleIntegerArithmetic: 106ms 109ms 0.08us 0.544ms
+ SimpleListManipulation: 103ms 113ms 0.10us 0.587ms
+ SimpleLongArithmetic: 112ms 118ms 0.18us 0.271ms
+ SmallLists: 105ms 116ms 0.17us 0.366ms
+ SmallTuples: 108ms 128ms 0.24us 0.406ms
+ SpecialClassAttribute: 119ms 136ms 0.11us 0.453ms
+ SpecialInstanceAttribute: 143ms 155ms 0.13us 0.454ms
+ StringMappings: 115ms 121ms 0.48us 0.405ms
+ StringPredicates: 120ms 129ms 0.18us 2.064ms
+ StringSlicing: 111ms 127ms 0.23us 0.781ms
+ TryExcept: 125ms 126ms 0.06us 0.681ms
+ TryRaiseExcept: 133ms 137ms 2.14us 0.361ms
+ TupleSlicing: 117ms 120ms 0.46us 0.066ms
+ UnicodeMappings: 156ms 160ms 4.44us 0.429ms
+ UnicodePredicates: 117ms 121ms 0.22us 2.487ms
+ UnicodeProperties: 115ms 153ms 0.38us 2.070ms
+ UnicodeSlicing: 126ms 129ms 0.26us 0.689ms
+-------------------------------------------------------------------------------
+Totals: 6283ms 6673ms
+"""
________________________________________________________________________
Writing New Tests
@@ -293,7 +243,7 @@ class IntegerCounting(Test):
# Number of rounds to execute per test run. This should be
# adjusted to a figure that results in a test run-time of between
- # 20-50 seconds.
+ # 1-2 seconds (at warp 1).
rounds = 100000
def test(self):
@@ -377,6 +327,41 @@ longer strictly comparable with previous runs, the '.version' class
variable should be updated. Thereafter, comparisons with previous
versions of the test will list as "n/a" to reflect the change.
+
+Version History
+---------------
+
+ 2.0: rewrote parts of pybench which resulted in more repeatable
+ timings:
+ - made timer a parameter
+ - changed the platform default timer to use high-resolution
+ timers rather than process timers (which have a much lower
+ resolution)
+ - added option to select timer
+ - added process time timer (using systimes.py)
+ - changed to use min() as timing estimator (average
+ is still taken as well to provide an idea of the difference)
+ - garbage collection is turned off per default
+ - sys check interval is set to the highest possible value
+ - calibration is now a separate step and done using
+ a different strategy that allows measuring the test
+ overhead more accurately
+ - modified the tests to each give a run-time of between
+ 100-200ms using warp 10
+ - changed default warp factor to 10 (from 20)
+ - compared results with timeit.py and confirmed measurements
+ - bumped all test versions to 2.0
+ - updated platform.py to the latest version
+ - changed the output format a bit to make it look
+ nicer
+ - refactored the APIs somewhat
+ 1.3+: Steve Holden added the NewInstances test and the filtering
+ option during the NeedForSpeed sprint; this also triggered a long
+ discussion on how to improve benchmark timing and finally
+ resulted in the release of 2.0
+ 1.3: initial checkin into the Python SVN repository
+
+
Have fun,
--
Marc-Andre Lemburg
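The "Writing New Tests" conventions referenced in the README boil down
to subclassing Test with matching test()/calibrate() loop skeletons. A
minimal sketch following those conventions (the test name and body are
hypothetical, Python 2 style like the rest of the suite):

    from pybench import Test

    class DictLookup(Test):          # hypothetical example test

        version = 2.0                # bump whenever the test body changes
        operations = 5               # abstract operations per round
        rounds = 100000              # aim for a 100-200ms run-time at warp 10

        def test(self):
            d = {'a': 1}
            for i in xrange(self.rounds):
                x = d['a']; x = d['a']; x = d['a']; x = d['a']; x = d['a']

        def calibrate(self):
            # Same loop skeleton minus the measured operations, so the
            # loop overhead can be subtracted out during calibration.
            d = {'a': 1}
            for i in xrange(self.rounds):
                pass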
diff --git a/Tools/pybench/Setup.py b/Tools/pybench/Setup.py
index f5c5190..f1417e6 100644
--- a/Tools/pybench/Setup.py
+++ b/Tools/pybench/Setup.py
@@ -14,7 +14,7 @@
# Defaults
Number_of_rounds = 10
-Warp_factor = 20
+Warp_factor = 10
# Import tests
from Arithmetic import *
@@ -24,8 +24,8 @@ from Lookups import *
from Instances import *
try:
from NewInstances import *
-except:
- print "Cannot test new-style objects"
+except ImportError:
+ pass
from Lists import *
from Tuples import *
from Dict import *
diff --git a/Tools/pybench/Strings.py b/Tools/pybench/Strings.py
index b01843a..3be8b35 100644
--- a/Tools/pybench/Strings.py
+++ b/Tools/pybench/Strings.py
@@ -3,9 +3,9 @@ from string import join
class ConcatStrings(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 5
- rounds = 60000
+ rounds = 100000
def test(self):
@@ -85,7 +85,7 @@ class ConcatStrings(Test):
class CompareStrings(Test):
- version = 0.2
+ version = 2.0
operations = 10 * 5
rounds = 200000
@@ -167,9 +167,9 @@ class CompareStrings(Test):
class CompareInternedStrings(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 5
- rounds = 200000
+ rounds = 300000
def test(self):
@@ -249,9 +249,9 @@ class CompareInternedStrings(Test):
class CreateStringsWithConcat(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 5
- rounds = 80000
+ rounds = 200000
def test(self):
@@ -324,9 +324,9 @@ class CreateStringsWithConcat(Test):
class StringSlicing(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 7
- rounds = 100000
+ rounds = 160000
def test(self):
@@ -387,7 +387,7 @@ if hasattr('', 'lower'):
class StringMappings(Test):
- version = 0.1
+ version = 2.0
operations = 3 * (5 + 4 + 2 + 1)
rounds = 70000
@@ -460,9 +460,9 @@ if hasattr('', 'lower'):
class StringPredicates(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 7
- rounds = 80000
+ rounds = 100000
def test(self):
diff --git a/Tools/pybench/Tuples.py b/Tools/pybench/Tuples.py
index e84ea53..8e46989 100644
--- a/Tools/pybench/Tuples.py
+++ b/Tools/pybench/Tuples.py
@@ -2,18 +2,17 @@ from pybench import Test
class TupleSlicing(Test):
- version = 0.31
+ version = 2.0
operations = 3 * 25 * 10 * 7
- rounds = 400
+ rounds = 500
def test(self):
r = range(25)
+ t = tuple(range(100))
for i in xrange(self.rounds):
- t = tuple(range(100))
-
for j in r:
m = t[50:]
@@ -259,20 +258,17 @@ class TupleSlicing(Test):
def calibrate(self):
r = range(25)
+ t = tuple(range(100))
for i in xrange(self.rounds):
-
- t = tuple(range(100))
-
for j in r:
-
pass
class SmallTuples(Test):
- version = 0.3
+ version = 2.0
operations = 5*(1 + 3 + 6 + 2)
- rounds = 80000
+ rounds = 90000
def test(self):
diff --git a/Tools/pybench/Unicode.py b/Tools/pybench/Unicode.py
index 366f171..153a91e 100644
--- a/Tools/pybench/Unicode.py
+++ b/Tools/pybench/Unicode.py
@@ -8,7 +8,7 @@ from string import join
class ConcatUnicode(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 5
rounds = 60000
@@ -90,7 +90,7 @@ class ConcatUnicode(Test):
class CompareUnicode(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 5
rounds = 150000
@@ -172,7 +172,7 @@ class CompareUnicode(Test):
class CreateUnicodeWithConcat(Test):
- version = 0.1
+ version = 2.0
operations = 10 * 5
rounds = 80000
@@ -247,9 +247,9 @@ class CreateUnicodeWithConcat(Test):
class UnicodeSlicing(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 7
- rounds = 100000
+ rounds = 140000
def test(self):
@@ -308,7 +308,7 @@ class UnicodeSlicing(Test):
class UnicodeMappings(Test):
- version = 0.1
+ version = 2.0
operations = 3 * (5 + 4 + 2 + 1)
rounds = 10000
@@ -381,9 +381,9 @@ class UnicodeMappings(Test):
class UnicodePredicates(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 9
- rounds = 100000
+ rounds = 120000
def test(self):
@@ -458,7 +458,7 @@ except ImportError:
else:
class UnicodeProperties(Test):
- version = 0.1
+ version = 2.0
operations = 5 * 8
rounds = 100000
diff --git a/Tools/pybench/clockres.py b/Tools/pybench/clockres.py
new file mode 100644
index 0000000..a7855f2
--- /dev/null
+++ b/Tools/pybench/clockres.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+""" clockres - calculates the resolution in seconds of a given timer.
+
+ Copyright (c) 2006, Marc-Andre Lemburg (mal@egenix.com). See the
+ documentation for further information on copyrights, or contact
+ the author. All Rights Reserved.
+
+"""
+import time
+
+TEST_TIME = 1.0
+
+def clockres(timer):
+ d = {}
+ wallclock = time.time
+ start = wallclock()
+ stop = wallclock() + TEST_TIME
+ spin_loops = range(1000)
+ while 1:
+ now = wallclock()
+ if now >= stop:
+ break
+ for i in spin_loops:
+ d[timer()] = 1
+ values = d.keys()
+ values.sort()
+ min_diff = TEST_TIME
+ for i in range(len(values) - 1):
+ diff = values[i+1] - values[i]
+ if diff < min_diff:
+ min_diff = diff
+ return min_diff
+
+if __name__ == '__main__':
+ print 'Clock resolution of various timer implementations:'
+ print 'time.clock: %10.3fus' % (clockres(time.clock) * 1e6)
+ print 'time.time: %10.3fus' % (clockres(time.time) * 1e6)
+ try:
+ import systimes
+ print 'systimes.processtime: %10.3fus' % (clockres(systimes.processtime) * 1e6)
+ except ImportError:
+ pass
+
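clockres() spins on the given timer for TEST_TIME wall-clock seconds,
collects the distinct readings in a dict, and reports the smallest gap
between consecutive values as the timer's effective resolution.
Measuring a timer of your own (a sketch, assuming clockres.py is on the
import path):

    import time
    from clockres import clockres

    # Resolution comes back in seconds; scale by 1e6 for microseconds.
    res = clockres(time.time)
    print 'time.time resolution: %10.3fus' % (res * 1e6)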
diff --git a/Tools/pybench/pybench.py b/Tools/pybench/pybench.py
index e0110d0..8ff16c5 100755
--- a/Tools/pybench/pybench.py
+++ b/Tools/pybench/pybench.py
@@ -34,20 +34,7 @@ NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
-# Version number
-__version__ = '1.3'
-
-#
-# NOTE: Use xrange for all test loops unless you want to face
-# a 20MB process !
-#
-# All tests should have rounds set to values so that a run()
-# takes between 20-50 seconds. This is to get fairly good
-# clock() values. You can use option -w to speedup the tests
-# by a fixed integer factor (the "warp factor").
-#
-
-import sys,time,operator
+import sys, time, operator, string
from CommandLine import *
try:
@@ -56,6 +43,111 @@ try:
except ImportError:
import pickle
+# Version number; version history: see README file !
+__version__ = '2.0'
+
+### Constants
+
+# Second fractions
+MILLI_SECONDS = 1e3
+MICRO_SECONDS = 1e6
+
+# Percent unit
+PERCENT = 100
+
+# Horizontal line length
+LINE = 79
+
+# Minimum test run-time
+MIN_TEST_RUNTIME = 1e-3
+
+# Number of calibration runs to use for calibrating the tests
+CALIBRATION_RUNS = 20
+
+# Number of calibration loops to run for each calibration run
+CALIBRATION_LOOPS = 20
+
+# Allow skipping calibration ?
+ALLOW_SKIPPING_CALIBRATION = 1
+
+# Timer types
+TIMER_TIME_TIME = 'time.time'
+TIMER_TIME_CLOCK = 'time.clock'
+TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'
+
+# Choose platform default timer
+if sys.platform[:3] == 'win':
+ # On WinXP this has 2.5ms resolution
+ TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
+else:
+ # On Linux this has 1ms resolution
+ TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME
+
+# Print debug information ?
+_debug = 0
+
+### Helpers
+
+def get_timer(timertype):
+
+ if timertype == TIMER_TIME_TIME:
+ return time.time
+ elif timertype == TIMER_TIME_CLOCK:
+ return time.clock
+ elif timertype == TIMER_SYSTIMES_PROCESSTIME:
+ import systimes
+ return systimes.processtime
+ else:
+ raise TypeError('unknown timer type: %s' % timertype)
+
+def get_machine_details():
+
+ import platform
+ if _debug:
+ print 'Getting machine details...'
+ buildno, builddate = platform.python_build()
+ python = platform.python_version()
+ if python > '2.0':
+ try:
+ unichr(100000)
+ except ValueError:
+ # UCS2 build (standard)
+ unicode = 'UCS2'
+ else:
+ # UCS4 build (most recent Linux distros)
+ unicode = 'UCS4'
+ else:
+ unicode = None
+ bits, linkage = platform.architecture()
+ return {
+ 'platform': platform.platform(),
+ 'processor': platform.processor(),
+ 'executable': sys.executable,
+ 'python': platform.python_version(),
+ 'compiler': platform.python_compiler(),
+ 'buildno': buildno,
+ 'builddate': builddate,
+ 'unicode': unicode,
+ 'bits': bits,
+ }
+
+def print_machine_details(d, indent=''):
+
+ l = ['Machine Details:',
+ ' Platform ID: %s' % d.get('platform', 'n/a'),
+ ' Processor: %s' % d.get('processor', 'n/a'),
+ '',
+ 'Python:',
+ ' Executable: %s' % d.get('executable', 'n/a'),
+ ' Version: %s' % d.get('python', 'n/a'),
+ ' Compiler: %s' % d.get('compiler', 'n/a'),
+ ' Bits: %s' % d.get('bits', 'n/a'),
+ ' Build: %s (#%s)' % (d.get('builddate', 'n/a'),
+ d.get('buildno', 'n/a')),
+ ' Unicode: %s' % d.get('unicode', 'n/a'),
+ ]
+ print indent + string.join(l, '\n' + indent) + '\n'
+
### Test baseclass
class Test:
@@ -84,7 +176,7 @@ class Test:
# Version number of the test as float (x.yy); this is important
# for comparisons of benchmark runs - tests with unequal version
# number will not get compared.
- version = 1.0
+ version = 2.0
# The number of abstract operations done in each round of the
# test. An operation is the basic unit of what you want to
@@ -97,36 +189,125 @@ class Test:
# Number of rounds to execute per test run. This should be
# adjusted to a figure that results in a test run-time of between
- # 20-50 seconds.
- rounds = 10000
+ # 1-2 seconds.
+ rounds = 100000
### Internal variables
# Mark this class as implementing a test
is_a_test = 1
- # Misc. internal variables
- last_timing = (0,0,0) # last timing (real,run,calibration)
- warp = 1 # warp factor this test uses
- cruns = 20 # number of calibration runs
- overhead = None # list of calibration timings
+ # Last timing: (real, run, overhead)
+ last_timing = (0.0, 0.0, 0.0)
+
+ # Warp factor to use for this test
+ warp = 1
- def __init__(self,warp=1):
+ # Number of calibration runs to use
+ calibration_runs = CALIBRATION_RUNS
- if warp > 1:
- self.rounds = self.rounds / warp
+ # List of calibration timings
+ overhead_times = None
+
+ # List of test run timings
+ times = []
+
+ # Timer used for the benchmark
+ timer = TIMER_PLATFORM_DEFAULT
+
+ def __init__(self, warp=None, calibration_runs=None, timer=None):
+
+ # Set parameters
+ if warp is not None:
+ self.rounds = int(self.rounds / warp)
if self.rounds == 0:
- self.rounds = 1
+ raise ValueError('warp factor set too high')
self.warp = warp
+ if calibration_runs is not None:
+ if (not ALLOW_SKIPPING_CALIBRATION and
+ calibration_runs < 1):
+ raise ValueError('at least one calibration run is required')
+ self.calibration_runs = calibration_runs
+ if timer is not None:
+            self.timer = timer
+
+ # Init variables
self.times = []
- self.overhead = []
+ self.overhead_times = []
+
# We want these to be in the instance dict, so that pickle
# saves them
self.version = self.version
self.operations = self.operations
self.rounds = self.rounds
- def run(self, cruns):
+ def get_timer(self):
+
+ """ Return the timer function to use for the test.
+
+ """
+ return get_timer(self.timer)
+
+ def compatible(self, other):
+
+ """ Return 1/0 depending on whether the test is compatible
+ with the other Test instance or not.
+
+ """
+ if self.version != other.version:
+ return 0
+ if self.rounds != other.rounds:
+ return 0
+ return 1
+
+ def calibrate_test(self):
+
+ if self.calibration_runs == 0:
+ self.overhead_times = [0.0]
+ return
+
+ calibrate = self.calibrate
+ timer = self.get_timer()
+ calibration_loops = range(CALIBRATION_LOOPS)
+
+ # Time the calibration loop overhead
+ prep_times = []
+ for i in range(self.calibration_runs):
+ t = timer()
+ for i in calibration_loops:
+ pass
+ t = timer() - t
+ prep_times.append(t)
+ min_prep_time = min(prep_times)
+ if _debug:
+ print
+ print 'Calib. prep time = %.6fms' % (
+ min_prep_time * MILLI_SECONDS)
+
+ # Time the calibration runs (doing CALIBRATION_LOOPS loops of
+ # .calibrate() method calls each)
+ for i in range(self.calibration_runs):
+ t = timer()
+ for i in calibration_loops:
+ calibrate()
+ t = timer() - t
+ self.overhead_times.append(t / CALIBRATION_LOOPS
+ - min_prep_time)
+
+ # Check the measured times
+ min_overhead = min(self.overhead_times)
+ max_overhead = max(self.overhead_times)
+ if _debug:
+ print 'Calib. overhead time = %.6fms' % (
+ min_overhead * MILLI_SECONDS)
+ if min_overhead < 0.0:
+ raise ValueError('calibration setup did not work')
+ if max_overhead - min_overhead > 0.1:
+ raise ValueError(
+ 'overhead calibration timing range too inaccurate: '
+ '%r - %r' % (min_overhead, max_overhead))
+
+ def run(self):
""" Run the test in two phases: first calibrate, then
do the actual test. Be careful to keep the calibration
@@ -134,27 +315,23 @@ class Test:
"""
test = self.test
- calibrate = self.calibrate
- clock = time.clock
- # first calibrate
- t = clock()
- calibrate()
- offset = clock() - t
- if cruns:
- for i in range(cruns-1):
- t = clock()
- calibrate()
- t = clock() - t
- if t < offset:
- offset = t
- # now the real thing
- t = clock()
+ timer = self.get_timer()
+
+ # Get calibration
+ min_overhead = min(self.overhead_times)
+
+ # Test run
+ t = timer()
test()
- t = clock() - t
- if t < 0.01:
- sys.exit("Lower warp required: test times < 10 ms are unreliable")
- self.last_timing = (t-offset,t,offset)
- self.times.append(t-offset)
+ t = timer() - t
+ if t < MIN_TEST_RUNTIME:
+            raise ValueError('warp factor too high: '
+                             'test times are < 1ms')
+ eff_time = t - min_overhead
+ if eff_time < 0:
+ raise ValueError('wrong calibration')
+ self.last_timing = (eff_time, t, min_overhead)
+ self.times.append(eff_time)
def calibrate(self):
@@ -176,33 +353,33 @@ class Test:
self.operations number of operations each.
"""
- # do some tests
return
def stat(self):
- """ Returns four values:
- minimum round time
- average time per round
- average time per operation
- average overhead time
+ """ Return test run statistics as tuple:
+
+ (minimum run time,
+ average run time,
+ total run time,
+ average time per operation,
+ minimum overhead time)
- XXX Should this take warp factors into account?
"""
runs = len(self.times)
if runs == 0:
- return 0,0
- mintime = min(self.times)
- totaltime = reduce(operator.add,self.times,0.0)
- avg = totaltime / float(runs)
- op_avg = totaltime / float(runs * self.rounds * self.operations)
- if self.overhead:
- totaloverhead = reduce(operator.add,self.overhead,0.0)
- ov_avg = totaloverhead / float(runs)
+            return 0.0, 0.0, 0.0, 0.0, 0.0
+ min_time = min(self.times)
+ total_time = reduce(operator.add, self.times, 0.0)
+ avg_time = total_time / float(runs)
+ operation_avg = total_time / float(runs
+ * self.rounds
+ * self.operations)
+ if self.overhead_times:
+ min_overhead = min(self.overhead_times)
else:
- # use self.last_timing - not too accurate
- ov_avg = self.last_timing[2]
- return mintime, avg, op_avg, ov_avg
+ min_overhead = self.last_timing[2]
+ return min_time, avg_time, total_time, operation_avg, min_overhead
### Load Setup
@@ -215,153 +392,353 @@ import Setup
class Benchmark:
- name = '?' # Name of the benchmark
- rounds = 1 # Number of rounds to run
+ # Name of the benchmark
+ name = ''
+
+ # Number of benchmark rounds to run
+ rounds = 1
+
+ # Warp factor use to run the tests
warp = 1 # Warp factor
- roundtime = 0 # Average round time
- version = None # Benchmark version number (see __init__)
- # as float x.yy
- def __init__(self):
+ # Average benchmark round time
+ roundtime = 0
- self.tests = {}
- self.version = 0.31
+ # Benchmark version number as float x.yy
+ version = 2.0
- def load_tests(self, setupmod, warp=1, limitnames="", verbose=0):
+ # Produce verbose output ?
+ verbose = 0
- self.warp = warp
- if limitnames:
- limitnames = re.compile(limitnames, re.I)
+ # Dictionary with the machine details
+ machine_details = None
+
+ # Timer used for the benchmark
+ timer = TIMER_PLATFORM_DEFAULT
+
+ def __init__(self, name, verbose=None, timer=None, warp=None,
+ calibration_runs=None):
+
+ if name:
+ self.name = name
else:
- limitnames = None
- tests = self.tests
- if verbose:
- print 'Searching for tests ...',
- setupmod.__dict__.values()
- for c in setupmod.__dict__.values():
- if not hasattr(c,'is_a_test'):
+ self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
+ (time.localtime(time.time())[:6])
+ if verbose is not None:
+ self.verbose = verbose
+ if timer is not None:
+ self.timer = timer
+ if warp is not None:
+ self.warp = warp
+ if calibration_runs is not None:
+ self.calibration_runs = calibration_runs
+
+ # Init vars
+ self.tests = {}
+ if _debug:
+ print 'Getting machine details...'
+ self.machine_details = get_machine_details()
+
+ # Make .version an instance attribute to have it saved in the
+ # Benchmark pickle
+ self.version = self.version
+
+ def get_timer(self):
+
+ """ Return the timer function to use for the test.
+
+ """
+ return get_timer(self.timer)
+
+ def compatible(self, other):
+
+ """ Return 1/0 depending on whether the benchmark is
+ compatible with the other Benchmark instance or not.
+
+ """
+ if self.version != other.version:
+ return 0
+ if (self.machine_details == other.machine_details and
+ self.timer != other.timer):
+ return 0
+ if (self.calibration_runs == 0 and
+ other.calibration_runs != 0):
+ return 0
+ if (self.calibration_runs != 0 and
+ other.calibration_runs == 0):
+ return 0
+ return 1
+
+ def load_tests(self, setupmod, limitnames=None):
+
+ # Add tests
+ if self.verbose:
+ print 'Searching for tests ...'
+ print '--------------------------------------'
+ for testclass in setupmod.__dict__.values():
+ if not hasattr(testclass, 'is_a_test'):
continue
- name = c.__name__
+ name = testclass.__name__
if name == 'Test':
continue
- if limitnames is not None and limitnames.search(name) is None:
+ if (limitnames is not None and
+ limitnames.search(name) is None):
continue
- tests[name] = c(warp)
- l = tests.keys()
+ self.tests[name] = testclass(
+ warp=self.warp,
+ calibration_runs=self.calibration_runs,
+ timer=self.timer)
+ l = self.tests.keys()
l.sort()
- if verbose:
+ if self.verbose:
+ for name in l:
+ print ' %s' % name
+ print '--------------------------------------'
+ print ' %i tests found' % len(l)
+ print
+
+ def calibrate(self):
+
+ print 'Calibrating tests. Please wait...'
+ if self.verbose:
print
- for t in l:
- print ' ', t
- print len(l), "tests found"
+ print 'Test min max'
+ print '-' * LINE
+ tests = self.tests.items()
+ tests.sort()
+ for i in range(len(tests)):
+ name, test = tests[i]
+ test.calibrate_test()
+ if self.verbose:
+ print '%30s: %6.3fms %6.3fms' % \
+ (name,
+ min(test.overhead_times) * MILLI_SECONDS,
+ max(test.overhead_times) * MILLI_SECONDS)
print
- def run(self, verbose, cruns):
+ def run(self):
tests = self.tests.items()
tests.sort()
- clock = time.clock
- print 'Running %i round(s) of the suite at warp factor %i:' % (self.rounds, self.warp)
+ timer = self.get_timer()
+ print 'Running %i round(s) of the suite at warp factor %i:' % \
+ (self.rounds, self.warp)
print
- roundtime = clock()
+ self.roundtimes = []
for i in range(self.rounds):
- roundstarttime = clock()
- if verbose:
- print ' Round %-25i real abs overhead' % (i+1)
+ if self.verbose:
+ print ' Round %-25i effective absolute overhead' % (i+1)
+ total_eff_time = 0.0
for j in range(len(tests)):
- name, t = tests[j]
- if verbose:
+ name, test = tests[j]
+ if self.verbose:
print '%30s:' % name,
- t.run(cruns)
- if verbose:
- print ' %.3fr %.3fa %.3fo' % t.last_timing
- if verbose:
- print ' ----------------------'
- print ' Average round time: %.3f seconds' % \
- ((clock() - roundtime)/(i+1))
+ test.run()
+ (eff_time, abs_time, min_overhead) = test.last_timing
+ total_eff_time = total_eff_time + eff_time
+ if self.verbose:
+ print ' %5.0fms %5.0fms %7.3fms' % \
+ (eff_time * MILLI_SECONDS,
+ abs_time * MILLI_SECONDS,
+ min_overhead * MILLI_SECONDS)
+ self.roundtimes.append(total_eff_time)
+ if self.verbose:
+ print (' '
+ ' ------------------------------')
+ print (' '
+ ' Totals: %6.0fms' %
+ (total_eff_time * MILLI_SECONDS))
print
else:
- print '%d done in %.3f seconds' % (i+1, (clock() - roundstarttime))
- self.roundtime = (clock() - roundtime) / self.rounds
+ print '* Round %i done in %.3f seconds.' % (i+1,
+ total_eff_time)
print
- def print_stat(self, compare_to=None, hidenoise=0):
-
- if not compare_to:
- print '%-30s min run avg run per oprn overhead' % 'Tests:'
- print '-'*77
- tests = self.tests.items()
- tests.sort()
- totalmintime = 0
- for name,t in tests:
- mintime,avg,op_avg,ov_avg = t.stat()
- totalmintime += mintime
- print '%30s: %9.2f ms %9.2f ms %6.2f us %6.2f' % \
- (name,mintime*1000.0,avg*1000.0,op_avg*1000000.0,ov_avg*1000.0)
- print '-'*77
- print '%30s: %9.2f ms' % \
- ('Notional minimum round time', totalmintime * 1000.0)
+ def stat(self):
- else:
- print 'Comparing with: %s (rounds=%i, warp=%i)' % \
- (compare_to.name,compare_to.rounds,compare_to.warp)
- print '%-30s min run cmp run avg run diff' % \
- 'Tests:'
- print '-'*77
- tests = self.tests.items()
- tests.sort()
- compatible = 1
- totalmintime = other_totalmintime = 0
- for name, t in tests:
- mintime, avg, op_avg, ov_avg = t.stat()
- totalmintime += mintime
- try:
- other = compare_to.tests[name]
- except KeyError:
- other = None
- if other and other.version == t.version and \
- other.operations == t.operations:
- mintime1, avg1, op_avg1, ov_avg1 = other.stat()
- other_totalmintime += mintime1
- diff = ((mintime*self.warp)/(mintime1*other.warp) - 1.0)*100.0
- if hidenoise and abs(qop_avg) < 10:
- diff = ''
+ """ Return benchmark run statistics as tuple:
+
+ (minimum round time,
+ average round time,
+ maximum round time)
+
+ XXX Currently not used, since the benchmark does test
+ statistics across all rounds.
+
+ """
+ runs = len(self.roundtimes)
+ if runs == 0:
+            return 0.0, 0.0, 0.0
+ min_time = min(self.roundtimes)
+ total_time = reduce(operator.add, self.roundtimes, 0.0)
+ avg_time = total_time / float(runs)
+ max_time = max(self.roundtimes)
+ return (min_time, avg_time, max_time)
+
+ def print_header(self, title='Benchmark'):
+
+ print '-' * LINE
+ print '%s: %s' % (title, self.name)
+ print '-' * LINE
+ print
+ print ' Rounds: %s' % self.rounds
+ print ' Warp: %s' % self.warp
+ print ' Timer: %s' % self.timer
+ print
+ if self.machine_details:
+ print_machine_details(self.machine_details, indent=' ')
+ print
+
+ def print_benchmark(self, hidenoise=0, limitnames=None):
+
+ print ('Test '
+ ' minimum average operation overhead')
+ print '-' * LINE
+ tests = self.tests.items()
+ tests.sort()
+ total_min_time = 0.0
+ total_avg_time = 0.0
+ for name, test in tests:
+ if (limitnames is not None and
+ limitnames.search(name) is None):
+ continue
+ (min_time,
+ avg_time,
+ total_time,
+ op_avg,
+ min_overhead) = test.stat()
+ total_min_time = total_min_time + min_time
+ total_avg_time = total_avg_time + avg_time
+ print '%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
+ (name,
+ min_time * MILLI_SECONDS,
+ avg_time * MILLI_SECONDS,
+ op_avg * MICRO_SECONDS,
+                   min_overhead * MILLI_SECONDS)
+ print '-' * LINE
+ print ('Totals: '
+ ' %6.0fms %6.0fms' %
+ (total_min_time * MILLI_SECONDS,
+ total_avg_time * MILLI_SECONDS,
+ ))
+ print
+
+ def print_comparison(self, compare_to, hidenoise=0, limitnames=None):
+
+ # Check benchmark versions
+ if compare_to.version != self.version:
+ print ('* Benchmark versions differ: '
+ 'cannot compare this benchmark to "%s" !' %
+ compare_to.name)
+ print
+ self.print_benchmark(hidenoise=hidenoise,
+ limitnames=limitnames)
+ return
+
+ # Print header
+ compare_to.print_header('Comparing with')
+ print ('Test '
+ ' minimum run-time average run-time')
+ print (' '
+ ' this other diff this other diff')
+ print '-' * LINE
+
+ # Print test comparisons
+ tests = self.tests.items()
+ tests.sort()
+ total_min_time = other_total_min_time = 0.0
+ total_avg_time = other_total_avg_time = 0.0
+ benchmarks_compatible = self.compatible(compare_to)
+ tests_compatible = 1
+ for name, test in tests:
+ if (limitnames is not None and
+ limitnames.search(name) is None):
+ continue
+ (min_time,
+ avg_time,
+ total_time,
+ op_avg,
+ min_overhead) = test.stat()
+ total_min_time = total_min_time + min_time
+ total_avg_time = total_avg_time + avg_time
+ try:
+ other = compare_to.tests[name]
+ except KeyError:
+ other = None
+ if other is None:
+ # Other benchmark doesn't include the given test
+ min_diff, avg_diff = 'n/a', 'n/a'
+ other_min_time = 0.0
+ other_avg_time = 0.0
+ tests_compatible = 0
+ else:
+ (other_min_time,
+ other_avg_time,
+ other_total_time,
+ other_op_avg,
+ other_min_overhead) = other.stat()
+ other_total_min_time = other_total_min_time + other_min_time
+ other_total_avg_time = other_total_avg_time + other_avg_time
+ if (benchmarks_compatible and
+ test.compatible(other)):
+                    # Both benchmark and tests are comparable
+ min_diff = ((min_time * self.warp) /
+ (other_min_time * other.warp) - 1.0)
+ avg_diff = ((avg_time * self.warp) /
+ (other_avg_time * other.warp) - 1.0)
+                    if hidenoise and abs(min_diff) < 0.10:
+ min_diff = ''
+ else:
+ min_diff = '%+5.1f%%' % (min_diff * PERCENT)
+                    if hidenoise and abs(avg_diff) < 0.10:
+ avg_diff = ''
else:
- diff = '%+7.2f%%' % diff
+ avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
else:
- qavg, diff = 'n/a', 'n/a'
- compatible = 0
- print '%30s: %8.2f ms %8.2f ms %8.2f ms %8s' % \
- (name,mintime*1000.0,mintime1*1000.0 * compare_to.warp/self.warp, avg*1000.0,diff)
- print '-'*77
- #
- # Summarise test results
- #
- if compatible and compare_to.roundtime > 0 and \
- compare_to.version == self.version:
- print '%30s: %8.2f ms %8.2f ms %+7.2f%%' % \
- ('Notional minimum round time', totalmintime * 1000.0,
- other_totalmintime * 1000.0 * compare_to.warp/self.warp,
- ((totalmintime*self.warp)/
- (other_totalmintime*compare_to.warp)-1.0)*100.0)
+                # Benchmark or tests are not comparable
+ min_diff, avg_diff = 'n/a', 'n/a'
+ tests_compatible = 0
+ print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
+ (name,
+ min_time * MILLI_SECONDS,
+ other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
+ min_diff,
+ avg_time * MILLI_SECONDS,
+ other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
+ avg_diff)
+ print '-' * LINE
+
+ # Summarise test results
+ if not benchmarks_compatible or not tests_compatible:
+ min_diff, avg_diff = 'n/a', 'n/a'
+ else:
+ if other_total_min_time != 0.0:
+ min_diff = '%+5.1f%%' % (
+ ((total_min_time * self.warp) /
+ (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
+ else:
+ min_diff = 'n/a'
+ if other_total_avg_time != 0.0:
+ avg_diff = '%+5.1f%%' % (
+ ((total_avg_time * self.warp) /
+ (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
else:
- print '%30s: %9.2f ms n/a' % \
- ('Notional minimum round time', totalmintime * 1000.0)
+ avg_diff = 'n/a'
+ print ('Totals: '
+ ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
+ (total_min_time * MILLI_SECONDS,
+ (other_total_min_time * compare_to.warp/self.warp
+ * MILLI_SECONDS),
+ min_diff,
+ total_avg_time * MILLI_SECONDS,
+ (other_total_avg_time * compare_to.warp/self.warp
+ * MILLI_SECONDS),
+ avg_diff
+ ))
+ print
+ print '(this=%s, other=%s)' % (self.name,
+ compare_to.name)
print
-
-def print_machine():
-
- import platform
- print 'Machine Details:'
- print ' Platform ID: %s' % platform.platform()
- print ' Executable: %s' % sys.executable
- # There's a bug in Python 2.2b1+...
- if sys.version[:6] == '2.2b1+':
- return
- print ' Python: %s' % platform.python_version()
- print ' Compiler: %s' % platform.python_compiler()
- buildno, builddate = platform.python_build()
- print ' Build: %s (#%s)' % (builddate, buildno)
class PyBenchCmdline(Application):
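For reference, the diff percentages printed above reduce to a warp-normalised ratio. A minimal sketch, using hypothetical sample timings and assuming PERCENT = 100 as implied by the '%+5.1f%%' scaling in the hunk above:

# Sketch of the warp-adjusted comparison used in print_comparison().
# Sample timings are hypothetical; PERCENT is assumed to be 100.
PERCENT = 100

def relative_diff(this_time, this_warp, other_time, other_warp):
    # Normalise both timings by their warp factors, then express
    # the result relative to the other benchmark's timing.
    return (this_time * this_warp) / (other_time * other_warp) - 1.0

# e.g. 120ms at warp 10 against 150ms at warp 10 gives -20.0%
print '%+5.1f%%' % (relative_diff(0.120, 10, 0.150, 10) * PERCENT)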
@@ -370,50 +747,64 @@ class PyBenchCmdline(Application):
version = __version__
- options = [ArgumentOption('-n','number of rounds',Setup.Number_of_rounds),
- ArgumentOption('-f','save benchmark to file arg',''),
- ArgumentOption('-c','compare benchmark with the one in file arg',''),
- ArgumentOption('-s','show benchmark in file arg, then exit',''),
- SwitchOption('-S','show statistics of benchmarks',0),
- ArgumentOption('-w','set warp factor to arg',Setup.Warp_factor),
- SwitchOption('-d','hide noise in compares', 0),
- SwitchOption('-v','verbose output (not recommended)', 0),
- SwitchOption('--no-gc','disable garbage collection', 0),
- SwitchOption('--no-syscheck',
- '"disable" sys check interval (set to sys.maxint)', 0),
- ArgumentOption('-t', 'tests containing substring', ''),
- ArgumentOption('-C', 'number of calibration runs', 20)
+ debug = _debug
+
+ options = [ArgumentOption('-n',
+ 'number of rounds',
+ Setup.Number_of_rounds),
+ ArgumentOption('-f',
+ 'save benchmark to file arg',
+ ''),
+ ArgumentOption('-c',
+ 'compare benchmark with the one in file arg',
+ ''),
+ ArgumentOption('-s',
+ 'show benchmark in file arg, then exit',
+ ''),
+ ArgumentOption('-w',
+ 'set warp factor to arg',
+ Setup.Warp_factor),
+ ArgumentOption('-t',
+ 'run only tests with names matching arg',
+ ''),
+ ArgumentOption('-C',
+ 'set the number of calibration runs to arg',
+ CALIBRATION_RUNS),
+ SwitchOption('-d',
+ 'hide noise in comparisons',
+ 0),
+ SwitchOption('-v',
+ 'verbose output (not recommended)',
+ 0),
+ SwitchOption('--with-gc',
+ 'enable garbage collection',
+ 0),
+ SwitchOption('--with-syscheck',
+ 'use default sys check interval',
+ 0),
+ ArgumentOption('--timer',
+ 'use given timer',
+ TIMER_PLATFORM_DEFAULT),
]
about = """\
The normal operation is to run the suite and display the
-results. Use -f to save them for later reuse or comparisms.
+results. Use -f to save them for later reuse or comparisons.
+
+Available timers:
+
+ time.time
+ time.clock
+ systimes.processtime
Examples:
-python1.5 pybench.py -w 100 -f p15
-python1.4 pybench.py -w 100 -f p14
-python pybench.py -s p15 -c p14
+python2.1 pybench.py -f p21.pybench
+python2.5 pybench.py -f p25.pybench
+python pybench.py -s p25.pybench -c p21.pybench
"""
copyright = __copyright__
- def handle_S(self, value):
-
- """ Display one line stats for each benchmark file given on the
- command line.
-
- """
- for benchmark in self.files:
- try:
- f = open(benchmark, 'rb')
- bench = pickle.load(f)
- f.close()
- except IOError:
- print '* Error opening/reading file %s' % repr(benchmark)
- else:
- print '%s,%-.2f,ms' % (benchmark, bench.roundtime*1000.0)
- return 0
-
def main(self):
rounds = self.values['-n']
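By way of illustration, the reworked options above combine as in these hypothetical invocations (the file names and the -t substring are placeholders):

python pybench.py -f p25.pybench --with-gc --timer time.time
python pybench.py -s p25.pybench -c p21.pybench -d -t String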
@@ -421,38 +812,52 @@ python pybench.py -s p15 -c p14
show_bench = self.values['-s']
compare_to = self.values['-c']
hidenoise = self.values['-d']
- warp = self.values['-w']
- nogc = self.values['--no-gc']
+ warp = int(self.values['-w'])
+ withgc = self.values['--with-gc']
limitnames = self.values['-t']
+ if limitnames:
+ if _debug:
+                print '* limiting test names to ones with substring "%s"' % \
+ limitnames
+ limitnames = re.compile(limitnames, re.I)
+ else:
+ limitnames = None
verbose = self.verbose
- nosyscheck = self.values['--no-syscheck']
- cruns = self.values['-C']
- print "CRUNS:", cruns
+ withsyscheck = self.values['--with-syscheck']
+ calibration_runs = self.values['-C']
+ timer = self.values['--timer']
- print 'PYBENCH',__version__
+ print '-' * LINE
+ print 'PYBENCH %s' % __version__
+ print '-' * LINE
+ print '* using Python %s' % (string.split(sys.version)[0])
- # Switch off GC
- if nogc:
+ # Switch off garbage collection
+ if not withgc:
try:
import gc
except ImportError:
- nogc = 0
+ print '* Python version doesn\'t support garbage collection'
else:
- if self.values['--no-gc']:
- gc.disable()
- print 'NO GC'
-
- # maximise sys check interval
- if nosyscheck:
- sys.setcheckinterval(sys.maxint)
- print 'CHECKINTERVAL =', sys.maxint
+ gc.disable()
+ print '* disabled garbage collection'
+
+ # "Disable" sys check interval
+ if not withsyscheck:
+ # Too bad the check interval uses an int instead of a long...
+ value = 2147483647
+ sys.setcheckinterval(value)
+ print '* system check interval set to maximum: %s' % value
+
+ if timer == TIMER_SYSTIMES_PROCESSTIME:
+ import systimes
+ print '* using timer: systimes.processtime (%s)' % \
+ systimes.SYSTIMES_IMPLEMENTATION
+ else:
+ print '* using timer: %s' % timer
print
- if not compare_to:
- print_machine()
- print
-
if compare_to:
try:
f = open(compare_to,'rb')
@@ -460,8 +865,10 @@ python pybench.py -s p15 -c p14
bench.name = compare_to
f.close()
compare_to = bench
- except IOError:
- print '* Error opening/reading file',compare_to
+ except IOError, reason:
+ print '* Error opening/reading file %s: %s' % (
+ repr(compare_to),
+ reason)
compare_to = None
if show_bench:
@@ -470,37 +877,52 @@ python pybench.py -s p15 -c p14
bench = pickle.load(f)
bench.name = show_bench
f.close()
- print 'Benchmark: %s (rounds=%i, warp=%i)' % \
- (bench.name,bench.rounds,bench.warp)
- print
- bench.print_stat(compare_to, hidenoise)
+ bench.print_header()
+ if compare_to:
+ bench.print_comparison(compare_to,
+ hidenoise=hidenoise,
+ limitnames=limitnames)
+ else:
+ bench.print_benchmark(hidenoise=hidenoise,
+ limitnames=limitnames)
-            except IOError:
+            except IOError, reason:
- print '* Error opening/reading file',show_bench
+ print '* Error opening/reading file %s: %s' % (
+ repr(show_bench),
+ reason)
print
return
if reportfile:
- if nogc:
- print 'Benchmark: %s (rounds=%i, warp=%i, no GC)' % \
- (reportfile,rounds,warp)
- else:
- print 'Benchmark: %s (rounds=%i, warp=%i)' % \
- (reportfile,rounds,warp)
+ print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
+ (reportfile, rounds, warp)
print
# Create benchmark object
- bench = Benchmark()
+ bench = Benchmark(reportfile,
+ verbose=verbose,
+ timer=timer,
+ warp=warp,
+ calibration_runs=calibration_runs)
bench.rounds = rounds
- bench.load_tests(Setup, warp, limitnames, verbose)
+ bench.load_tests(Setup, limitnames=limitnames)
try:
- bench.run(verbose, cruns)
+ bench.calibrate()
+ bench.run()
except KeyboardInterrupt:
print
print '*** KeyboardInterrupt -- Aborting'
print
return
- bench.print_stat(compare_to)
- # ring bell
+ bench.print_header()
+ if compare_to:
+ bench.print_comparison(compare_to,
+ hidenoise=hidenoise,
+ limitnames=limitnames)
+ else:
+ bench.print_benchmark(hidenoise=hidenoise,
+ limitnames=limitnames)
+
+ # Ring bell
sys.stderr.write('\007')
if reportfile:
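Taken together, the new main() drives the Benchmark class through a small programmatic flow. A hedged sketch under the same module context, with a hypothetical report file name and sample values for the constructor arguments shown in the diff above; the pickle step mirrors the -s/-c handling:

# Sketch only: assumes pybench's Benchmark class and Setup module
# as used in the diff above; 'demo.pybench' is a hypothetical file.
import pickle
import Setup
from pybench import Benchmark

bench = Benchmark('demo.pybench', verbose=0, timer='time.time',
                  warp=10, calibration_runs=20)
bench.rounds = 60000
bench.load_tests(Setup, limitnames=None)
bench.calibrate()
bench.run()
bench.print_header()
bench.print_benchmark(hidenoise=0, limitnames=None)

# Benchmark reports are plain pickles, as the -s/-c code reads them:
f = open('demo.pybench', 'wb')
pickle.dump(bench, f)
f.close()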
diff --git a/Tools/pybench/systimes.py b/Tools/pybench/systimes.py
index 79d249f..bf07e36 100644
--- a/Tools/pybench/systimes.py
+++ b/Tools/pybench/systimes.py
@@ -16,7 +16,7 @@
platforms.
If no supported timing methods based on process time can be found,
- the module reverts to the highest resolution wall-time timer
+ the module reverts to the highest resolution wall-clock timer
instead. The system time part will then always be 0.0.
The module exports one public API:
@@ -52,8 +52,8 @@ USE_CTYPES_GETPROCESSTIMES = 'ctypes GetProcessTimes() wrapper'
USE_WIN32PROCESS_GETPROCESSTIMES = 'win32process.GetProcessTimes()'
USE_RESOURCE_GETRUSAGE = 'resource.getrusage()'
USE_PROCESS_TIME_CLOCK = 'time.clock() (process time)'
-USE_WALL_TIME_CLOCK = 'time.clock() (wall-time)'
-USE_WALL_TIME_TIME = 'time.time() (wall-time)'
+USE_WALL_TIME_CLOCK = 'time.clock() (wall-clock)'
+USE_WALL_TIME_TIME = 'time.time() (wall-clock)'
if sys.platform[:3] == 'win':
# Windows platform
@@ -63,7 +63,7 @@ if sys.platform[:3] == 'win':
try:
import ctypes
except ImportError:
- # Use the wall-time implementation time.clock(), since this
+ # Use the wall-clock implementation time.clock(), since this
# is the highest resolution clock available on Windows
SYSTIMES_IMPLEMENTATION = USE_WALL_TIME_CLOCK
else:
@@ -91,7 +91,7 @@ if SYSTIMES_IMPLEMENTATION is None:
# time)
SYSTIMES_IMPLEMENTATION = USE_PROCESS_TIME_CLOCK
else:
- # Use wall-time implementation time.time() since this provides
+ # Use wall-clock implementation time.time() since this provides
# the highest resolution clock on most systems
SYSTIMES_IMPLEMENTATION = USE_WALL_TIME_TIME
@@ -103,24 +103,27 @@ def getrusage_systimes():
def process_time_clock_systimes():
return (time.clock(), 0.0)
-def wall_time_clock_systimes():
+def wall_clock_clock_systimes():
return (time.clock(), 0.0)
-def wall_time_time_systimes():
+def wall_clock_time_systimes():
return (time.time(), 0.0)
# Number of clock ticks per second for the values returned
# by GetProcessTimes() on Windows.
#
-# Note: Ticks returned by GetProcessTimes() are micro-seconds on
-# Windows XP (though the docs say 100ns intervals)
-WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 10e6
+# Note: Ticks returned by GetProcessTimes() are 100ns intervals on
+# Windows XP. However, the process times are only updated with every
+# clock tick, and the clock tick frequency is somewhat lower: between
+# 10ms and 15ms, depending on the OS version. Even worse, the process
+# time seems to be allocated to the process currently running when the
+# clock interrupt arrives, i.e. it is possible that the current time
+# slice gets accounted to a different process.
+
+WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1e7
def win32process_getprocesstimes_systimes():
d = win32process.GetProcessTimes(win32process.GetCurrentProcess())
- # Note: I'm not sure whether KernelTime on Windows is the same as
- # system time on Unix - I've yet to see a non-zero value for
- # KernelTime on Windows.
return (d['UserTime'] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND,
d['KernelTime'] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND)
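The corrected constant follows directly from the tick size: one 100ns tick is 1e-7 seconds, so a second holds 1e7 ticks. A short worked sketch with a hypothetical tick count:

# One GetProcessTimes() tick = 100ns = 1e-7s, hence 1e7 ticks/second.
WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1e7

user_ticks = 15625000.0  # hypothetical UserTime value in ticks
print user_ticks / WIN32_PROCESS_TIMES_TICKS_PER_SECOND  # prints 1.5625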
@@ -149,10 +152,10 @@ elif SYSTIMES_IMPLEMENTATION is USE_PROCESS_TIME_CLOCK:
systimes = process_time_clock_systimes
elif SYSTIMES_IMPLEMENTATION is USE_WALL_TIME_CLOCK:
- systimes = wall_time_clock_systimes
+ systimes = wall_clock_clock_systimes
elif SYSTIMES_IMPLEMENTATION is USE_WALL_TIME_TIME:
- systimes = wall_time_time_systimes
+ systimes = wall_clock_time_systimes
elif SYSTIMES_IMPLEMENTATION is USE_WIN32PROCESS_GETPROCESSTIMES:
systimes = win32process_getprocesstimes_systimes
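Whichever branch wins, the exported systimes() returns a (user, system) tuple; per the module docstring above, wall-clock fallbacks always report 0.0 for the system part. A minimal usage sketch:

# Minimal sketch: inspect which implementation was selected and
# read the (user, system) tuple it produces.
import systimes

print systimes.SYSTIMES_IMPLEMENTATION
user, system = systimes.systimes()
print 'user: %f, system: %f' % (user, system)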
@@ -163,6 +166,17 @@ elif SYSTIMES_IMPLEMENTATION is USE_CTYPES_GETPROCESSTIMES:
else:
raise TypeError('no suitable systimes() implementation found')
+def processtime():
+
+ """ Return the total time spent on the process.
+
+ This is the sum of user and system time as returned by
+ systimes().
+
+ """
+ user, system = systimes()
+ return user + system
+
### Testing
def some_workload():
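A usage sketch for the new processtime() helper; the busy loop below is a hypothetical stand-in for the module's own test workload:

# Hedged sketch: time a CPU-bound workload with processtime(),
# which sums the user and system parts of systimes().
import systimes

def busy_loop():
    # Hypothetical stand-in workload.
    x = 0
    for i in xrange(1000000):
        x = x + i
    return x

t0 = systimes.processtime()
busy_loop()
t1 = systimes.processtime()
print 'process time used: %.6f seconds' % (t1 - t0)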