Diffstat (limited to 'tcllib/modules/bench')
-rw-r--r--  tcllib/modules/bench/ChangeLog             541
-rw-r--r--  tcllib/modules/bench/bench.man             296
-rw-r--r--  tcllib/modules/bench/bench.tcl             553
-rw-r--r--  tcllib/modules/bench/bench_intro.man        91
-rw-r--r--  tcllib/modules/bench/bench_lang_intro.man  153
-rw-r--r--  tcllib/modules/bench/bench_lang_spec.man   132
-rw-r--r--  tcllib/modules/bench/bench_read.man         65
-rw-r--r--  tcllib/modules/bench/bench_read.tcl        162
-rw-r--r--  tcllib/modules/bench/bench_wcsv.man         54
-rw-r--r--  tcllib/modules/bench/bench_wcsv.tcl        101
-rw-r--r--  tcllib/modules/bench/bench_wtext.man        55
-rw-r--r--  tcllib/modules/bench/bench_wtext.tcl       165
-rw-r--r--  tcllib/modules/bench/libbench.tcl          561
-rw-r--r--  tcllib/modules/bench/pkgIndex.tcl            7
14 files changed, 2936 insertions, 0 deletions
diff --git a/tcllib/modules/bench/ChangeLog b/tcllib/modules/bench/ChangeLog
new file mode 100644
index 0000000..7693aad
--- /dev/null
+++ b/tcllib/modules/bench/ChangeLog
@@ -0,0 +1,541 @@
+2013-02-01 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.15 ========================
+ *
+
+2011-12-13 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.14 ========================
+ *
+
+2011-01-24 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.13 ========================
+ *
+
+2009-12-07 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.12 ========================
+ *
+
+2008-12-12 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.11.1 ========================
+ *
+
+2008-10-16 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.11 ========================
+ *
+
+2008-10-07 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * bench.tcl: Bumped to version 0.4 for 2008-06-30 commit by
+ * bench.man: myself. Was a major rewrite of the internals,
+ * pkgIndex.tcl: should have been bumped then.
+
+2008-06-30 Andreas Kupries <andreask@activestate.com>
+
+ * bench.tcl (::bench::Invoke): Reworked the protocol between
+ * libbench.tcl: manager and execution system to allow for
+ incremental returning of results and proper progress
+ feedback. This enables users to see how a benchmark progresses,
+ and to provide their own notes about conditions and decisions as
+ well.
+
+2007-09-12 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.10 ========================
+ *
+
+2007-08-23 Andreas Kupries <andreask@activestate.com>
+
+ * bench.tcl: Fixed problem with the glob patterns used to query
+ * bench.man: the data array, was not matching the list quoting
+ * pkgIndex.tcl: used to generate the keys. Was fine while we had
+ no keys with spaces in the interp reference, but with -pkgdir
+ this is possible, and broke. Version bumped to 0.3.1. Reported
+ by Rolf Ade.
+
+2007-08-21 Andreas Kupries <andreask@activestate.com>
+
+ * bench.tcl (::bench::run): Extended with a new option -pkgdir
+ * bench.man: helping in the handling of multiple versions of a
+ * pkgIndex.tcl: package to benchmark, as suggested and first
+	* libbench.tcl: implemented by Rolf Ade. Moved invocation of
+ libbench to a separate helper procedure. Extended the
+ documentation. Version bumped to 0.3.
+
+2007-04-10 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * bench_lang_intro.man: New files, documentation of the
+ * bench_lang_spec.man: benchmark declaration language, and
+ * bench_read.man: of the supporting packages.
+ * bench_wcsv.man:
+ * bench_wtext.man:
+
+2007-01-22 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * libbench.tcl: Added new benchmark options -ipre, -ipost. Per
+ * pkgIndex.tcl: iteration pre/post scripts, untimed. Version of
+	* bench.tcl: package 'bench' is now 0.2.
+
+2007-01-21 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * bench_wcsv.tcl: Fixed sorting of descriptions in text and
+ * bench_wtext.tcl: csv output. Version is now 0.1.2.
+
+2007-01-18 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * bench.tcl (bench::norm): Removed 'split ,' from code, was left
+ * pkgIndex.tcl: in wrongly after the rewrite of the raw
+ representation. The relevant key is a list which we can and have
+ to use directly, no split required. The fixed bug caused the
+ normalization to fail and return the empty string for all
+ cells. Version number bumped to 0.1.1 for this.
+
+2006-10-03 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ *
+ * Released and tagged Tcllib 1.9 ========================
+ *
+
+2006-06-13 Andreas Kupries <andreask@activestate.com>
+
+ * bench_read.tcl: Rewrite the internal raw representation, use
+ * bench.tcl: lists as array keys, easier to handle, no
+ * bench_wcsv.tcl: splitting, and quoting is done automatically
+ * bench_wtext.tcl: by Tcl itself. See [Tcllib SF Bug 1414159].
+
+2006-05-27 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * bench_read.tcl: Fixed typo "-error" --> "-code error".
+
+2006-01-25 Michael Schlenker <mic42@users.sourceforge.net>
+
+ * bench_wcsv.tcl : Fixed bug when trying to format benchs on windows.
+ * bench_wtext.tcl: The interpreter path was truncated due to a misuse of
+ split and lindex, where string first was appropriate.
+
+2005-10-27 Andreas Kupries <andreask@activestate.com>
+
+ * bench.tcl (::bench::norm): Fixed bug leaving time data in
+ non-reference column when the reference is empty. To the unwary
+ the result looks like factors, which have ridiculous values. Now
+ the row is shown, but empty.
+
+2005-10-21 Andreas Kupries <andreask@activestate.com>
+
+ * bench.tcl (::bench::del): New command. Removal of a column from
+ benchmark data.
+
+2005-10-18 Andreas Kupries <andreask@activestate.com>
+
+ * bench_read.tcl: New file. Command and package to read benchmark
+ data in the text, csv, or raw formats.
+
+ * bench.tcl (::bench::edit): New command. Changes specified
+ interpreter path to user specified value. Needed if we wish to
+ merge data coming from the same interpreter, for different
+ revisions of the package under test.
+
+2005-10-17 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * Package derived from the original code added to Tcllib.
+
+2004-12-29 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/parse.bench: ensure file size is consistent between interp
+ runs with formatted BOUND string.
+
+2004-12-27 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * doc/runbench.1: fix doc for -throwerrors [Bug 1091766]
+
+ * runbench.tcl (getInterps): use exec << instead of echo [Bug 1091764]
+
+2004-12-24 Miguel Sofer <msofer@users.sf.net>
+
+ * tcl/namespace.bench: new benchmark, measures the cost of calling
+ the same global command alternating different namespaces.
+
+2004-12-20 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/array.bench (new): array hash benchmarks
+
+ * tcl/file.bench: fix checkall to operate for tclsh <=8.0
+
+ * tcl/string.bench: fix string match -nocase for tclsh <=8.2
+
+ * runbench.tcl (convertVersion): add -globtclsh -globwish file
+ path glob opts (tclsh* and wish* by default).
+ Normalize soft-links.
+
+ * normbench.tcl (normalize-text): harden time norm check
+
+2003-08-06 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * normbench.tcl (normalize): correct normalization of new-style
+ stats where TclX data is present in output.
+
+2003-02-11 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/list.bench: lsearch -regexp benchmarks
+
+ * tcl/file.bench: updated with more benchmarks
+
+2003-02-08 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/startup.bench: replaced by file benchmarks
+ * tcl/file.bench: file benchmarks
+
+2002-11-13 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/regexp.bench: added anchored re tests
+
+	* tcl/klist.bench: allow method filter from command line invocation.
+
+ * tcl/list.bench: add lset benchmarks
+
+ * tcl/md5.bench: correct to work with pre-8.2 interps
+
+ * tcl/string.bench: add string growth, remove split benchmarks
+ * tcl/split.bench: more split benchmarks
+
+ * runbench.tcl: allow tclsh*/wish* (no version required)
+
+2002-07-24 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/base64.bench: added the current code from tcllib.
+
+2002-06-20 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/read.bench: modified to actually "use" the data being read
+ by setting a local variable.
+
+2002-06-20 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/md5.bench: added the faster implementation from tcllib
+
+2002-06-12 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/catch.bench: corrected use of string map in toplevel code
+
+ * tcl/expr.bench: corrected use of string repeat in toplevel code
+
+ * tcl/sha1.bench: correct wideint problem for 8.4 in sha1DF
+
+ * tcl/string.bench: corrected string equality checks to use
+ different variables (objects)
+
+ * tcl/gccont.bench: new benchmark that does some bioinformatics
+ manipulation on dna sequences
+
+2002-06-12 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/klist.bench:
+ * tcl/heapsort.bench: added algorithms using [lset]
+
+2002-06-11 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/regexp.bench: made the bench access the match variables, to
+ benchmark also the read access to them.
+ * tcl/vars.bench: added a "VAR ref local" benchmark, to be able to
+ compare the access times of linked variables to those of local
+ variables.
+
+2002-05-29 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/parse.bench: more complex string parsing benchmark (8.0+)
+
+ * tcl/encoding.bench: start of some encoding benchmarks (8.1+)
+
+ * tcl/expr.bench: added ==/!= expr benchmarks
+
+ * tcl/string.bench: corrected the equality benchmarks to not use
+ the same object unless specified.
+
+2002-04-25 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * runbench.tcl:
+ * libbench.tcl: added ability to set # threads to use if Thread
+ package can be loaded.
+ improved -result error checking
+
+ * tcl/base64.bench: verify result of encode/decode
+
+ * tcl/proc.bench: added empty proc benchmarks
+
+ * tcl/list.bench: added LIST concat benchmarks (hartweg)
+
+2002-03-27 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/catch.bench: modified the catch benchmarks to allow
+ comparison with catching non-error exceptions; added new
+ "CATCH except" benchmark.
+
+2002-03-15 Miguel Sofer <msofer@users.sourceforge.net>
+
+ * tcl/catch.bench: added benchmark for catch in a body with many
+ nested exception ranges.
+
+2002-02-22 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/loops.bench: added while 1 benchmark
+
+ * tcl/conditional.bench: added if 1/0 benchmark
+
+2002-02-07 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * runbench.tcl: noted thread option.
+
+ * libbench.tcl: added ability to check result of test
+
+ * tcl/base64.bench: stripped arg stuff out of code to make it work
+ in 8.0 as well.
+
+ * tcl/list.bench: corrected list-2.11 to append to simple var.
+
+ * tcl/map.bench: added http mapReply & simple regsubs benchmarks
+
+ * tcl/read.bench: commented out new changing buffersize benchmarks
+ as they do weird things to various interp versions.
+
+ * tcl/regexp.bench: added static regexp benchmarks
+
+ * tcl/string.bench: added string first utf benchmarks
+
+ * tcl/vars.bench: corrected namespace usage for pre-8 interps.
+
+2001-09-25 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/string.bench: added exact string match benchmark and fixed
+ other string match benchmarks
+
+ * tcl/list.bench: added simple list benchmark
+
+ * tcl/vars.bench: added mset benchmarks
+
+ * libbench.tcl:
+ * runbench.tcl: added support for -threads option to try and load
+ a thread package and run separate benchmark files simultaneously.
+
+2001-08-29 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/methods.bench:
+ * tcl/vars.bench: added some more benchmarks
+
+2001-07-18 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * tcl/read.bench: new "read" benchmarks detailing the effect of
+ the buffersize on IO performance. Created to check out the
+ performance patch associated with SF item #427196.
+
+2001-06-19 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/binary.bench: new "binary" benchmarks
+
+ * tcl/string.bench: more random split benchmarks
+
+2001-06-03 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * libbench.tcl:
+ * runbench.tcl: reduced default iterations to 1000 (still quite
+ sufficient to remove random noise).
+
+2001-05-31 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/conditional.bench: added switch/if comparison bench.
+
+ * tcl/base64.bench: new benchmark with base64 code (from tcllib).
+
+ * tcl/md5.bench: new benchmark with Libes' md5 (from tcllib).
+
+ * tcl/sha1.bench: new benchmark with a couple of pure tcl sha1
+ routines (Libes and Fellows).
+
+2001-05-29 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * doc/libbench.n:
+ * doc/runbench.1:
+ * doc/normbench.1: Added documentation of benchmark library and
+ applications.
+
+ * doc: Added documentation directory.
+
+2001-05-22 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * runbench.tcl: corrected error for reporting errors in sourced files
+
+ * tcl/fcopy.bench: made use of bench_tmpfile for more accurate
+ data (not skewed by network).
+
+ * libbench.tcl (bench_tmpfile): correctly allow multiple calls to
+ bench_tmpfile within one file.
+
+ * normbench.tcl: new file that allows for post-process
+ normalization of the benchmark data.
+ Corrected last minute code checkin bug.
+ Added support for moving left (to higher versions) to normalize
+ when the requested version returned non-double data.
+
+ * tcl/libbench.tcl:
+ * tcl/runbench.tcl: changed -iterations to be a maximum number for
+ timings, to override any larger number the benchmark may set for
+ itself.
+ Rearranged result format of benchmarks to return data by benchmark
+ description. Benchmarks are now always returned in alphabetical
+ order of the benchmark description.
+ Changed benchmarks to rerun the interpreter per benchmark file
+ instead of sourcing all files into the same interpreter. This
+ reduces any skew related to excessive mem usage or other factors
+ that may arise for one benchmark file.
+ Changed midpoint numbers to time elapsed calculation.
+ Added -normalize option that post-processes the time information
+ to normalize against one version as a baseline.
+ Changed -errors <bool> to -throwerrors with no arg, and changed
+ the default to not throw errors in benchmark files.
+ Added version string to verbose run info.
+
+ * tcl/klist.bench: added support for <8.0 to all benchmarks except
+ shuffle0, with notably reduced default run iters due to extreme
+ slowness of <8.0 interps for these tasks.
+
+ * tcl/string.bench:
+ * tcl/regexp.bench: fixed incorrect str-repeat replacement function
+
+2001-05-18 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/string.bench: added <8.0 compatible rev-recursive benchmark,
+ fixed non-octal escape in ustring instantiation.
+
+ * tcl/wordcount.bench: added <8.1 compatible benchmarks
+
+ * tcl/methods.bench: return for interps <8.0
+
+2001-05-19 Andreas Kupries <andreas_kupries@users.sourceforge.net>
+
+ * tcl/conditional.bench: Changed some descriptions to make them
+ unique and matching to the code.
+
+ * tcl/fcopy.bench: New benchmarks for the [fcopy] command
+ (unsupported0 in older versions of the core).
+
+2001-05-16 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/string.bench: added static string length benchmarks
+
+ * tcl/wordcount.in:
+ * tcl/wordcount.bench: wordcount benchmarks
+
+ * tcl/heapsort.bench: new file with heapsort benchmark
+ * tcl/string.bench:
+ * tcl/matrix.bench:
+ * tcl/regexp.bench: extended benchmarks
+
+2001-05-11 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/string.bench: clarified string reverse benchmarks, added
+ more to the string compare benchmarks.
+
+ * tcl/matrix.bench: some new matrix benchmarks. Basically a seed
+ file looking for more. procs courtesy Sofer.
+
+ * tcl/list.bench: added a list-iter benchmark
+
+ * tcl/klist.bench: reduced default iters in klist.bench. Accuracy
+ seems about the same without the wait...
+
+ * libbench.tcl:
+ * runbench.tcl: added support for -rmatch option (regexp match of
+ benchmark description).
+ Added MIDPOINT verbose puts for interim time notes.
+
+2001-04-11 Jeff Hobbs <jeffh@ActiveState.com>
+
+ * tcl/klist.bench: added shuffle5* from wiki.
+
+2001-03-28 Jeff Hobbs <jeffh@activestate.com>
+
+ * tcl/string.bench: fixed str-first proc that had bogus code in it.
+ added more split benchmarks for dkf's split improvement in 8.4.
+
+ * tk/canvas.bench: expanded item draw benchmarks
+
+2001-03-23 <jeffh@activestate.com>
+
+ * tk/canvas.bench: added simple item draw benchmarks
+
+2001-03-15 <jeffh@activestate.com>
+
+ * tcl/klist.bench: improved non-tclbench data output.
+
+ * runbench.tcl: added more error capturing.
+
+ * tcl/string.bench: fixed calls to string repeat to work with
+ <8.1.1 interps.
+
+ * tcl/klist.bench: new file to benchmark various list shuffling
+ techniques (from wiki).
+ * tcl/methods.bench: new file to benchmark various method
+ invocation speeds (petasis).
+
+2000-10-19 Jeff Hobbs <hobbs@ajubasolutions.com>
+
+ * tcl/string.bench (str-append-2): added more append tests
+
+2000-08-30 Jeff Hobbs <hobbs@scriptics.com>
+
+ * tcl/string.bench: made string repeat calls compatible with
+ pre-8.1.1 interpreters.
+
+ * libbench.tcl (bench_tmpfile): add env to global list
+
+2000-08-29 Eric Melski <ericm@ajubasolutions.com>
+
+ * tcl/string.bench: Extended string append benchmarks to exploit
+ new growth algorithm for string objects in Tcl 8.4a2.
+
+2000-05-31 Jeff Hobbs <hobbs@scriptics.com>
+
+ * runbench.tcl: new options -errors (passed to libbench), -verbose
+ (by default we are now quieter on output), -output <text|list|csv>
+ (different output types - csv is char-sep-value for Excel).
+ Added start/finish times (in -verbose mode).
+ * libbench.tcl: libbench now takes -option switches for
+ flexibility, options for -errors BOOL (error suppression), -interp
+ NAME (to specify interp), -match PATTERN (glob pattern to filter
+ tests by desc), -iters NUM (default number of iters to run).
+ Reorganized how data is returned to runbench master.
+
+ * tk/entry.bench (new):
+ * tk/canvas.bench (new): new tests for widget creation, config
+
+ * tcl/array.bench (removed):
+ * tcl/vars.bench: merged array.bench tests into VAR
+
+	* tcl/map.bench: fixed for compatibility with Tcl7.4-
+
+2000-05-25 Jeff Hobbs <hobbs@scriptics.com>
+
+ * runbench.tcl: added -match, -notcl, -notk options, restructured
+ startup sequence.
+
+ * libbench.tcl: added ability to return string values from bench
+ tests and support for filtering tests to run.
+
+ * tcl/string.bench: moved string mapping benchmarks and added more
+ string equality benchmarks
+ * tcl/map.bench: added extended string mapping benchmark
+
+ * tcl/read.bench:
+ * tcl/startup.bench:
+ * tk/startup.bench: updated code to reflect proc-oriented tmpfile
+ operations.
diff --git a/tcllib/modules/bench/bench.man b/tcllib/modules/bench/bench.man
new file mode 100644
index 0000000..fdee2cf
--- /dev/null
+++ b/tcllib/modules/bench/bench.man
@@ -0,0 +1,296 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench n 0.4]
+[see_also bench_intro]
+[see_also bench_lang_intro]
+[see_also bench_lang_spec]
+[see_also bench_read]
+[see_also bench_wcsv]
+[see_also bench_wtext]
+[keywords benchmark]
+[keywords merging]
+[keywords normalization]
+[keywords performance]
+[keywords testing]
+[copyright {2007-2008 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench - Processing benchmark suites}]
+[category {Benchmark tools}]
+[require Tcl 8.2]
+[require bench [opt 0.4]]
+[description]
+
+This package provides commands for the execution of benchmarks written
+in the bench language, and for the processing of results generated by
+such execution.
+
+[para]
+
+A reader interested in the bench language itself should start with the
+[term {bench language introduction}] and proceed from there to the
+formal [term {bench language specification}].
+
+[para]
+
+[section {PUBLIC API}]
+[subsection {Benchmark execution}]
+
+[list_begin definitions]
+
+[call [cmd ::bench::locate] [arg pattern] [arg paths]]
+
+This command locates Tcl interpreters and returns a list containing
+their paths. It searches them in the list of [arg paths] specified by
+the caller, using the glob [arg pattern].
+
+[para]
+
+The command resolves soft links to find the actual executables
+matching the pattern. Note that only interpreters which are marked as
+executable and are actually executable on the current platform are put
+into the result.
+
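+[para]
+
+For example, the following sketch searches two common Unix
+directories for tclsh executables (the directories are illustrative
+assumptions, not defaults of the command):
+
+[example_begin]
+set interps [lb]bench::locate tclsh* {/usr/bin /usr/local/bin}[rb]
+[example_end]
+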
+[call [cmd ::bench::run] [opt [arg "option value"]...] [arg interp_list] [arg file]...]
+
+This command executes the benchmarks declared in the set of files,
+once per Tcl interpreter specified via the [arg interp_list], and per
+the configuration specified by the options, and then returns the
+accumulated timing results. The format of this result is described in
+section [sectref {Result format}].
+
+[para]
+
+It is assumed that the contents of the files are written in the bench
+language.
+
+[para]
+
+The available options are
+
+[list_begin options]
+[opt_def -errors [arg flag]]
+
+The argument is a boolean value. If set, errors in benchmarks are
+propagated to the command, aborting benchmark execution. Otherwise
+they are recorded in the timing result via a special result code. The
+default is to propagate and abort.
+
+[opt_def -threads [arg n]]
+
+The argument is a non-negative integer value declaring the number of
+threads to use while executing the benchmarks. The default value is
+[const 0], to not use threads.
+
+[opt_def -match [arg pattern]]
+
+The argument is a glob pattern. Only benchmarks whose description
+matches the pattern are executed. The default is the empty string,
+which executes all benchmarks.
+
+[opt_def -rmatch [arg pattern]]
+
+The argument is a regular expression pattern. Only benchmarks whose
+description matches the pattern are executed. The default is the
+empty string, which executes all benchmarks.
+
+[opt_def -iters [arg n]]
+
+The argument is a positive integer, the maximal number of
+iterations for any benchmark. The default is [const 1000]. Individual
+benchmarks can override this.
+
+[opt_def -pkgdir [arg path]]
+
+The argument is a path to an existing, readable directory. Multiple
+paths can be specified, simply use the option multiple times, each
+time with one of the paths to use.
+
+[para]
+
+If no paths are specified each interpreter is invoked once, with an
+unmodified [var auto_path].
+If one or more paths are specified, say [var N], each of the specified
+interpreters will be invoked [var N] times, once per specified
+path. The chosen path is put at the front of the interpreter's
+[var auto_path], thus allowing it to find a specific version of a
+package.
+
+[para]
+
+In this way the use of [option -pkgdir] allows the user to benchmark
+several different versions of a package, against one or more interpreters.
+
+[para]
+
+[emph Note:] The empty string is allowed as a path; it causes the system
+to run the specified interpreters with an unmodified [var auto_path],
+for the case where the package in question is available there as well.
+
+[list_end]
+[para]
+
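+A typical invocation, sketched under the assumption that a benchmark
+file [file foo.bench] exists in the current directory and that tclsh
+installations live under [file /usr/local/bin]:
+
+[example_begin]
+set interps [lb]bench::locate tclsh* {/usr/local/bin}[rb]
+set data    [lb]bench::run -iters 5000 [lb]bench::versions $interps[rb] foo.bench[rb]
+[example_end]
+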
+[call [cmd ::bench::versions] [arg interp_list]]
+
+This command takes a list of Tcl interpreters, identified by their
+path, and returns a dictionary mapping from the interpreters to their
+versions. Interpreters which are not actually executable, or fail when
+interrogated, are not put into the result. I.e. the result may
+contain fewer interpreters than the input list.
+
+[para]
+
+The command uses the builtin command [cmd {info patchlevel}] to determine
+the version of each interpreter.
+
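+[para]
+
+A hypothetical call (paths and versions are illustrative). Note that
+the result is ordered by version, highest first:
+
+[example_begin]
+% bench::versions {/usr/bin/tclsh8.4 /usr/bin/tclsh8.5}
+/usr/bin/tclsh8.5 8.5.9 /usr/bin/tclsh8.4 8.4.19
+[example_end]
+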
+[list_end]
+
+[subsection {Result manipulation}]
+
+[list_begin definitions]
+
+[call [cmd ::bench::del] [arg bench_result] [arg column]]
+
+This command removes a column, i.e. all benchmark results for a
+specific Tcl interpreter, from the specified benchmark result and
+returns the modified result.
+
+[para]
+The benchmark results are in the format described in section
+[sectref {Result format}].
+[para]
+The column is identified by an integer number.
+
+[call [cmd ::bench::edit] [arg bench_result] [arg column] [arg newvalue]]
+
+This command renames a column in the specified benchmark result and
+returns the modified result. This means that the path of the Tcl
+interpreter in the identified column is changed to an arbitrary
+string.
+
+[para]
+The benchmark results are in the format described in section
+[sectref {Result format}].
+[para]
+The column is identified by an integer number.
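+[para]
+
+For example, to relabel the second column (a sketch; the new label is
+an arbitrary string):
+
+[example_begin]
+set data [lb]bench::edit $data 2 "tclsh8.5 (patched)"[rb]
+[example_end]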
+
+[call [cmd ::bench::merge] [arg bench_result]...]
+
+This command takes one or more benchmark results, merges them into
+one big result, and returns that as its result.
+
+[para]
+All benchmark results are in the format described in section
+[sectref {Result format}].
+
+[call [cmd ::bench::norm] [arg bench_result] [arg column]]
+
+This command normalizes the timing results in the specified benchmark
+result and returns the modified result. This means that the cell
+values are not times anymore, but factors showing how much faster or
+slower the execution was relative to the baseline.
+
+[para]
+
+The baseline against which the command normalizes are the timing
+results in the chosen column. This means that after the normalization
+the values in this column are all [const 1], as these benchmarks are
+neither faster nor slower than the baseline.
+
+[para]
+
+A factor less than [const 1] indicates a benchmark which was faster
+than the baseline, whereas a factor greater than [const 1] indicates a
+slower execution.
+
+[para]
+The benchmark results are in the format described in section
+[sectref {Result format}].
+[para]
+The column is identified by an integer number.
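+[para]
+
+For example, to express all timings as factors relative to the first
+interpreter column (a sketch):
+
+[example_begin]
+set normalized [lb]bench::norm $data 1[rb]
+[example_end]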
+
+[call [cmd ::bench::out::raw] [arg bench_result]]
+
+This command formats the specified benchmark result for output to a
+file, socket, etc. This specific command does no formatting at all;
+it passes the input through unchanged.
+
+[para]
+
+For other formatting styles see the packages [package bench::out::text]
+and [package bench::out::csv] which provide commands to format
+benchmark results for human consumption, or as CSV data importable by
+spreadsheets, respectively.
+
+[para]
+
+Complementarily, to read benchmark results from files, sockets, etc.,
+see the package [package bench::in] and the commands provided by it.
+
+[list_end]
+
+[subsection {Result format}]
+
+After the execution of a set of benchmarks the raw result returned by
+this package is a Tcl dictionary containing all the relevant
+information.
+
+The dictionary is a compact representation, i.e. serialization, of a
+2-dimensional table which has Tcl interpreters as columns and
+benchmarks as rows. The cells of the table contain the timing
+results.
+
+The Tcl interpreters / columns are identified by their paths.
+The benchmarks / rows are identified by their description.
+
+[para]
+
+The possible keys are all valid Tcl lists of two or three elements and
+have one of the following forms:
+
+[list_begin definitions]
+
+[def {{interp *}}]
+
+The set of keys matching this glob pattern capture the information
+about all the Tcl interpreters used to run the benchmarks. The second
+element of the key is the path to the interpreter.
+
+[para]
+
+The associated value is the version of the Tcl interpreter.
+
+[def {{desc *}}]
+
+The set of keys matching this glob pattern capture the information
+about all the benchmarks found in the executed benchmark suite. The
+second element of the key is the description of the benchmark, which
+has to be unique.
+
+[para]
+
+The associated value is irrelevant, and set to the empty string.
+
+[def {{usec * *}}]
+
+The set of keys matching this glob pattern capture the performance
+information, i.e. timing results. The second element of the key is the
+description of the benchmark, the third element the path of the Tcl
+interpreter which was used to run it.
+
+[para]
+
+The associated value is either one of several special result codes, or
+the time it took to execute the benchmark, in microseconds. The
+possible special result codes are
+
+[list_begin definitions]
+[def ERR]
+Benchmark could not be executed, failed with a Tcl error.
+
+[def BAD_RES]
+The benchmark could be executed, however the result from its body did
+not match the declared expectations.
+
+[list_end]
+[list_end]
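+
+[para]
+
+A small result, covering one interpreter and one benchmark, might
+look like this (path, description, and timing are illustrative):
+
+[example_begin]
+{interp /usr/bin/tclsh8.5}          8.5.9
+{desc {VAR set}}                    {}
+{usec {VAR set} /usr/bin/tclsh8.5}  0.9
+[example_end]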
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench.tcl b/tcllib/modules/bench/bench.tcl
new file mode 100644
index 0000000..461afbc
--- /dev/null
+++ b/tcllib/modules/bench/bench.tcl
@@ -0,0 +1,553 @@
+# bench.tcl --
+#
+# Management of benchmarks.
+#
+# Copyright (c) 2005-2008 by Andreas Kupries <andreas_kupries@users.sourceforge.net>
+# library derived from runbench.tcl application (C) Jeff Hobbs.
+#
+# See the file "license.terms" for information on usage and redistribution
+# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+#
+# RCS: @(#) $Id: bench.tcl,v 1.14 2008/10/08 03:30:48 andreas_kupries Exp $
+
+# ### ### ### ######### ######### ######### ###########################
+## Requisites - Packages and namespace for the commands and data.
+
+package require Tcl 8.2
+package require logger
+package require csv
+package require struct::matrix
+package require report
+
+namespace eval ::bench {}
+namespace eval ::bench::out {}
+
+# @mdgen OWNER: libbench.tcl
+
+# ### ### ### ######### ######### ######### ###########################
+## Public API - Benchmark execution
+
+# ::bench::run --
+#
+# Run a series of benchmarks.
+#
+# Arguments:
+# ...
+#
+# Results:
+# Dictionary.
+
+proc ::bench::run {args} {
+ log::debug [linsert $args 0 ::bench::run]
+
+ # -errors 0|1 default 1, propagate errors in benchmarks
+ # -threads <num> default 0, no threads, #threads to use
+ # -match <pattern> only run tests matching this pattern
+ # -rmatch <pattern> only run tests matching this pattern
+ # -iters <num> default 1000, max#iterations for any benchmark
+    # -pkgdir <dir>    Defaults to nothing, regular bench invocation.
+
+ # interps - dict (path -> version)
+ # files - list (of files)
+
+ # Process arguments ......................................
+    # Defaults first, then overrides by the user
+
+ set errors 1 ; # Propagate errors
+ set threads 0 ; # Do not use threads
+ set match {} ; # Do not exclude benchmarks based on glob pattern
+ set rmatch {} ; # Do not exclude benchmarks based on regex pattern
+ set iters 1000 ; # Limit #iterations for any benchmark
+ set pkgdirs {} ; # List of dirs to put in front of auto_path in the
+ # bench interpreters. Default: nothing.
+
+ while {[string match "-*" [set opt [lindex $args 0]]]} {
+ set val [lindex $args 1]
+ switch -exact -- $opt {
+ -errors {
+ if {![string is boolean -strict $val]} {
+ return -code error "Expected boolean, got \"$val\""
+ }
+ set errors $val
+ }
+ -threads {
+ if {![string is int -strict $val] || ($val < 0)} {
+ return -code error "Expected int >= 0, got \"$val\""
+ }
+ set threads [lindex $args 1]
+ }
+ -match {
+ set match [lindex $args 1]
+ }
+ -rmatch {
+ set rmatch [lindex $args 1]
+ }
+ -iters {
+ if {![string is int -strict $val] || ($val <= 0)} {
+ return -code error "Expected int > 0, got \"$val\""
+ }
+ set iters [lindex $args 1]
+ }
+ -pkgdir {
+ CheckPkgDirArg $val
+ lappend pkgdirs $val
+ }
+ default {
+		return -code error "Unknown option \"$opt\", should be -errors, -threads, -match, -rmatch, -iters, or -pkgdir"
+ }
+ }
+ set args [lrange $args 2 end]
+ }
+ if {[llength $args] != 2} {
+ return -code error "wrong\#args, should be: ?options? interp files"
+ }
+ foreach {interps files} $args break
+
+ # Run the benchmarks .....................................
+
+ array set DATA {}
+
+ if {![llength $pkgdirs]} {
+ # No user specified package directories => Simple run.
+ foreach {ip ver} $interps {
+ Invoke $ip $ver {} ;# DATA etc passed via upvar.
+ }
+ } else {
+ # User specified package directories.
+ foreach {ip ver} $interps {
+ foreach pkgdir $pkgdirs {
+ Invoke $ip $ver $pkgdir ;# DATA etc passed via upvar.
+ }
+ }
+ }
+
+ # Benchmark data ... Structure, dict (key -> value)
+ #
+ # Key || Value
+ # ============ ++ =========================================
+ # interp IP -> Version. Shell IP was used to run benchmarks. IP is
+ # the path to the shell.
+ #
+ # desc DESC -> "". DESC is description of an executed benchmark.
+ #
+ # usec DESC IP -> Result. Result of benchmark DESC when run by the
+ # shell IP. Usually time in microseconds, but can be
+ # a special code as well (ERR, BAD_RES).
+ # ============ ++ =========================================
+
+ return [array get DATA]
+}
+
+# ::bench::locate --
+#
+# Locate interpreters on the pathlist, based on a pattern.
+#
+# Arguments:
+# ...
+#
+# Results:
+# List of paths.
+
+proc ::bench::locate {pattern paths} {
+ # Cache of executables already found.
+ array set var {}
+ set res {}
+
+ foreach path $paths {
+ foreach ip [glob -nocomplain [file join $path $pattern]] {
+ if {[package vsatisfies [package provide Tcl] 8.4]} {
+ set ip [file normalize $ip]
+ }
+
+ # Follow soft-links to the actual executable.
+ while {[string equal link [file type $ip]]} {
+ set link [file readlink $ip]
+ if {[string match relative [file pathtype $link]]} {
+ set ip [file join [file dirname $ip] $link]
+ } else {
+ set ip $link
+ }
+ }
+
+ if {
+ [file executable $ip] && ![info exists var($ip)]
+ } {
+ if {[catch {exec $ip << "exit"} dummy]} {
+ log::debug "$ip: $dummy"
+ continue
+ }
+ set var($ip) .
+ lappend res $ip
+ }
+ }
+ }
+
+ return $res
+}
+
+# ::bench::versions --
+#
+# Take list of interpreters, find their versions.
+# Removes all interps for which it cannot do so.
+#
+# Arguments:
+# List of interpreters (paths)
+#
+# Results:
+# dictionary: interpreter -> version.
+
+proc ::bench::versions {interps} {
+ set res {}
+ foreach ip $interps {
+ if {[catch {
+ exec $ip << {puts [info patchlevel] ; exit}
+ } patchlevel]} {
+ log::debug "$ip: $patchlevel"
+ continue
+ }
+
+ lappend res [list $patchlevel $ip]
+ }
+
+ # -uniq 8.4-ism, replaced with use of array.
+ array set tmp {}
+ set resx {}
+ foreach item [lsort -dictionary -decreasing -index 0 $res] {
+ foreach {p ip} $item break
+ if {[info exists tmp($p)]} continue
+ set tmp($p) .
+ lappend resx $ip $p
+ }
+
+ return $resx
+}
+
+# ::bench::merge --
+#
+# Take the data of several benchmark runs and merge them into
+# one data set.
+#
+# Arguments:
+# One or more data sets to merge
+#
+# Results:
+# The merged data set.
+
+proc ::bench::merge {args} {
+ if {[llength $args] == 1} {
+ return [lindex $args 0]
+ }
+
+ array set DATA {}
+ foreach data $args {
+ array set DATA $data
+ }
+ return [array get DATA]
+}
+
+# ::bench::norm --
+#
+# Normalize the time data in the dataset, using one of the
+# columns as reference.
+#
+# Arguments:
+# Data to normalize
+# Index of reference column
+#
+# Results:
+# The normalized data set.
+
+proc ::bench::norm {data col} {
+
+ if {![string is integer -strict $col]} {
+ return -code error "Ref.column: Expected integer, but got \"$col\""
+ }
+ if {$col < 1} {
+ return -code error "Ref.column out of bounds"
+ }
+
+ array set DATA $data
+ set ipkeys [array names DATA interp*]
+
+ if {$col > [llength $ipkeys]} {
+ return -code error "Ref.column out of bounds"
+ }
+ incr col -1
+ set refip [lindex [lindex [lsort -dict $ipkeys] $col] 1]
+
+ foreach key [array names DATA] {
+ if {[string match "desc*" $key]} continue
+ if {[string match "interp*" $key]} continue
+
+ foreach {_ desc ip} $key break
+ if {[string equal $ip $refip]} continue
+
+ set v $DATA($key)
+ if {![string is double -strict $v]} continue
+
+ if {![info exists DATA([list usec $desc $refip])]} {
+ # We cannot normalize, we do not keep the time value.
+ # The row will be shown, empty.
+ set DATA($key) ""
+ continue
+ }
+ set vref $DATA([list usec $desc $refip])
+
+ if {![string is double -strict $vref]} continue
+
+ set DATA($key) [expr {$v/double($vref)}]
+ }
+
+ foreach key [array names DATA [list * $refip]] {
+ if {![string is double -strict $DATA($key)]} continue
+ set DATA($key) 1
+ }
+
+ return [array get DATA]
+}
+
+# ::bench::edit --
+#
+# Change the 'path' of an interp to a user-defined value.
+#
+# Arguments:
+# Data to edit
+# Index of column to change
+# The value replacing the current path
+#
+# Results:
+# The changed data set.
+
+proc ::bench::edit {data col new} {
+
+ if {![string is integer -strict $col]} {
+ return -code error "Ref.column: Expected integer, but got \"$col\""
+ }
+ if {$col < 1} {
+ return -code error "Ref.column out of bounds"
+ }
+
+ array set DATA $data
+ set ipkeys [array names DATA interp*]
+
+ if {$col > [llength $ipkeys]} {
+ return -code error "Ref.column out of bounds"
+ }
+ incr col -1
+ set refip [lindex [lindex [lsort -dict $ipkeys] $col] 1]
+
+ if {[string equal $new $refip]} {
+ # No change, quick return
+ return $data
+ }
+
+ set refkey [list interp $refip]
+ set DATA([list interp $new]) $DATA($refkey)
+ unset DATA($refkey)
+
+ foreach key [array names DATA [list * $refip]] {
+ if {![string equal [lindex $key 0] "usec"]} continue
+ foreach {__ desc ip} $key break
+ set DATA([list usec $desc $new]) $DATA($key)
+ unset DATA($key)
+ }
+
+ return [array get DATA]
+}
+
+# ::bench::del --
+#
+# Remove the data for an interp.
+#
+# Arguments:
+# Data to edit
+# Index of column to remove
+#
+# Results:
+# The changed data set.
+
+proc ::bench::del {data col} {
+
+ if {![string is integer -strict $col]} {
+ return -code error "Ref.column: Expected integer, but got \"$col\""
+ }
+ if {$col < 1} {
+ return -code error "Ref.column out of bounds"
+ }
+
+ array set DATA $data
+ set ipkeys [array names DATA interp*]
+
+ if {$col > [llength $ipkeys]} {
+ return -code error "Ref.column out of bounds"
+ }
+ incr col -1
+ set refip [lindex [lindex [lsort -dict $ipkeys] $col] 1]
+
+ unset DATA([list interp $refip])
+
+ # Do not use 'array unset'. Keep 8.2 clean.
+ foreach key [array names DATA [list * $refip]] {
+ if {![string equal [lindex $key 0] "usec"]} continue
+ unset DATA($key)
+ }
+
+ return [array get DATA]
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Public API - Result formatting.
+
+# ::bench::out::raw --
+#
+# Format the result of a benchmark run.
+# Style: Raw data.
+#
+# Arguments:
+# DATA dict
+#
+# Results:
+# String containing the formatted DATA.
+
+proc ::bench::out::raw {data} {
+ return $data
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Internal commands
+
+proc ::bench::CheckPkgDirArg {path {expected {}}} {
+ # Allow empty string, special.
+ if {![string length $path]} return
+
+ if {![file isdirectory $path]} {
+ return -code error \
+ "The path \"$path\" is not a directory."
+ }
+ if {![file readable $path]} {
+ return -code error \
+ "The path \"$path\" is not readable."
+ }
+}
+
+proc ::bench::Invoke {ip ver pkgdir} {
+ variable self
+ # Import remainder of the current configuration/settings.
+
+ upvar 1 DATA DATA match match rmatch rmatch \
+ iters iters errors errors threads threads \
+ files files
+
+ if {[string length $pkgdir]} {
+ log::info "Benchmark $ver ($pkgdir) $ip"
+ set idstr "$ip ($pkgdir)"
+ } else {
+ log::info "Benchmark $ver $ip"
+ set idstr $ip
+ }
+
+ set DATA([list interp $idstr]) $ver
+
+ set cmd [list $ip [file join $self libbench.tcl] \
+ -match $match \
+ -rmatch $rmatch \
+ -iters $iters \
+ -interp $ip \
+ -errors $errors \
+ -threads $threads \
+ -pkgdir $pkgdir \
+ ]
+
+ # Determine elapsed time per file, logged.
+ set start [clock seconds]
+
+ array set tmp {}
+
+ if {$threads} {
+ foreach f $files { lappend cmd $f }
+ if {[catch {
+ close [Process [open |$cmd r+]]
+ } output]} {
+ if {$errors} {
+ error $::errorInfo
+ }
+ }
+ } else {
+ foreach file $files {
+ log::info [file tail $file]
+ if {[catch {
+ close [Process [open |[linsert $cmd end $file] r+]]
+ } output]} {
+ if {$errors} {
+ error $::errorInfo
+ } else {
+ continue
+ }
+ }
+ }
+ }
+
+ foreach desc [array names tmp] {
+ set DATA([list desc $desc]) {}
+ set DATA([list usec $desc $idstr]) $tmp($desc)
+ }
+
+ unset tmp
+ set elapsed [expr {[clock seconds] - $start}]
+
+ set hour [expr {$elapsed / 3600}]
+    set min  [expr {($elapsed % 3600) / 60}] ;# minutes within the hour
+ set sec [expr {$elapsed % 60}]
+ log::info " [format %.2d:%.2d:%.2d $hour $min $sec] elapsed"
+ return
+}
+
+
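+# ::bench::Process --
+#
+# Read the output of a libbench child process line by line.
+# Lines of the form "RESULT <desc> <time>" carry timing results
+# and are handled by Result; lines of the form "LOG <message>"
+# carry progress feedback and are handled by Feedback. Anything
+# else is logged verbatim.
+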
+proc ::bench::Process {pipe} {
+ while {1} {
+ if {[eof $pipe]} break
+ if {[gets $pipe line] < 0} break
+ # AK: FUTURE: Log all lines?!
+ #puts |$line|
+ set line [string trim $line]
+ if {[string equal $line ""]} continue
+
+ Result
+ Feedback
+ # Unknown lines are printed. Future: Callback?!
+ log::info $line
+ }
+ return $pipe
+}
+
+proc ::bench::Result {} {
+ upvar 1 line line
+ if {[lindex $line 0] ne "RESULT"} return
+ upvar 2 tmp tmp
+ foreach {_ desc result} $line break
+ set tmp($desc) $result
+ return -code continue
+}
+
+proc ::bench::Feedback {} {
+ upvar 1 line line
+ if {[lindex $line 0] ne "LOG"} return
+ # AK: Future - Run through callback?!
+ log::info [lindex $line 1]
+ return -code continue
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Initialize internal data structures.
+
+namespace eval ::bench {
+ variable self [file join [pwd] [file dirname [info script]]]
+
+ logger::init bench
+ logger::import -force -all -namespace log bench
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Ready to run
+
+package provide bench 0.4
diff --git a/tcllib/modules/bench/bench_intro.man b/tcllib/modules/bench/bench_intro.man
new file mode 100644
index 0000000..8ab4e03
--- /dev/null
+++ b/tcllib/modules/bench/bench_intro.man
@@ -0,0 +1,91 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench_intro n 1.0]
+[see_also bench]
+[see_also bench_lang_faq]
+[see_also bench_lang_intro]
+[see_also bench_lang_spec]
+[keywords {bench language}]
+[keywords benchmark]
+[keywords performance]
+[keywords testing]
+[copyright {2007 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench introduction}]
+[category {Benchmark tools}]
+[description]
+[para]
+
+The [term bench] (short for [emph {benchmark tools}]) is a set of
+related, yet different, entities which work together for the easy
+creation and execution of performance test suites, also known as
+benchmarks. These are
+
+[list_begin enumerated]
+[enum]
+
+A tcl based language for the declaration of test cases. A test case is
+represented by a tcl command declaring the various parts needed to
+execute it, like setup, cleanup, the commands to test, etc.
+
+[enum]
+
+A package providing the ability to execute test cases written in that
+language.
+
+[comment {
+[enum]
+In the future we will also provide an application which wraps around the package.
+}]
+[list_end]
+
+[para]
+
+Which of the more detailed documents are relevant to the reader of
+this introduction depends on their role in the benchmarking process.
+
+[para]
+
+[list_begin enumerated]
+[enum]
+
+A [term writer] of benchmarks has to understand the bench language
+itself. A beginner to bench should read the more informally written
+[term {bench language introduction}] first. Having digested this, the
+reader should find the formal [term {bench language specification}]
+understandable. A writer experienced with bench may only need this
+last document from time to time, to refresh her memory.
+
+[comment {
+[para]
+
+While a benchmark is written the [syscmd bench] application can be
+used to validate it, and after completion it also performs the
+execution of the whole benchmark suite.
+}]
+
+[enum]
+A [term user] of benchmark suites written in the [term bench] language
+has to know which tools are available for use.
+
+[comment {
+[para]
+
+The main tool is the aforementioned [syscmd bench] application
+provided by Tcllib.
+}]
+
+At the bottom level sits the package [package bench], providing the
+basic facilities to read and execute files containing benchmarks
+written in the bench language, and to manipulate benchmark results.
+
+[list_end]
+
+[section {HISTORICAL NOTES}]
+
+This module and package have been derived from Jeff Hobbs'
+[syscmd tclbench] application for the benchmarking of the Tcl core and
+its ancestor [file runbench.tcl].
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench_lang_intro.man b/tcllib/modules/bench/bench_lang_intro.man
new file mode 100644
index 0000000..c795dec
--- /dev/null
+++ b/tcllib/modules/bench/bench_lang_intro.man
@@ -0,0 +1,153 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench_lang_intro n 1.0]
+[see_also bench_intro]
+[see_also bench_lang_spec]
+[keywords {bench language}]
+[keywords benchmark]
+[keywords examples]
+[keywords performance]
+[keywords testing]
+[copyright {2007 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench language introduction}]
+[category {Benchmark tools}]
+[description]
+[para]
+
+This document is an informal introduction to version 1 of the bench
+language based on a multitude of examples. After reading this a
+benchmark writer should be ready to understand the formal
+[term {bench language specification}].
+
+[subsection Fundamentals]
+
+In the broadest terms possible the [term {bench language}] is
+essentially Tcl, plus a number of commands to support the declaration
+of benchmarks.
+
+A document written in this language is a Tcl script and has the same
+syntax.
+
+[para]
+
+[subsection {Basics}]
+
+One of the simplest benchmarks which can be written in bench is
+
+[example_begin]
+bench -desc LABEL -body {
+ set a b
+}
+[example_end]
+
+This code declares a benchmark named [const LABEL] which measures the
+time it takes to assign a value to a variable. The Tcl code doing this
+assignment is the [option -body] of the benchmark.
+
+[subsection {Pre- and postprocessing}]
+
+Our next example demonstrates how to declare [term initialization] and
+[term cleanup] code, i.e. code computing information for the use of
+the [option -body], and for releasing such resources after the
+measurement is done.
+
+They are the [option -pre]- and the [option -post]-body, respectively.
+
+[para]
+
+In our example, directly drawn from the benchmark suite of Tcllib's
+[package aes] package, the concrete initialization code constructs the
+key schedule used by the encryption command whose speed we measure,
+and the cleanup code releases any resources bound to that schedule.
+
+[example_begin]
+bench -desc "AES-${len} ECB encryption core" [option -pre] {
+ set key [lb]aes::Init ecb $k $i[rb]
+} -body {
+ aes::Encrypt $key $p
+} [option -post] {
+ aes::Final $key
+}
+[example_end]
+
+[subsection {Advanced pre- and postprocessing}]
+
+Our last example again deals with initialization and cleanup code. To
+see the difference from the regular initialization and cleanup
+discussed in the last section it is necessary to know a bit more
+about how bench actually measures the speed of the [option -body].
+
+[para]
+
+Instead of running the [option -body] just once the system actually
+executes the [option -body] several hundred times and then returns the
+average of the found execution times. This is done to remove
+environmental effects like machine load from the result as much as
+possible, with outliers canceling each other out in the average.
+
+[para]
+
+The drawback of doing things this way is that when we measure
+operations which are not idempotent we will most likely not measure
+the time for the operation we want, but that of the state(s) the
+system is in after the first iteration, a mixture of things we have
+no interest in.
+
+[para]
+
+Should we wish, for example, to measure the time it takes to include
+an element into a set, with the element not yet in the set, and the
+set having specific properties like being a shared Tcl_Obj, then the
+first iteration will measure the time for this. [emph However] all
+subsequent iterations will measure the time to include an element
+which is already in the set, and the Tcl_Obj holding the set will not
+be shared anymore either. In the end the timings taken for the several
+hundred iterations of this state will overwhelm the time taken from
+the first iteration, the only one which actually measured what we
+wanted.
+
+[para]
+
+The advanced initialization and cleanup codes, [option -ipre]- and the
+[option -ipost]-body respectively, are present to solve this very
+problem. While the regular initialization and cleanup codes are
+executed before and after the whole series of iterations the advanced
+codes are executed before and after each iteration of the body,
+without being measured themselves. This allows them to bring the
+system into the exact state the body wishes to measure.
+
+[para]
+
+Our example, directly drawn from the benchmark suite of Tcllib's
+[package struct::set] package, is for exactly the example we used
+above to demonstrate the necessity for the advanced initialization and
+cleanup. Its concrete initialization code constructs a variable
+referring to a set with specific properties (the set has a string
+representation, which is shared) affecting the speed of the inclusion
+command, and the cleanup code releases the temporary variables created
+by this initialization.
+
+[example_begin]
+bench -desc "set include, missing <SC> x$times $n" [option -ipre] {
+ set A $sx($times,$n)
+ set B $A
+} -body {
+ struct::set include A x
+} [option -ipost] {
+ unset A B
+}
+[example_end]
+
+[section {FURTHER READING}]
+
+Now that this document has been digested, the reader, assumed to be a
+[term writer] of benchmarks, should be prepared to understand the
+formal [term {bench language specification}]. That document will
+also serve as the detailed specification and cheat sheet for all
+available commands and their syntax.
+
+[para]
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench_lang_spec.man b/tcllib/modules/bench/bench_lang_spec.man
new file mode 100644
index 0000000..a9b0e14
--- /dev/null
+++ b/tcllib/modules/bench/bench_lang_spec.man
@@ -0,0 +1,132 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench_lang_spec n 1.0]
+[see_also bench_intro]
+[see_also bench_lang_intro]
+[keywords {bench language}]
+[keywords benchmark]
+[keywords performance]
+[keywords specification]
+[keywords testing]
+[copyright {2007 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench language specification}]
+[category {Benchmark tools}]
+[description]
+[para]
+
+This document specifies both names and syntax of all the commands
+which together are the bench language, version 1.
+
+As this document is intended to be a reference, the commands are
+listed in alphabetical order, and the descriptions are relatively
+short.
+
+A beginner should read the more informally written
+[term {bench language introduction}] first.
+
+[section Commands]
+[list_begin definitions]
+
+[call [cmd bench_rm] [arg path]...]
+
+This command silently removes the files specified as its arguments and
+then returns the empty string as its result.
+
+The command is [emph trusted], there is no checking if the specified
+files are outside of whatever restricted area the benchmarks are run
+in.
+
+[call [cmd bench_tmpfile]]
+
+This command returns the path to a bench specific unique temporary
+file. The uniqueness means that multiple calls will return different
+paths. While the path may exist from previous runs, the command itself
+does [emph not] create anything.
+
+[para]
+
+The base location of the temporary files is platform dependent:
+
+[list_begin definitions]
+[def {Unix, and indeterminate platform}]
+[file /tmp]
+[def Windows]
+[var \$TEMP]
+[def {Anything else}]
+The current working directory.
+[list_end]
+[para]
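+
+The commands [cmd bench_tmpfile] and [cmd bench_rm] are typically
+used together; a sketch:
+
+[example_begin]
+set path [lb]bench_tmpfile[rb]
+# ... use $path in the -pre, -body and -post scripts of a benchmark ...
+bench_rm $path
+[example_end]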
+
+[call [cmd bench] [arg options]...]
+
+This command declares a single benchmark. Its result is the empty
+string. All parts of the benchmark are declared via options, and their
+values. The options can occur in any order. The accepted options are:
+
+[list_begin options]
+[opt_def -body script]
+
+The argument of this option declares the body of the benchmark, the
+Tcl script whose performance we wish to measure. This option, and
+[option -desc], are the two required parts of each benchmark.
+
+[opt_def -desc msg]
+
+The argument of this option declares the name of the benchmark. It has
+to be unique, or timing data from different benchmarks will be mixed
+together.
+
+[para]
+
+[emph Beware!] This requirement is not checked when benchmarks are
+executed, and the system will silently produce bogus data. This
+option, and [option -body], are the two required parts of each
+benchmark.
+
+[opt_def -ipost script]
+
+The argument of this option declares a script which is run immediately
+[emph after] each iteration of the body. Its responsibility is to
+release resources created by the body, or the [option -ipre]-body,
+which we do not wish to survive into the next iteration.
+
+[opt_def -ipre script]
+
+The argument of this option declares a script which is run immediately
+[emph before] each iteration of the body. Its responsibility is to
+create the state of the system expected by the body so that we measure
+the right thing.
+
+[opt_def -iterations num]
+
+The argument of this option declares the maximum number of times to
+run the [option -body] of the benchmark. During execution this and the
+global maximum number of iterations are compared and the smaller of
+the two values is used.
+
+[para]
+
+This option should be used only for benchmarks which are expected or
+known to take a long time per run. I.e. it reduces the number of
+times they are run, keeping the overall time for the execution of the
+whole benchmark suite within manageable limits.
+
+[opt_def -post script]
+
+The argument of this option declares a script which is run
+[emph after] all iterations of the body have been run. Its
+responsibility is to release resources created by the body,
+or [option -pre]-body.
+
+[opt_def -pre script]
+
+The argument of this option declares a script which is run
+[emph before] any of the iterations of the body are run. Its
+responsibility is to create whatever resources are needed by the body
+to run without failing.
+
+[list_end]
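+[para]
+
+A minimal declaration, showing only the two required options (a
+sketch):
+
+[example_begin]
+bench -desc "VAR set" -body {
+    set a b
+}
+[example_end]
+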
+[list_end]
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench_read.man b/tcllib/modules/bench/bench_read.man
new file mode 100644
index 0000000..31428ae
--- /dev/null
+++ b/tcllib/modules/bench/bench_read.man
@@ -0,0 +1,65 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench::in n 0.1]
+[see_also bench]
+[see_also bench::out::csv]
+[see_also bench::out::text]
+[see_also bench_intro]
+[keywords benchmark]
+[keywords csv]
+[keywords formatting]
+[keywords {human readable}]
+[keywords parsing]
+[keywords performance]
+[keywords reading]
+[keywords testing]
+[keywords text]
+[copyright {2007 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench::in - Reading benchmark results}]
+[category {Benchmark tools}]
+[require Tcl 8.2]
+[require csv]
+[require bench::in [opt 0.1]]
+[description]
+
+This package provides a command for reading benchmark results from
+files, sockets, etc.
+
+[para]
+
+A reader interested in the creation, processing or writing of such
+results should go and read
+[term {bench - Processing benchmark suites}] instead.
+
+[para]
+
+If the bench language itself is the actual interest please start with
+the [term {bench language introduction}] and then proceed from there
+to the formal [term {bench language specification}].
+
+[para]
+
+[section {PUBLIC API}]
+
+[list_begin definitions]
+
+[call [cmd ::bench::in::read] [arg file]]
+
+This command reads a benchmark result from the specified [arg file]
+and returns it as its result. The command understands the three
+formats created by the commands
+
+[list_begin commands]
+[cmd_def bench::out::raw] Provided by package [package bench].
+[cmd_def bench::out::csv] Provided by package [package bench::out::csv].
+[cmd_def bench::out::text] Provided by package [package bench::out::text].
+[list_end]
+[para]
+
+and automatically detects which format is used by the input file.
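+
+[para]
+
+For example (a sketch; [file results.csv] is assumed to have been
+written by [cmd bench::out::csv]):
+
+[example_begin]
+set data [lb]bench::in::read results.csv[rb]
+[example_end]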
+
+[list_end]
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench_read.tcl b/tcllib/modules/bench/bench_read.tcl
new file mode 100644
index 0000000..7cebb7b
--- /dev/null
+++ b/tcllib/modules/bench/bench_read.tcl
@@ -0,0 +1,162 @@
+# bench_read.tcl --
+#
+# Management of benchmarks, reading results in various formats.
+#
+# Copyright (c) 2005 by Andreas Kupries <andreas_kupries@users.sourceforge.net>
+# library derived from runbench.tcl application (C) Jeff Hobbs.
+#
+# See the file "license.terms" for information on usage and redistribution
+# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+#
+# RCS: @(#) $Id: bench_read.tcl,v 1.3 2006/06/13 23:20:30 andreas_kupries Exp $
+
+# ### ### ### ######### ######### ######### ###########################
+## Requisites - Packages and namespace for the commands and data.
+
+package require Tcl 8.2
+package require csv
+
+namespace eval ::bench::in {}
+
+# ### ### ### ######### ######### ######### ###########################
+## Public API - Result reading
+
+# ::bench::in::read --
+#
+# Read a bench result in any of the raw/csv/text formats
+#
+# Arguments:
+# path to file to read
+#
+# Results:
+# DATA dictionary, internal representation of the bench results.
+
+proc ::bench::in::read {file} {
+
+ set f [open $file r]
+ set head [gets $f]
+
+ if {![string match "# -\\*- tcl -\\*- bench/*" $head]} {
+ return -code error "Bad file format, not a benchmark file"
+ } else {
+ regexp {bench/(.*)$} $head -> format
+
+ switch -exact -- $format {
+ raw - csv - text {
+ set res [RD$format $f]
+ }
+ default {
+ return -code error "Bad format \"$format\", expected text, csv, or raw"
+ }
+ }
+ }
+ close $f
+ return $res
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Internal commands
+
+proc ::bench::in::RDraw {chan} {
+ return [string trimright [::read $chan]]
+}
+
+proc ::bench::in::RDcsv {chan} {
+ # Lines Format
+ # First line is number of interpreters #n. int
+ # Next to 1+n is interpreter data. id,ver,path
+ # Beyond is benchmark results. id,desc,res1,...,res#n
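+ #
+ # Hypothetical example input:
+ # 2
+ # 1,8.4.19,/usr/bin/tclsh8.4
+ # 2,8.5.9,/usr/bin/tclsh8.5
+ # 1,"LIST lappend",120.8,95.4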
+
+ array set DATA {}
+
+ # #Interp ...
+
+ set nip [lindex [csv::split [gets $chan]] 0]
+
+ # Interp data ...
+
+ set iplist {}
+ for {set i 0} {$i < $nip} {incr i} {
+ foreach {__ ver ip} [csv::split [gets $chan]] break
+
+ set DATA([list interp $ip]) $ver
+ lappend iplist $ip
+ }
+
+ # Benchmark data ...
+
+ while {[gets $chan line] >= 0} {
+ set line [string trim $line]
+ if {$line == {}} break
+ set line [csv::split $line]
+ set desc [lindex $line 1]
+
+ set DATA([list desc $desc]) {}
+ foreach val [lrange $line 2 end] ip $iplist {
+ if {$val == {}} continue
+ set DATA([list usec $desc $ip]) $val
+ }
+ }
+
+ return [array get DATA]
+}
+
+proc ::bench::in::RDtext {chan} {
+ array set DATA {}
+
+ # Interp data ...
+
+ # Empty line - ignore
+ # "id: ver path" - interp data.
+ # Empty line - separator before benchmark data.
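+ #
+ # A hypothetical interpreter line:
+ # " 1: 8.4.19 /usr/bin/tclsh8.4"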
+
+ set n 0
+ set iplist {}
+ while {[gets $chan line] >= 0} {
+ set line [string trim $line]
+ if {$line == {}} {
+ incr n
+ if {$n == 2} break
+ continue
+ }
+
+ regexp {[^:]+: ([^ ]+) (.*)$} $line -> ver ip
+ set DATA([list interp $ip]) $ver
+ lappend iplist $ip
+ }
+
+ # Benchmark data ...
+
+ # '---' -> Ignore.
+ # '|' column separators. Remove spaces around it. Then treat line
+ # as CSV data with a particular separator.
+ # Ignore the INTERP line.
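+ #
+ # A hypothetical benchmark row, before cleanup:
+ # "| 1 | LIST lappend | 120.80 | 95.40 |"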
+
+ while {[gets $chan line] >= 0} {
+ set line [string trim $line]
+ if {$line == {}} continue
+ if {[string match "+---*" $line]} continue
+ if {[string match "*INTERP*" $line]} continue
+
+ regsub -all "\\| +" $line {|} line
+ regsub -all " +\\|" $line {|} line
+ set line [csv::split [string trim $line |] |]
+ set desc [lindex $line 1]
+
+ set DATA([list desc $desc]) {}
+ foreach val [lrange $line 2 end] ip $iplist {
+ if {$val == {}} continue
+ set DATA([list usec $desc $ip]) $val
+ }
+ }
+
+ return [array get DATA]
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Initialize internal data structures.
+
+# ### ### ### ######### ######### ######### ###########################
+## Ready to run
+
+package provide bench::in 0.1
diff --git a/tcllib/modules/bench/bench_wcsv.man b/tcllib/modules/bench/bench_wcsv.man
new file mode 100644
index 0000000..52554a9
--- /dev/null
+++ b/tcllib/modules/bench/bench_wcsv.man
@@ -0,0 +1,54 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench::out::csv n 0.1.2]
+[see_also bench]
+[see_also bench::out::text]
+[keywords benchmark]
+[keywords csv]
+[keywords formatting]
+[keywords performance]
+[keywords testing]
+[copyright {2007 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench::out::csv - Formatting benchmark results as CSV}]
+[category {Benchmark tools}]
+[require Tcl 8.2]
+[require bench::out::csv [opt 0.1.2]]
+[description]
+
+This package provides commands for formatting benchmark results into
+a CSV table importable by spreadsheets.
+
+[para]
+
+A reader interested in the generation or processing of such results should
+go and read [term {bench - Processing benchmark suites}] instead.
+
+[para]
+
+If the bench language itself is the actual interest please start with
+the [term {bench language introduction}] and then proceed from there
+to the formal [term {bench language specification}].
+
+[para]
+
+[section {PUBLIC API}]
+
+[list_begin definitions]
+
+[call [cmd ::bench::out::csv] [arg bench_result]]
+
+This command formats the specified benchmark result for output to a
+file, socket, etc. This specific command generates CSV data importable
+by spreadsheets.
+
+[para]
+
+For other formatting styles see the packages [package bench] and
+[package bench::out::text] which provide commands to format benchmark
+results in raw form, or for human consumption, respectively.
+
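+[para]
+
+A minimal usage sketch, with the benchmark result assumed to be held
+in the hypothetical variable [var result]:
+
+[example {
+package require bench::out::csv
+
+puts [bench::out::csv $result]
+}]
+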
+[list_end]
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench_wcsv.tcl b/tcllib/modules/bench/bench_wcsv.tcl
new file mode 100644
index 0000000..cb3d4c5
--- /dev/null
+++ b/tcllib/modules/bench/bench_wcsv.tcl
@@ -0,0 +1,101 @@
+# bench_wcsv.tcl --
+#
+# Management of benchmarks, formatting results as CSV.
+#
+# Copyright (c) 2005 by Andreas Kupries <andreas_kupries@users.sourceforge.net>
+# library derived from runbench.tcl application (C) Jeff Hobbs.
+#
+# See the file "license.terms" for information on usage and redistribution
+# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+#
+# RCS: @(#) $Id: bench_wcsv.tcl,v 1.4 2007/01/21 23:29:06 andreas_kupries Exp $
+
+# ### ### ### ######### ######### ######### ###########################
+## Requisites - Packages and namespace for the commands and data.
+
+package require Tcl 8.2
+package require csv
+
+namespace eval ::bench::out {}
+
+# ### ### ### ######### ######### ######### ###########################
+## Public API - Result formatting.
+
+# ::bench::out::csv --
+#
+# Format the result of a benchmark run.
+# Style: CSV
+#
+# Arguments:
+# DATA dict
+#
+# Results:
+# String containing the formatted DATA.
+
+proc ::bench::out::csv {data} {
+ array set DATA $data
+ set CSV {}
+
+ # 1st record: #shells
+ # 2nd record to #shells+1: Interpreter data (id, version, path)
+ # #shells+2 to end: Benchmark data (id,desc,result1,...,result#shells)
+
+ # --- --- ----
+ # #interpreters used
+
+ set ipkeys [array names DATA interp*]
+ lappend CSV [csv::join [list [llength $ipkeys]]]
+
+ # --- --- ----
+ # Table 1: Interpreter information.
+
+ set n 1
+ set iplist {}
+ foreach key [lsort -dict $ipkeys] {
+ set ip [lindex $key 1]
+ lappend CSV [csv::join [list $n $DATA($key) $ip]]
+ set DATA($key) $n
+ incr n
+ lappend iplist $ip
+ }
+
+ # --- --- ----
+ # Table 2: Benchmark information
+
+ set dlist {}
+ foreach key [lsort -dict -index 1 [array names DATA desc*]] {
+ lappend dlist [lindex $key 1]
+ }
+
+ set n 1
+ foreach desc $dlist {
+ set record {}
+ lappend record $n
+ lappend record $desc
+ foreach ip $iplist {
+ if {[catch {
+ lappend record $DATA([list usec $desc $ip])
+ }]} {
+ lappend record {}
+ }
+ }
+ lappend CSV [csv::join $record]
+ incr n
+ }
+
+ return [join $CSV \n]
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Internal commands
+
+# ### ### ### ######### ######### ######### ###########################
+## Initialize internal data structures.
+
+# ### ### ### ######### ######### ######### ###########################
+## Ready to run
+
+package provide bench::out::csv 0.1.2
diff --git a/tcllib/modules/bench/bench_wtext.man b/tcllib/modules/bench/bench_wtext.man
new file mode 100644
index 0000000..b374b51
--- /dev/null
+++ b/tcllib/modules/bench/bench_wtext.man
@@ -0,0 +1,55 @@
+[comment {-*- tcl -*- doctools manpage}]
+[manpage_begin bench::out::text n 0.1.2]
+[see_also bench]
+[see_also bench::out::csv]
+[keywords benchmark]
+[keywords formatting]
+[keywords {human readable}]
+[keywords performance]
+[keywords testing]
+[keywords text]
+[copyright {2007 Andreas Kupries <andreas_kupries@users.sourceforge.net>}]
+[moddesc {Benchmarking/Performance tools}]
+[titledesc {bench::out::text - Formatting benchmark results as human readable text}]
+[category {Benchmark tools}]
+[require Tcl 8.2]
+[require bench::out::text [opt 0.1.2]]
+[description]
+
+This package provides commands for formatting benchmark results into
+human readable text.
+
+[para]
+
+A reader interested in the generation or processing of such results should
+go and read [term {bench - Processing benchmark suites}] instead.
+
+[para]
+
+If the bench language itself is the actual interest please start with
+the [term {bench language introduction}] and then proceed from there
+to the formal [term {bench language specification}].
+
+[para]
+
+[section {PUBLIC API}]
+
+[list_begin definitions]
+
+[call [cmd ::bench::out::text] [arg bench_result]]
+
+This command formats the specified benchmark result for output to a
+file, socket, etc. This specific command generates human readable
+text.
+
+[para]
+
+For other formatting styles see the packages [package bench] and
+[package bench::out::csv] which provide commands to format benchmark
+results in raw form, or as importable CSV data, respectively.
+
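+[para]
+
+A minimal usage sketch, with the benchmark result assumed to be held
+in the hypothetical variable [var result]:
+
+[example {
+package require bench::out::text
+
+puts [bench::out::text $result]
+}]
+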
+[list_end]
+
+[vset CATEGORY bench]
+[include ../doctools2base/include/feedback.inc]
+[manpage_end]
diff --git a/tcllib/modules/bench/bench_wtext.tcl b/tcllib/modules/bench/bench_wtext.tcl
new file mode 100644
index 0000000..aaa4100
--- /dev/null
+++ b/tcllib/modules/bench/bench_wtext.tcl
@@ -0,0 +1,165 @@
+# bench_wtext.tcl --
+#
+# Management of benchmarks, formatted text.
+#
+# Copyright (c) 2005 by Andreas Kupries <andreas_kupries@users.sourceforge.net>
+# library derived from runbench.tcl application (C) Jeff Hobbs.
+#
+# See the file "license.terms" for information on usage and redistribution
+# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+#
+# RCS: @(#) $Id: bench_wtext.tcl,v 1.4 2007/01/21 23:29:06 andreas_kupries Exp $
+
+# ### ### ### ######### ######### ######### ###########################
+## Requisites - Packages and namespace for the commands and data.
+
+package require Tcl 8.2
+package require struct::matrix
+package require report
+
+namespace eval ::bench::out {}
+
+# ### ### ### ######### ######### ######### ###########################
+## Public API - Result formatting.
+
+# ::bench::out::text --
+#
+# Format the result of a benchmark run.
+# Style: TEXT
+#
+# General structure like CSV, but nicely formatted and aligned
+# columns.
+#
+# Arguments:
+# DATA dict
+#
+# Results:
+# String containing the formatted DATA.
+
+proc ::bench::out::text {data} {
+ array set DATA $data
+ set LINES {}
+
+ # 1st line to #shells: Interpreter data (id, version, path)
+ # #shells+1 to end: Benchmark data (id,desc,result1,...,result#shells)
+
+ lappend LINES {}
+
+ # --- --- ----
+ # Table 1: Interpreter information.
+
+ set ipkeys [array names DATA interp*]
+ set n 1
+ set iplist {}
+ set vlen 0
+ foreach key [lsort -dict $ipkeys] {
+ lappend iplist [lindex $key 1]
+ incr n
+ set l [string length $DATA($key)]
+ if {$l > $vlen} {set vlen $l}
+ }
+ set idlen [string length $n]
+
+ set dlist {}
+ set n 1
+ foreach key [lsort -dict -index 1 [array names DATA desc*]] {
+ lappend dlist [lindex $key 1]
+ incr n
+ }
+ set didlen [string length $n]
+
+ set n 1
+ set record [list "" INTERP]
+ foreach ip $iplist {
+ set v $DATA([list interp $ip])
+ lappend LINES " [PADL $idlen $n]: [PADR $vlen $v] $ip"
+ lappend record $n
+ incr n
+ }
+
+ lappend LINES {}
+
+ # --- --- ----
+ # Table 2: Benchmark information
+
+ set m [struct::matrix m]
+ $m add columns [expr {2 + [llength $iplist]}]
+ $m add row $record
+
+ set n 1
+ foreach desc $dlist {
+ set record [list $n]
+ lappend record $desc
+
+ foreach ip $iplist {
+ if {[catch {
+ set val $DATA([list usec $desc $ip])
+ }]} {
+ set val {}
+ }
+ if {[string is double -strict $val]} {
+ lappend record [format %.2f $val]
+ } else {
+ lappend record [format %s $val]
+ }
+ }
+ $m add row $record
+ incr n
+ }
+
+ ::report::defstyle simpletable {} {
+ data set [split "[string repeat "| " [columns]]|"]
+ top set [split "[string repeat "+ - " [columns]]+"]
+ bottom set [top get]
+ top enable
+ bottom enable
+
+ set c [columns]
+ justify 0 right
+ pad 0 both
+
+ if {$c > 1} {
+ justify 1 left
+ pad 1 both
+ }
+ for {set i 2} {$i < $c} {incr i} {
+ justify $i right
+ pad $i both
+ }
+ }
+ ::report::defstyle captionedtable {{n 1}} {
+ simpletable
+ topdata set [data get]
+ topcapsep set [top get]
+ topcapsep enable
+ tcaption $n
+ }
+
+ set r [report::report r [$m columns] style captionedtable]
+ lappend LINES [$m format 2string $r]
+ $m destroy
+ $r destroy
+
+ return [join $LINES \n]
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Internal commands
+
+proc ::bench::out::PADL {max str} {
+ format "%${max}s" $str
+ #return "[PAD $max $str]$str"
+}
+
+proc ::bench::out::PADR {max str} {
+ format "%-${max}s" $str
+ #return "$str[PAD $max $str]"
+}
+
+# ### ### ### ######### ######### ######### ###########################
+## Initialize internal data structures.
+
+# ### ### ### ######### ######### ######### ###########################
+## Ready to run
+
+package provide bench::out::text 0.1.2
diff --git a/tcllib/modules/bench/libbench.tcl b/tcllib/modules/bench/libbench.tcl
new file mode 100644
index 0000000..ebf9f71
--- /dev/null
+++ b/tcllib/modules/bench/libbench.tcl
@@ -0,0 +1,561 @@
+# -*- tcl -*-
+# libbench.tcl ?(<option> <value>)...? <benchFile>...
+#
+# This file has to have code that works in any version of Tcl that
+# the user would want to benchmark.
+#
+# RCS: @(#) $Id: libbench.tcl,v 1.4 2008/07/02 23:34:06 andreas_kupries Exp $
+#
+# Copyright (c) 2000-2001 Jeffrey Hobbs.
+# Copyright (c) 2007 Andreas Kupries
+#
+
+# This code provides the supporting commands for the execution of
+# benchmark files. It is actually an application and is exec'd by the
+# management code.
+
+# Options:
+# -help Print usage message.
+# -rmatch <regexp-pattern> Run only tests whose description matches the pattern.
+# -match <glob-pattern> Run only tests whose description matches the pattern.
+# -interp <name> Name of the interp running the benchmarks.
+# -thread <num> Invoke threaded benchmarks, number of threads to use.
+# -errors <boolean> Throw errors, or not.
+
+# Note: If both -match and -rmatch are specified then _both_
+# apply. I.e. a benchmark will be run if and only if it matches both
+# patterns.
+
+# Application activity and results are communicated to the highlevel
+# management via text written to stdout. Each line written is a list
+# and has one of the following forms:
+#
+# __THREADED <version> - Indicates threaded mode, and version
+# of package Thread in use.
+#
+# Sourcing {<desc>: <res>} - Benchmark <desc> has started.
+# <res> is the result from executing
+# it once (compilation of body.)
+#
+# Sourcing <file> - Benchmark file <file> starts execution.
+#
+# <desc> <res> - Result of a benchmark.
+#
+# The above implies that no benchmark may use the strings 'Sourcing'
+# or '__THREADED' as their description.
+
+# We will put our data into these named globals.
+
+global BENCH bench
+
+# 'BENCH' contents:
+#
+# - ERRORS : Boolean flag. If set benchmark output mismatches are
+# reported by throwing an error. Otherwise they are simply
+# listed as BAD_RES. Default true. Can be set/reset via
+# option -errors.
+#
+# - MATCH : Match pattern, see -match, default empty, aka everything
+# matches.
+#
+# - RMATCH : Match pattern, see -rmatch, default empty, aka
+# everything matches.
+#
+# - OUTFILE : Name of output file, default is special value "stdout".
+# - OUTFID : Channel for output.
+#
+# The outfile cannot be set by the caller, thus output is always
+# written to stdout.
+#
+# - FILES : List of benchmark files to run.
+#
+# - ITERS : Number of iterations to run a benchmark body, default
+# 1000. Can be overridden by the individual benchmarks.
+#
+# - THREADS : Number of threads to use. 0 signals no threading.
+# Limited to the number of files if there are fewer files than
+# requested threads.
+#
+# - EXIT : Boolean flag. True when the application is run by wish, for
+# special exit processing. ... Actually always true.
+#
+# - INTERP : Name of the interpreter running the benchmarks. Is the
+# executable running this code. Can be overridden via the
+# command line option -interp.
+#
+# - uniqid : Counter for 'bench_tmpfile' to generate unique names of
+# tmp files.
+#
+# - us : Thread id of main thread.
+#
+# - inuse : Number of threads active, present and relevant only in
+# threaded mode.
+#
+# - file : Currently executed benchmark file. Relevant only in
+# non-threaded mode.
+
+#
+# 'bench' contents.
+
+# Benchmark results, mapping from the benchmark descriptions to their
+# results. Usually time in microseconds, but the following special
+# values can occur:
+#
+# - BAD_RES - Result from benchmark body does not match expectations.
+# - ERR - Benchmark body aborted with an error.
+# - Any string - Forced by error code 666 to pass to management.
+
+#
+# We claim all procedures starting with bench*
+#
+
+# bench_tmpfile --
+#
+# Return a temp file name that can be modified at will
+#
+# Arguments:
+# None
+#
+# Results:
+# Returns file name
+#
+proc bench_tmpfile {} {
+ global tcl_platform env BENCH
+ if {![info exists BENCH(uniqid)]} { set BENCH(uniqid) 0 }
+ set base "tclbench[incr BENCH(uniqid)].dat"
+ if {[info exists tcl_platform(platform)]} {
+ if {$tcl_platform(platform) == "unix"} {
+ return "/tmp/$base"
+ } elseif {$tcl_platform(platform) == "windows"} {
+ return [file join $env(TEMP) $base]
+ } else {
+ return $base
+ }
+ } else {
+ # The Good Ol' Days (?) when only Unix support existed
+ return "/tmp/$base"
+ }
+}
+
+# bench_rm --
+#
+# Remove a file silently (no complaining)
+#
+# Arguments:
+# args Files to delete
+#
+# Results:
+# Returns nothing
+#
+proc bench_rm {args} {
+ foreach file $args {
+ if {[info tclversion] > 7.4} {
+ catch {file delete $file}
+ } else {
+ catch {exec /bin/rm $file}
+ }
+ }
+}
+
+proc bench_puts {args} {
+ eval [linsert $args 0 FEEDBACK]
+ return
+}
+
+# bench --
+#
+# Main bench procedure.
+# The bench test is expected to exit cleanly. If an error occurs,
+# it will be thrown all the way up. A bench proc may return the
+# special code 666, which says take the string as the bench value.
+# This is usually used for N/A feature situations.
+#
+# Arguments:
+#
+# -pre script to run before main timed body
+# -body script to run as main timed body
+# -post script to run after main timed body
+# -ipre script to run before timed body, per iteration of the body.
+# -ipost script to run after timed body, per iteration of the body.
+# -desc message text
+# -iterations <#>
+#
+# Note:
+#
+# Using -ipre and/or -ipost will cause us to compute the average
+# time ourselves, i.e. 'time body 1' n times. Required to ensure
+# that the pre/post operations are executed, yet not timed themselves.
+#
+# Results:
+#
+# Returns nothing
+#
+# Side effects:
+#
+# Sets up data in bench global array
+#
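+# Example of a hypothetical benchmark declaration:
+#
+# bench -desc "LIST length, 100 elements" -iterations 500 \
+#     -pre  {set l [string repeat "x " 100]} \
+#     -body {llength $l} \
+#     -res  100
+#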
+proc bench {args} {
+ global BENCH bench errorInfo errorCode
+
+ # -pre script
+ # -body script
+ # -desc msg
+ # -post script
+ # -ipre script
+ # -ipost script
+ # -iterations <#>
+ array set opts {
+ -pre {}
+ -body {}
+ -desc {}
+ -post {}
+ -ipre {}
+ -ipost {}
+ }
+ set opts(-iter) $BENCH(ITERS)
+ while {[llength $args]} {
+ set key [lindex $args 0]
+ switch -glob -- $key {
+ -res* { set opts(-res) [lindex $args 1] }
+ -pr* { set opts(-pre) [lindex $args 1] }
+ -po* { set opts(-post) [lindex $args 1] }
+ -ipr* { set opts(-ipre) [lindex $args 1] }
+ -ipo* { set opts(-ipost) [lindex $args 1] }
+ -bo* { set opts(-body) [lindex $args 1] }
+ -de* { set opts(-desc) [lindex $args 1] }
+ -it* {
+ # Only change the iterations when it is smaller than
+ # the requested default
+ set val [lindex $args 1]
+ if {$opts(-iter) > $val} { set opts(-iter) $val }
+ }
+ default {
+ error "unknown option $key"
+ }
+ }
+ set args [lreplace $args 0 1]
+ }
+
+ FEEDBACK "Running <$opts(-desc)>"
+
+ if {($BENCH(MATCH) != "") && ![string match $BENCH(MATCH) $opts(-desc)]} {
+ return
+ }
+ if {($BENCH(RMATCH) != "") && ![regexp $BENCH(RMATCH) $opts(-desc)]} {
+ return
+ }
+ if {$opts(-pre) != ""} {
+ uplevel \#0 $opts(-pre)
+ }
+ if {$opts(-body) != ""} {
+ # always run it once to remove compile phase confusion
+ if {$opts(-ipre) != ""} {
+ uplevel \#0 $opts(-ipre)
+ }
+ set code [catch {uplevel \#0 $opts(-body)} res]
+ if {$opts(-ipost) != ""} {
+ uplevel \#0 $opts(-ipost)
+ }
+ if {!$code && [info exists opts(-res)] \
+ && [string compare $opts(-res) $res]} {
+ if {$BENCH(ERRORS)} {
+ return -code error "Result was:\n$res\nResult\
+ should have been:\n$opts(-res)"
+ } else {
+ set res "BAD_RES"
+ }
+ #set bench($opts(-desc)) $res
+ RESULT $opts(-desc) $res
+ } else {
+ if {($opts(-ipre) != "") || ($opts(-ipost) != "")} {
+ # We do the averaging on our own, to allow untimed
+ # pre/post execution per iteration. We catch and
+ # handle problems in the pre/post code as if
+ # everything was executed as one block (like it would
+ # be in the other path). We are using floating point
+ # to avoid integer overflow, easily happening when
+ # accumulating a high number (iterations) of large
+ # integers (microseconds).
+
+ set total 0.0
+ for {set i 0} {$i < $opts(-iter)} {incr i} {
+ set code 0
+ if {$opts(-ipre) != ""} {
+ set code [catch {uplevel \#0 $opts(-ipre)} res]
+ if {$code} break
+ }
+ set code [catch {uplevel \#0 [list time $opts(-body) 1]} res]
+ if {$code} break
+ set total [expr {$total + [lindex $res 0]}]
+ if {$opts(-ipost) != ""} {
+ set code [catch {uplevel \#0 $opts(-ipost)} res]
+ if {$code} break
+ }
+ }
+ if {!$code} {
+ set res [list [expr {int ($total/$opts(-iter))}] microseconds per iteration]
+ }
+ } else {
+ set code [catch {uplevel \#0 \
+ [list time $opts(-body) $opts(-iter)]} res]
+ }
+ if {!$BENCH(THREADS)} {
+ if {$code == 0} {
+ # Get just the microseconds value from the time result
+ set res [lindex $res 0]
+ } elseif {$code != 666} {
+ # A 666 result code means pass it through to the bench
+ # suite. Otherwise throw errors all the way out, unless
+ # we specified not to throw errors (option -errors 0 to
+ # libbench).
+ if {$BENCH(ERRORS)} {
+ return -code $code -errorinfo $errorInfo \
+ -errorcode $errorCode
+ } else {
+ set res "ERR"
+ }
+ }
+ #set bench($opts(-desc)) $res
+ RESULT $opts(-desc) $res
+ } else {
+ # Threaded runs report back asynchronously
+ thread::send $BENCH(us) \
+ [list thread_report $opts(-desc) $code $res]
+ }
+ }
+ }
+ if {($opts(-post) != "") && [catch {uplevel \#0 $opts(-post)} err] \
+ && $BENCH(ERRORS)} {
+ return -code error "post code threw error:\n$err"
+ }
+ return
+}
+
+proc RESULT {desc time} {
+ global BENCH
+ puts $BENCH(OUTFID) [list RESULT $desc $time]
+ return
+}
+
+proc FEEDBACK {text} {
+ global BENCH
+ puts $BENCH(OUTFID) [list LOG $text]
+ return
+}
+
+
+proc usage {} {
+ set me [file tail [info script]]
+ puts stderr "Usage: $me ?options?\
+ \n\t-help # print out this message\
+ \n\t-rmatch <regexp> # only run tests matching this pattern\
+ \n\t-match <glob> # only run tests matching this pattern\
+ \n\t-interp <name> # name of interp (tries to get it right)\
+ \n\t-iterations <num> # default number of iterations per benchmark\
+ \n\t-thread <num> # number of threads to use\
+ \n\t-errors <boolean> # throw errors, or not\
+ \n\t-pkgdir <dir> # prepend dir to the package search path\
+ \n\tfileList # files to benchmark"
+ exit 1
+}
+
+#
+# Process args
+#
+if {[catch {set BENCH(INTERP) [info nameofexec]}]} {
+ set BENCH(INTERP) $argv0
+}
+foreach {var val} {
+ ERRORS 1
+ MATCH {}
+ RMATCH {}
+ OUTFILE stdout
+ FILES {}
+ ITERS 1000
+ THREADS 0
+ PKGDIR {}
+ EXIT "[info exists tk_version]"
+} {
+ if {![info exists BENCH($var)]} {
+ set BENCH($var) [subst $val]
+ }
+}
+set BENCH(EXIT) 1
+
+if {[llength $argv]} {
+ while {[llength $argv]} {
+ set key [lindex $argv 0]
+ switch -glob -- $key {
+ -help* { usage }
+ -err* { set BENCH(ERRORS) [lindex $argv 1] }
+ -int* { set BENCH(INTERP) [lindex $argv 1] }
+ -rmat* { set BENCH(RMATCH) [lindex $argv 1] }
+ -mat* { set BENCH(MATCH) [lindex $argv 1] }
+ -iter* { set BENCH(ITERS) [lindex $argv 1] }
+ -thr* { set BENCH(THREADS) [lindex $argv 1] }
+ -pkg* { set BENCH(PKGDIR) [lindex $argv 1] }
+ default {
+ foreach arg $argv {
+ if {![file exists $arg]} { usage }
+ lappend BENCH(FILES) $arg
+ }
+ break
+ }
+ }
+ set argv [lreplace $argv 0 1]
+ }
+}
+
+if {[string length $BENCH(PKGDIR)]} {
+ set auto_path [linsert $auto_path 0 $BENCH(PKGDIR)]
+}
+
+if {$BENCH(THREADS)} {
+ # We have to be able to load threads if we want to use threads, and
+ # we don't want to create more threads than we have files.
+ if {[catch {package require Thread}]} {
+ set BENCH(THREADS) 0
+ } elseif {[llength $BENCH(FILES)] < $BENCH(THREADS)} {
+ set BENCH(THREADS) [llength $BENCH(FILES)]
+ }
+}
+
+rename exit exit.true
+proc exit args {
+ error "called \"exit $args\" in benchmark test"
+}
+
+if {[string compare $BENCH(OUTFILE) stdout]} {
+ set BENCH(OUTFID) [open $BENCH(OUTFILE) w]
+} else {
+ set BENCH(OUTFID) stdout
+}
+
+#
+# Everything that gets output must be in pairwise format, because
+# the data will be collected via an 'array set'.
+#
+
+if {$BENCH(THREADS)} {
+ # Each file must run in its own thread because of all the extra
+ # header stuff they have.
+ #set DEBUG 1
+ proc thread_one {{id 0}} {
+ global BENCH
+ set file [lindex $BENCH(FILES) 0]
+ set BENCH(FILES) [lrange $BENCH(FILES) 1 end]
+ if {[file exists $file]} {
+ incr BENCH(inuse)
+ FEEDBACK [list Sourcing $file]
+ if {$id} {
+ set them $id
+ } else {
+ set them [thread::create]
+ thread::send -async $them { load {} Thread }
+ thread::send -async $them \
+ [list array set BENCH [array get BENCH]]
+ thread::send -async $them \
+ [list proc bench_tmpfile {} [info body bench_tmpfile]]
+ thread::send -async $them \
+ [list proc bench_rm {args} [info body bench_rm]]
+ thread::send -async $them \
+ [list proc bench {args} [info body bench]]
+ }
+ if {[info exists ::DEBUG]} {
+ FEEDBACK "SEND [clock seconds] thread $them $file INUSE\
+ $BENCH(inuse) of $BENCH(THREADS)"
+ }
+ thread::send -async $them [list source $file]
+ thread::send -async $them \
+ [list thread::send $BENCH(us) [list thread_ready $them]]
+ #thread::send -async $them { thread::unwind }
+ }
+ }
+
+ proc thread_em {} {
+ global BENCH
+ while {[llength $BENCH(FILES)]} {
+ if {[info exists ::DEBUG]} {
+ FEEDBACK "THREAD ONE [lindex $BENCH(FILES) 0]"
+ }
+ thread_one
+ if {$BENCH(inuse) >= $BENCH(THREADS)} {
+ break
+ }
+ }
+ }
+
+ proc thread_ready {id} {
+ global BENCH
+
+ incr BENCH(inuse) -1
+ if {[llength $BENCH(FILES)]} {
+ if {[info exists ::DEBUG]} {
+ FEEDBACK "SEND ONE [clock seconds] thread $id"
+ }
+ thread_one $id
+ } else {
+ if {[info exists ::DEBUG]} {
+ FEEDBACK "UNWIND thread $id"
+ }
+ thread::send -async $id { thread::unwind }
+ }
+ }
+
+ proc thread_report {desc code res} {
+ global BENCH bench errorInfo errorCode
+
+ if {$code == 0} {
+ # Get just the microseconds value from the time result
+ set res [lindex $res 0]
+ } elseif {$code != 666} {
+ # A 666 result code means pass it through to the bench suite.
+ # Otherwise throw errors all the way out, unless we specified
+ # not to throw errors (option -errors 0 to libbench).
+ if {$BENCH(ERRORS)} {
+ return -code $code -errorinfo $errorInfo \
+ -errorcode $errorCode
+ } else {
+ set res "ERR"
+ }
+ }
+ #set bench($desc) $res
+ RESULT $desc $res
+ }
+
+ proc thread_finish {{delay 4000}} {
+ global BENCH bench
+ set val [expr {[llength [thread::names]] > 1}]
+ #set val [expr {$BENCH(inuse)}]
+ if {$val} {
+ after $delay [info level 0]
+ } else {
+ if {0} {foreach desc [array names bench] {
+ RESULT $desc $bench($desc)
+ }}
+ if {$BENCH(EXIT)} {
+ exit.true ; # needed for Tk tests
+ }
+ }
+ }
+
+ set BENCH(us) [thread::id]
+ set BENCH(inuse) 0 ; # num threads in use
+ FEEDBACK [list __THREADED [package provide Thread]]
+
+ thread_em
+ thread_finish
+ vwait forever
+} else {
+ foreach BENCH(file) $BENCH(FILES) {
+ if {[file exists $BENCH(file)]} {
+ FEEDBACK [list Sourcing $BENCH(file)]
+ source $BENCH(file)
+ }
+ }
+
+ if {0} {foreach desc [array names bench] {
+ RESULT $desc $bench($desc)
+ }}
+
+ if {$BENCH(EXIT)} {
+ exit.true ; # needed for Tk tests
+ }
+}
diff --git a/tcllib/modules/bench/pkgIndex.tcl b/tcllib/modules/bench/pkgIndex.tcl
new file mode 100644
index 0000000..e9b25f9
--- /dev/null
+++ b/tcllib/modules/bench/pkgIndex.tcl
@@ -0,0 +1,7 @@
+if {![package vsatisfies [package provide Tcl] 8.2]} {
+ return
+}
+package ifneeded bench 0.4 [list source [file join $dir bench.tcl]]
+package ifneeded bench::out::text 0.1.2 [list source [file join $dir bench_wtext.tcl]]
+package ifneeded bench::out::csv 0.1.2 [list source [file join $dir bench_wcsv.tcl]]
+package ifneeded bench::in 0.1 [list source [file join $dir bench_read.tcl]]