path: root/Lib/test/regrtest.py
blob: ea4f4cedd0130d5a126e52dce8218275c7d40483
#! /usr/bin/env python

"""Regression test.

This will find all modules whose name is "test_*" in the test
directory, and run them.  Various command line options provide
additional facilities.

Command line options:

-v: verbose -- print the name of each test as it is being run
-q: quiet -- don't print anything except if a test fails
-g: generate -- write the output file for a test instead of comparing it
-x: exclude -- arguments are tests to *exclude*

If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.

If -v is given *twice*, the tests themselves are run in verbose mode.
This is incompatible with -g and does not compare test output files.
"""

import sys
import string
import os
import getopt
import traceback

import test_support
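
# Typical invocations (illustrative only; the interpreter name and the path
# to this script depend on your setup):
#
#     python regrtest.py                  run the whole test suite
#     python regrtest.py -v test_types    run one test, printing its name
#     python regrtest.py -g test_types    regenerate its expected-output file
#     python regrtest.py -x test_types    run everything except test_types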

def main():
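    # Parse the command line, decide which tests to run, run each one, and
    # tally the results into the good/bad/skipped lists reported at the end.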
    try:
	opts, args = getopt.getopt(sys.argv[1:], 'vgqx')
    except getopt.error, msg:
	print msg
	print __doc__
	sys.exit(2)
    verbose = 0
    quiet = 0
    generate = 0
    exclude = 0
    for o, a in opts:
	if o == '-v': verbose = verbose+1
	if o == '-q': quiet = 1
	if o == '-g': generate = 1
	if o == '-x': exclude = 1
    if generate and verbose>1:
	print "-g and more than one -v don't go together!"
	sys.exit(2)
    good = []
    bad = []
    skipped = []
    if exclude:
	nottests[:0] = args
	args = []
    tests = args or findtests()
    test_support.verbose = verbose>1	# Tell tests to be moderately quiet
    for test in tests:
	if verbose:
	    print test
	ok = runtest(test, generate, verbose>1)
	if ok > 0:
	    good.append(test)
	elif ok == 0:
	    bad.append(test)
	else:
	    if not quiet:
		print "test", test,
		print "skipped -- an optional feature could not be imported"
	    skipped.append(test)
    if good and not quiet:
	if not bad and not skipped and len(good) > 1:
	    print "All",
	print count(len(good), "test"), "OK."
    if bad:
	print count(len(bad), "test"), "failed:",
	print string.join(bad)
    if skipped and not quiet:
	print count(len(skipped), "test"), "skipped:",
	print string.join(skipped)
    sys.exit(len(bad) > 0)

stdtests = [
    'test_grammar',
    'test_opcodes',
    'test_operations',
    'test_builtin',
    'test_exceptions',
    'test_types',
   ]

nottests = [
    'test_support',
    'test_b1',
    'test_b2',
    ]

def findtests():
    """Return a list of all applicable test modules."""
    testdir = findtestdir()
    names = os.listdir(testdir)
    tests = []
    for name in names:
	if name[:5] == "test_" and name[-3:] == ".py":
	    modname = name[:-3]
	    if modname not in stdtests and modname not in nottests:
		tests.append(modname)
    tests.sort()
    return stdtests + tests

def runtest(test, generate, verbose2):
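    # Import (and thereby run) a single test module with sys.stdout redirected
    # as requested.  Returns 1 if the test passed, 0 if it failed or raised an
    # unexpected exception, and -1 if it was skipped because an optional
    # module could not be imported.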
    test_support.unload(test)
    testdir = findtestdir()
    outputdir = os.path.join(testdir, "output")
    outputfile = os.path.join(outputdir, test)
    try:
	if generate:
	    cfp = open(outputfile, "w")
	elif verbose2:
	    cfp = sys.stdout
	else:
	    cfp = Compare(outputfile)
    except IOError:
	cfp = None
	print "Warning: can't open", outputfile
    try:
	save_stdout = sys.stdout
	try:
	    if cfp:
		sys.stdout = cfp
		print test		# Output file starts with test name
	    __import__(test)
	finally:
	    sys.stdout = save_stdout
    except ImportError, msg:
	return -1
    except test_support.TestFailed, msg:
	print "test", test, "failed --", msg
	return 0
    except:
	print "test", test, "crashed --", sys.exc_type, ":", sys.exc_value
	if verbose2:
	    traceback.print_exc(file=sys.stdout)
	return 0
    else:
	return 1

def findtestdir():
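    # The test directory is taken to be the directory containing this script
    # (sys.argv[0] when run as a program, __file__ when imported).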
    if __name__ == '__main__':
	file = sys.argv[0]
    else:
	file = __file__
    testdir = os.path.dirname(file) or os.curdir
    return testdir

def count(n, word):
    if n == 1:
	return "%d %s" % (n, word)
    else:
	return "%d %ss" % (n, word)

class Compare:
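    # A minimal file-like object substituted for sys.stdout when comparing a
    # test's output: each write() is checked against the corresponding slice
    # of the expected-output file, and any mismatch (or unread leftover data
    # at close()) raises test_support.TestFailed.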

    def __init__(self, filename):
	self.fp = open(filename, 'r')

    def write(self, data):
	expected = self.fp.read(len(data))
	if data <> expected:
	    raise test_support.TestFailed, \
		    'Writing: '+`data`+', expected: '+`expected`

    def flush(self):
	pass

    def close(self):
	leftover = self.fp.read()
	if leftover:
	    raise test_support.TestFailed, 'Unread: '+`leftover`
	self.fp.close()

    def isatty(self):
	return 0

if __name__ == '__main__':
    main()