-rw-r--r--   QMTest/README.txt                        |   55
-rw-r--r--   QMTest/TestCmd.py                        |  367
-rw-r--r--   QMTest/TestCmdTests.py                   | 3457
-rw-r--r--   QMTest/TestCommon.py                     |    4
-rw-r--r--   QMTest/TestCommonTests.py                | 2094
-rw-r--r--   src/engine/SCons/Scanner/LaTeX.py        |   28
-rw-r--r--   src/engine/SCons/Tool/tex.py             |   24
-rw-r--r--   src/test_strings.py                      |    2
-rw-r--r--   test/SConstruct.py                       |    7
-rw-r--r--   test/TEX/eps_graphics2.py                |    6
-rw-r--r--   test/TEX/multi-line_include_options.py   |   90
-rw-r--r--   test/option-n.py                         |    4

12 files changed, 6017 insertions, 121 deletions
diff --git a/QMTest/README.txt b/QMTest/README.txt new file mode 100644 index 0000000..992a45b --- /dev/null +++ b/QMTest/README.txt @@ -0,0 +1,55 @@ +This directory contains testing infrastructure. Note that not all of +the pieces here are local to SCons. + + README.txt + + What you're looking at right now. + + SConscript + + Configuration for our packaging build, to copy the necessary + parts of the infrastructure into a build directory. + + TestCmd.py + TestCmdTests.py + TestCommon.py + TestCommonTests.py + + The TestCmd infrastructure for testing external commands. + These are for generic command testing, are used by some + other projects, and are developed separately from SCons. + (They're developed by SK, but still...) + + We've captured the unit tests (Test*Tests.py) for these files + along with the actual modules themselves to make it a little + easier to hack on them for our purposes. Note, however, + that any SCons-specific functionality should be implemented + in one of the + + TestRuntest.py + + Test infrastructure for our runtest.py script. + + TestSCons.py + + Test infrastructure for SCons itself. + + TestSConsMSVS.py + + Test infrastructure for SCons' Visual Studio support. + + TestSCons_time.py + + Test infrastructure for the scons-time.py script. + + TestSConsign.py + + Test infrastructure for the sconsign.py script. + + classes.qmc + configuration + scons-tdb.py + + Pieces for the use of QMTest to test SCons. We're moving away + from this infrastructure, in no small part because we're not + really using it as originally envisioned. diff --git a/QMTest/TestCmd.py b/QMTest/TestCmd.py index 3fca4ec..2c90302 100644 --- a/QMTest/TestCmd.py +++ b/QMTest/TestCmd.py @@ -25,7 +25,11 @@ There are a bunch of keyword arguments available at instantiation: subdir = 'subdir', verbose = Boolean, match = default_match_function, - diff = default_diff_function, + match_stdout = default_match_stdout_function, + match_stderr = default_match_stderr_function, + diff = default_diff_stderr_function, + diff_stdout = default_diff_stdout_function, + diff_stderr = default_diff_stderr_function, combine = Boolean) There are a bunch of methods that let you do different things: @@ -109,8 +113,18 @@ There are a bunch of methods that let you do different things: test.diff(actual, expected) + test.diff_stderr(actual, expected) + + test.diff_stdout(actual, expected) + test.match(actual, expected) + test.match_stderr(actual, expected) + + test.match_stdout(actual, expected) + + test.set_match_function(match, stdout, stderr) + test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n") test.match_exact(["actual 1\n", "actual 2\n"], ["expected 1\n", "expected 2\n"]) @@ -159,8 +173,8 @@ or incorrect permissions). TestCmd.no_result(condition, function) TestCmd.no_result(condition, function, skip) -The TestCmd module also provides unbound functions that handle matching -in the same way as the match_*() methods described above. +The TestCmd module also provides unbound global functions that handle +matching in the same way as the match_*() methods described above. import TestCmd @@ -170,8 +184,28 @@ in the same way as the match_*() methods described above. 
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall) -The TestCmd module provides unbound functions that can be used for the -"diff" argument to TestCmd.TestCmd instantiation: +These functions are also available as static methods: + + import TestCmd + + test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_exact) + + test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_re) + + test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_re_dotall) + +These static methods can be accessed by a string naming the method: + + import TestCmd + + test = TestCmd.TestCmd(match = 'match_exact') + + test = TestCmd.TestCmd(match = 'match_re') + + test = TestCmd.TestCmd(match = 'match_re_dotall') + +The TestCmd module provides unbound global functions that can be used +for the "diff" argument to TestCmd.TestCmd instantiation: import TestCmd @@ -180,6 +214,35 @@ The TestCmd module provides unbound functions that can be used for the test = TestCmd.TestCmd(diff = TestCmd.simple_diff) + test = TestCmd.TestCmd(diff = TestCmd.context_diff) + + test = TestCmd.TestCmd(diff = TestCmd.unified_diff) + +These functions are also available as static methods: + + import TestCmd + + test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_re, + diff = TestCmd.TestCmd.diff_re) + + test = TestCmd.TestCmd(diff = TestCmd.TestCmd.simple_diff) + + test = TestCmd.TestCmd(diff = TestCmd.TestCmd.context_diff) + + test = TestCmd.TestCmd(diff = TestCmd.TestCmd.unified_diff) + +These static methods can be accessed by a string naming the method: + + import TestCmd + + test = TestCmd.TestCmd(match = 'match_re', diff = 'diff_re') + + test = TestCmd.TestCmd(diff = 'simple_diff') + + test = TestCmd.TestCmd(diff = 'context_diff') + + test = TestCmd.TestCmd(diff = 'unified_diff') + The "diff" argument can also be used with standard difflib functions: import difflib @@ -216,19 +279,27 @@ version. from __future__ import division __author__ = "Steven Knight <knight at baldmt dot com>" -__revision__ = "TestCmd.py 1.1.D002 2010/05/27 14:47:22 knight" -__version__ = "1.1" +__revision__ = "TestCmd.py 1.3.D001 2010/06/03 12:58:27 knight" +__version__ = "1.3" import atexit +import difflib import errno import os import re import shutil +import signal import stat import sys import tempfile +import threading import time import traceback +import types + +class null(object): + pass +_Null = null() try: from collections import UserList, UserString @@ -264,11 +335,6 @@ __all__ = [ 'TestCmd' ] -try: - import difflib -except ImportError: - __all__.append('simple_diff') - def is_List(e): return isinstance(e, (list, UserList)) @@ -435,35 +501,30 @@ def match_re_dotall(lines = None, res = None): raise re.error(msg % (repr(s), e.args[0])) return expr.match(lines) -try: - import difflib -except ImportError: - pass -else: - def simple_diff(a, b, fromfile='', tofile='', - fromfiledate='', tofiledate='', n=3, lineterm='\n'): - """ - A function with the same calling signature as difflib.context_diff - (diff -c) and difflib.unified_diff (diff -u) but which prints - output like the simple, unadorned 'diff" command. 
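Supplementing the docstring examples above, here is a sketch (not part of
the patch) of how the new per-stream keyword arguments and the string-named
lookup might be combined; the program name is hypothetical:

    import TestCmd

    # Sketch only: per-stream matching and diffing, as documented above.
    # String values are resolved to the static methods of the same name.
    test = TestCmd.TestCmd(program = 'myprog',           # hypothetical name
                           match_stdout = 'match_exact',
                           match_stderr = 'match_re',
                           diff_stdout = 'unified_diff',
                           diff_stderr = 'diff_re')
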
- """ - sm = difflib.SequenceMatcher(None, a, b) - def comma(x1, x2): - return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2) - result = [] - for op, a1, a2, b1, b2 in sm.get_opcodes(): - if op == 'delete': - result.append("%sd%d" % (comma(a1, a2), b1)) - result.extend([ '< ' + l for l in a[a1:a2] ]) - elif op == 'insert': - result.append("%da%s" % (a1, comma(b1, b2))) - result.extend([ '> ' + l for l in b[b1:b2] ]) - elif op == 'replace': - result.append("%sc%s" % (comma(a1, a2), comma(b1, b2))) - result.extend([ '< ' + l for l in a[a1:a2] ]) - result.append('---') - result.extend([ '> ' + l for l in b[b1:b2] ]) - return result +def simple_diff(a, b, fromfile='', tofile='', + fromfiledate='', tofiledate='', n=3, lineterm='\n'): + """ + A function with the same calling signature as difflib.context_diff + (diff -c) and difflib.unified_diff (diff -u) but which prints + output like the simple, unadorned 'diff" command. + """ + sm = difflib.SequenceMatcher(None, a, b) + def comma(x1, x2): + return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2) + result = [] + for op, a1, a2, b1, b2 in sm.get_opcodes(): + if op == 'delete': + result.append("%sd%d" % (comma(a1, a2), b1)) + result.extend([ '< ' + l for l in a[a1:a2] ]) + elif op == 'insert': + result.append("%da%s" % (a1, comma(b1, b2))) + result.extend([ '> ' + l for l in b[b1:b2] ]) + elif op == 'replace': + result.append("%sc%s" % (comma(a1, a2), comma(b1, b2))) + result.extend([ '< ' + l for l in a[a1:a2] ]) + result.append('---') + result.extend([ '> ' + l for l in b[b1:b2] ]) + return result def diff_re(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n'): @@ -576,7 +637,6 @@ except ImportError: # so we're going to cobble up something that looks just enough # like its API for our purposes below. 
import popen2 - import types subprocess = types.ModuleType('subprocess') subprocess.PIPE = 'PIPE' @@ -607,16 +667,31 @@ except ImportError: self.stderr.close() self.returncode = self.wait() return (out, err) + def terminate(self): + os.kill(self.pid, signal.SIGTERM) def wait(self, *args, **kw): resultcode = popen2.Popen3.wait(self, *args, **kw) - if os.WIFEXITED(resultcode): + if os.WIFSIGNALED(resultcode): + return (- os.WTERMSIG(resultcode)) + elif os.WIFEXITED(resultcode): return os.WEXITSTATUS(resultcode) - elif os.WIFSIGNALED(resultcode): - return os.WTERMSIG(resultcode) else: return None subprocess.Popen = Popen +else: + try: + subprocess.Popen.terminate + except AttributeError: + if sys.platform == 'win32': + import win32process + def terminate(self): + win32process.TerminateProcess(self._handle, 1) + else: + def terminate(self): + os.kill(self.pid, signal.SIGTERM) + method = types.MethodType(terminate, None, subprocess.Popen) + setattr(subprocess.Popen, 'terminate', method) @@ -790,9 +865,14 @@ class TestCmd(object): subdir = None, verbose = None, match = None, + match_stdout = None, + match_stderr = None, diff = None, + diff_stdout = None, + diff_stderr = None, combine = 0, - universal_newlines = 1): + universal_newlines = 1, + timeout = None): self._cwd = os.getcwd() self.description_set(description) self.program_set(program) @@ -805,21 +885,10 @@ class TestCmd(object): self.verbose_set(verbose) self.combine = combine self.universal_newlines = universal_newlines - if not match is None: - self.match_function = match - else: - self.match_function = match_re - if not diff is None: - self.diff_function = diff - else: - try: - difflib - except NameError: - pass - else: - self.diff_function = simple_diff - #self.diff_function = difflib.context_diff - #self.diff_function = difflib.unified_diff + self.process = None + self.set_timeout(timeout) + self.set_match_function(match, match_stdout, match_stderr) + self.set_diff_function(diff, diff_stdout, diff_stderr) self._dirlist = [] self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0} if 'PRESERVE' in os.environ and not os.environ['PRESERVE'] is '': @@ -941,21 +1010,55 @@ class TestCmd(object): """ self.description = description - try: - difflib - except NameError: - def diff(self, a, b, name, *args, **kw): - print self.banner('Expected %s' % name) - print a - print self.banner('Actual %s' % name) - print b - else: - def diff(self, a, b, name, *args, **kw): + def set_diff_function(self, diff=_Null, stdout=_Null, stderr=_Null): + """Sets the specified diff functions. + """ + if diff is not _Null: + self._diff_function = diff + if stdout is not _Null: + self._diff_stdout_function = stdout + if stderr is not _Null: + self._diff_stderr_function = stderr + + def diff(self, a, b, name=None, diff_function=None, *args, **kw): + if diff_function is None: + try: + diff_function = getattr(self, self._diff_function) + except TypeError: + diff_function = self._diff_function + if diff_function is None: + diff_function = self.simple_diff + if name is not None: print self.banner(name) - args = (a.splitlines(), b.splitlines()) + args - lines = self.diff_function(*args, **kw) - for l in lines: - print l + args = (a.splitlines(), b.splitlines()) + args + for line in diff_function(*args, **kw): + print line + + def diff_stderr(self, a, b, *args, **kw): + """Compare actual and expected file contents. 
+ """ + try: + diff_stderr_function = getattr(self, self._diff_stderr_function) + except TypeError: + diff_stderr_function = self._diff_stderr_function + return self.diff(a, b, diff_function=diff_stderr_function, *args, **kw) + + def diff_stdout(self, a, b, *args, **kw): + """Compare actual and expected file contents. + """ + try: + diff_stdout_function = getattr(self, self._diff_stdout_function) + except TypeError: + diff_stdout_function = self._diff_stdout_function + return self.diff(a, b, diff_function=diff_stdout_function, *args, **kw) + + simple_diff = staticmethod(simple_diff) + + diff_re = staticmethod(diff_re) + + context_diff = staticmethod(difflib.context_diff) + + unified_diff = staticmethod(difflib.unified_diff) def fail_test(self, condition = 1, function = None, skip = 0): """Cause the test to fail. @@ -974,25 +1077,57 @@ class TestCmd(object): """ self.interpreter = interpreter - def match(self, lines, matches): - """Compare actual and expected file contents. + def set_match_function(self, match=_Null, stdout=_Null, stderr=_Null): + """Sets the specified match functions. """ - return self.match_function(lines, matches) + if match is not _Null: + self._match_function = match + if stdout is not _Null: + self._match_stdout_function = stdout + if stderr is not _Null: + self._match_stderr_function = stderr - def match_exact(self, lines, matches): + def match(self, lines, matches): """Compare actual and expected file contents. """ - return match_exact(lines, matches) - - def match_re(self, lines, res): + try: + match_function = getattr(self, self._match_function) + except TypeError: + match_function = self._match_function + if match_function is None: + # Default is regular expression matches. + match_function = self.match_re + return match_function(lines, matches) + + def match_stderr(self, lines, matches): """Compare actual and expected file contents. """ - return match_re(lines, res) - - def match_re_dotall(self, lines, res): + try: + match_stderr_function = getattr(self, self._match_stderr_function) + except TypeError: + match_stderr_function = self._match_stderr_function + if match_stderr_function is None: + # Default is to use whatever match= is set to. + match_stderr_function = self.match + return match_stderr_function(lines, matches) + + def match_stdout(self, lines, matches): """Compare actual and expected file contents. """ - return match_re_dotall(lines, res) + try: + match_stdout_function = getattr(self, self._match_stdout_function) + except TypeError: + match_stdout_function = self._match_stdout_function + if match_stdout_function is None: + # Default is to use whatever match= is set to. + match_stdout_function = self.match + return match_stdout_function(lines, matches) + + match_exact = staticmethod(match_exact) + + match_re = staticmethod(match_re) + + match_re_dotall = staticmethod(match_re_dotall) def no_result(self, condition = 1, function = None, skip = 0): """Report that the test could not be run. @@ -1057,10 +1192,20 @@ class TestCmd(object): dir = self.canonicalize(dir) os.rmdir(dir) + def _timeout(self): + self.process.terminate() + self.timer.cancel() + self.timer = None + + def set_timeout(self, timeout): + self.timeout = timeout + self.timer = None + def start(self, program = None, interpreter = None, arguments = None, universal_newlines = None, + timeout = _Null, **kw): """ Starts a program or script for the test environment. 
@@ -1089,20 +1234,33 @@ class TestCmd(object): else: stderr_value = subprocess.PIPE - return Popen(cmd, - stdin=stdin, - stdout=subprocess.PIPE, - stderr=stderr_value, - universal_newlines=universal_newlines) - - def finish(self, popen, **kw): + if timeout is _Null: + timeout = self.timeout + if timeout: + self.timer = threading.Timer(float(timeout), self._timeout) + self.timer.start() + p = Popen(cmd, + stdin=stdin, + stdout=subprocess.PIPE, + stderr=stderr_value, + universal_newlines=universal_newlines) + self.process = p + return p + + def finish(self, popen=None, **kw): """ Finishes and waits for the process being run under control of the specified popen argument, recording the exit status, standard output and error output. """ + if popen is None: + popen = self.process stdout, stderr = popen.communicate() + if self.timer: + self.timer.cancel() + self.timer = None self.status = popen.returncode + self.process = None self._stdout.append(stdout or '') self._stderr.append(stderr or '') @@ -1111,7 +1269,8 @@ class TestCmd(object): arguments = None, chdir = None, stdin = None, - universal_newlines = None): + universal_newlines = None, + timeout = _Null): """Runs a test of the program or script for the test environment. Standard output and error output are saved for future retrieval via the stdout() and stderr() methods. @@ -1126,15 +1285,25 @@ class TestCmd(object): if self.verbose: sys.stderr.write("chdir(" + chdir + ")\n") os.chdir(chdir) - p = self.start(program, - interpreter, - arguments, - universal_newlines, - stdin=stdin) + p = self.start(program = program, + interpreter = interpreter, + arguments = arguments, + universal_newlines = universal_newlines, + timeout = timeout, + stdin = stdin) if is_List(stdin): stdin = ''.join(stdin) + # TODO(sgk): figure out how to re-use the logic in the .finish() + # method above. Just calling it from here causes problems with + # subclasses that redefine .finish(). We could abstract this + # into Yet Another common method called both here and by .finish(), + # but that seems ill-thought-out. stdout, stderr = p.communicate(input=stdin) + if self.timer: + self.timer.cancel() + self.timer = None self.status = p.returncode + self.process = None self._stdout.append(stdout or '') self._stderr.append(stderr or '') diff --git a/QMTest/TestCmdTests.py b/QMTest/TestCmdTests.py new file mode 100644 index 0000000..357413f --- /dev/null +++ b/QMTest/TestCmdTests.py @@ -0,0 +1,3457 @@ +#!/usr/bin/env python +""" +TestCmdTests.py: Unit tests for the TestCmd.py module. + +Copyright 2000-2010 Steven Knight +This module is free software, and you may redistribute it and/or modify +it under the same terms as Python itself, so long as this copyright message +and disclaimer are retained in their original form. + +IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF +THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, +AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, +SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
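A brief sketch (not part of the patch) of how the timeout plumbing added
above might be used; the script name is hypothetical:

    import TestCmd

    # Sketch only: timeout= sets a default for every run()/start(); when the
    # threading.Timer expires, the child process is terminate()d.
    test = TestCmd.TestCmd(program = 'slow_script',      # hypothetical name
                           interpreter = 'python',
                           workdir = '',
                           timeout = 10)
    test.run(timeout = 2)        # per-run override of the instance default
    # On POSIX a timed-out child receives SIGTERM, so test.status will
    # typically reflect the signal rather than a normal exit code.
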
+""" + +__author__ = "Steven Knight <knight at baldmt dot com>" +__revision__ = "TestCmdTests.py 1.3.D001 2010/06/03 12:58:27 knight" + +import os +import shutil +import signal +import stat +import StringIO +import sys +import tempfile +import time +import types +import unittest +import UserList + +# Strip the current directory so we get the right TestCmd.py module. +sys.path = sys.path[1:] + +import TestCmd + +def _is_readable(path): + # XXX this doesn't take into account UID, it assumes it's our file + return os.stat(path)[stat.ST_MODE] & stat.S_IREAD + +def _is_writable(path): + # XXX this doesn't take into account UID, it assumes it's our file + return os.stat(path)[stat.ST_MODE] & stat.S_IWRITE + +def _is_executable(path): + # XXX this doesn't take into account UID, it assumes it's our file + return os.stat(path)[stat.ST_MODE] & stat.S_IEXEC + +def _clear_dict(dict, *keys): + for key in keys: + try: + dict[key] = '' # del dict[key] + except KeyError: + pass + +try: + import subprocess +except ImportError: + # The subprocess module doesn't exist in this version of Python, + # so we're going to cobble up something that looks just enough + # like its API for our purposes below. + import popen2 + subprocess = types.ModuleType('subprocess') + + subprocess.PIPE = 'PIPE' + subprocess.STDOUT = 'STDOUT' + subprocess.mswindows = (sys.platform == 'win32') + + class Popen(popen2.Popen3, popen2.Popen4): + universal_newlines = 1 + def __init__(self, command, **kw): + if kw.get('stderr') == 'STDOUT': + popen2.Popen4.__init__(self, command, 1) + else: + popen2.Popen3.__init__(self, command, 1) + self.stdin = self.tochild + self.stdout = self.fromchild + self.stderr = self.childerr + def communicate(self, input=None): + if input: + self.stdin.write(input) + self.stdin.close() + out = self.stdout.read() + if self.stderr is None: + err = None + else: + err = self.stderr.read() + self.stdout.close() + if self.stderr is not None: + self.stderr.close() + self.returncode = self.wait() + return (out, err) + def terminate(self): + os.kill(self.pid, signal.SIGTERM) + def wait(self, *args, **kw): + resultcode = popen2.Popen3.wait(self, *args, **kw) + if os.WIFEXITED(resultcode): + return os.WEXITSTATUS(resultcode) + elif os.WIFSIGNALED(resultcode): + return os.WTERMSIG(resultcode) + else: + return None + + subprocess.Popen = Popen +else: + try: + subprocess.Popen.terminate + except AttributeError: + if sys.platform == 'win32': + import win32process + def terminate(self): + win32process.TerminateProcess(self._handle, 1) + else: + def terminate(self): + os.kill(self.pid, signal.SIGTERM) + method = types.MethodType(terminate, None, subprocess.Popen) + setattr(subprocess.Popen, 'terminate', method) + +class ExitError(Exception): + pass + +class TestCmdTestCase(unittest.TestCase): + """Base class for TestCmd test cases, with fixture and utility methods.""" + + def setUp(self): + self.orig_cwd = os.getcwd() + + def tearDown(self): + os.chdir(self.orig_cwd) + + def setup_run_scripts(self): + class T: + pass + + t = T() + + t.script = 'script' + t.scriptx = 'scriptx.bat' + t.script1 = 'script_1.txt' + t.scriptout = 'scriptout' + t.scripterr = 'scripterr' + fmt = "import os, sys; cwd = os.getcwd(); " + \ + "sys.stdout.write('%s: STDOUT: %%s: %%s\\n' %% (cwd, sys.argv[1:])); " + \ + "sys.stderr.write('%s: STDERR: %%s: %%s\\n' %% (cwd, sys.argv[1:]))" + fmtout = "import os, sys; cwd = os.getcwd(); " + \ + "sys.stdout.write('%s: STDOUT: %%s: %%s\\n' %% (cwd, sys.argv[1:]))" + fmterr = "import os, sys; cwd = os.getcwd(); " 
+ \ + "sys.stderr.write('%s: STDERR: %%s: %%s\\n' %% (cwd, sys.argv[1:]))" + text = fmt % (t.script, t.script) + textx = fmt % (t.scriptx, t.scriptx) + if sys.platform == 'win32': + textx = textx.replace('%', '%%') + textx = '@python -c "%s"' % textx + ' %1 %2 %3 %4 %5 %6 %7 %8 %9\n' + else: + textx = '#! /usr/bin/env python\n' + textx + '\n' + text1 = 'A first line to be ignored!\n' + fmt % (t.script1, t.script1) + textout = fmtout % (t.scriptout) + texterr = fmterr % (t.scripterr) + + run_env = TestCmd.TestCmd(workdir = '') + run_env.subdir('sub dir') + t.run_env = run_env + + t.sub_dir = run_env.workpath('sub dir') + t.script_path = run_env.workpath('sub dir', t.script) + t.scriptx_path = run_env.workpath('sub dir', t.scriptx) + t.script1_path = run_env.workpath('sub dir', t.script1) + t.scriptout_path = run_env.workpath('sub dir', t.scriptout) + t.scripterr_path = run_env.workpath('sub dir', t.scripterr) + + run_env.write(t.script_path, text) + run_env.write(t.scriptx_path, textx) + run_env.write(t.script1_path, text1) + run_env.write(t.scriptout_path, textout) + run_env.write(t.scripterr_path, texterr) + + os.chmod(t.script_path, 0644) # XXX UNIX-specific + os.chmod(t.scriptx_path, 0755) # XXX UNIX-specific + os.chmod(t.script1_path, 0644) # XXX UNIX-specific + os.chmod(t.scriptout_path, 0644) # XXX UNIX-specific + os.chmod(t.scripterr_path, 0644) # XXX UNIX-specific + + t.orig_cwd = os.getcwd() + + t.workdir = run_env.workpath('sub dir') + os.chdir(t.workdir) + + return t + + def translate_newlines(self, data): + data = data.replace("\r\n", "\n") + return data + + def call_python(self, input, python=None): + if python is None: + python = sys.executable + p = subprocess.Popen(python, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + stdout, stderr = p.communicate(input) + stdout = self.translate_newlines(stdout) + stderr = self.translate_newlines(stderr) + return stdout, stderr, p.returncode + + def popen_python(self, input, status=0, stdout="", stderr="", python=None): + if python is None: + python = sys.executable + _stdout, _stderr, _status = self.call_python(input, python) + _stdout = self.translate_newlines(_stdout) + _stderr = self.translate_newlines(_stderr) + assert _status == status, \ + "status = %s, expected %s\n" % (str(_status), str(status)) + \ + "STDOUT ===================\n" + _stdout + \ + "STDERR ===================\n" + _stderr + assert _stdout == stdout, \ + "Expected STDOUT ==========\n" + stdout + \ + "Actual STDOUT ============\n" + _stdout + \ + "STDERR ===================\n" + _stderr + assert _stderr == stderr, \ + "Expected STDERR ==========\n" + stderr + \ + "Actual STDERR ============\n" + _stderr + + def run_match(self, content, *args): + expect = "%s: %s: %s: %s\n" % args + content = self.translate_newlines(content) + assert content == expect, \ + "Expected %s ==========\n" % args[1] + expect + \ + "Actual %s ============\n" % args[1] + content + + + +class __init__TestCase(TestCmdTestCase): + def test_init(self): + """Test init()""" + test = TestCmd.TestCmd() + test = TestCmd.TestCmd(description = 'test') + test = TestCmd.TestCmd(description = 'test', program = 'foo') + test = TestCmd.TestCmd(description = 'test', + program = 'foo', + universal_newlines=None) + + + +class basename_TestCase(TestCmdTestCase): + def test_basename(self): + """Test basename() [XXX TO BE WRITTEN]""" + assert 1 == 1 + + + +class cleanup_TestCase(TestCmdTestCase): + def test_cleanup(self): + """Test cleanup()""" + test = TestCmd.TestCmd(workdir = 
'') + wdir = test.workdir + test.write('file1', "Test file #1\n") + test.cleanup() + assert not os.path.exists(wdir) + + def test_writable(self): + """Test cleanup() when the directory isn't writable""" + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + test.write('file2', "Test file #2\n") + os.chmod(test.workpath('file2'), 0400) + os.chmod(wdir, 0500) + test.cleanup() + assert not os.path.exists(wdir) + + def test_shutil(self): + """Test cleanup() when used with shutil""" + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + os.chdir(wdir) + + import shutil + save_rmtree = shutil.rmtree + def my_rmtree(dir, ignore_errors=0, wdir=wdir, _rmtree=save_rmtree): + assert os.getcwd() != wdir + return _rmtree(dir, ignore_errors=ignore_errors) + try: + shutil.rmtree = my_rmtree + test.cleanup() + finally: + shutil.rmtree = save_rmtree + + def test_atexit(self): + """Test cleanup() when atexit is used""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import atexit +def my_exitfunc(): + print "my_exitfunc()" +atexit.register(my_exitfunc) +import TestCmd +result = TestCmd.TestCmd(workdir = '') +sys.exit(0) +""" % self.orig_cwd, stdout='my_exitfunc()\n') + + def test_exitfunc(self): + """Test cleanup() when sys.exitfunc is set""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +def my_exitfunc(): + print "my_exitfunc()" +sys.exitfunc = my_exitfunc +import TestCmd +result = TestCmd.TestCmd(workdir = '') +sys.exit(0) +""" % self.orig_cwd, stdout='my_exitfunc()\n') + + + +class chmod_TestCase(TestCmdTestCase): + def test_chmod(self): + """Test chmod()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'sub') + + wdir_file1 = os.path.join(test.workdir, 'file1') + wdir_sub_file2 = os.path.join(test.workdir, 'sub', 'file2') + + open(wdir_file1, 'w').write("") + open(wdir_sub_file2, 'w').write("") + + if sys.platform == 'win32': + + test.chmod(wdir_file1, stat.S_IREAD) + test.chmod(['sub', 'file2'], stat.S_IWRITE) + + file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE]) + assert file1_mode == 0444, '0%o' % file1_mode + file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE]) + assert file2_mode == 0666, '0%o' % file2_mode + + test.chmod('file1', stat.S_IWRITE) + test.chmod(wdir_sub_file2, stat.S_IREAD) + + file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE]) + assert file1_mode == 0666, '0%o' % file1_mode + file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE]) + assert file2_mode == 0444, '0%o' % file2_mode + + else: + + test.chmod(wdir_file1, 0700) + test.chmod(['sub', 'file2'], 0760) + + file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE]) + assert file1_mode == 0700, '0%o' % file1_mode + file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE]) + assert file2_mode == 0760, '0%o' % file2_mode + + test.chmod('file1', 0765) + test.chmod(wdir_sub_file2, 0567) + + file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE]) + assert file1_mode == 0765, '0%o' % file1_mode + file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE]) + assert file2_mode == 0567, '0%o' % file2_mode + + + +class combine_TestCase(TestCmdTestCase): + def test_combine(self): + """Test combining stdout and stderr""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run1', """import sys +sys.stdout.write("run1 STDOUT %s\\n" % sys.argv[1:]) +sys.stdout.write("run1 STDOUT second line\\n") +sys.stderr.write("run1 STDERR %s\\n" % sys.argv[1:]) +sys.stderr.write("run1 STDERR second line\\n") +sys.stdout.write("run1 STDOUT 
third line\\n") +sys.stderr.write("run1 STDERR third line\\n") +""") + run_env.write('run2', """import sys +sys.stdout.write("run2 STDOUT %s\\n" % sys.argv[1:]) +sys.stdout.write("run2 STDOUT second line\\n") +sys.stderr.write("run2 STDERR %s\\n" % sys.argv[1:]) +sys.stderr.write("run2 STDERR second line\\n") +sys.stdout.write("run2 STDOUT third line\\n") +sys.stderr.write("run2 STDERR third line\\n") +""") + cwd = os.getcwd() + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + try: + test = TestCmd.TestCmd(interpreter = 'python', + workdir = '', + combine = 1) + try: + output = test.stdout() + except IndexError: + pass + else: + raise IndexError("got unexpected output:\n\t`%s'\n" % output) + + # The underlying system subprocess implementations can combine + # stdout and stderr in different orders, so we accomodate both. + + test.program_set('run1') + test.run(arguments = 'foo bar') + stdout_lines = """\ +run1 STDOUT ['foo', 'bar'] +run1 STDOUT second line +run1 STDOUT third line +""" + stderr_lines = """\ +run1 STDERR ['foo', 'bar'] +run1 STDERR second line +run1 STDERR third line +""" + foo_bar_expect = (stdout_lines + stderr_lines, + stderr_lines + stdout_lines) + + test.program_set('run2') + test.run(arguments = 'snafu') + stdout_lines = """\ +run2 STDOUT ['snafu'] +run2 STDOUT second line +run2 STDOUT third line +""" + stderr_lines = """\ +run2 STDERR ['snafu'] +run2 STDERR second line +run2 STDERR third line +""" + snafu_expect = (stdout_lines + stderr_lines, + stderr_lines + stdout_lines) + + # XXX SHOULD TEST ABSOLUTE NUMBER AS WELL + output = test.stdout() + output = self.translate_newlines(output) + assert output in snafu_expect, output + error = test.stderr() + assert error == '', error + + output = test.stdout(run = -1) + output = self.translate_newlines(output) + assert output in foo_bar_expect, output + error = test.stderr(-1) + assert error == '', error + finally: + os.chdir(cwd) + + + +class description_TestCase(TestCmdTestCase): + def test_description(self): + """Test description()""" + test = TestCmd.TestCmd() + assert test.description is None, 'initialized description?' 
+ test = TestCmd.TestCmd(description = 'test') + assert test.description == 'test', 'uninitialized description' + test.description_set('foo') + assert test.description == 'foo', 'did not set description' + + + +class diff_TestCase(TestCmdTestCase): + def test_diff_re(self): + """Test diff_re()""" + result = TestCmd.diff_re(["abcde"], ["abcde"]) + assert result == [], result + result = TestCmd.diff_re(["a.*e"], ["abcde"]) + assert result == [], result + result = TestCmd.diff_re(["a.*e"], ["xxx"]) + assert result == ['1c1', "< 'a.*e'", '---', "> 'xxx'"], result + + def test_diff_custom_function(self): + """Test diff() using a custom function""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def my_diff(a, b): + return [ + '*****', + a, + '*****', + b, + '*****', + ] +test = TestCmd.TestCmd(diff = my_diff) +test.diff("a\\nb1\\nc\\n", "a\\nb2\\nc\\n", "STDOUT") +sys.exit(0) +""" % self.orig_cwd, + stdout = """\ +STDOUT========================================================================== +***** +['a', 'b1', 'c'] +***** +['a', 'b2', 'c'] +***** +""") + + def test_diff_string(self): + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff = 'diff_re') +test.diff("a\\nb1\\nc\\n", "a\\nb2\\nc\\n", 'STDOUT') +sys.exit(0) +""" % self.orig_cwd, + stdout = """\ +STDOUT========================================================================== +2c2 +< 'b1' +--- +> 'b2' +""") + + def test_error(self): + """Test handling a compilation error in TestCmd.diff_re()""" + script_input = """import sys +sys.path = ['%s'] + sys.path +import TestCmd +assert TestCmd.diff_re(["a.*(e"], ["abcde"]) +sys.exit(0) +""" % self.orig_cwd + stdout, stderr, status = self.call_python(script_input) + assert status == 1, status + expect1 = "Regular expression error in '^a.*(e$': missing )\n" + expect2 = "Regular expression error in '^a.*(e$': unbalanced parenthesis\n" + assert (stderr.find(expect1) != -1 or + stderr.find(expect2) != -1), repr(stderr) + + def test_simple_diff_static_method(self): + """Test calling the TestCmd.TestCmd.simple_diff() static method""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +result = TestCmd.TestCmd.simple_diff(['a', 'b', 'c', 'e', 'f1'], + ['a', 'c', 'd', 'e', 'f2']) +expect = ['2d1', '< b', '3a3', '> d', '5c5', '< f1', '---', '> f2'] +assert result == expect, result +sys.exit(0) +""" % self.orig_cwd) + + def test_context_diff_static_method(self): + """Test calling the TestCmd.TestCmd.context_diff() static method""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +result = TestCmd.TestCmd.context_diff(['a\\n', 'b\\n', 'c\\n', 'e\\n', 'f1\\n'], + ['a\\n', 'c\\n', 'd\\n', 'e\\n', 'f2\\n']) +result = list(result) +expect = [ + '*** \\n', + '--- \\n', + '***************\\n', + '*** 1,5 ****\\n', + ' a\\n', + '- b\\n', + ' c\\n', + ' e\\n', + '! f1\\n', + '--- 1,5 ----\\n', + ' a\\n', + ' c\\n', + '+ d\\n', + ' e\\n', + '! 
f2\\n', +] +assert result == expect, result +sys.exit(0) +""" % self.orig_cwd) + + def test_unified_diff_static_method(self): + """Test calling the TestCmd.TestCmd.unified_diff() static method""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +result = TestCmd.TestCmd.unified_diff(['a\\n', 'b\\n', 'c\\n', 'e\\n', 'f1\\n'], + ['a\\n', 'c\\n', 'd\\n', 'e\\n', 'f2\\n']) +result = list(result) +expect = [ + '--- \\n', + '+++ \\n', + '@@ -1,5 +1,5 @@\\n', + ' a\\n', + '-b\\n', + ' c\\n', + '+d\\n', + ' e\\n', + '-f1\\n', + '+f2\\n' +] +assert result == expect, result +sys.exit(0) +""" % self.orig_cwd) + + def test_diff_re_static_method(self): + """Test calling the TestCmd.TestCmd.diff_re() static method""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +result = TestCmd.TestCmd.diff_re(['a', 'b', 'c', '.', 'f1'], + ['a', 'c', 'd', 'e', 'f2']) +expect = [ + '2c2', + "< 'b'", + '---', + "> 'c'", + '3c3', + "< 'c'", + '---', + "> 'd'", + '5c5', + "< 'f1'", + '---', + "> 'f2'" +] +assert result == expect, result +sys.exit(0) +""" % self.orig_cwd) + + + +class diff_stderr_TestCase(TestCmdTestCase): + def test_diff_stderr_default(self): + """Test diff_stderr() default behavior""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd() +test.diff_stderr('a\nb1\nc\n', 'a\nb2\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +2c2 +< b1 +--- +> b2 +""") + + def test_diff_stderr_not_affecting_diff_stdout(self): + """Test diff_stderr() not affecting diff_stdout() behavior""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stderr='diff_re') +print "diff_stderr:" +test.diff_stderr('a\nb.\nc\n', 'a\nbb\nc\n') +print "diff_stdout:" +test.diff_stdout('a\nb.\nc\n', 'a\nbb\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +diff_stderr: +diff_stdout: +2c2 +< b. 
+--- +> bb +""") + + def test_diff_stderr_custom_function(self): + """Test diff_stderr() using a custom function""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def my_diff(a, b): + return ["a:"] + a + ["b:"] + b +test = TestCmd.TestCmd(diff_stderr=my_diff) +test.diff_stderr('abc', 'def') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +a: +abc +b: +def +""") + + def test_diff_stderr_TestCmd_function(self): + """Test diff_stderr() using a TestCmd function""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stderr = TestCmd.diff_re) +test.diff_stderr('a\n.\n', 'b\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +1c1 +< 'a' +--- +> 'b' +""") + + def test_diff_stderr_static_method(self): + """Test diff_stderr() using a static method""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stderr=TestCmd.TestCmd.diff_re) +test.diff_stderr('a\n.\n', 'b\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +1c1 +< 'a' +--- +> 'b' +""") + + def test_diff_stderr_string(self): + """Test diff_stderr() using a string to fetch the diff method""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stderr='diff_re') +test.diff_stderr('a\n.\n', 'b\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +1c1 +< 'a' +--- +> 'b' +""") + + + +class diff_stdout_TestCase(TestCmdTestCase): + def test_diff_stdout_default(self): + """Test diff_stdout() default behavior""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd() +test.diff_stdout('a\nb1\nc\n', 'a\nb2\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +2c2 +< b1 +--- +> b2 +""") + + def test_diff_stdout_not_affecting_diff_stderr(self): + """Test diff_stdout() not affecting diff_stderr() behavior""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stdout='diff_re') +print "diff_stdout:" +test.diff_stdout('a\nb.\nc\n', 'a\nbb\nc\n') +print "diff_stderr:" +test.diff_stderr('a\nb.\nc\n', 'a\nbb\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +diff_stdout: +diff_stderr: +2c2 +< b. 
+--- +> bb +""") + + def test_diff_stdout_custom_function(self): + """Test diff_stdout() using a custom function""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def my_diff(a, b): + return ["a:"] + a + ["b:"] + b +test = TestCmd.TestCmd(diff_stdout=my_diff) +test.diff_stdout('abc', 'def') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +a: +abc +b: +def +""") + + def test_diff_stdout_TestCmd_function(self): + """Test diff_stdout() using a TestCmd function""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stdout = TestCmd.diff_re) +test.diff_stdout('a\n.\n', 'b\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +1c1 +< 'a' +--- +> 'b' +""") + + def test_diff_stdout_static_method(self): + """Test diff_stdout() using a static method""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stdout=TestCmd.TestCmd.diff_re) +test.diff_stdout('a\n.\n', 'b\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +1c1 +< 'a' +--- +> 'b' +""") + + def test_diff_stdout_string(self): + """Test diff_stdout() using a string to fetch the diff method""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(diff_stdout='diff_re') +test.diff_stdout('a\n.\n', 'b\nc\n') +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +1c1 +< 'a' +--- +> 'b' +""") + + + +class exit_TestCase(TestCmdTestCase): + def test_exit(self): + """Test exit()""" + def _test_it(cwd, tempdir, condition, preserved): + close_true = {'pass_test': 1, 'fail_test': 0, 'no_result': 0} + exit_status = {'pass_test': 0, 'fail_test': 1, 'no_result': 2} + result_string = {'pass_test': "PASSED\n", + 'fail_test': "FAILED test at line 5 of <stdin>\n", + 'no_result': "NO RESULT for test at line 5 of <stdin>\n"} + global ExitError + input = """import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(workdir = '%s') +test.%s() +""" % (cwd, tempdir, condition) + stdout, stderr, status = self.call_python(input, python="python") + if close_true[condition]: + unexpected = (status != 0) + else: + unexpected = (status == 0) + if unexpected: + msg = "Unexpected exit status from python: %s\n" + raise ExitError(msg % status + stdout + stderr) + if status != exit_status[condition]: + msg = "Expected exit status %d, got %d\n" + raise ExitError(msg % (exit_status[condition], status)) + if stderr != result_string[condition]: + msg = "Expected error output:\n%sGot error output:\n%s" + raise ExitError(msg % (result_string[condition], stderr)) + if preserved: + if not os.path.exists(tempdir): + msg = "Working directory %s was mistakenly removed\n" + raise ExitError(msg % tempdir + stdout) + else: + if os.path.exists(tempdir): + msg = "Working directory %s was mistakenly preserved\n" + raise ExitError(msg % tempdir + stdout) + + run_env = TestCmd.TestCmd(workdir = '') + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. 
+ try: + cwd = self.orig_cwd + _clear_dict(os.environ, 'PRESERVE', 'PRESERVE_PASS', 'PRESERVE_FAIL', 'PRESERVE_NO_RESULT') + _test_it(cwd, 'dir01', 'pass_test', 0) + _test_it(cwd, 'dir02', 'fail_test', 0) + _test_it(cwd, 'dir03', 'no_result', 0) + os.environ['PRESERVE'] = '1' + _test_it(cwd, 'dir04', 'pass_test', 1) + _test_it(cwd, 'dir05', 'fail_test', 1) + _test_it(cwd, 'dir06', 'no_result', 1) + os.environ['PRESERVE'] = '' # del os.environ['PRESERVE'] + os.environ['PRESERVE_PASS'] = '1' + _test_it(cwd, 'dir07', 'pass_test', 1) + _test_it(cwd, 'dir08', 'fail_test', 0) + _test_it(cwd, 'dir09', 'no_result', 0) + os.environ['PRESERVE_PASS'] = '' # del os.environ['PRESERVE_PASS'] + os.environ['PRESERVE_FAIL'] = '1' + _test_it(cwd, 'dir10', 'pass_test', 0) + _test_it(cwd, 'dir11', 'fail_test', 1) + _test_it(cwd, 'dir12', 'no_result', 0) + os.environ['PRESERVE_FAIL'] = '' # del os.environ['PRESERVE_FAIL'] + os.environ['PRESERVE_NO_RESULT'] = '1' + _test_it(cwd, 'dir13', 'pass_test', 0) + _test_it(cwd, 'dir14', 'fail_test', 0) + _test_it(cwd, 'dir15', 'no_result', 1) + os.environ['PRESERVE_NO_RESULT'] = '' # del os.environ['PRESERVE_NO_RESULT'] + finally: + _clear_dict(os.environ, 'PRESERVE', 'PRESERVE_PASS', 'PRESERVE_FAIL', 'PRESERVE_NO_RESULT') + + + +class fail_test_TestCase(TestCmdTestCase): + def test_fail_test(self): + """Test fail_test()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run', """import sys +sys.stdout.write("run: STDOUT\\n") +sys.stderr.write("run: STDERR\\n") +""") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +TestCmd.fail_test(condition = 1) +""" % self.orig_cwd, status = 1, stderr = "FAILED test at line 4 of <stdin>\n") + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') +test.run() +test.fail_test(condition = (test.status == 0)) +""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s\n\tat line 6 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', description = 'xyzzy', workdir = '') +test.run() +test.fail_test(condition = (test.status == 0)) +""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s [xyzzy]\n\tat line 6 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') +test.run() +def xxx(): + sys.stderr.write("printed on failure\\n") +test.fail_test(condition = (test.status == 0), function = xxx) +""" % self.orig_cwd, status = 1, stderr = "printed on failure\nFAILED test of %s\n\tat line 8 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def test1(self): + self.run() + self.fail_test(condition = (self.status == 0)) +def test2(self): + test1(self) +test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')) +""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s\n\tat line 6 of <stdin> (test1)\n\tfrom line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def test1(self): + self.run() + 
self.fail_test(condition = (self.status == 0), skip = 1) +def test2(self): + test1(self) +test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')) +""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s\n\tat line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run')) + + + +class interpreter_TestCase(TestCmdTestCase): + def test_interpreter(self): + """Test interpreter()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run', """import sys +sys.stdout.write("run: STDOUT\\n") +sys.stderr.write("run: STDERR\\n") +""") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + test = TestCmd.TestCmd(program = 'run', workdir = '') + test.interpreter_set('foo') + assert test.interpreter == 'foo', 'did not set interpreter' + test.interpreter_set('python') + assert test.interpreter == 'python', 'did not set interpreter' + test.run() + + + +class match_TestCase(TestCmdTestCase): + def test_match_default(self): + """Test match() default behavior""" + test = TestCmd.TestCmd() + assert test.match("abcde\n", "a.*e\n") + assert test.match("12345\nabcde\n", "1\\d+5\na.*e\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match(lines, regexes) + + def test_match_custom_function(self): + """Test match() using a custom function""" + def match_length(lines, matches): + return len(lines) == len(matches) + test = TestCmd.TestCmd(match=match_length) + assert not test.match("123\n", "1\n") + assert test.match("123\n", "111\n") + assert not test.match("123\n123\n", "1\n1\n") + assert test.match("123\n123\n", "111\n111\n") + lines = ["123\n", "123\n"] + regexes = ["1\n", "1\n"] + assert test.match(lines, regexes) # due to equal numbers of lines + + def test_match_TestCmd_function(self): + """Test match() using a TestCmd function""" + test = TestCmd.TestCmd(match = TestCmd.match_exact) + assert not test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert not test.match("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match(lines, regexes) + assert test.match(lines, lines) + + def test_match_static_method(self): + """Test match() using a static method""" + test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_exact) + assert not test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert not test.match("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match(lines, regexes) + assert test.match(lines, lines) + + def test_match_string(self): + """Test match() using a string to fetch the match method""" + test = TestCmd.TestCmd(match='match_exact') + assert not test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert not test.match("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match(lines, regexes) + assert test.match(lines, lines) + + + +class match_exact_TestCase(TestCmdTestCase): + def test_match_exact_function(self): + """Test calling the TestCmd.match_exact() function""" + assert not TestCmd.match_exact("abcde\\n", "a.*e\\n") + assert TestCmd.match_exact("abcde\\n", "abcde\\n") + + def 
test_match_exact_instance_method(self): + """Test calling the TestCmd.TestCmd().match_exact() instance method""" + test = TestCmd.TestCmd() + assert not test.match_exact("abcde\\n", "a.*e\\n") + assert test.match_exact("abcde\\n", "abcde\\n") + + def test_match_exact_static_method(self): + """Test calling the TestCmd.TestCmd.match_exact() static method""" + assert not TestCmd.TestCmd.match_exact("abcde\\n", "a.*e\\n") + assert TestCmd.TestCmd.match_exact("abcde\\n", "abcde\\n") + + def test_evaluation(self): + """Test match_exact() evaluation""" + test = TestCmd.TestCmd() + assert not test.match_exact("abcde\n", "a.*e\n") + assert test.match_exact("abcde\n", "abcde\n") + assert not test.match_exact(["12345\n", "abcde\n"], ["1[0-9]*5\n", "a.*e\n"]) + assert test.match_exact(["12345\n", "abcde\n"], ["12345\n", "abcde\n"]) + assert not test.match_exact(UserList.UserList(["12345\n", "abcde\n"]), + ["1[0-9]*5\n", "a.*e\n"]) + assert test.match_exact(UserList.UserList(["12345\n", "abcde\n"]), + ["12345\n", "abcde\n"]) + assert not test.match_exact(["12345\n", "abcde\n"], + UserList.UserList(["1[0-9]*5\n", "a.*e\n"])) + assert test.match_exact(["12345\n", "abcde\n"], + UserList.UserList(["12345\n", "abcde\n"])) + assert not test.match_exact("12345\nabcde\n", "1[0-9]*5\na.*e\n") + assert test.match_exact("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_exact(lines, regexes) + assert test.match_exact(lines, lines) + + + +class match_re_dotall_TestCase(TestCmdTestCase): + def test_match_re_dotall_function(self): + """Test calling the TestCmd.match_re_dotall() function""" + assert TestCmd.match_re_dotall("abcde\nfghij\n", "a.*j\n") + + def test_match_re_dotall_instance_method(self): + """Test calling the TestCmd.TestCmd().match_re_dotall() instance method""" + test = TestCmd.TestCmd() + test.match_re_dotall("abcde\\nfghij\\n", "a.*j\\n") + + def test_match_re_dotall_static_method(self): + """Test calling the TestCmd.TestCmd.match_re_dotall() static method""" + assert TestCmd.TestCmd.match_re_dotall("abcde\nfghij\n", "a.*j\n") + + def test_error(self): + """Test handling a compilation error in TestCmd.match_re_dotall()""" + run_env = TestCmd.TestCmd(workdir = '') + cwd = os.getcwd() + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. 
+ try: + script_input = """import sys +sys.path = ['%s'] + sys.path +import TestCmd +assert TestCmd.match_re_dotall("abcde", "a.*(e") +sys.exit(0) +""" % cwd + stdout, stderr, status = self.call_python(script_input) + assert status == 1, status + expect1 = "Regular expression error in '^a.*(e$': missing )\n" + expect2 = "Regular expression error in '^a.*(e$': unbalanced parenthesis\n" + assert (stderr.find(expect1) != -1 or + stderr.find(expect2) != -1), repr(stderr) + finally: + os.chdir(cwd) + + def test_evaluation(self): + """Test match_re_dotall() evaluation""" + test = TestCmd.TestCmd() + assert test.match_re_dotall("abcde\nfghij\n", "a.*e\nf.*j\n") + assert test.match_re_dotall("abcde\nfghij\n", "a[^j]*j\n") + assert test.match_re_dotall("abcde\nfghij\n", "abcde\nfghij\n") + assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"], + ["1[0-9]*5\n", "a.*e\n", "f.*j\n"]) + assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"], + ["1.*j\n"]) + assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"], + ["12345\n", "abcde\n", "fghij\n"]) + assert test.match_re_dotall(UserList.UserList(["12345\n", + "abcde\n", + "fghij\n"]), + ["1[0-9]*5\n", "a.*e\n", "f.*j\n"]) + assert test.match_re_dotall(UserList.UserList(["12345\n", + "abcde\n", + "fghij\n"]), + ["1.*j\n"]) + assert test.match_re_dotall(UserList.UserList(["12345\n", + "abcde\n", + "fghij\n"]), + ["12345\n", "abcde\n", "fghij\n"]) + assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"], + UserList.UserList(["1[0-9]*5\n", + "a.*e\n", + "f.*j\n"])) + assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"], + UserList.UserList(["1.*j\n"])) + assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"], + UserList.UserList(["12345\n", + "abcde\n", + "fghij\n"])) + assert test.match_re_dotall("12345\nabcde\nfghij\n", + "1[0-9]*5\na.*e\nf.*j\n") + assert test.match_re_dotall("12345\nabcde\nfghij\n", "1.*j\n") + assert test.match_re_dotall("12345\nabcde\nfghij\n", + "12345\nabcde\nfghij\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match_re_dotall(lines, regexes) + assert test.match_re_dotall(lines, lines) + + + +class match_re_TestCase(TestCmdTestCase): + def test_match_re_function(self): + """Test calling the TestCmd.match_re() function""" + assert TestCmd.match_re("abcde\n", "a.*e\n") + + def test_match_re_instance_method(self): + """Test calling the TestCmd.TestCmd().match_re() instance method""" + test = TestCmd.TestCmd() + assert test.match_re("abcde\n", "a.*e\n") + + def test_match_re_static_method(self): + """Test calling the TestCmd.TestCmd.match_re() static method""" + assert TestCmd.TestCmd.match_re("abcde\n", "a.*e\n") + + def test_error(self): + """Test handling a compilation error in TestCmd.match_re()""" + run_env = TestCmd.TestCmd(workdir = '') + cwd = os.getcwd() + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. 
+ try: + script_input = """import sys +sys.path = ['%s'] + sys.path +import TestCmd +assert TestCmd.match_re("abcde\\n", "a.*(e\\n") +sys.exit(0) +""" % cwd + stdout, stderr, status = self.call_python(script_input) + assert status == 1, status + expect1 = "Regular expression error in '^a.*(e$': missing )\n" + expect2 = "Regular expression error in '^a.*(e$': unbalanced parenthesis\n" + assert (stderr.find(expect1) != -1 or + stderr.find(expect2) != -1), repr(stderr) + finally: + os.chdir(cwd) + + def test_evaluation(self): + """Test match_re() evaluation""" + test = TestCmd.TestCmd() + assert test.match_re("abcde\n", "a.*e\n") + assert test.match_re("abcde\n", "abcde\n") + assert test.match_re(["12345\n", "abcde\n"], ["1[0-9]*5\n", "a.*e\n"]) + assert test.match_re(["12345\n", "abcde\n"], ["12345\n", "abcde\n"]) + assert test.match_re(UserList.UserList(["12345\n", "abcde\n"]), + ["1[0-9]*5\n", "a.*e\n"]) + assert test.match_re(UserList.UserList(["12345\n", "abcde\n"]), + ["12345\n", "abcde\n"]) + assert test.match_re(["12345\n", "abcde\n"], + UserList.UserList(["1[0-9]*5\n", "a.*e\n"])) + assert test.match_re(["12345\n", "abcde\n"], + UserList.UserList(["12345\n", "abcde\n"])) + assert test.match_re("12345\nabcde\n", "1[0-9]*5\na.*e\n") + assert test.match_re("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match_re(lines, regexes) + assert test.match_re(lines, lines) + + + +class match_stderr_TestCase(TestCmdTestCase): + def test_match_stderr_default(self): + """Test match_stderr() default behavior""" + test = TestCmd.TestCmd() + assert test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match_stderr(lines, regexes) + + def test_match_stderr_not_affecting_match_stdout(self): + """Test match_stderr() not affecting match_stdout() behavior""" + test = TestCmd.TestCmd(match_stderr=TestCmd.TestCmd.match_exact) + + assert not test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("abcde\n", "abcde\n") + assert not test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n") + assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stderr(lines, regexes) + assert test.match_stderr(lines, lines) + + assert test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match_stdout(lines, regexes) + + def test_match_stderr_custom_function(self): + """Test match_stderr() using a custom function""" + def match_length(lines, matches): + return len(lines) == len(matches) + test = TestCmd.TestCmd(match_stderr=match_length) + assert not test.match_stderr("123\n", "1\n") + assert test.match_stderr("123\n", "111\n") + assert not test.match_stderr("123\n123\n", "1\n1\n") + assert test.match_stderr("123\n123\n", "111\n111\n") + lines = ["123\n", "123\n"] + regexes = ["1\n", "1\n"] + assert test.match_stderr(lines, regexes) # equal numbers of lines + + def test_match_stderr_TestCmd_function(self): + """Test match_stderr() using a TestCmd function""" + test = TestCmd.TestCmd(match_stderr = TestCmd.match_exact) + assert not test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("abcde\n", "abcde\n") + assert not test.match_stderr("12345\nabcde\n", 
"1\d+5\na.*e\n") + assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stderr(lines, regexes) + assert test.match_stderr(lines, lines) + + def test_match_stderr_static_method(self): + """Test match_stderr() using a static method""" + test = TestCmd.TestCmd(match_stderr=TestCmd.TestCmd.match_exact) + assert not test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("abcde\n", "abcde\n") + assert not test.match_stderr("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stderr(lines, regexes) + assert test.match_stderr(lines, lines) + + def test_match_stderr_string(self): + """Test match_stderr() using a string to fetch the match method""" + test = TestCmd.TestCmd(match_stderr='match_exact') + assert not test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("abcde\n", "abcde\n") + assert not test.match_stderr("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stderr(lines, regexes) + assert test.match_stderr(lines, lines) + + + +class match_stdout_TestCase(TestCmdTestCase): + def test_match_stdout_default(self): + """Test match_stdout() default behavior""" + test = TestCmd.TestCmd() + assert test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match_stdout(lines, regexes) + + def test_match_stdout_not_affecting_match_stderr(self): + """Test match_stdout() not affecting match_stderr() behavior""" + test = TestCmd.TestCmd(match_stdout=TestCmd.TestCmd.match_exact) + + assert not test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("abcde\n", "abcde\n") + assert not test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n") + assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stdout(lines, regexes) + assert test.match_stdout(lines, lines) + + assert test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert test.match_stderr(lines, regexes) + + def test_match_stdout_custom_function(self): + """Test match_stdout() using a custom function""" + def match_length(lines, matches): + return len(lines) == len(matches) + test = TestCmd.TestCmd(match_stdout=match_length) + assert not test.match_stdout("123\n", "1\n") + assert test.match_stdout("123\n", "111\n") + assert not test.match_stdout("123\n123\n", "1\n1\n") + assert test.match_stdout("123\n123\n", "111\n111\n") + lines = ["123\n", "123\n"] + regexes = ["1\n", "1\n"] + assert test.match_stdout(lines, regexes) # equal numbers of lines + + def test_match_stdout_TestCmd_function(self): + """Test match_stdout() using a TestCmd function""" + test = TestCmd.TestCmd(match_stdout = TestCmd.match_exact) + assert not test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("abcde\n", "abcde\n") + assert not test.match_stdout("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = 
["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stdout(lines, regexes) + assert test.match_stdout(lines, lines) + + def test_match_stdout_static_method(self): + """Test match_stdout() using a static method""" + test = TestCmd.TestCmd(match_stdout=TestCmd.TestCmd.match_exact) + assert not test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("abcde\n", "abcde\n") + assert not test.match_stdout("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stdout(lines, regexes) + assert test.match_stdout(lines, lines) + + def test_match_stdout_string(self): + """Test match_stdout() using a string to fetch the match method""" + test = TestCmd.TestCmd(match_stdout='match_exact') + assert not test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("abcde\n", "abcde\n") + assert not test.match_stdout("12345\nabcde\n", "1\d+5\na.*e\n") + assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n") + lines = ["vwxyz\n", "67890\n"] + regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"] + assert not test.match_stdout(lines, regexes) + assert test.match_stdout(lines, lines) + + + +class no_result_TestCase(TestCmdTestCase): + def test_no_result(self): + """Test no_result()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run', """import sys +sys.stdout.write("run: STDOUT\\n") +sys.stderr.write("run: STDERR\\n") +""") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +TestCmd.no_result(condition = 1) +""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test at line 4 of <stdin>\n") + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') +test.run() +test.no_result(condition = (test.status == 0)) +""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s\n\tat line 6 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', description = 'xyzzy', workdir = '') +test.run() +test.no_result(condition = (test.status == 0)) +""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s [xyzzy]\n\tat line 6 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') +test.run() +def xxx(): + sys.stderr.write("printed on no result\\n") +test.no_result(condition = (test.status == 0), function = xxx) +""" % self.orig_cwd, status = 2, stderr = "printed on no result\nNO RESULT for test of %s\n\tat line 8 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def test1(self): + self.run() + self.no_result(condition = (self.status == 0)) +def test2(self): + test1(self) +test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')) +""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s\n\tat line 6 of <stdin> (test1)\n\tfrom line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run')) + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +def test1(self): + self.run() + 
self.no_result(condition = (self.status == 0), skip = 1) +def test2(self): + test1(self) +test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')) +""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s\n\tat line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run')) + + + +class pass_test_TestCase(TestCmdTestCase): + def test_pass_test(self): + """Test pass_test()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run', """import sys +sys.stdout.write("run: STDOUT\\n") +sys.stderr.write("run: STDERR\\n") +""") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +TestCmd.pass_test(condition = 1) +""" % self.orig_cwd, stderr = "PASSED\n") + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') +test.run() +test.pass_test(condition = (test.status == 0)) +""" % self.orig_cwd, stderr = "PASSED\n") + + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') +test.run() +def brag(): + sys.stderr.write("printed on success\\n") +test.pass_test(condition = (test.status == 0), function = brag) +""" % self.orig_cwd, stderr = "printed on success\nPASSED\n") + + # TODO(sgk): SHOULD ALSO TEST FAILURE CONDITIONS + + + +class preserve_TestCase(TestCmdTestCase): + def test_preserve(self): + """Test preserve()""" + def cleanup_test(test, cond=None, stdout=""): + io = StringIO.StringIO() + save = sys.stdout + sys.stdout = io + try: + if cond: + test.cleanup(cond) + else: + test.cleanup() + o = io.getvalue() + assert o == stdout, "o = `%s', stdout = `%s'" % (o, stdout) + finally: + sys.stdout = save + + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + try: + test.write('file1', "Test file #1\n") + #test.cleanup() + cleanup_test(test, ) + assert not os.path.exists(wdir) + finally: + if os.path.exists(wdir): + shutil.rmtree(wdir, ignore_errors = 1) + test._dirlist.remove(wdir) + + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + try: + test.write('file2', "Test file #2\n") + test.preserve('pass_test') + cleanup_test(test, 'pass_test', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + cleanup_test(test, 'fail_test') + assert not os.path.exists(wdir) + finally: + if os.path.exists(wdir): + shutil.rmtree(wdir, ignore_errors = 1) + test._dirlist.remove(wdir) + + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + try: + test.write('file3', "Test file #3\n") + test.preserve('fail_test') + cleanup_test(test, 'fail_test', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + cleanup_test(test, 'pass_test') + assert not os.path.exists(wdir) + finally: + if os.path.exists(wdir): + shutil.rmtree(wdir, ignore_errors = 1) + test._dirlist.remove(wdir) + + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + try: + test.write('file4', "Test file #4\n") + test.preserve('fail_test', 'no_result') + cleanup_test(test, 'fail_test', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + cleanup_test(test, 'no_result', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + cleanup_test(test, 'pass_test') + assert not os.path.exists(wdir) + finally: + if os.path.exists(wdir): + shutil.rmtree(wdir, ignore_errors = 1) + 
test._dirlist.remove(wdir) + + test = TestCmd.TestCmd(workdir = '') + wdir = test.workdir + try: + test.preserve() + cleanup_test(test, 'pass_test', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + cleanup_test(test, 'fail_test', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + cleanup_test(test, 'no_result', "Preserved directory %s\n" % wdir) + assert os.path.isdir(wdir) + finally: + if os.path.exists(wdir): + shutil.rmtree(wdir, ignore_errors = 1) + test._dirlist.remove(wdir) + + + +class program_TestCase(TestCmdTestCase): + def test_program(self): + """Test program()""" + test = TestCmd.TestCmd() + assert test.program is None, 'initialized program?' + test = TestCmd.TestCmd(program = 'test') + assert test.program == os.path.join(os.getcwd(), 'test'), 'uninitialized program' + test.program_set('foo') + assert test.program == os.path.join(os.getcwd(), 'foo'), 'did not set program' + + + +class read_TestCase(TestCmdTestCase): + def test_read(self): + """Test read()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + wdir_file1 = os.path.join(test.workdir, 'file1') + wdir_file2 = os.path.join(test.workdir, 'file2') + wdir_foo_file3 = os.path.join(test.workdir, 'foo', 'file3') + wdir_file4 = os.path.join(test.workdir, 'file4') + wdir_file5 = os.path.join(test.workdir, 'file5') + + open(wdir_file1, 'wb').write("") + open(wdir_file2, 'wb').write("Test\nfile\n#2.\n") + open(wdir_foo_file3, 'wb').write("Test\nfile\n#3.\n") + open(wdir_file4, 'wb').write("Test\nfile\n#4.\n") + open(wdir_file5, 'wb').write("Test\r\nfile\r\n#5.\r\n") + + try: + contents = test.read('no_file') + except IOError: # expect "No such file or directory" + pass + except: + raise + + try: + test.read(test.workpath('file_x'), mode = 'w') + except ValueError: # expect "mode must begin with 'r' + pass + except: + raise + + def _file_matches(file, contents, expected): + assert contents == expected, \ + "Expected contents of " + str(file) + "==========\n" + \ + expected + \ + "Actual contents of " + str(file) + "============\n" + \ + contents + + _file_matches(wdir_file1, test.read('file1'), "") + _file_matches(wdir_file2, test.read('file2'), "Test\nfile\n#2.\n") + _file_matches(wdir_foo_file3, test.read(['foo', 'file3']), + "Test\nfile\n#3.\n") + _file_matches(wdir_foo_file3, + test.read(UserList.UserList(['foo', 'file3'])), + "Test\nfile\n#3.\n") + _file_matches(wdir_file4, test.read('file4', mode = 'r'), + "Test\nfile\n#4.\n") + _file_matches(wdir_file5, test.read('file5', mode = 'rb'), + "Test\r\nfile\r\n#5.\r\n") + + + +class rmdir_TestCase(TestCmdTestCase): + def test_rmdir(self): + """Test rmdir()""" + test = TestCmd.TestCmd(workdir = '') + + try: + test.rmdir(['no', 'such', 'dir']) + except EnvironmentError: + pass + else: + raise Exception("did not catch expected EnvironmentError") + + test.subdir(['sub'], + ['sub', 'dir'], + ['sub', 'dir', 'one']) + + s = test.workpath('sub') + s_d = test.workpath('sub', 'dir') + s_d_o = test.workpath('sub', 'dir', 'one') + + try: + test.rmdir(['sub']) + except EnvironmentError: + pass + else: + raise Exception("did not catch expected EnvironmentError") + + assert os.path.isdir(s_d_o), "%s is gone?" % s_d_o + + try: + test.rmdir(['sub']) + except EnvironmentError: + pass + else: + raise Exception("did not catch expected EnvironmentError") + + assert os.path.isdir(s_d_o), "%s is gone?" % s_d_o + + test.rmdir(['sub', 'dir', 'one']) + + assert not os.path.exists(s_d_o), "%s exists?" % s_d_o + assert os.path.isdir(s_d), "%s is gone?" 
% s_d + + test.rmdir(['sub', 'dir']) + + assert not os.path.exists(s_d), "%s exists?" % s_d + assert os.path.isdir(s), "%s is gone?" % s + + test.rmdir('sub') + + assert not os.path.exists(s), "%s exists?" % s + + + +class run_TestCase(TestCmdTestCase): + def test_run(self): + """Test run()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. + try: + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + test.run() + self.run_match(test.stdout(), t.script, "STDOUT", t.workdir, + repr([])) + self.run_match(test.stderr(), t.script, "STDERR", t.workdir, + repr([])) + + test.run(arguments = 'arg1 arg2 arg3') + self.run_match(test.stdout(), t.script, "STDOUT", t.workdir, + repr(['arg1', 'arg2', 'arg3'])) + self.run_match(test.stderr(), t.script, "STDERR", t.workdir, + repr(['arg1', 'arg2', 'arg3'])) + + test.run(program = t.scriptx, arguments = 'foo') + self.run_match(test.stdout(), t.scriptx, "STDOUT", t.workdir, + repr(['foo'])) + self.run_match(test.stderr(), t.scriptx, "STDERR", t.workdir, + repr(['foo'])) + + test.run(chdir = os.curdir, arguments = 'x y z') + self.run_match(test.stdout(), t.script, "STDOUT", test.workdir, + repr(['x', 'y', 'z'])) + self.run_match(test.stderr(), t.script, "STDERR", test.workdir, + repr(['x', 'y', 'z'])) + + test.run(chdir = 'script_subdir') + script_subdir = test.workpath('script_subdir') + self.run_match(test.stdout(), t.script, "STDOUT", script_subdir, + repr([])) + self.run_match(test.stderr(), t.script, "STDERR", script_subdir, + repr([])) + + test.run(program = t.script1, interpreter = ['python', '-x']) + self.run_match(test.stdout(), t.script1, "STDOUT", t.workdir, + repr([])) + self.run_match(test.stderr(), t.script1, "STDERR", t.workdir, + repr([])) + + try: + test.run(chdir = 'no_subdir') + except OSError: + pass + + test.run(program = 'no_script', interpreter = 'python') + assert test.status != None, test.status + + try: + test.run(program = 'no_script', interpreter = 'no_interpreter') + except OSError: + # Python versions that use subprocess throw an OSError + # exception when they try to execute something that + # isn't there. + pass + else: + # Python versions that use os.popen3() or the Popen3 + # class run things through the shell, which just returns + # a non-zero exit status. 
+ assert test.status != None, test.status + + testx = TestCmd.TestCmd(program = t.scriptx, + workdir = '', + subdir = 't.scriptx_subdir') + + testx.run() + self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.workdir, + repr([])) + self.run_match(testx.stderr(), t.scriptx, "STDERR", t.workdir, + repr([])) + + testx.run(arguments = 'foo bar') + self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.workdir, + repr(['foo', 'bar'])) + self.run_match(testx.stderr(), t.scriptx, "STDERR", t.workdir, + repr(['foo', 'bar'])) + + testx.run(program = t.script, interpreter = 'python', arguments = 'bar') + self.run_match(testx.stdout(), t.script, "STDOUT", t.workdir, + repr(['bar'])) + self.run_match(testx.stderr(), t.script, "STDERR", t.workdir, + repr(['bar'])) + + testx.run(chdir = os.curdir, arguments = 'baz') + self.run_match(testx.stdout(), t.scriptx, "STDOUT", testx.workdir, + repr(['baz'])) + self.run_match(testx.stderr(), t.scriptx, "STDERR", testx.workdir, + repr(['baz'])) + + testx.run(chdir = 't.scriptx_subdir') + t.scriptx_subdir = testx.workpath('t.scriptx_subdir') + self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.scriptx_subdir, + repr([])) + self.run_match(testx.stderr(), t.scriptx, "STDERR", t.scriptx_subdir, + repr([])) + + testx.run(program = t.script1, interpreter = ('python', '-x')) + self.run_match(testx.stdout(), t.script1, "STDOUT", t.workdir, + repr([])) + self.run_match(testx.stderr(), t.script1, "STDERR", t.workdir, + repr([])) + + s = os.path.join('.', t.scriptx) + testx.run(program = [s]) + self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.workdir, + repr([])) + self.run_match(testx.stderr(), t.scriptx, "STDERR", t.workdir, + repr([])) + + try: + testx.run(chdir = 'no_subdir') + except OSError: + pass + + try: + testx.run(program = 'no_program') + except OSError: + # Python versions that use subprocess throw an OSError + # exception when they try to execute something that + # isn't there. + pass + else: + # Python versions that use os.popen3() or the Popen3 + # class run things through the shell, which just returns + # a non-zero exit status. + assert test.status != None + + test1 = TestCmd.TestCmd(program = t.script1, + interpreter = ['python', '-x'], + workdir = '') + + test1.run() + self.run_match(test1.stdout(), t.script1, "STDOUT", t.workdir, + repr([])) + self.run_match(test1.stderr(), t.script1, "STDERR", t.workdir, + repr([])) + + finally: + os.chdir(t.orig_cwd) + + def test_run_subclass(self): + """Test run() through a subclass with different signatures""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. + + class MyTestCmdSubclass(TestCmd.TestCmd): + def start(self, additional_argument=None, **kw): + return TestCmd.TestCmd.start(self, **kw) + + try: + test = MyTestCmdSubclass(program = t.script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + test.run() + self.run_match(test.stdout(), t.script, "STDOUT", t.workdir, + repr([])) + self.run_match(test.stderr(), t.script, "STDERR", t.workdir, + repr([])) + finally: + os.chdir(t.orig_cwd) + + +class run_verbose_TestCase(TestCmdTestCase): + def test_run_verbose(self): + """Test the run() method's verbose attribute""" + + # Prepare our "source directory." + t = self.setup_run_scripts() + + save_stdout = sys.stderr + save_stderr = sys.stderr + + try: + # Test calling TestCmd() with an explicit verbose = 1. 
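# A thumbnail of the verbosity contract pinned down by the checks that
# follow (a sketch, not the implementation): verbose = 1 echoes each
# command line to stderr, verbose = 2 additionally dumps the
# STATUS/STDOUT/STDERR banner block to stdout after every run, and when
# no explicit value is passed the level is read from the TESTCMD_VERBOSE
# environment variable at construction time.

import os
import TestCmd

os.environ['TESTCMD_VERBOSE'] = '2'
inherited = TestCmd.TestCmd(workdir = '')   # picks level 2 up from the environment
os.environ['TESTCMD_VERBOSE'] = ''
explicit = TestCmd.TestCmd(workdir = '', verbose = 1)   # command echo only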
+ + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + verbose = 1) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + test.run(arguments = ['arg1 arg2']) + o = sys.stdout.getvalue() + assert o == '', o + e = sys.stderr.getvalue() + expect = 'python "%s" "arg1 arg2"\n' % t.script_path + assert expect == e, (expect, e) + + testx = TestCmd.TestCmd(program = t.scriptx, + workdir = '', + verbose = 1) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + testx.run(arguments = ['arg1 arg2']) + expect = '"%s" "arg1 arg2"\n' % t.scriptx_path + o = sys.stdout.getvalue() + assert o == '', o + e = sys.stderr.getvalue() + assert expect == e, (expect, e) + + # Test calling TestCmd() with an explicit verbose = 2. + + outerr_fmt = """\ +============ STATUS: 0 +============ BEGIN STDOUT (len=%s): +%s============ END STDOUT +============ BEGIN STDERR (len=%s) +%s============ END STDERR +""" + + out_fmt = """\ +============ STATUS: 0 +============ BEGIN STDOUT (len=%s): +%s============ END STDOUT +""" + + err_fmt = """\ +============ STATUS: 0 +============ BEGIN STDERR (len=%s) +%s============ END STDERR +""" + + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + verbose = 2) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + test.run(arguments = ['arg1 arg2']) + + line_fmt = "script: %s: %s: ['arg1 arg2']\n" + stdout_line = line_fmt % ('STDOUT', t.sub_dir) + stderr_line = line_fmt % ('STDERR', t.sub_dir) + expect = outerr_fmt % (len(stdout_line), stdout_line, + len(stderr_line), stderr_line) + o = sys.stdout.getvalue() + assert expect == o, (expect, o) + + expect = 'python "%s" "arg1 arg2"\n' % t.script_path + e = sys.stderr.getvalue() + assert e == expect, (e, expect) + + testx = TestCmd.TestCmd(program = t.scriptx, + workdir = '', + verbose = 2) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + testx.run(arguments = ['arg1 arg2']) + + line_fmt = "scriptx.bat: %s: %s: ['arg1 arg2']\n" + stdout_line = line_fmt % ('STDOUT', t.sub_dir) + stderr_line = line_fmt % ('STDERR', t.sub_dir) + expect = outerr_fmt % (len(stdout_line), stdout_line, + len(stderr_line), stderr_line) + o = sys.stdout.getvalue() + assert expect == o, (expect, o) + + expect = '"%s" "arg1 arg2"\n' % t.scriptx_path + e = sys.stderr.getvalue() + assert e == expect, (e, expect) + + # Test calling TestCmd() with an explicit verbose = 3. 
+ + test = TestCmd.TestCmd(program = t.scriptout, + interpreter = 'python', + workdir = '', + verbose = 2) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + test.run(arguments = ['arg1 arg2']) + + line_fmt = "scriptout: %s: %s: ['arg1 arg2']\n" + stdout_line = line_fmt % ('STDOUT', t.sub_dir) + expect = out_fmt % (len(stdout_line), stdout_line) + o = sys.stdout.getvalue() + assert expect == o, (expect, o) + + e = sys.stderr.getvalue() + expect = 'python "%s" "arg1 arg2"\n' % (t.scriptout_path) + assert e == expect, (e, expect) + + test = TestCmd.TestCmd(program = t.scriptout, + interpreter = 'python', + workdir = '', + verbose = 3) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + test.run(arguments = ['arg1 arg2']) + + line_fmt = "scriptout: %s: %s: ['arg1 arg2']\n" + stdout_line = line_fmt % ('STDOUT', t.sub_dir) + expect = outerr_fmt % (len(stdout_line), stdout_line, + '0', '') + o = sys.stdout.getvalue() + assert expect == o, (expect, o) + + e = sys.stderr.getvalue() + expect = 'python "%s" "arg1 arg2"\n' % (t.scriptout_path) + assert e == expect, (e, expect) + + # Test letting TestCmd() pick up verbose = 2 from the environment. + + os.environ['TESTCMD_VERBOSE'] = '2' + + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '') + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + test.run(arguments = ['arg1 arg2']) + + line_fmt = "script: %s: %s: ['arg1 arg2']\n" + stdout_line = line_fmt % ('STDOUT', t.sub_dir) + stderr_line = line_fmt % ('STDERR', t.sub_dir) + expect = outerr_fmt % (len(stdout_line), stdout_line, + len(stderr_line), stderr_line) + o = sys.stdout.getvalue() + assert expect == o, (expect, o) + + expect = 'python "%s" "arg1 arg2"\n' % t.script_path + e = sys.stderr.getvalue() + assert e == expect, (e, expect) + + testx = TestCmd.TestCmd(program = t.scriptx, + workdir = '') + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + testx.run(arguments = ['arg1 arg2']) + + line_fmt = "scriptx.bat: %s: %s: ['arg1 arg2']\n" + stdout_line = line_fmt % ('STDOUT', t.sub_dir) + stderr_line = line_fmt % ('STDERR', t.sub_dir) + expect = outerr_fmt % (len(stdout_line), stdout_line, + len(stderr_line), stderr_line) + o = sys.stdout.getvalue() + assert expect == o, (expect, o) + + expect = '"%s" "arg1 arg2"\n' % t.scriptx_path + e = sys.stderr.getvalue() + assert e == expect, (e, expect) + + # Test letting TestCmd() pick up verbose = 1 from the environment. 
+ + os.environ['TESTCMD_VERBOSE'] = '1' + + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + verbose = 1) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + test.run(arguments = ['arg1 arg2']) + o = sys.stdout.getvalue() + assert o == '', o + e = sys.stderr.getvalue() + expect = 'python "%s" "arg1 arg2"\n' % t.script_path + assert expect == e, (expect, e) + + testx = TestCmd.TestCmd(program = t.scriptx, + workdir = '', + verbose = 1) + + sys.stdout = StringIO.StringIO() + sys.stderr = StringIO.StringIO() + + testx.run(arguments = ['arg1 arg2']) + expect = '"%s" "arg1 arg2"\n' % t.scriptx_path + o = sys.stdout.getvalue() + assert o == '', o + e = sys.stderr.getvalue() + assert expect == e, (expect, e) + + finally: + sys.stdout = save_stdout + sys.stderr = save_stderr + os.chdir(t.orig_cwd) + os.environ['TESTCMD_VERBOSE'] = '' + + + +class set_diff_function_TestCase(TestCmdTestCase): + def test_set_diff_function(self): + """Test set_diff_function()""" + self.popen_python(r"""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd() +test.diff("a\n", "a\n") +test.set_diff_function('diff_re') +test.diff(".\n", "a\n") +sys.exit(0) +""" % self.orig_cwd) + + def test_set_diff_function_stdout(self): + """Test set_diff_function(): stdout""" + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd() +print "diff:" +test.diff("a\\n", "a\\n") +print "diff_stdout:" +test.diff_stdout("a\\n", "a\\n") +test.set_diff_function(stdout='diff_re') +print "diff:" +test.diff(".\\n", "a\\n") +print "diff_stdout:" +test.diff_stdout(".\\n", "a\\n") +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +diff: +diff_stdout: +diff: +1c1 +< . +--- +> a +diff_stdout: +""") + + def test_set_diff_function_stderr(self): + """Test set_diff_function(): stderr """ + self.popen_python("""import sys +sys.path = ['%s'] + sys.path +import TestCmd +test = TestCmd.TestCmd() +print "diff:" +test.diff("a\\n", "a\\n") +print "diff_stderr:" +test.diff_stderr("a\\n", "a\\n") +test.set_diff_function(stderr='diff_re') +print "diff:" +test.diff(".\\n", "a\\n") +print "diff_stderr:" +test.diff_stderr(".\\n", "a\\n") +sys.exit(0) +""" % self.orig_cwd, + stdout="""\ +diff: +diff_stderr: +diff: +1c1 +< . 
+--- +> a +diff_stderr: +""") + + + +class set_match_function_TestCase(TestCmdTestCase): + def test_set_match_function(self): + """Test set_match_function()""" + test = TestCmd.TestCmd() + assert test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + + test.set_match_function('match_exact') + + assert not test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + + def test_set_match_function_stdout(self): + """Test set_match_function(): stdout """ + test = TestCmd.TestCmd() + assert test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("abcde\n", "abcde\n") + + test.set_match_function(stdout='match_exact') + + assert test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert not test.match_stdout("abcde\n", "a.*e\n") + assert test.match_stdout("abcde\n", "abcde\n") + + def test_set_match_function_stderr(self): + """Test set_match_function(): stderr """ + test = TestCmd.TestCmd() + assert test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("abcde\n", "abcde\n") + + test.set_match_function(stderr='match_exact') + + assert test.match("abcde\n", "a.*e\n") + assert test.match("abcde\n", "abcde\n") + assert not test.match_stderr("abcde\n", "a.*e\n") + assert test.match_stderr("abcde\n", "abcde\n") + + + +class sleep_TestCase(TestCmdTestCase): + def test_sleep(self): + """Test sleep()""" + test = TestCmd.TestCmd() + + start = time.time() + test.sleep() + end = time.time() + diff = end - start + assert diff > 0.9, "only slept %f seconds (start %f, end %f), not default" % (diff, start, end) + + start = time.time() + test.sleep(3) + end = time.time() + diff = end - start + assert diff > 2.9, "only slept %f seconds (start %f, end %f), not 3" % (diff, start, end) + + + +class stderr_TestCase(TestCmdTestCase): + def test_stderr(self): + """Test stderr()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run1', """import sys +sys.stdout.write("run1 STDOUT %s\\n" % sys.argv[1:]) +sys.stdout.write("run1 STDOUT second line\\n") +sys.stderr.write("run1 STDERR %s\\n" % sys.argv[1:]) +sys.stderr.write("run1 STDERR second line\\n") +""") + run_env.write('run2', """import sys +sys.stdout.write("run2 STDOUT %s\\n" % sys.argv[1:]) +sys.stdout.write("run2 STDOUT second line\\n") +sys.stderr.write("run2 STDERR %s\\n" % sys.argv[1:]) +sys.stderr.write("run2 STDERR second line\\n") +""") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + test = TestCmd.TestCmd(interpreter = 'python', workdir = '') + try: + output = test.stderr() + except IndexError: + pass + else: + raise IndexError("got unexpected output:\n" + output) + test.program_set('run1') + test.run(arguments = 'foo bar') + test.program_set('run2') + test.run(arguments = 'snafu') + # XXX SHOULD TEST ABSOLUTE NUMBER AS WELL + output = test.stderr() + assert output == "run2 STDERR ['snafu']\nrun2 STDERR second line\n", output + output = test.stderr(run = -1) + assert output == "run1 STDERR ['foo', 'bar']\nrun1 STDERR second line\n", output + + + +class command_args_TestCase(TestCmdTestCase): + def test_command_args(self): + """Test command_args()""" + run_env = TestCmd.TestCmd(workdir = '') + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. 
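# The command_args() checks just below reduce to this contract (sketch):
# interpreter, program, and the split-up arguments string are glued into
# the argv list that run() and start() will execute, with a bare program
# name made absolute against the working directory.

import os
import TestCmd

test = TestCmd.TestCmd(workdir = '')
argv = test.command_args('prog', 'python', 'arg1 arg2')
expect = ['python', os.path.join(os.getcwd(), 'prog'), 'arg1', 'arg2']
assert argv == expect, (expect, argv)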
+ test = TestCmd.TestCmd(workdir = '') + + r = test.command_args('prog') + expect = [run_env.workpath('prog')] + assert r == expect, (expect, r) + + r = test.command_args(test.workpath('new_prog')) + expect = [test.workpath('new_prog')] + assert r == expect, (expect, r) + + r = test.command_args('prog', 'python') + expect = ['python', run_env.workpath('prog')] + assert r == expect, (expect, r) + + r = test.command_args('prog', 'python', 'arg1 arg2') + expect = ['python', run_env.workpath('prog'), 'arg1', 'arg2'] + assert r == expect, (expect, r) + + test.program_set('default_prog') + default_prog = run_env.workpath('default_prog') + + r = test.command_args() + expect = [default_prog] + assert r == expect, (expect, r) + + r = test.command_args(interpreter='PYTHON') + expect = ['PYTHON', default_prog] + assert r == expect, (expect, r) + + r = test.command_args(interpreter='PYTHON', arguments='arg3 arg4') + expect = ['PYTHON', default_prog, 'arg3', 'arg4'] + assert r == expect, (expect, r) + + test.interpreter_set('default_python') + + r = test.command_args() + expect = ['default_python', default_prog] + assert r == expect, (expect, r) + + r = test.command_args(arguments='arg5 arg6') + expect = ['default_python', default_prog, 'arg5', 'arg6'] + assert r == expect, (expect, r) + + r = test.command_args('new_prog_1') + expect = [run_env.workpath('new_prog_1')] + assert r == expect, (expect, r) + + r = test.command_args(program='new_prog_2') + expect = [run_env.workpath('new_prog_2')] + assert r == expect, (expect, r) + + + +class start_TestCase(TestCmdTestCase): + def setup_run_scripts(self): + t = TestCmdTestCase.setup_run_scripts(self) + t.recv_script = 'script_recv' + t.recv_script_path = t.run_env.workpath(t.sub_dir, t.recv_script) + t.recv_out_path = t.run_env.workpath('script_recv.out') + text = """\ +import os +import sys + +class Unbuffered: + def __init__(self, file): + self.file = file + def write(self, arg): + self.file.write(arg) + self.file.flush() + def __getattr__(self, attr): + return getattr(self.file, attr) + +sys.stdout = Unbuffered(sys.stdout) +sys.stderr = Unbuffered(sys.stderr) + +sys.stdout.write('script_recv: STDOUT\\n') +sys.stderr.write('script_recv: STDERR\\n') +logfp = open(r'%s', 'wb') +while 1: + line = sys.stdin.readline() + if not line: + break + logfp.write('script_recv: ' + line) + sys.stdout.write('script_recv: STDOUT: ' + line) + sys.stderr.write('script_recv: STDERR: ' + line) +logfp.close() + """ % t.recv_out_path + t.run_env.write(t.recv_script_path, text) + os.chmod(t.recv_script_path, 0644) # XXX UNIX-specific + return t + + def test_start(self): + """Test start()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. 
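# start() differs from run() mainly in blocking behavior; a condensed
# sketch using a hypothetical hello.py (not one of the fixture scripts
# set up above):

import sys
import TestCmd

test = TestCmd.TestCmd(workdir = '')
test.write('hello.py', "import sys\nsys.stdout.write('hello\\n')\n")

test.run([sys.executable, test.workpath('hello.py')])    # blocks, stores results
assert test.status == 0 and test.stdout() == 'hello\n'

p = test.start(program = [sys.executable, test.workpath('hello.py')])
assert p.stdout.read() == 'hello\n'                      # caller reads the streams
p.wait()                                                 # and reaps the child itself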
+ try: + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + p = test.start() + self.run_match(p.stdout.read(), t.script, "STDOUT", t.workdir, + repr([])) + self.run_match(p.stderr.read(), t.script, "STDERR", t.workdir, + repr([])) + p.wait() + + p = test.start(arguments='arg1 arg2 arg3') + self.run_match(p.stdout.read(), t.script, "STDOUT", t.workdir, + repr(['arg1', 'arg2', 'arg3'])) + self.run_match(p.stderr.read(), t.script, "STDERR", t.workdir, + repr(['arg1', 'arg2', 'arg3'])) + p.wait() + + p = test.start(program=t.scriptx, arguments='foo') + self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir, + repr(['foo'])) + self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir, + repr(['foo'])) + p.wait() + + p = test.start(program=t.script1, interpreter=['python', '-x']) + self.run_match(p.stdout.read(), t.script1, "STDOUT", t.workdir, + repr([])) + self.run_match(p.stderr.read(), t.script1, "STDERR", t.workdir, + repr([])) + p.wait() + + p = test.start(program='no_script', interpreter='python') + status = p.wait() + assert status != None, status + + try: + p = test.start(program='no_script', interpreter='no_interpreter') + except OSError: + # Python versions that use subprocess throw an OSError + # exception when they try to execute something that + # isn't there. + pass + else: + status = p.wait() + # Python versions that use os.popen3() or the Popen3 + # class run things through the shell, which just returns + # a non-zero exit status. + assert status != None, status + + testx = TestCmd.TestCmd(program = t.scriptx, + workdir = '', + subdir = 't.scriptx_subdir') + + p = testx.start() + self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir, + repr([])) + self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir, + repr([])) + p.wait() + + p = testx.start(arguments='foo bar') + self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir, + repr(['foo', 'bar'])) + self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir, + repr(['foo', 'bar'])) + p.wait() + + p = testx.start(program=t.script, interpreter='python', arguments='bar') + self.run_match(p.stdout.read(), t.script, "STDOUT", t.workdir, + repr(['bar'])) + self.run_match(p.stderr.read(), t.script, "STDERR", t.workdir, + repr(['bar'])) + p.wait() + + p = testx.start(program=t.script1, interpreter=('python', '-x')) + self.run_match(p.stdout.read(), t.script1, "STDOUT", t.workdir, + repr([])) + self.run_match(p.stderr.read(), t.script1, "STDERR", t.workdir, + repr([])) + p.wait() + + s = os.path.join('.', t.scriptx) + p = testx.start(program=[s]) + self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir, + repr([])) + self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir, + repr([])) + p.wait() + + try: + testx.start(program='no_program') + except OSError: + # Python versions that use subprocess throw an OSError + # exception when they try to execute something that + # isn't there. + pass + else: + # Python versions that use os.popen3() or the Popen3 + # class run things through the shell, which just dies + # trying to execute the non-existent program before + # we can wait() for it. 
+ try: + p = p.wait() + except OSError: + pass + + test1 = TestCmd.TestCmd(program = t.script1, + interpreter = ['python', '-x'], + workdir = '') + + p = test1.start() + self.run_match(p.stdout.read(), t.script1, "STDOUT", t.workdir, + repr([])) + self.run_match(p.stderr.read(), t.script1, "STDERR", t.workdir, + repr([])) + p.wait() + + finally: + os.chdir(t.orig_cwd) + + def test_finish(self): + """Test finish()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. + try: + + test = TestCmd.TestCmd(program = t.recv_script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + test.start(stdin=1) + test.finish() + expect_stdout = """\ +script_recv: STDOUT +""" + expect_stderr = """\ +script_recv: STDERR +""" + stdout = test.stdout() + assert stdout == expect_stdout, stdout + stderr = test.stderr() + assert stderr == expect_stderr, stderr + + p = test.start(stdin=1) + p.send('input\n') + test.finish(p) + expect_stdout = """\ +script_recv: STDOUT +script_recv: STDOUT: input +""" + expect_stderr = """\ +script_recv: STDERR +script_recv: STDERR: input +""" + stdout = test.stdout() + assert stdout == expect_stdout, stdout + stderr = test.stderr() + assert stderr == expect_stderr, stderr + + p = test.start(combine=1, stdin=1) + p.send('input\n') + test.finish(p) + expect_stdout = """\ +script_recv: STDOUT +script_recv: STDERR +script_recv: STDOUT: input +script_recv: STDERR: input +""" + expect_stderr = "" + stdout = test.stdout() + assert stdout == expect_stdout, stdout + stderr = test.stderr() + assert stderr == expect_stderr, stderr + + finally: + os.chdir(t.orig_cwd) + + def test_recv(self): + """Test the recv() method of objects returned by start()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. + try: + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + p = test.start() + stdout = p.recv() + while stdout == '': + import time + time.sleep(1) + stdout = p.recv() + self.run_match(stdout, t.script, "STDOUT", t.workdir, + repr([])) + p.wait() + + finally: + os.chdir(t.orig_cwd) + + def test_recv_err(self): + """Test the recv_err() method of objects returned by start()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. + try: + + test = TestCmd.TestCmd(program = t.script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + p = test.start() + stderr = p.recv_err() + while stderr == '': + import time + time.sleep(1) + stderr = p.recv_err() + self.run_match(stderr, t.script, "STDERR", t.workdir, + repr([])) + p.wait() + + + finally: + os.chdir(t.orig_cwd) + + def test_send(self): + """Test the send() method of objects returned by start()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. 
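# test_send() below drives a child's standard input interactively; the
# shape of the interaction, sketched with a hypothetical echo.py rather
# than the script_recv fixture:

import sys
import TestCmd

test = TestCmd.TestCmd(workdir = '')
test.write('echo.py', "import sys\nsys.stdout.write(sys.stdin.read())\n")

p = test.start(program = [sys.executable, test.workpath('echo.py')], stdin = 1)
p.send('fed to the child\n')     # write to the child's stdin pipe
test.finish(p)                   # close stdin, wait, collect stdout/stderr
assert test.stdout() == 'fed to the child\n', test.stdout()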
+ try: + + test = TestCmd.TestCmd(program = t.recv_script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + p = test.start(stdin=1) + input = 'stdin.write() input to the receive script\n' + p.stdin.write(input) + p.stdin.close() + p.wait() + result = open(t.recv_out_path, 'rb').read() + expect = 'script_recv: ' + input + assert result == expect, repr(result) + + p = test.start(stdin=1) + input = 'send() input to the receive script\n' + p.send(input) + p.stdin.close() + p.wait() + result = open(t.recv_out_path, 'rb').read() + expect = 'script_recv: ' + input + assert result == expect, repr(result) + + finally: + os.chdir(t.orig_cwd) + + # TODO(sgk): figure out how to eliminate the race conditions here. + def __FLAKY__test_send_recv(self): + """Test the send_recv() method of objects returned by start()""" + + t = self.setup_run_scripts() + + # Everything before this prepared our "source directory." + # Now do the real test. + try: + + test = TestCmd.TestCmd(program = t.recv_script, + interpreter = 'python', + workdir = '', + subdir = 'script_subdir') + + def do_send_recv(p, input): + send, stdout, stderr = p.send_recv(input) + stdout = self.translate_newlines(stdout) + stderr = self.translate_newlines(stderr) + return send, stdout, stderr + + p = test.start(stdin=1) + input = 'input to the receive script\n' + send, stdout, stderr = do_send_recv(p, input) + # Buffering issues and a race condition prevent this from + # being completely deterministic, so check for both null + # output and the first write() on each stream. + assert stdout in ("", "script_recv: STDOUT\n"), stdout + assert stderr in ("", "script_recv: STDERR\n"), stderr + send, stdout, stderr = do_send_recv(p, input) + assert stdout in ("", "script_recv: STDOUT\n"), stdout + assert stderr in ("", "script_recv: STDERR\n"), stderr + p.stdin.close() + stdout = self.translate_newlines(p.recv()) + stderr = self.translate_newlines(p.recv_err()) + assert stdout in ("", "script_recv: STDOUT\n"), stdout + assert stderr in ("", "script_recv: STDERR\n"), stderr + p.wait() + stdout = self.translate_newlines(p.recv()) + stderr = self.translate_newlines(p.recv_err()) + expect_stdout = """\ +script_recv: STDOUT +script_recv: STDOUT: input to the receive script +script_recv: STDOUT: input to the receive script +""" + expect_stderr = """\ +script_recv: STDERR +script_recv: STDERR: input to the receive script +script_recv: STDERR: input to the receive script +""" + assert stdout == expect_stdout, stdout + assert stderr == expect_stderr, stderr + result = open(t.recv_out_path, 'rb').read() + expect = ('script_recv: ' + input) * 2 + assert result == expect, (result, stdout, stderr) + + finally: + os.chdir(t.orig_cwd) + + + +class stdin_TestCase(TestCmdTestCase): + def test_stdin(self): + """Test stdin()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run', """import fileinput +for line in fileinput.input(): + print 'Y'.join(line[:-1].split('X')) +""") + run_env.write('input', "X on X this X line X\n") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. 
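# The stdin keyword exercised below feeds data to the child in a single
# blocking run(); a sketch with a hypothetical upper.py instead of the
# fileinput-based run script:

import sys
import TestCmd

test = TestCmd.TestCmd(workdir = '')
test.write('upper.py', "import sys\nsys.stdout.write(sys.stdin.read().upper())\n")

test.run(program = [sys.executable, test.workpath('upper.py')],
         stdin = "shout this\n")
assert test.stdout() == "SHOUT THIS\n", test.stdout()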
+ test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '') + test.run(arguments = 'input') + assert test.stdout() == "Y on Y this Y line Y\n" + test.run(stdin = "X is X here X tooX\n") + assert test.stdout() == "Y is Y here Y tooY\n" + test.run(stdin = """X here X +X there X +""") + assert test.stdout() == "Y here Y\nY there Y\n" + test.run(stdin = ["X line X\n", "X another X\n"]) + assert test.stdout() == "Y line Y\nY another Y\n" + + + +class stdout_TestCase(TestCmdTestCase): + def test_stdout(self): + """Test stdout()""" + run_env = TestCmd.TestCmd(workdir = '') + run_env.write('run1', """import sys +sys.stdout.write("run1 STDOUT %s\\n" % sys.argv[1:]) +sys.stdout.write("run1 STDOUT second line\\n") +sys.stderr.write("run1 STDERR %s\\n" % sys.argv[1:]) +sys.stderr.write("run1 STDERR second line\\n") +""") + run_env.write('run2', """import sys +sys.stdout.write("run2 STDOUT %s\\n" % sys.argv[1:]) +sys.stdout.write("run2 STDOUT second line\\n") +sys.stderr.write("run2 STDERR %s\\n" % sys.argv[1:]) +sys.stderr.write("run2 STDERR second line\\n") +""") + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. + test = TestCmd.TestCmd(interpreter = 'python', workdir = '') + try: + output = test.stdout() + except IndexError: + pass + else: + raise IndexError("got unexpected output:\n\t`%s'\n" % output) + test.program_set('run1') + test.run(arguments = 'foo bar') + test.program_set('run2') + test.run(arguments = 'snafu') + # XXX SHOULD TEST ABSOLUTE NUMBER AS WELL + output = test.stdout() + assert output == "run2 STDOUT ['snafu']\nrun2 STDOUT second line\n", output + output = test.stdout(run = -1) + assert output == "run1 STDOUT ['foo', 'bar']\nrun1 STDOUT second line\n", output + + + +class subdir_TestCase(TestCmdTestCase): + def test_subdir(self): + """Test subdir()""" + test = TestCmd.TestCmd(workdir = '', subdir = ['no', 'such', 'subdir']) + assert not os.path.exists(test.workpath('no')) + + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + assert test.subdir('bar') == 1 + assert test.subdir(['foo', 'succeed']) == 1 + if os.name != "nt": + os.chmod(test.workpath('foo'), 0500) + assert test.subdir(['foo', 'fail']) == 0 + assert test.subdir(['sub', 'dir', 'ectory'], 'sub') == 1 + assert test.subdir('one', + UserList.UserList(['one', 'two']), + ['one', 'two', 'three']) == 3 + assert os.path.isdir(test.workpath('foo')) + assert os.path.isdir(test.workpath('bar')) + assert os.path.isdir(test.workpath('foo', 'succeed')) + if os.name != "nt": + assert not os.path.exists(test.workpath('foo', 'fail')) + assert os.path.isdir(test.workpath('sub')) + assert not os.path.exists(test.workpath('sub', 'dir')) + assert not os.path.exists(test.workpath('sub', 'dir', 'ectory')) + assert os.path.isdir(test.workpath('one', 'two', 'three')) + + + +class symlink_TestCase(TestCmdTestCase): + def test_symlink(self): + """Test symlink()""" + try: os.symlink + except AttributeError: return + + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + wdir_file1 = os.path.join(test.workdir, 'file1') + wdir_target1 = os.path.join(test.workdir, 'target1') + wdir_foo_file2 = os.path.join(test.workdir, 'foo', 'file2') + wdir_target2 = os.path.join(test.workdir, 'target2') + wdir_foo_target2 = os.path.join(test.workdir, 'foo', 'target2') + + test.symlink('target1', 'file1') + assert os.path.islink(wdir_file1) + assert not os.path.exists(wdir_file1) + open(wdir_target1, 'w').write("") + assert os.path.exists(wdir_file1) + + 
test.symlink('target2', ['foo', 'file2']) + assert os.path.islink(wdir_foo_file2) + assert not os.path.exists(wdir_foo_file2) + open(wdir_target2, 'w').write("") + assert not os.path.exists(wdir_foo_file2) + open(wdir_foo_target2, 'w').write("") + assert os.path.exists(wdir_foo_file2) + + + +class tempdir_TestCase(TestCmdTestCase): + def setUp(self): + TestCmdTestCase.setUp(self) + self._tempdir = tempfile.mktemp() + os.mkdir(self._tempdir) + os.chdir(self._tempdir) + + def tearDown(self): + TestCmdTestCase.tearDown(self) + os.rmdir(self._tempdir) + + def test_tempdir(self): + """Test tempdir()""" + test = TestCmd.TestCmd() + tdir1 = test.tempdir() + assert os.path.isdir(tdir1) + test.workdir_set(None) + test.cleanup() + assert not os.path.exists(tdir1) + + test = TestCmd.TestCmd() + tdir2 = test.tempdir('temp') + assert os.path.isdir(tdir2) + tdir3 = test.tempdir() + assert os.path.isdir(tdir3) + test.workdir_set(None) + test.cleanup() + assert not os.path.exists(tdir2) + assert not os.path.exists(tdir3) + + +timeout_script = """\ +import sys +import time +seconds = int(sys.argv[1]) +sys.stdout.write('sleeping %s\\n' % seconds) +sys.stdout.flush() +time.sleep(seconds) +sys.stdout.write('slept %s\\n' % seconds) +sys.stdout.flush() +sys.exit(0) +""" + +class timeout_TestCase(TestCmdTestCase): + def test_initialization(self): + """Test initialization timeout""" + test = TestCmd.TestCmd(workdir='', timeout=2) + test.write('sleep.py', timeout_script) + + test.run([sys.executable, test.workpath('sleep.py'), '4']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 4\n', test.stdout() + + test.run([sys.executable, test.workpath('sleep.py'), '4']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 4\n', test.stdout() + + def test_cancellation(self): + """Test timer cancellation after firing""" + test = TestCmd.TestCmd(workdir='', timeout=4) + test.write('sleep.py', timeout_script) + + test.run([sys.executable, test.workpath('sleep.py'), '6']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 6\n', test.stdout() + + test.run([sys.executable, test.workpath('sleep.py'), '2']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 2\nslept 2\n', test.stdout() + + test.run([sys.executable, test.workpath('sleep.py'), '6']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 6\n', test.stdout() + + def test_run(self): + """Test run() timeout""" + test = TestCmd.TestCmd(workdir='', timeout=8) + test.write('sleep.py', timeout_script) + + test.run([sys.executable, test.workpath('sleep.py'), '2'], + timeout=4) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 2\nslept 2\n', test.stdout() + + test.run([sys.executable, test.workpath('sleep.py'), '6'], + timeout=4) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 6\n', test.stdout() + + def test_set_timeout(self): + """Test set_timeout()""" + test = TestCmd.TestCmd(workdir='', timeout=2) + test.write('sleep.py', timeout_script) + + test.run([sys.executable, test.workpath('sleep.py'), '4']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 4\n', test.stdout() + + test.set_timeout(None) + + test.run([sys.executable, test.workpath('sleep.py'), '4']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 4\nslept 4\n', test.stdout() + + test.set_timeout(6) + + test.run([sys.executable, test.workpath('sleep.py'), 
'4']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 4\nslept 4\n', test.stdout() + + test.run([sys.executable, test.workpath('sleep.py'), '8']) + assert test.stderr() == '', test.stderr() + assert test.stdout() == 'sleeping 8\n', test.stdout() + + + +class unlink_TestCase(TestCmdTestCase): + def test_unlink(self): + """Test unlink()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + wdir_file1 = os.path.join(test.workdir, 'file1') + wdir_file2 = os.path.join(test.workdir, 'file2') + wdir_foo_file3a = os.path.join(test.workdir, 'foo', 'file3a') + wdir_foo_file3b = os.path.join(test.workdir, 'foo', 'file3b') + wdir_foo_file4 = os.path.join(test.workdir, 'foo', 'file4') + wdir_file5 = os.path.join(test.workdir, 'file5') + + open(wdir_file1, 'w').write("") + open(wdir_file2, 'w').write("") + open(wdir_foo_file3a, 'w').write("") + open(wdir_foo_file3b, 'w').write("") + open(wdir_foo_file4, 'w').write("") + open(wdir_file5, 'w').write("") + + try: + contents = test.unlink('no_file') + except OSError: # expect "No such file or directory" + pass + except: + raise + + test.unlink("file1") + assert not os.path.exists(wdir_file1) + + test.unlink(wdir_file2) + assert not os.path.exists(wdir_file2) + + test.unlink(['foo', 'file3a']) + assert not os.path.exists(wdir_foo_file3a) + + test.unlink(UserList.UserList(['foo', 'file3b'])) + assert not os.path.exists(wdir_foo_file3b) + + test.unlink([test.workdir, 'foo', 'file4']) + assert not os.path.exists(wdir_foo_file4) + + # Make it so we can't unlink file5. + # For UNIX, remove write permission from the dir and the file. + # For Windows, open the file. + os.chmod(test.workdir, 0500) + os.chmod(wdir_file5, 0400) + f = open(wdir_file5, 'r') + + try: + try: + test.unlink('file5') + except OSError: # expect "Permission denied" + pass + except: + raise + finally: + os.chmod(test.workdir, 0700) + os.chmod(wdir_file5, 0600) + f.close() + + + +class touch_TestCase(TestCmdTestCase): + def test_touch(self): + """Test touch()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'sub') + + wdir_file1 = os.path.join(test.workdir, 'file1') + wdir_sub_file2 = os.path.join(test.workdir, 'sub', 'file2') + + open(wdir_file1, 'w').write("") + open(wdir_sub_file2, 'w').write("") + + file1_old_time = os.path.getmtime(wdir_file1) + file2_old_time = os.path.getmtime(wdir_sub_file2) + + test.sleep() + + test.touch(wdir_file1) + + file1_new_time = os.path.getmtime(wdir_file1) + assert file1_new_time > file1_old_time + + test.touch('file1', file1_old_time) + + result = os.path.getmtime(wdir_file1) + # Sub-second granularity of file systems may still vary. + # On Windows, the two times may be off by a microsecond. + assert int(result) == int(file1_old_time), (result, file1_old_time) + + test.touch(['sub', 'file2']) + + file2_new_time = os.path.getmtime(wdir_sub_file2) + assert file2_new_time > file2_old_time + + + +class verbose_TestCase(TestCmdTestCase): + def test_verbose(self): + """Test verbose()""" + test = TestCmd.TestCmd() + assert test.verbose == 0, 'verbose already initialized?' + test = TestCmd.TestCmd(verbose = 1) + assert test.verbose == 1, 'did not initialize verbose' + test.verbose = 2 + assert test.verbose == 2, 'did not set verbose' + + + +class workdir_TestCase(TestCmdTestCase): + def test_workdir(self): + """Test workdir()""" + run_env = TestCmd.TestCmd(workdir = '') + os.chdir(run_env.workdir) + # Everything before this prepared our "source directory." + # Now do the real test. 
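# The workdir/workpath/cleanup checks that follow revolve around the
# temporary-directory machinery; the core contract, in brief (sketch):

import os
import TestCmd

test = TestCmd.TestCmd(workdir = '')      # '' means "create a temp dir for me"
assert os.path.isdir(test.workdir)
assert test.workpath('foo', 'bar') == os.path.join(test.workdir, 'foo', 'bar')
wdir = test.workdir
test.cleanup()                            # removes every directory it created
assert not os.path.exists(wdir)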
+ test = TestCmd.TestCmd() + assert test.workdir is None + + test = TestCmd.TestCmd(workdir = None) + assert test.workdir is None + + test = TestCmd.TestCmd(workdir = '') + assert test.workdir != None + assert os.path.isdir(test.workdir) + + test = TestCmd.TestCmd(workdir = 'dir') + assert test.workdir != None + assert os.path.isdir(test.workdir) + + no_such_subdir = os.path.join('no', 'such', 'subdir') + try: + test = TestCmd.TestCmd(workdir = no_such_subdir) + except OSError: # expect "No such file or directory" + pass + except: + raise + + test = TestCmd.TestCmd(workdir = 'foo') + workdir_foo = test.workdir + assert workdir_foo != None + + test.workdir_set('bar') + workdir_bar = test.workdir + assert workdir_bar != None + + try: + test.workdir_set(no_such_subdir) + except OSError: + pass # expect "No such file or directory" + except: + raise + assert workdir_bar == test.workdir + + assert os.path.isdir(workdir_foo) + assert os.path.isdir(workdir_bar) + + + +class workdirs_TestCase(TestCmdTestCase): + def test_workdirs(self): + """Test workdirs()""" + test = TestCmd.TestCmd() + assert test.workdir is None + test.workdir_set('') + wdir1 = test.workdir + test.workdir_set('') + wdir2 = test.workdir + assert os.path.isdir(wdir1) + assert os.path.isdir(wdir2) + test.cleanup() + assert not os.path.exists(wdir1) + assert not os.path.exists(wdir2) + + + +class workpath_TestCase(TestCmdTestCase): + def test_workpath(self): + """Test workpath()""" + test = TestCmd.TestCmd() + assert test.workdir is None + + test = TestCmd.TestCmd(workdir = '') + wpath = test.workpath('foo', 'bar') + assert wpath == os.path.join(test.workdir, 'foo', 'bar') + + + +class readable_TestCase(TestCmdTestCase): + def test_readable(self): + """Test readable()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + test.write('file1', "Test file #1\n") + test.write(['foo', 'file2'], "Test file #2\n") + + try: symlink = os.symlink + except AttributeError: pass + else: symlink('no_such_file', test.workpath('dangling_symlink')) + + test.readable(test.workdir, 0) + # XXX skip these tests if euid == 0? + assert not _is_readable(test.workdir) + assert not _is_readable(test.workpath('file1')) + assert not _is_readable(test.workpath('foo')) + assert not _is_readable(test.workpath('foo', 'file2')) + + test.readable(test.workdir, 1) + assert _is_readable(test.workdir) + assert _is_readable(test.workpath('file1')) + assert _is_readable(test.workpath('foo')) + assert _is_readable(test.workpath('foo', 'file2')) + + test.readable(test.workdir, 0) + # XXX skip these tests if euid == 0? + assert not _is_readable(test.workdir) + assert not _is_readable(test.workpath('file1')) + assert not _is_readable(test.workpath('foo')) + assert not _is_readable(test.workpath('foo', 'file2')) + + test.readable(test.workpath('file1'), 1) + assert _is_readable(test.workpath('file1')) + + test.readable(test.workpath('file1'), 0) + assert not _is_readable(test.workpath('file1')) + + test.readable(test.workdir, 1) + + + +class writable_TestCase(TestCmdTestCase): + def test_writable(self): + """Test writable()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + test.write('file1', "Test file #1\n") + test.write(['foo', 'file2'], "Test file #2\n") + + try: symlink = os.symlink + except AttributeError: pass + else: symlink('no_such_file', test.workpath('dangling_symlink')) + + test.writable(test.workdir, 0) + # XXX skip these tests if euid == 0? 
+ assert not _is_writable(test.workdir) + assert not _is_writable(test.workpath('file1')) + assert not _is_writable(test.workpath('foo')) + assert not _is_writable(test.workpath('foo', 'file2')) + + test.writable(test.workdir, 1) + assert _is_writable(test.workdir) + assert _is_writable(test.workpath('file1')) + assert _is_writable(test.workpath('foo')) + assert _is_writable(test.workpath('foo', 'file2')) + + test.writable(test.workdir, 0) + # XXX skip these tests if euid == 0? + assert not _is_writable(test.workdir) + assert not _is_writable(test.workpath('file1')) + assert not _is_writable(test.workpath('foo')) + assert not _is_writable(test.workpath('foo', 'file2')) + + test.writable(test.workpath('file1'), 1) + assert _is_writable(test.workpath('file1')) + + test.writable(test.workpath('file1'), 0) + assert not _is_writable(test.workpath('file1')) + + + +class executable_TestCase(TestCmdTestCase): + def test_executable(self): + """Test executable()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + test.write('file1', "Test file #1\n") + test.write(['foo', 'file2'], "Test file #2\n") + + try: symlink = os.symlink + except AttributeError: pass + else: symlink('no_such_file', test.workpath('dangling_symlink')) + + def make_executable(fname): + st = os.stat(fname) + os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0100)) + + def make_non_executable(fname): + st = os.stat(fname) + os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0100)) + + test.executable(test.workdir, 0) + # XXX skip these tests if euid == 0? + assert not _is_executable(test.workdir) + make_executable(test.workdir) + assert not _is_executable(test.workpath('file1')) + assert not _is_executable(test.workpath('foo')) + make_executable(test.workpath('foo')) + assert not _is_executable(test.workpath('foo', 'file2')) + make_non_executable(test.workpath('foo')) + make_non_executable(test.workdir) + + test.executable(test.workdir, 1) + assert _is_executable(test.workdir) + assert _is_executable(test.workpath('file1')) + assert _is_executable(test.workpath('foo')) + assert _is_executable(test.workpath('foo', 'file2')) + + test.executable(test.workdir, 0) + # XXX skip these tests if euid == 0? 
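A directory whose search (execute) bit is cleared cannot be descended into, so the test briefly restores the bit on the work directory with make_executable() before probing the files inside it, then clears it again. A hypothetical helper in the same spirit as the make_executable()/make_non_executable() functions above:

    import os
    import stat

    def set_owner_exec_bit(path, on):
        # Toggle only the owner execute/search bit, leaving the rest of
        # the mode untouched (illustrative only).
        mode = stat.S_IMODE(os.stat(path)[stat.ST_MODE])
        if on:
            os.chmod(path, mode | stat.S_IXUSR)
        else:
            os.chmod(path, mode & ~stat.S_IXUSR)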
+ assert not _is_executable(test.workdir) + make_executable(test.workdir) + assert not _is_executable(test.workpath('file1')) + assert not _is_executable(test.workpath('foo')) + make_executable(test.workpath('foo')) + assert not _is_executable(test.workpath('foo', 'file2')) + + test.executable(test.workpath('file1'), 1) + assert _is_executable(test.workpath('file1')) + + test.executable(test.workpath('file1'), 0) + assert not _is_executable(test.workpath('file1')) + + test.executable(test.workdir, 1) + + + +class write_TestCase(TestCmdTestCase): + def test_write(self): + """Test write()""" + test = TestCmd.TestCmd(workdir = '', subdir = 'foo') + test.write('file1', "Test file #1\n") + test.write(['foo', 'file2'], "Test file #2\n") + try: + test.write(['bar', 'file3'], "Test file #3 (should not get created)\n") + except IOError: # expect "No such file or directory" + pass + except: + raise + test.write(test.workpath('file4'), "Test file #4.\n") + test.write(test.workpath('foo', 'file5'), "Test file #5.\n") + try: + test.write(test.workpath('bar', 'file6'), "Test file #6 (should not get created)\n") + except IOError: # expect "No such file or directory" + pass + except: + raise + + try: + test.write('file7', "Test file #8.\n", mode = 'r') + except ValueError: # expect "mode must begin with 'w' + pass + except: + raise + + test.write('file8', "Test file #8.\n", mode = 'w') + test.write('file9', "Test file #9.\r\n", mode = 'wb') + + if os.name != "nt": + os.chmod(test.workdir, 0500) + try: + test.write('file10', "Test file #10 (should not get created).\n") + except IOError: # expect "Permission denied" + pass + except: + raise + + assert os.path.isdir(test.workpath('foo')) + assert not os.path.exists(test.workpath('bar')) + assert os.path.isfile(test.workpath('file1')) + assert os.path.isfile(test.workpath('foo', 'file2')) + assert not os.path.exists(test.workpath('bar', 'file3')) + assert os.path.isfile(test.workpath('file4')) + assert os.path.isfile(test.workpath('foo', 'file5')) + assert not os.path.exists(test.workpath('bar', 'file6')) + assert not os.path.exists(test.workpath('file7')) + assert os.path.isfile(test.workpath('file8')) + assert os.path.isfile(test.workpath('file9')) + if os.name != "nt": + assert not os.path.exists(test.workpath('file10')) + + assert open(test.workpath('file8'), 'r').read() == "Test file #8.\n" + assert open(test.workpath('file9'), 'rb').read() == "Test file #9.\r\n" + + + +class variables_TestCase(TestCmdTestCase): + def test_variables(self): + """Test global variables""" + run_env = TestCmd.TestCmd(workdir = '') + + variables = [ + 'fail_test', + 'no_result', + 'pass_test', + 'match_exact', + 'match_re', + 'match_re_dotall', + 'python', + '_python_', + 'TestCmd', + ] + + script = "import TestCmd\n" + \ + '\n'.join([ "print TestCmd.%s\n" % v for v in variables ]) + run_env.run(program=sys.executable, stdin=script) + stderr = run_env.stderr() + assert stderr == "", stderr + + script = "from TestCmd import *\n" + \ + '\n'.join([ "print %s" % v for v in variables ]) + run_env.run(program=sys.executable, stdin=script) + stderr = run_env.stderr() + assert stderr == "", stderr + + + +if __name__ == "__main__": + tclasses = [ + __init__TestCase, + basename_TestCase, + cleanup_TestCase, + chmod_TestCase, + combine_TestCase, + command_args_TestCase, + description_TestCase, + diff_TestCase, + diff_stderr_TestCase, + diff_stdout_TestCase, + exit_TestCase, + fail_test_TestCase, + interpreter_TestCase, + match_TestCase, + match_exact_TestCase, + 
match_re_dotall_TestCase, + match_re_TestCase, + match_stderr_TestCase, + match_stdout_TestCase, + no_result_TestCase, + pass_test_TestCase, + preserve_TestCase, + program_TestCase, + read_TestCase, + rmdir_TestCase, + run_TestCase, + run_verbose_TestCase, + set_diff_function_TestCase, + set_match_function_TestCase, + sleep_TestCase, + start_TestCase, + stderr_TestCase, + stdin_TestCase, + stdout_TestCase, + subdir_TestCase, + symlink_TestCase, + tempdir_TestCase, + timeout_TestCase, + unlink_TestCase, + touch_TestCase, + verbose_TestCase, + workdir_TestCase, + workdirs_TestCase, + workpath_TestCase, + writable_TestCase, + write_TestCase, + variables_TestCase, + ] + if sys.platform != 'win32': + tclasses.extend([ + executable_TestCase, + readable_TestCase, + ]) + suite = unittest.TestSuite() + for tclass in tclasses: + names = unittest.getTestCaseNames(tclass, 'test_') + suite.addTests([ tclass(n) for n in names ]) + if not unittest.TextTestRunner().run(suite).wasSuccessful(): + sys.exit(1) + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 shiftwidth=4: diff --git a/QMTest/TestCommon.py b/QMTest/TestCommon.py index dac9556..37d3f9c 100644 --- a/QMTest/TestCommon.py +++ b/QMTest/TestCommon.py @@ -92,8 +92,8 @@ The TestCommon module also provides the following variables # SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. __author__ = "Steven Knight <knight at baldmt dot com>" -__revision__ = "TestCommon.py 1.1.D001 2010/05/27 14:16:37 knight" -__version__ = "1.1" +__revision__ = "TestCommon.py 1.3.D001 2010/06/03 12:58:27 knight" +__version__ = "1.3" import copy import os diff --git a/QMTest/TestCommonTests.py b/QMTest/TestCommonTests.py new file mode 100644 index 0000000..8bf64fc --- /dev/null +++ b/QMTest/TestCommonTests.py @@ -0,0 +1,2094 @@ +#!/usr/bin/env python +""" +TestCommonTests.py: Unit tests for the TestCommon.py module. + +Copyright 2000-2010 Steven Knight +This module is free software, and you may redistribute it and/or modify +it under the same terms as Python itself, so long as this copyright message +and disclaimer are retained in their original form. + +IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF +THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, +AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, +SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +""" + +__author__ = "Steven Knight <knight at baldmt dot com>" +__revision__ = "TestCommonTests.py 1.3.D001 2010/06/03 12:58:27 knight" + +import difflib +import os +import re +import signal +import stat +import sys +import unittest + +# Strip the current directory so we get the right TestCommon.py module. 
+sys.path = sys.path[1:] + +import TestCmd +import TestCommon + +def lstrip(s): + lines = [ _.expandtabs() for _ in s.split('\n') ] + if lines[0] == '': + lines = lines[1:] + spaces = len(re.match('^( *).*', lines[0]).group(1)) + if spaces: + lines = [ l[spaces:] for l in lines ] + return '\n'.join(lines) + +if sys.version[:3] == '1.5': + expected_newline = '\\012' +else: + expected_newline = '\\n' + +def assert_display(expect, result, error=None): + try: + expect = expect.pattern + except AttributeError: + pass + result = [ + '\n', + ('*'*80) + '\n', + expect, + ('*'*80) + '\n', + result, + ('*'*80) + '\n', + ] + if error: + result.append(error) + return ''.join(result) + + +class TestCommonTestCase(unittest.TestCase): + """Base class for TestCommon test cases, fixture and utility methods.""" + create_run_env = True + + def setUp(self): + self.orig_cwd = os.getcwd() + if self.create_run_env: + self.run_env = TestCmd.TestCmd(workdir = '') + + def tearDown(self): + os.chdir(self.orig_cwd) + + def set_up_execution_scripts(self): + run_env = self.run_env + + run_env.subdir('sub dir') + + self.python = sys.executable + + self.pass_script = run_env.workpath('sub dir', 'pass') + self.fail_script = run_env.workpath('sub dir', 'fail') + self.stdout_script = run_env.workpath('sub dir', 'stdout') + self.stderr_script = run_env.workpath('sub dir', 'stderr') + self.signal_script = run_env.workpath('sub dir', 'signal') + self.stdin_script = run_env.workpath('sub dir', 'stdin') + + preamble = "import sys" + stdout = "; sys.stdout.write(r'%s: STDOUT: ' + repr(sys.argv[1:]) + '\\n')" + stderr = "; sys.stderr.write(r'%s: STDERR: ' + repr(sys.argv[1:]) + '\\n')" + exit0 = "; sys.exit(0)" + exit1 = "; sys.exit(1)" + if sys.platform == 'win32': + wrapper = '@python -c "%s" %%1 %%2 %%3 %%4 %%5 %%6 %%7 %%8 %%9\n' + else: + wrapper = '#! /usr/bin/env python\n%s\n' + wrapper = '#! /usr/bin/env python\n%s\n' + + pass_body = preamble + stdout % self.pass_script + exit0 + fail_body = preamble + stdout % self.fail_script + exit1 + stderr_body = preamble + stderr % self.stderr_script + exit0 + + run_env.write(self.pass_script, wrapper % pass_body) + run_env.write(self.fail_script, wrapper % fail_body) + run_env.write(self.stderr_script, wrapper % stderr_body) + + signal_body = lstrip("""\ + import os + import signal + os.kill(os.getpid(), signal.SIGTERM) + """) + + run_env.write(self.signal_script, wrapper % signal_body) + + stdin_body = lstrip("""\ + import sys + input = sys.stdin.read()[:-1] + sys.stdout.write(r'%s: STDOUT: ' + repr(input) + '\\n') + sys.stderr.write(r'%s: STDERR: ' + repr(input) + '\\n') + """ % (self.stdin_script, self.stdin_script)) + + run_env.write(self.stdin_script, wrapper % stdin_body) + + def run_execution_test(self, script, expect_stdout, expect_stderr): + self.set_up_execution_scripts() + + run_env = self.run_env + + os.chdir(run_env.workpath('sub dir')) + + # Everything before this prepared our "source directory." + # Now do the real test. 
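Each execution test below interpolates its embedded script against the test object's instance attributes before feeding it to a fresh interpreter on stdin. The interpolation step in isolation, with made-up paths standing in for the attributes set by set_up_execution_scripts():

    values = {'python': '/usr/bin/python',              # hypothetical paths
              'pass_script': '/tmp/sub dir/pass'}
    template = "tc = TestCommon(program=r'%(pass_script)s', interpreter='%(python)s')"
    print template % values
    # tc = TestCommon(program=r'/tmp/sub dir/pass', interpreter='/usr/bin/python')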
+ script = script % self.__dict__ + run_env.run(program=sys.executable, stdin=script) + + stdout = run_env.stdout() + stderr = run_env.stderr() + + expect_stdout = expect_stdout % self.__dict__ + assert stdout == expect_stdout, assert_display(expect_stdout, + stdout, + stderr) + + try: + match = expect_stderr.match + except AttributeError: + expect_stderr = expect_stderr % self.__dict__ + assert stderr == expect_stderr, assert_display(expect_stderr, + stderr) + else: + assert expect_stderr.match(stderr), assert_display(expect_stderr, + stderr) + + +class __init__TestCase(TestCommonTestCase): + def test___init__(self): + """Test initialization""" + run_env = self.run_env + + os.chdir(run_env.workdir) + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + import os + print os.getcwd() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout()[:-1] + assert stdout != run_env.workdir, stdout + stderr = run_env.stderr() + assert stderr == "", stderr + + +class banner_TestCase(TestCommonTestCase): + create_run_env = False + def test_banner(self): + """Test banner()""" + tc = TestCommon.TestCommon(workdir='') + + b = tc.banner('xyzzy ') + assert b == "xyzzy ==========================================================================", b + + tc.banner_width = 10 + + b = tc.banner('xyzzy ') + assert b == "xyzzy ====", b + + b = tc.banner('xyzzy ', 20) + assert b == "xyzzy ==============", b + + tc.banner_char = '-' + + b = tc.banner('xyzzy ') + assert b == "xyzzy ----", b + +class must_be_writable_TestCase(TestCommonTestCase): + def test_file_does_not_exists(self): + """Test must_be_writable(): file does not exist""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.must_be_writable('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Missing files: `file1'\n", stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_writable_file_exists(self): + """Test must_be_writable(): writable file exists""" + run_env = self.run_env + + script = lstrip("""\ + import os + import stat + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + f1 = tc.workpath('file1') + mode = os.stat(f1)[stat.ST_MODE] + os.chmod(f1, mode | stat.S_IWUSR) + tc.must_be_writable('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_non_writable_file_exists(self): + """Test must_be_writable(): non-writable file exists""" + run_env = self.run_env + + script = lstrip("""\ + import os + import stat + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + f1 = tc.workpath('file1') + mode = os.stat(f1)[stat.ST_MODE] + os.chmod(f1, mode & ~stat.S_IWUSR) + tc.must_be_writable('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Unwritable files: `file1'\n", stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_file_specified_as_list(self): + """Test must_be_writable(): file specified as list""" + run_env = self.run_env + + script = lstrip("""\ + import os + import stat + from TestCommon import TestCommon + tc = 
TestCommon(workdir='') + tc.subdir('sub') + tc.write(['sub', 'file1'], "sub/file1\\n") + f1 = tc.workpath('sub', 'file1') + mode = os.stat(f1)[stat.ST_MODE] + os.chmod(f1, mode | stat.S_IWUSR) + tc.must_be_writable(['sub', 'file1']) + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + +class must_contain_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_contain(): success""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 contents\\n") + tc.must_contain('file1', "1 c") + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_file_missing(self): + """Test must_contain(): file missing""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.must_contain('file1', "1 c\\n") + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr.find("No such file or directory:") != -1, stderr + + def test_failure(self): + """Test must_contain(): failure""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 does not match\\n") + tc.must_contain('file1', "1 c") + tc.run() + """) + expect = lstrip("""\ + File `file1' does not contain required string. + Required string ================================================================ + 1 c + file1 contents ================================================================= + file1 does not match + + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == expect, repr(stdout) + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_mode(self): + """Test must_contain(): mode""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 contents\\n", mode='w') + tc.must_contain('file1', "1 c", mode='r') + tc.write('file2', "file2 contents\\n", mode='wb') + tc.must_contain('file2', "2 c", mode='rb') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class must_contain_all_lines_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_contain_all_lines(): success""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_contain_all_lines(output, lines) + + test.must_contain_all_lines(output, ['www\\n']) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_failure(self): + """Test must_contain_all_lines(): failure""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + 
lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_all_lines(output, lines) + + test.pass_test() + """) + + expect = lstrip("""\ + Missing expected lines from output: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + output ========================================================================= + www + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + def test_find(self): + """Test must_contain_all_lines(): find""" + run_env = self.run_env + + script = lstrip(""" + import re + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'x.*', + '.*y', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + def re_search(output, line): + return re.compile(line, re.S).search(output) + test.must_contain_all_lines(output, lines, find=re_search) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_title(self): + """Test must_contain_all_lines(): title""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_all_lines(output, lines, title='STDERR') + + test.pass_test() + """) + + expect = lstrip("""\ + Missing expected lines from STDERR: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + STDERR ========================================================================= + www + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + + +class must_contain_any_line_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_contain_any_line(): success""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'aaa\\n', + 'yyy\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_contain_any_line(output, lines) + + test.must_contain_any_line(output, ['www\\n']) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_failure(self): + """Test must_contain_any_line(): failure""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_any_line(output, lines) + + test.pass_test() + """) + + expect = lstrip("""\ + Missing any expected line from output: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + output ========================================================================= + www + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + def test_find(self): + """Test must_contain_any_line(): find""" + run_env = 
self.run_env + + script = lstrip(""" + import re + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'aaa', + '.*y', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + def re_search(output, line): + return re.compile(line, re.S).search(output) + test.must_contain_any_line(output, lines, find=re_search) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_title(self): + """Test must_contain_any_line(): title""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_any_line(output, lines, title='STDOUT') + + test.pass_test() + """) + + expect = lstrip("""\ + Missing any expected line from STDOUT: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + STDOUT ========================================================================= + www + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + + +class must_contain_exactly_lines_TestCase(TestCommonTestCase): + def test_success_list(self): + """Test must_contain_exactly_lines(): success (input list)""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'yyy\\n', + 'xxx\\n', + 'zzz', + 'www\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_contain_exactly_lines(output, lines) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_success_string(self): + """Test must_contain_exactly_lines(): success (input string)""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = '''\\ + yyy + xxx + zzz + www + ''' + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_contain_exactly_lines(output, lines) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_failure(self): + """Test must_contain_exactly_lines(): failure""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_exactly_lines(output, lines) + + test.pass_test() + """) + + expect = lstrip("""\ + Missing expected lines from output: + 'xxx' + 'yyy' + Missing output ================================================================= + Extra unexpected lines from output: + 'www' + 'zzz' + Extra output =================================================================== + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + def test_find(self): + """Test must_contain_exactly_lines(): find""" + run_env = self.run_env + + 
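Note that the find callback passed to must_contain_exactly_lines() below returns the index of the matching line (or None), unlike the boolean-style callbacks used with must_contain_all_lines() and must_contain_any_line(); presumably the exactly-lines variant needs to know which line matched. For comparison, two illustrative callbacks:

    import re

    # A truth value is enough for must_contain_all_lines()/must_contain_any_line():
    def found(output, line):
        return re.compile(line, re.S).search(output)

    # The callback used below for must_contain_exactly_lines() instead returns
    # the index of the matching line, or None when nothing matches:
    def find_index(output_lines, line):
        pattern = re.compile(line, re.S)
        for i, candidate in enumerate(output_lines):
            if pattern.search(candidate):
                return i
        return None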
script = lstrip(""" + import re + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'zzz', + '.*y', + 'xxx', + 'www', + ] + + output = '''\\\ + www + xxx + yyy + zzz + ''' + + def re_search(output, line): + pattern = re.compile(line, re.S) + index = 0 + for o in output: + if pattern.search(o): + return index + index +=1 + return None + test.must_contain_exactly_lines(output, lines, find=re_search) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_title(self): + """Test must_contain_exactly_lines(): title""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_exactly_lines(output, lines, title='STDOUT') + + test.pass_test() + """) + + expect = lstrip("""\ + Missing expected lines from STDOUT: + 'xxx' + 'yyy' + Missing STDOUT ================================================================= + Extra unexpected lines from STDOUT: + 'www' + 'zzz' + Extra STDOUT =================================================================== + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + + +class must_contain_lines_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_contain_lines(): success""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_contain_lines(lines, output) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_failure(self): + """Test must_contain_lines(): failure""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + zzz + ''' + + test.must_contain_lines(lines, output) + + test.pass_test() + """) + + expect = lstrip("""\ + Missing expected lines from output: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + output ========================================================================= + www + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + + +class must_exist_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_exist(): success""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + tc.must_exist('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_failure(self): + """Test must_exist(): failure""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = 
TestCommon(workdir='') + tc.must_exist('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Missing files: `file1'\n", stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_file_specified_as_list(self): + """Test must_exist(): file specified as list""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.subdir('sub') + tc.write(['sub', 'file1'], "sub/file1\\n") + tc.must_exist(['sub', 'file1']) + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class must_match_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_match(): success""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + tc.must_match('file1', "file1\\n") + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_file_does_not_exists(self): + """Test must_match(): file does not exist""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.must_match('file1', "file1\\n") + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr.find("No such file or directory:") != -1, stderr + + def test_failure(self): + """Test must_match(): failure""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 does not match\\n") + tc.must_match('file1', "file1\\n") + tc.run() + """) + + expect = lstrip("""\ + Unexpected contents of `file1' + contents ======================================================================= + 1c1 + < file1 + --- + > file1 does not match + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == expect, stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_mode(self): + """Test must_match(): mode""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n", mode='w') + tc.must_match('file1', "file1\\n", mode='r') + tc.write('file2', "file2\\n", mode='wb') + tc.must_match('file2', "file2\\n", mode='rb') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class must_not_be_writable_TestCase(TestCommonTestCase): + def test_file_does_not_exists(self): + """Test must_not_be_writable(): file does not exist""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.must_not_be_writable('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Missing files: `file1'\n", stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_writable_file_exists(self): + 
"""Test must_not_be_writable(): writable file exists""" + run_env = self.run_env + + script = lstrip("""\ + import os + import stat + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + f1 = tc.workpath('file1') + mode = os.stat(f1)[stat.ST_MODE] + os.chmod(f1, mode | stat.S_IWUSR) + tc.must_not_be_writable('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Writable files: `file1'\n", stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_non_writable_file_exists(self): + """Test must_not_be_writable(): non-writable file exists""" + run_env = self.run_env + + script = lstrip("""\ + import os + import stat + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + f1 = tc.workpath('file1') + mode = os.stat(f1)[stat.ST_MODE] + os.chmod(f1, mode & ~stat.S_IWUSR) + tc.must_not_be_writable('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_file_specified_as_list(self): + """Test must_not_be_writable(): file specified as list""" + run_env = self.run_env + + script = lstrip("""\ + import os + import stat + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.subdir('sub') + tc.write(['sub', 'file1'], "sub/file1\\n") + f1 = tc.workpath('sub', 'file1') + mode = os.stat(f1)[stat.ST_MODE] + os.chmod(f1, mode & ~stat.S_IWUSR) + tc.must_not_be_writable(['sub', 'file1']) + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class must_not_contain_TestCase(TestCommonTestCase): + def test_success(self): + """Test must_not_contain(): success""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 contents\\n") + tc.must_not_contain('file1', "1 does not contain c") + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_file_does_not_exist(self): + """Test must_not_contain(): file does not exist""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.must_not_contain('file1', "1 c\\n") + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr.find("No such file or directory:") != -1, stderr + + def test_failure(self): + """Test must_not_contain(): failure""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 does contain contents\\n") + tc.must_not_contain('file1', "1 does contain c") + tc.run() + """) + expect = lstrip("""\ + File `file1' contains banned string. 
+ Banned string ================================================================== + 1 does contain c + file1 contents ================================================================= + file1 does contain contents + + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == expect, repr(stdout) + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_mode(self): + """Test must_not_contain(): mode""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1 contents\\n", mode='w') + tc.must_not_contain('file1', "1 does not contain c", mode='r') + tc.write('file2', "file2 contents\\n", mode='wb') + tc.must_not_contain('file2', "2 does not contain c", mode='rb') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class must_not_contain_any_line_TestCase(TestCommonTestCase): + def test_failure(self): + """Test must_not_contain_any_line(): failure""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + 'www\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_not_contain_any_line(output, lines) + + test.pass_test() + """) + + expect = lstrip("""\ + Unexpected lines in output: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + 'www%(expected_newline)s' + output ========================================================================= + www + xxx + yyy + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + def test_find(self): + """Test must_not_contain_any_line(): find""" + run_env = self.run_env + + script = lstrip(""" + import re + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'x.*' + '.*y' + ] + + output = '''\\ + www + zzz + ''' + + def re_search(output, line): + return re.compile(line, re.S).search(output) + test.must_not_contain_any_line(output, lines, find=re_search) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_success(self): + """Test must_not_contain_any_line(): success""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n' + 'yyy\\n' + ] + + output = '''\\ + www + zzz + ''' + + test.must_not_contain_any_line(output, lines) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_title(self): + """Test must_not_contain_any_line(): title""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_not_contain_any_line(output, lines, title='XYZZY') + + test.pass_test() + """) + + expect = lstrip("""\ + Unexpected lines in XYZZY: + 'xxx%(expected_newline)s' + 
'yyy%(expected_newline)s' + XYZZY ========================================================================== + www + xxx + yyy + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + + +class must_not_contain_lines_TestCase(TestCommonTestCase): + def test_failure(self): + """Test must_not_contain_lines(): failure""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n', + 'yyy\\n', + ] + + output = '''\\ + www + xxx + yyy + zzz + ''' + + test.must_not_contain_lines(lines, output) + + test.pass_test() + """) + + expect = lstrip("""\ + Unexpected lines in output: + 'xxx%(expected_newline)s' + 'yyy%(expected_newline)s' + output ========================================================================= + www + xxx + yyy + zzz + """ % globals()) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + stderr = run_env.stderr() + assert stdout == expect, assert_display(expect, stdout, stderr) + assert stderr.find("FAILED") != -1, stderr + + def test_success(self): + """Test must_not_contain_lines(): success""" + run_env = self.run_env + + script = lstrip(""" + import TestCommon + test = TestCommon.TestCommon(workdir='') + + lines = [ + 'xxx\\n' + 'yyy\\n' + ] + + output = '''\\ + www + zzz + ''' + + test.must_not_contain_lines(lines, output) + + test.pass_test() + """) + + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class must_not_exist_TestCase(TestCommonTestCase): + def test_failure(self): + """Test must_not_exist(): failure""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.write('file1', "file1\\n") + tc.must_not_exist('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Unexpected files exist: `file1'\n", stdout + stderr = run_env.stderr() + assert stderr.find("FAILED") != -1, stderr + + def test_success(self): + """Test must_not_exist(): success""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.must_not_exist('file1') + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + def test_file_specified_as_list(self): + """Test must_not_exist(): file specified as list""" + run_env = self.run_env + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(workdir='') + tc.subdir('sub') + tc.must_not_exist(['sub', 'file1']) + tc.pass_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + + +class run_TestCase(TestCommonTestCase): + def test_argument_handling(self): + """Test run(): argument handling""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir="", + match=match_exact) + tc.run(arguments = "arg1 arg2 arg3", + stdout = 
r"%(pass_script)s: STDOUT: ['arg1', 'arg2', 'arg3']" + "\\n") + """) + + self.run_execution_test(script, "", "") + + def test_default_pass(self): + """Test run(): default arguments, script passes""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(pass_script)s', + interpreter=r'%(python)s', + workdir='') + tc.run() + """) + + self.run_execution_test(script, "", "") + + def test_default_fail(self): + """Test run(): default arguments, script fails""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(fail_script)s', + interpreter='%(python)s', + workdir='') + tc.run() + """) + + expect_stdout = lstrip("""\ + %(fail_script)s returned 1 + STDOUT ========================================================================= + %(fail_script)s: STDOUT: [] + + STDERR ========================================================================= + + """) + + expect_stderr = lstrip("""\ + FAILED test of .*fail + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin>( \(<module>\))? + """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_default_stderr(self): + """Test run(): default arguments, error output""" + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(stderr_script)s', + interpreter='%(python)s', + workdir='') + tc.run() + """) + + expect_stdout = lstrip("""\ + STDOUT ========================================================================= + + STDERR ========================================================================= + 0a1 + > %(stderr_script)s: STDERR: [] + """) + + expect_stderr = lstrip("""\ + FAILED test of .*stderr + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin> + """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_exception_handling(self): + """Test run(): exception handling""" + script = lstrip("""\ + import TestCmd + from TestCommon import TestCommon + def raise_exception(*args, **kw): + raise TypeError("forced TypeError") + TestCmd.TestCmd.start = raise_exception + tc = TestCommon(program='%(pass_script)s', + interpreter='%(python)s', + workdir='') + tc.run() + """) + + expect_stdout = lstrip("""\ + STDOUT ========================================================================= + STDERR ========================================================================= + """) + + expect_stderr = lstrip("""\ + Exception trying to execute: \\[%s, '[^']*pass'\\] + Traceback \\((innermost|most recent call) last\\): + File "<stdin>", line \\d+, in (\\?|<module>) + File "[^"]+TestCommon.py", line \\d+, in run + TestCmd.run\\(self, \\*\\*kw\\) + File "[^"]+TestCmd.py", line \\d+, in run + .* + File "[^"]+TestCommon.py", line \\d+, in start + raise e + TypeError: forced TypeError + """ % re.escape(repr(sys.executable))) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_ignore_stderr(self): + """Test run(): ignore stderr""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(stderr_script)s', + interpreter='%(python)s', + workdir='') + tc.run(stderr = None) + """) + + self.run_execution_test(script, "", "") + + def 
test_match_function_stdout(self): + """Test run(): explicit match function, stdout""" + + script = lstrip("""\ + def my_match_exact(actual, expect): return actual == expect + from TestCommon import TestCommon, match_re_dotall + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir="", + match=match_re_dotall) + tc.run(arguments = "arg1 arg2 arg3", + stdout = r"%(pass_script)s: STDOUT: ['arg1', 'arg2', 'arg3']" + "\\n", + match = my_match_exact) + """) + + self.run_execution_test(script, "", "") + + def test_match_function_stderr(self): + """Test run(): explicit match function, stderr""" + + script = lstrip("""\ + def my_match_exact(actual, expect): return actual == expect + from TestCommon import TestCommon, match_re_dotall + tc = TestCommon(program=r'%(stderr_script)s', + interpreter='%(python)s', + workdir="", + match=match_re_dotall) + tc.run(arguments = "arg1 arg2 arg3", + stderr = r"%(stderr_script)s: STDERR: ['arg1', 'arg2', 'arg3']" + "\\n", + match = my_match_exact) + """) + + self.run_execution_test(script, "", "") + + def test_matched_status_fails(self): + """Test run(): matched status, script fails""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(fail_script)s', + interpreter='%(python)s', + workdir='') + tc.run(status = 1) + """) + + self.run_execution_test(script, "", "") + + def test_matched_stdout(self): + """Test run(): matched stdout""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir="", + match=match_exact) + tc.run(stdout = r"%(pass_script)s: STDOUT: []" + "\\n") + """) + + self.run_execution_test(script, "", "") + + def test_matched_stderr(self): + """Test run(): matched stderr""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(stderr_script)s', + interpreter='%(python)s', + workdir="", + match=match_exact) + tc.run(stderr = r"%(stderr_script)s: STDERR: []" + "\\n") + """) + + self.run_execution_test(script, "", "") + + def test_mismatched_status_pass(self): + """Test run(): mismatched status, script passes""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir='') + tc.run(status = 1) + """) + + expect_stdout = lstrip("""\ + %(pass_script)s returned 0 (expected 1) + STDOUT ========================================================================= + %(pass_script)s: STDOUT: [] + + STDERR ========================================================================= + + """) + + expect_stderr = lstrip("""\ + FAILED test of .*pass + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin>( \(<module>\))? 
+ """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_mismatched_status_fail(self): + """Test run(): mismatched status, script fails""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(fail_script)s', + interpreter='%(python)s', + workdir='') + tc.run(status = 2) + """) + + expect_stdout = lstrip("""\ + %(fail_script)s returned 1 (expected 2) + STDOUT ========================================================================= + %(fail_script)s: STDOUT: [] + + STDERR ========================================================================= + + """) + + expect_stderr = lstrip("""\ + FAILED test of .*fail + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin>( \(<module>\))? + """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_mismatched_stdout(self): + """Test run(): mismatched stdout""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir='') + tc.run(stdout = "Not found\\n") + """) + + expect_stdout = lstrip("""\ + STDOUT ========================================================================= + 1c1 + < Not found + --- + > %(pass_script)s: STDOUT: [] + """) + + expect_stderr = lstrip("""\ + FAILED test of .*pass + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin>( \(<module>\))? + """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_mismatched_stderr(self): + """Test run(): mismatched stderr""" + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(stderr_script)s', + interpreter='%(python)s', + workdir='') + tc.run(stderr = "Not found\\n") + """) + + expect_stdout = lstrip("""\ + STDOUT ========================================================================= + + STDERR ========================================================================= + 1c1 + < Not found + --- + > %(stderr_script)s: STDERR: [] + """) + + expect_stderr = lstrip("""\ + FAILED test of .*stderr + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin>( \(<module>\))? 
+ """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_option_handling(self): + """Test run(): option handling""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir="", + match=match_exact) + tc.run(options = "opt1 opt2 opt3", + stdout = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3']" + "\\n") + """) + + self.run_execution_test(script, "", "") + + def test_options_plus_arguments(self): + """Test run(): option handling with arguments""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir="", + match=match_exact) + tc.run(options = "opt1 opt2 opt3", + arguments = "arg1 arg2 arg3", + stdout = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3', 'arg1', 'arg2', 'arg3']" + "\\n") + """) + + self.run_execution_test(script, "", "") + + def test_signal_handling(self): + """Test run(): signal handling""" + + try: + os.kill + except AttributeError: + sys.stderr.write('can not test, no os.kill ... ') + return + + script = lstrip("""\ + from TestCommon import TestCommon + tc = TestCommon(program=r'%(signal_script)s', + interpreter='%(python)s', + workdir='') + tc.run() + """) + + self.SIGTERM = signal.SIGTERM + + # Script returns the signal value as a negative number. + expect_stdout = lstrip("""\ + %(signal_script)s returned -%(SIGTERM)s + STDOUT ========================================================================= + + STDERR ========================================================================= + + """) + + expect_stderr = lstrip("""\ + FAILED test of .*signal + \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\) + \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\) + \\tfrom line \\d+ of <stdin> + """) + expect_stderr = re.compile(expect_stderr, re.M) + + self.run_execution_test(script, expect_stdout, expect_stderr) + + def test_stdin(self): + """Test run(): stdin handling""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(stdin_script)s', + interpreter='%(python)s', + workdir='', + match=match_exact) + expect_stdout = r"%(stdin_script)s: STDOUT: 'input'" + "\\n" + expect_stderr = r"%(stdin_script)s: STDERR: 'input'" + "\\n" + tc.run(stdin="input\\n", stdout = expect_stdout, stderr = expect_stderr) + """) + + expect_stdout = lstrip("""\ + %(pass_script)s returned 0 (expected 1) + STDOUT ========================================================================= + %(pass_script)s: STDOUT: [] + + STDERR ========================================================================= + + """) + + self.run_execution_test(script, "", "") + + + +class start_TestCase(TestCommonTestCase): + def test_option_handling(self): + """Test start(): option handling""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(pass_script)s', + interpreter='%(python)s', + workdir="", + match=match_exact) + p = tc.start(options = "opt1 opt2 opt3") + expect = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3']" + "\\n" + tc.finish(p, stdout = expect) + """) + + self.run_execution_test(script, "", "") + + def test_options_plus_arguments(self): + """Test start(): option handling with arguments""" + + script = lstrip("""\ + from TestCommon import TestCommon, match_exact + tc = TestCommon(program=r'%(pass_script)s', + 
interpreter='%(python)s', + workdir="", + match=match_exact) + p = tc.start(options = "opt1 opt2 opt3", + arguments = "arg1 arg2 arg3") + expect = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3', 'arg1', 'arg2', 'arg3']" + "\\n" + tc.finish(p, stdout = expect) + """) + + self.run_execution_test(script, "", "") + + + +class skip_test_TestCase(TestCommonTestCase): + def test_skip_test(self): + """Test skip_test()""" + run_env = self.run_env + + script = lstrip("""\ + import TestCommon + test = TestCommon.TestCommon(workdir='') + test.skip_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Skipping test.\n", stdout + stderr = run_env.stderr() + expect = [ + "NO RESULT for test at line 3 of <stdin>\n", + "NO RESULT for test at line 3 of <stdin> (<module>)\n", + ] + assert stderr in expect, repr(stderr) + + script = lstrip("""\ + import TestCommon + test = TestCommon.TestCommon(workdir='') + test.skip_test("skipping test because I said so\\n") + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "skipping test because I said so\n", stdout + stderr = run_env.stderr() + expect = [ + "NO RESULT for test at line 3 of <stdin>\n", + "NO RESULT for test at line 3 of <stdin> (<module>)\n", + ] + assert stderr in expect, repr(stderr) + + import os + os.environ['TESTCOMMON_PASS_SKIPS'] = '1' + + try: + script = lstrip("""\ + import TestCommon + test = TestCommon.TestCommon(workdir='') + test.skip_test() + """) + run_env.run(program=sys.executable, stdin=script) + stdout = run_env.stdout() + assert stdout == "Skipping test.\n", stdout + stderr = run_env.stderr() + assert stderr == "PASSED\n", stderr + + finally: + del os.environ['TESTCOMMON_PASS_SKIPS'] + + + +class variables_TestCase(TestCommonTestCase): + def test_variables(self): + """Test global variables""" + run_env = self.run_env + + variables = [ + 'fail_test', + 'no_result', + 'pass_test', + 'match_exact', + 'match_re', + 'match_re_dotall', + 'python', + '_python_', + 'TestCmd', + + 'TestCommon', + 'exe_suffix', + 'obj_suffix', + 'shobj_prefix', + 'shobj_suffix', + 'lib_prefix', + 'lib_suffix', + 'dll_prefix', + 'dll_suffix', + ] + + script = "import TestCommon\n" + \ + '\n'.join([ "print TestCommon.%s\n" % v for v in variables ]) + run_env.run(program=sys.executable, stdin=script) + stderr = run_env.stderr() + assert stderr == "", stderr + + script = "from TestCommon import *\n" + \ + '\n'.join([ "print %s" % v for v in variables ]) + run_env.run(program=sys.executable, stdin=script) + stderr = run_env.stderr() + assert stderr == "", stderr + + + +if __name__ == "__main__": + tclasses = [ + __init__TestCase, + banner_TestCase, + must_be_writable_TestCase, + must_contain_TestCase, + must_contain_all_lines_TestCase, + must_contain_any_line_TestCase, + must_contain_exactly_lines_TestCase, + must_contain_lines_TestCase, + must_exist_TestCase, + must_match_TestCase, + must_not_be_writable_TestCase, + must_not_contain_TestCase, + must_not_contain_any_line_TestCase, + must_not_contain_lines_TestCase, + must_not_exist_TestCase, + run_TestCase, + start_TestCase, + skip_test_TestCase, + variables_TestCase, + ] + suite = unittest.TestSuite() + for tclass in tclasses: + names = unittest.getTestCaseNames(tclass, 'test_') + suite.addTests([ tclass(n) for n in names ]) + if not unittest.TextTestRunner().run(suite).wasSuccessful(): + sys.exit(1) + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 
shiftwidth=4: diff --git a/src/engine/SCons/Scanner/LaTeX.py b/src/engine/SCons/Scanner/LaTeX.py index d3c5c9a..b8d48b1 100644 --- a/src/engine/SCons/Scanner/LaTeX.py +++ b/src/engine/SCons/Scanner/LaTeX.py @@ -168,8 +168,11 @@ class LaTeX(SCons.Scanner.Base): # Without the \n, the ^ could match the beginning of a *previous* # line followed by one or more newline characters (i.e. blank # lines), interfering with a match on the next line. - regex = r'^[^%\n]*\\(include|includegraphics(?:\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|usepackage){([^}]*)}' + # add option for whitespace before the '[options]' or the '{filename}' + regex = r'^[^%\n]*\\(include|includegraphics(?:\s*\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|usepackage)\s*{([^}]*)}' self.cre = re.compile(regex, re.M) + self.comment_re = re.compile(r'^((?:(?:\\%)|[^%\n])*)(.*)$', re.M) + self.graphics_extensions = graphics_extensions def _scan(node, env, path=(), self=self): @@ -274,6 +277,23 @@ class LaTeX(SCons.Scanner.Base): return i, include return i, include + def canonical_text(self, text): + """Standardize an input TeX-file contents. + + Currently: + * removes comments, unwrapping comment-wrapped lines. + """ + out = [] + line_continues_a_comment = False + for line in text.splitlines(): + line,comment = self.comment_re.findall(line)[0] + if line_continues_a_comment == True: + out[-1] = out[-1] + line.lstrip() + else: + out.append(line) + line_continues_a_comment = len(comment) > 0 + return '\n'.join(out).rstrip()+'\n' + def scan(self, node): # Modify the default scan function to allow for the regular # expression to return a comma separated list of file names @@ -281,11 +301,13 @@ class LaTeX(SCons.Scanner.Base): # Cache the includes list in node so we only scan it once: # path_dict = dict(list(path)) - noopt_cre = re.compile('\[.*$') + # add option for whitespace (\s) before the '[' + noopt_cre = re.compile('\s*\[.*$') if node.includes != None: includes = node.includes else: - includes = self.cre.findall(node.get_text_contents()) + text = self.canonical_text(node.get_text_contents()) + includes = self.cre.findall(text) # 1. Split comma-separated lines, e.g. 
# ('bibliography', 'phys,comp') # should become two entries diff --git a/src/engine/SCons/Tool/tex.py b/src/engine/SCons/Tool/tex.py index a2870b5..37881bb 100644 --- a/src/engine/SCons/Tool/tex.py +++ b/src/engine/SCons/Tool/tex.py @@ -38,6 +38,7 @@ import os.path import re import shutil import sys +import platform import SCons.Action import SCons.Node @@ -757,45 +758,50 @@ def generate_common(env): if MakeAcronymsAction is None: MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR") + CDCOM = 'cd ' + if platform.system() == 'Windows': + # allow cd command to change drives on Windows + CDCOM = 'cd /D ' + env['TEX'] = 'tex' env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') - env['TEXCOM'] = 'cd ${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}' + env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}' env['PDFTEX'] = 'pdftex' env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') - env['PDFTEXCOM'] = 'cd ${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}' + env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}' env['LATEX'] = 'latex' env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') - env['LATEXCOM'] = 'cd ${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}' + env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}' env['LATEXRETRIES'] = 3 env['PDFLATEX'] = 'pdflatex' env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder') - env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}' + env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}' env['BIBTEX'] = 'bibtex' env['BIBTEXFLAGS'] = SCons.Util.CLVar('') - env['BIBTEXCOM'] = 'cd ${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}' + env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}' env['MAKEINDEX'] = 'makeindex' env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('') - env['MAKEINDEXCOM'] = 'cd ${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}' + env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}' env['MAKEGLOSSARY'] = 'makeindex' env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist' env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg') - env['MAKEGLOSSARYCOM'] = 'cd ${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls' + env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls' env['MAKEACRONYMS'] = 'makeindex' env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist' env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg') - env['MAKEACRONYMSCOM'] = 'cd ${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr' + env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr' env['MAKENCL'] = 'makeindex' env['MAKENCLSTYLE'] = 'nomencl.ist' env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg' - env['MAKENCLCOM'] = 'cd ${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls' + env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls' def exists(env): return env.Detect('tex') diff --git a/src/test_strings.py 
b/src/test_strings.py index 0b39592..26cd2ad 100644 --- a/src/test_strings.py +++ b/src/test_strings.py @@ -211,7 +211,9 @@ check_list = [ 'QMTest/classes.qmc', 'QMTest/configuration', 'QMTest/TestCmd.py', + 'QMTest/TestCmdTests.py', 'QMTest/TestCommon.py', + 'QMTest/TestCommonTests.py', 'src/os_spawnv_fix.diff', 'src/MANIFEST.in', 'src/setup.cfg', diff --git a/test/SConstruct.py b/test/SConstruct.py index f061728..15e1c1b 100644 --- a/test/SConstruct.py +++ b/test/SConstruct.py @@ -27,16 +27,15 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import TestCmd import TestSCons -test = TestSCons.TestSCons(match = TestCmd.match_re) +test = TestSCons.TestSCons() test.run(arguments = ".", status = 2, stdout = "", stderr = r""" scons: \*\*\* No SConstruct file found. -""" + TestSCons.file_expr) - -test.match_function = TestCmd.match_exact +""" + TestSCons.file_expr, + match = TestCmd.match_re) wpath = test.workpath() diff --git a/test/TEX/eps_graphics2.py b/test/TEX/eps_graphics2.py index d291b0b..d388b69 100644 --- a/test/TEX/eps_graphics2.py +++ b/test/TEX/eps_graphics2.py @@ -158,7 +158,9 @@ To get a hard copy of this report call me. \begin{figure}[htbp] \begin{center} -\includegraphics{Fig1} +\includegraphics + [width=.5\textwidth] + {Fig1} \caption{Zone and Node indexing} \label{fig1} \end{center} @@ -170,7 +172,7 @@ All done now. """) # makeindex will write status messages to stderr (grrr...), so ignore it. -test.run(arguments = '.', stderr=None) +test.run(arguments = 'docs/test.pdf', stderr=None) # All (?) the files we expect will get created in the docs directory diff --git a/test/TEX/multi-line_include_options.py b/test/TEX/multi-line_include_options.py new file mode 100644 index 0000000..7d536dc --- /dev/null +++ b/test/TEX/multi-line_include_options.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +# +# __COPYRIGHT__ +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" + +""" +When an inclusion's optional argument (enclosed in square brackets: +[]) spans multiple lines (via comment wrapping), ensure that the LaTeX +Scanner doesn't throw an IndexError. + +An example of this in the wild is in Thomas Heim's epsdice LaTeX package: + \includegraphics[height=1.75ex,viewport= 3 4 38 39,% + clip=true]{\dicefile}% +In epsdice 2007/02/15, v. 2.1. 
+""" + +import TestSCons + +_exe = TestSCons._exe + +test = TestSCons.TestSCons() + +latex = test.where_is('latex') + +if not latex: + test.skip_test("Could not find latex; skipping test(s).\n") + +test.write('SConstruct', """\ +import os +env = Environment(ENV = { 'PATH' : os.environ['PATH'] }) +env.DVI('root.tex') +""") + +test.write('root.tex', +r"""\documentclass{article} +\usepackage{graphicx} +\begin{document} + \includegraphics[height=1.75ex,% + clip=true]{square} +\end{document} +""") + +# Dummy EPS file drawing a square +test.write('square.eps', +r"""%!PS-Adobe-2.0 EPSF-1.2 +%%BoundingBox: 0 0 20 20 + newpath + 5 5 moveto + 15 5 lineto + 15 15 lineto + 5 15 lineto + 5 5 lineto + stroke +%%EOF +""") + +test.run(arguments = '.') + +test.must_exist(test.workpath('root.dvi')) +test.must_exist(test.workpath('root.log')) + +test.pass_test() + +# Local Variables: +# tab-width:4 +# indent-tabs-mode:nil +# End: +# vim: set expandtab tabstop=4 shiftwidth=4: + diff --git a/test/option-n.py b/test/option-n.py index b8dde0f..a32dfb7 100644 --- a/test/option-n.py +++ b/test/option-n.py @@ -168,8 +168,8 @@ test.fail_test(os.path.exists(test.workpath('build', 'f4.in'))) # test Configure-calls in conjunction with -n test.subdir('configure') -test.match_function = TestSCons.match_re_dotall -test.diff_function = TestSCons.diff_re +test.set_match_function(TestSCons.match_re_dotall) +test.set_diff_function(TestSCons.diff_re) test.write('configure/SConstruct', """def CustomTest(context): def userAction(target,source,env): |