author    Tom Hughes <tomhughes@google.com>             2023-01-25 17:13:35 (GMT)
committer Copybara-Service <copybara-worker@google.com> 2023-01-25 17:14:26 (GMT)
commit    d1ad27e0a4ba67161f554da25c5f281ec1e94c44 (patch)
tree      f78137b634cfea8b9d9037b36a42413d8f9bef4d /googletest
parent    6c65a1ca359052e9fd671b550fe748056be74af6 (diff)
Fix formatting in subset of Python files
These files were formatted with automated tools. The remaining Python files require some manual fix-ups, so they will be fixed separately.

PiperOrigin-RevId: 504579820
Change-Id: I3923bd414bffe3ded6163ec496cd09ace3951928
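The commit does not name the formatter, but nearly every hunk below applies the same mechanical rewrite: continuation lines that were aligned under the opening parenthesis become a four-space hanging indent with one argument per line, a trailing comma, and the closing parenthesis dedented onto its own line; strings containing an apostrophe switch from an escaped single quote to double quotes. A minimal before/after sketch of the pattern (the binary path here is illustrative, not taken from this commit):

    import subprocess

    # Before: continuation lines aligned under the opening parenthesis
    # (the style this commit replaces; binary path is hypothetical).
    result = subprocess.run(['./googletest-break-on-failure-unittest_',
                             '--gtest_list_tests'],
                            capture_output=True,
                            text=True)

    # After: four-space hanging indent, one argument per line, trailing
    # comma, and the closing parenthesis on its own line.
    result = subprocess.run(
        ['./googletest-break-on-failure-unittest_', '--gtest_list_tests'],
        capture_output=True,
        text=True,
    )

The two-space block indent and single-quote preference visible in the hunks are consistent with Google's internal Python style; the exact tool that produced them is not stated.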
Diffstat (limited to 'googletest')
-rwxr-xr-x  googletest/test/googletest-break-on-failure-unittest.py  67
-rwxr-xr-x  googletest/test/googletest-catch-exceptions-test.py  162
-rwxr-xr-x  googletest/test/googletest-color-test.py  1
-rwxr-xr-x  googletest/test/googletest-env-var-test.py  2
-rwxr-xr-x  googletest/test/googletest-failfast-unittest.py  201
-rwxr-xr-x  googletest/test/googletest-filter-unittest.py  382
-rw-r--r--  googletest/test/googletest-global-environment-unittest.py  89
-rw-r--r--  googletest/test/googletest-json-outfiles-test.py  146
-rw-r--r--  googletest/test/googletest-json-output-unittest.py  1294
-rwxr-xr-x  googletest/test/googletest-list-tests-unittest.py  69
-rwxr-xr-x  googletest/test/googletest-output-test.py  164
-rw-r--r--  googletest/test/googletest-param-test-invalid-name1-test.py  2
-rw-r--r--  googletest/test/googletest-param-test-invalid-name2-test.py  3
-rwxr-xr-x  googletest/test/googletest-setuptestsuite-test.py  16
-rwxr-xr-x  googletest/test/googletest-throw-on-failure-test.py  54
-rwxr-xr-x  googletest/test/googletest-uninitialized-test.py  7
-rwxr-xr-x  googletest/test/gtest_help_test.py  54
-rw-r--r--  googletest/test/gtest_json_test_utils.py  2
-rw-r--r--  googletest/test/gtest_list_output_unittest.py  27
-rwxr-xr-x  googletest/test/gtest_skip_check_output_test.py  3
-rwxr-xr-x  googletest/test/gtest_skip_environment_check_output_test.py  3
-rwxr-xr-x  googletest/test/gtest_xml_outfiles_test.py  15
22 files changed, 1441 insertions(+), 1322 deletions(-)
diff --git a/googletest/test/googletest-break-on-failure-unittest.py b/googletest/test/googletest-break-on-failure-unittest.py
index c5c0b15..3f65812 100755
--- a/googletest/test/googletest-break-on-failure-unittest.py
+++ b/googletest/test/googletest-break-on-failure-unittest.py
@@ -59,7 +59,8 @@ CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the googletest-break-on-failure-unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
- 'googletest-break-on-failure-unittest_')
+ 'googletest-break-on-failure-unittest_'
+)
environ = gtest_test_utils.environ
@@ -98,11 +99,11 @@ class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
- variable; None if the variable should be unset.
- flag_value: value of the --gtest_break_on_failure flag;
- None if the flag should not be present.
- expect_seg_fault: 1 if the program is expected to generate a seg-fault;
- 0 otherwise.
+ variable; None if the variable should be unset.
+ flag_value: value of the --gtest_break_on_failure flag; None if the
+ flag should not be present.
+ expect_seg_fault: 1 if the program is expected to generate a seg-fault; 0
+ otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
@@ -132,74 +133,56 @@ class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
- msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
- (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
- should_or_not))
+ msg = 'when %s%s, an assertion failure in "%s" %s cause a seg-fault.' % (
+ BREAK_ON_FAILURE_ENV_VAR,
+ env_var_value_msg,
+ ' '.join(command),
+ should_or_not,
+ )
self.assertTrue(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
- self.RunAndVerify(env_var_value=None,
- flag_value=None,
- expect_seg_fault=0)
+ self.RunAndVerify(env_var_value=None, flag_value=None, expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
- self.RunAndVerify(env_var_value='0',
- flag_value=None,
- expect_seg_fault=0)
- self.RunAndVerify(env_var_value='1',
- flag_value=None,
- expect_seg_fault=1)
+ self.RunAndVerify(env_var_value='0', flag_value=None, expect_seg_fault=0)
+ self.RunAndVerify(env_var_value='1', flag_value=None, expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
- self.RunAndVerify(env_var_value=None,
- flag_value='0',
- expect_seg_fault=0)
- self.RunAndVerify(env_var_value=None,
- flag_value='1',
- expect_seg_fault=1)
+ self.RunAndVerify(env_var_value=None, flag_value='0', expect_seg_fault=0)
+ self.RunAndVerify(env_var_value=None, flag_value='1', expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
- self.RunAndVerify(env_var_value='0',
- flag_value='0',
- expect_seg_fault=0)
- self.RunAndVerify(env_var_value='0',
- flag_value='1',
- expect_seg_fault=1)
- self.RunAndVerify(env_var_value='1',
- flag_value='0',
- expect_seg_fault=0)
- self.RunAndVerify(env_var_value='1',
- flag_value='1',
- expect_seg_fault=1)
+ self.RunAndVerify(env_var_value='0', flag_value='0', expect_seg_fault=0)
+ self.RunAndVerify(env_var_value='0', flag_value='1', expect_seg_fault=1)
+ self.RunAndVerify(env_var_value='1', flag_value='0', expect_seg_fault=0)
+ self.RunAndVerify(env_var_value='1', flag_value='1', expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
- self.RunAndVerify(env_var_value=None,
- flag_value='1',
- expect_seg_fault=1)
+ self.RunAndVerify(env_var_value=None, flag_value='1', expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
+
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
- self.RunAndVerify(env_var_value='1',
- flag_value='1',
- expect_seg_fault=1)
+ self.RunAndVerify(env_var_value='1', flag_value='1', expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
diff --git a/googletest/test/googletest-catch-exceptions-test.py b/googletest/test/googletest-catch-exceptions-test.py
index 07f1582..a850ea4 100755
--- a/googletest/test/googletest-catch-exceptions-test.py
+++ b/googletest/test/googletest-catch-exceptions-test.py
@@ -46,12 +46,14 @@ FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the googletest-catch-exceptions-ex-test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
- 'googletest-catch-exceptions-ex-test_')
+ 'googletest-catch-exceptions-ex-test_'
+)
# Path to the googletest-catch-exceptions-test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
- 'googletest-catch-exceptions-no-ex-test_')
+ 'googletest-catch-exceptions-no-ex-test_'
+)
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
@@ -64,7 +66,8 @@ SetEnvVar = gtest_test_utils.SetEnvVar
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
- [EXE_PATH, LIST_TESTS_FLAG], env=environ).output
+ [EXE_PATH, LIST_TESTS_FLAG], env=environ
+).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
@@ -72,7 +75,8 @@ if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
- [EX_EXE_PATH], env=environ).output
+ [EX_EXE_PATH], env=environ
+).output
# The tests.
@@ -123,18 +127,20 @@ if SUPPORTS_SEH_EXCEPTIONS:
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
- Tests in this test case verify that:
- * C++ exceptions are caught and logged as C++ (not SEH) exceptions
- * Exception thrown affect the remainder of the test work flow in the
- expected manner.
+ Tests in this test case verify that:
+ * C++ exceptions are caught and logged as C++ (not SEH) exceptions
+ * Exception thrown affect the remainder of the test work flow in the
+ expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assertTrue(
'C++ exception with description '
'"Standard C++ exception" thrown '
- 'in the test fixture\'s constructor' in EX_BINARY_OUTPUT,
- EX_BINARY_OUTPUT)
+ "in the test fixture's constructor"
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
'unexpected' not in EX_BINARY_OUTPUT,
(
@@ -144,60 +150,94 @@ class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
),
)
- if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
- EX_BINARY_OUTPUT):
+ if (
+ 'CxxExceptionInDestructorTest.ThrowsExceptionInDestructor'
+ in EX_BINARY_OUTPUT
+ ):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assertTrue(
'C++ exception with description '
'"Standard C++ exception" thrown '
- 'in the test fixture\'s destructor' in EX_BINARY_OUTPUT,
- EX_BINARY_OUTPUT)
+ "in the test fixture's destructor"
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
'CxxExceptionInDestructorTest::TearDownTestSuite() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
- ' thrown in SetUpTestSuite()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ ' thrown in SetUpTestSuite()'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInConstructorTest::TearDownTestSuite() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInConstructorTest::TearDownTestSuite() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertFalse(
- 'CxxExceptionInSetUpTestSuiteTest constructor '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTestSuiteTest constructor called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertFalse(
- 'CxxExceptionInSetUpTestSuiteTest destructor '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTestSuiteTest destructor called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertFalse(
- 'CxxExceptionInSetUpTestSuiteTest::SetUp() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTestSuiteTest::SetUp() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertFalse(
- 'CxxExceptionInSetUpTestSuiteTest::TearDown() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTestSuiteTest::TearDown() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertFalse(
- 'CxxExceptionInSetUpTestSuiteTest test body '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTestSuiteTest test body called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
- ' thrown in TearDownTestSuite()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ ' thrown in TearDownTestSuite()'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
def testCatchesCxxExceptionsInSetUp(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
- ' thrown in SetUp()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ ' thrown in SetUp()'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInSetUpTest::TearDownTestSuite() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTest::TearDownTestSuite() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInSetUpTest destructor '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTest destructor called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInSetUpTest::TearDown() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInSetUpTest::TearDown() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
'unexpected' not in EX_BINARY_OUTPUT,
(
@@ -210,32 +250,49 @@ class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
def testCatchesCxxExceptionsInTearDown(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
- ' thrown in TearDown()' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ ' thrown in TearDown()'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInTearDownTest::TearDownTestSuite() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInTearDownTest::TearDownTestSuite() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInTearDownTest destructor '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInTearDownTest destructor called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
def testCatchesCxxExceptionsInTestBody(self):
self.assertTrue(
'C++ exception with description "Standard C++ exception"'
- ' thrown in the test body' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ ' thrown in the test body'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInTestBodyTest::TearDownTestSuite() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInTestBodyTest::TearDownTestSuite() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInTestBodyTest destructor '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInTestBodyTest destructor called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
self.assertTrue(
- 'CxxExceptionInTestBodyTest::TearDown() '
- 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT)
+ 'CxxExceptionInTestBodyTest::TearDown() called as expected.'
+ in EX_BINARY_OUTPUT,
+ EX_BINARY_OUTPUT,
+ )
def testCatchesNonStdCxxExceptions(self):
self.assertTrue(
'Unknown C++ exception thrown in the test body' in EX_BINARY_OUTPUT,
- EX_BINARY_OUTPUT)
+ EX_BINARY_OUTPUT,
+ )
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
@@ -243,10 +300,9 @@ class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
- [EX_EXE_PATH,
- NO_CATCH_EXCEPTIONS_FLAG,
- FITLER_OUT_SEH_TESTS_FLAG],
- env=environ).output
+ [EX_EXE_PATH, NO_CATCH_EXCEPTIONS_FLAG, FITLER_OUT_SEH_TESTS_FLAG],
+ env=environ,
+ ).output
self.assertIn(
'Unhandled C++ exception terminating the program',
diff --git a/googletest/test/googletest-color-test.py b/googletest/test/googletest-color-test.py
index 32dafcc..8926a48 100755
--- a/googletest/test/googletest-color-test.py
+++ b/googletest/test/googletest-color-test.py
@@ -65,6 +65,7 @@ def UsesColor(term, color_env_var, color_flag):
class GTestColorTest(gtest_test_utils.TestCase):
+
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
diff --git a/googletest/test/googletest-env-var-test.py b/googletest/test/googletest-env-var-test.py
index bc4d87d..24d8edb 100755
--- a/googletest/test/googletest-env-var-test.py
+++ b/googletest/test/googletest-env-var-test.py
@@ -101,7 +101,6 @@ class GTestEnvVarTest(gtest_test_utils.TestCase):
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
-
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
@@ -116,5 +115,6 @@ class GTestEnvVarTest(gtest_test_utils.TestCase):
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
+
if __name__ == '__main__':
gtest_test_utils.Main()
diff --git a/googletest/test/googletest-failfast-unittest.py b/googletest/test/googletest-failfast-unittest.py
index 1356d4f..cdbce0c 100755
--- a/googletest/test/googletest-failfast-unittest.py
+++ b/googletest/test/googletest-failfast-unittest.py
@@ -62,15 +62,18 @@ FILTER_FLAG = 'gtest_filter'
# Command to run the googletest-failfast-unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath(
- 'googletest-failfast-unittest_')
+ 'googletest-failfast-unittest_'
+)
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
-SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
- [COMMAND, LIST_TESTS_FLAG]).output
+SUPPORTS_DEATH_TESTS = (
+ 'HasDeathTest'
+ in gtest_test_utils.Subprocess([COMMAND, LIST_TESTS_FLAG]).output
+)
# Utilities.
@@ -90,8 +93,9 @@ def RunAndReturnOutput(test_suite=None, fail_fast=None, run_disabled=False):
"""Runs the test program and returns its output."""
args = []
- xml_path = os.path.join(gtest_test_utils.GetTempDir(),
- '.GTestFailFastUnitTest.xml')
+ xml_path = os.path.join(
+ gtest_test_utils.GetTempDir(), '.GTestFailFastUnitTest.xml'
+ )
args += ['--gtest_output=xml:' + xml_path]
if fail_fast is not None:
if isinstance(fail_fast, str):
@@ -188,49 +192,63 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=True)
self.assertIn('1 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 3 tests', txt)
- for expected_count, callback in [(1, 'OnTestSuiteStart'),
- (5, 'OnTestStart'),
- (5, 'OnTestEnd'),
- (5, 'OnTestPartResult'),
- (1, 'OnTestSuiteEnd')]:
+ for expected_count, callback in [
+ (1, 'OnTestSuiteStart'),
+ (5, 'OnTestStart'),
+ (5, 'OnTestEnd'),
+ (5, 'OnTestPartResult'),
+ (1, 'OnTestSuiteEnd'),
+ ]:
self.assertEqual(
- expected_count, txt.count(callback),
- 'Expected %d calls to callback %s match count on output: %s ' %
- (expected_count, callback, txt))
+ expected_count,
+ txt.count(callback),
+ 'Expected %d calls to callback %s match count on output: %s '
+ % (expected_count, callback, txt),
+ )
txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=False)
self.assertIn('3 FAILED TEST', txt)
self.assertIn('[ SKIPPED ] 1 test', txt)
- for expected_count, callback in [(1, 'OnTestSuiteStart'),
- (5, 'OnTestStart'),
- (5, 'OnTestEnd'),
- (5, 'OnTestPartResult'),
- (1, 'OnTestSuiteEnd')]:
+ for expected_count, callback in [
+ (1, 'OnTestSuiteStart'),
+ (5, 'OnTestStart'),
+ (5, 'OnTestEnd'),
+ (5, 'OnTestPartResult'),
+ (1, 'OnTestSuiteEnd'),
+ ]:
self.assertEqual(
- expected_count, txt.count(callback),
- 'Expected %d calls to callback %s match count on output: %s ' %
- (expected_count, callback, txt))
+ expected_count,
+ txt.count(callback),
+ 'Expected %d calls to callback %s match count on output: %s '
+ % (expected_count, callback, txt),
+ )
def assertXmlResultCount(self, result, count, xml):
self.assertEqual(
- count, xml.count('result="%s"' % result),
- 'Expected \'result="%s"\' match count of %s: %s ' %
- (result, count, xml))
+ count,
+ xml.count('result="%s"' % result),
+ 'Expected \'result="%s"\' match count of %s: %s '
+ % (result, count, xml),
+ )
def assertXmlStatusCount(self, status, count, xml):
self.assertEqual(
- count, xml.count('status="%s"' % status),
- 'Expected \'status="%s"\' match count of %s: %s ' %
- (status, count, xml))
-
- def assertFailFastXmlAndTxtOutput(self,
- fail_fast,
- test_suite,
- passed_count,
- failure_count,
- skipped_count,
- suppressed_count,
- run_disabled=False):
+ count,
+ xml.count('status="%s"' % status),
+ 'Expected \'status="%s"\' match count of %s: %s '
+ % (status, count, xml),
+ )
+
+ def assertFailFastXmlAndTxtOutput(
+ self,
+ fail_fast,
+ test_suite,
+ passed_count,
+ failure_count,
+ skipped_count,
+ suppressed_count,
+ run_disabled=False,
+ ):
"""Assert XML and text output of a test execution."""
txt, xml = RunAndReturnOutput(test_suite, fail_fast, run_disabled)
@@ -240,40 +258,57 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
self.assertIn('%s DISABLED TEST' % suppressed_count, txt)
if skipped_count > 0:
self.assertIn('[ SKIPPED ] %s tests' % skipped_count, txt)
- self.assertXmlStatusCount('run',
- passed_count + failure_count + skipped_count, xml)
+ self.assertXmlStatusCount(
+ 'run', passed_count + failure_count + skipped_count, xml
+ )
self.assertXmlStatusCount('notrun', suppressed_count, xml)
self.assertXmlResultCount('completed', passed_count + failure_count, xml)
self.assertXmlResultCount('skipped', skipped_count, xml)
self.assertXmlResultCount('suppressed', suppressed_count, xml)
- def assertFailFastBehavior(self,
- test_suite,
- passed_count,
- failure_count,
- skipped_count,
- suppressed_count,
- run_disabled=False):
+ def assertFailFastBehavior(
+ self,
+ test_suite,
+ passed_count,
+ failure_count,
+ skipped_count,
+ suppressed_count,
+ run_disabled=False,
+ ):
"""Assert --fail_fast via flag."""
for fail_fast in ('true', '1', 't', True):
- self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
- failure_count, skipped_count,
- suppressed_count, run_disabled)
-
- def assertNotFailFastBehavior(self,
- test_suite,
- passed_count,
- failure_count,
- skipped_count,
- suppressed_count,
- run_disabled=False):
+ self.assertFailFastXmlAndTxtOutput(
+ fail_fast,
+ test_suite,
+ passed_count,
+ failure_count,
+ skipped_count,
+ suppressed_count,
+ run_disabled,
+ )
+
+ def assertNotFailFastBehavior(
+ self,
+ test_suite,
+ passed_count,
+ failure_count,
+ skipped_count,
+ suppressed_count,
+ run_disabled=False,
+ ):
"""Assert --nofail_fast via flag."""
for fail_fast in ('false', '0', 'f', False):
- self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
- failure_count, skipped_count,
- suppressed_count, run_disabled)
+ self.assertFailFastXmlAndTxtOutput(
+ fail_fast,
+ test_suite,
+ passed_count,
+ failure_count,
+ skipped_count,
+ suppressed_count,
+ run_disabled,
+ )
def testFlag_HasFixtureTest(self):
"""Tests the behavior of fail_fast and TEST_F."""
@@ -282,13 +317,15 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
passed_count=1,
failure_count=1,
skipped_count=3,
- suppressed_count=0)
+ suppressed_count=0,
+ )
self.assertNotFailFastBehavior(
test_suite='HasFixtureTest',
passed_count=1,
failure_count=4,
skipped_count=0,
- suppressed_count=0)
+ suppressed_count=0,
+ )
def testFlag_HasSimpleTest(self):
"""Tests the behavior of fail_fast and TEST."""
@@ -297,13 +334,15 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
passed_count=1,
failure_count=1,
skipped_count=3,
- suppressed_count=0)
+ suppressed_count=0,
+ )
self.assertNotFailFastBehavior(
test_suite='HasSimpleTest',
passed_count=1,
failure_count=4,
skipped_count=0,
- suppressed_count=0)
+ suppressed_count=0,
+ )
def testFlag_HasParametersTest(self):
"""Tests the behavior of fail_fast and TEST_P."""
@@ -312,13 +351,15 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
passed_count=0,
failure_count=1,
skipped_count=3,
- suppressed_count=0)
+ suppressed_count=0,
+ )
self.assertNotFailFastBehavior(
test_suite='HasParametersSuite/HasParametersTest',
passed_count=0,
failure_count=4,
skipped_count=0,
- suppressed_count=0)
+ suppressed_count=0,
+ )
def testFlag_HasDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test cases."""
@@ -328,14 +369,16 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
failure_count=1,
skipped_count=2,
suppressed_count=1,
- run_disabled=False)
+ run_disabled=False,
+ )
self.assertNotFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=3,
skipped_count=0,
suppressed_count=1,
- run_disabled=False)
+ run_disabled=False,
+ )
def testFlag_HasDisabledRunDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test cases enabled."""
@@ -345,14 +388,16 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
failure_count=1,
skipped_count=3,
suppressed_count=0,
- run_disabled=True)
+ run_disabled=True,
+ )
self.assertNotFailFastBehavior(
test_suite='HasDisabledTest',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0,
- run_disabled=True)
+ run_disabled=True,
+ )
def testFlag_HasDisabledSuiteTest(self):
"""Tests the behavior of fail_fast and Disabled test suites."""
@@ -362,14 +407,16 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
failure_count=0,
skipped_count=0,
suppressed_count=5,
- run_disabled=False)
+ run_disabled=False,
+ )
self.assertNotFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=0,
failure_count=0,
skipped_count=0,
suppressed_count=5,
- run_disabled=False)
+ run_disabled=False,
+ )
def testFlag_HasDisabledSuiteRunDisabledTest(self):
"""Tests the behavior of fail_fast and Disabled test suites enabled."""
@@ -379,14 +426,16 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
failure_count=1,
skipped_count=3,
suppressed_count=0,
- run_disabled=True)
+ run_disabled=True,
+ )
self.assertNotFailFastBehavior(
test_suite='DISABLED_HasDisabledSuite',
passed_count=1,
failure_count=4,
skipped_count=0,
suppressed_count=0,
- run_disabled=True)
+ run_disabled=True,
+ )
if SUPPORTS_DEATH_TESTS:
@@ -397,13 +446,15 @@ class GTestFailFastUnitTest(gtest_test_utils.TestCase):
passed_count=1,
failure_count=1,
skipped_count=3,
- suppressed_count=0)
+ suppressed_count=0,
+ )
self.assertNotFailFastBehavior(
test_suite='HasDeathTest',
passed_count=1,
failure_count=4,
skipped_count=0,
- suppressed_count=0)
+ suppressed_count=0,
+ )
if __name__ == '__main__':
diff --git a/googletest/test/googletest-filter-unittest.py b/googletest/test/googletest-filter-unittest.py
index 6b23518..f1f3c7a 100755
--- a/googletest/test/googletest-filter-unittest.py
+++ b/googletest/test/googletest-filter-unittest.py
@@ -42,6 +42,7 @@ we test that here also.
import os
import re
+
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
@@ -60,7 +61,8 @@ CAN_PASS_EMPTY_ENV = False
if sys.executable:
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
- [sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)'])
+ [sys.executable, '-c', "import os; print('EMPTY_VAR' in os.environ)"]
+ )
CAN_PASS_EMPTY_ENV = eval(child.output)
@@ -75,8 +77,8 @@ if sys.executable:
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
- [sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)'
- ])
+ [sys.executable, '-c', "import os; print('UNSET_VAR' not in os.environ)"]
+ )
CAN_UNSET_ENV = eval(child.output)
@@ -84,7 +86,7 @@ if sys.executable:
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
-CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
+CAN_TEST_EMPTY_FILTER = CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV
# The environment variable for specifying the test filters.
@@ -121,8 +123,10 @@ DISABLED_BANNER_REGEX = re.compile(r'^\[\s*DISABLED\s*\] (.*)')
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
-SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
- [COMMAND, LIST_TESTS_FLAG]).output
+SUPPORTS_DEATH_TESTS = (
+ 'HasDeathTest'
+ in gtest_test_utils.Subprocess([COMMAND, LIST_TESTS_FLAG]).output
+)
# Full names of all tests in googletest-filter-unittests_.
PARAM_TESTS = [
@@ -134,7 +138,7 @@ PARAM_TESTS = [
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
- ]
+]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
@@ -143,29 +147,31 @@ DISABLED_TESTS = [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
- ]
+]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
- 'HasDeathTest.Test1',
- 'HasDeathTest.Test2',
- ]
+ 'HasDeathTest.Test1',
+ 'HasDeathTest.Test2',
+ ]
else:
DEATH_TESTS = []
# All the non-disabled tests.
-ACTIVE_TESTS = [
- 'FooTest.Abc',
- 'FooTest.Xyz',
-
- 'BarTest.TestOne',
- 'BarTest.TestTwo',
- 'BarTest.TestThree',
-
- 'BazTest.TestOne',
- 'BazTest.TestA',
- 'BazTest.TestB',
- ] + DEATH_TESTS + PARAM_TESTS
+ACTIVE_TESTS = (
+ [
+ 'FooTest.Abc',
+ 'FooTest.Xyz',
+ 'BarTest.TestOne',
+ 'BarTest.TestTwo',
+ 'BarTest.TestThree',
+ 'BazTest.TestOne',
+ 'BazTest.TestA',
+ 'BazTest.TestB',
+ ]
+ + DEATH_TESTS
+ + PARAM_TESTS
+)
param_tests_present = None
@@ -183,14 +189,15 @@ def SetEnvVar(env_var, value):
del environ[env_var]
-def RunAndReturnOutput(args = None):
+def RunAndReturnOutput(args=None):
"""Runs the test program and returns its output."""
- return gtest_test_utils.Subprocess([COMMAND] + (args or []),
- env=environ).output
+ return gtest_test_utils.Subprocess(
+ [COMMAND] + (args or []), env=environ
+ ).output
-def RunAndExtractTestList(args = None):
+def RunAndExtractTestList(args=None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
@@ -234,10 +241,13 @@ def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
- extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
- TOTAL_SHARDS_ENV_VAR: str(total_shards)}
+ extra_env = {
+ SHARD_INDEX_ENV_VAR: str(shard_index),
+ TOTAL_SHARDS_ENV_VAR: str(total_shards),
+ }
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
+
# The unit test.
@@ -303,8 +313,14 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
- def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
- args=None, check_exit_0=False):
+ def RunAndVerifyWithSharding(
+ self,
+ gtest_filter,
+ total_shards,
+ tests_to_run,
+ args=None,
+ check_exit_0=False,
+ ):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of googletest-filter-unittest_ with the given filter, and
@@ -316,9 +332,9 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
- args : Arguments to pass to the to the test binary.
- check_exit_0: When set to a true value, make sure that all shards
- return 0.
+ args: Arguments to pass to the to the test binary.
+ check_exit_0: When set to a true value, make sure that all shards return
+ 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
@@ -372,8 +388,9 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
global param_tests_present
if param_tests_present is None:
- param_tests_present = PARAM_TEST_REGEX.search(
- RunAndReturnOutput()) is not None
+ param_tests_present = (
+ PARAM_TEST_REGEX.search(RunAndReturnOutput()) is not None
+ )
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
@@ -425,8 +442,9 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
- self.RunAndVerifyAllowingDisabled('BazTest.*',
- BAZ_TESTS + ['BazTest.DISABLED_TestC'])
+ self.RunAndVerifyAllowingDisabled(
+ 'BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']
+ )
def testFilterByTest(self):
"""Tests filtering by test name."""
@@ -437,38 +455,50 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
- self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
- ['DISABLED_FoobarTest.Test1'])
+ self.RunAndVerifyAllowingDisabled(
+ 'DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']
+ )
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
- self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
- 'BarTest.DISABLED_TestFour',
- 'BarTest.DISABLED_TestFive',
- 'BazTest.DISABLED_TestC',
- 'DISABLED_FoobarTest.DISABLED_Test2',
- ])
+ self.RunAndVerifyAllowingDisabled(
+ '*.DISABLED_*',
+ [
+ 'BarTest.DISABLED_TestFour',
+ 'BarTest.DISABLED_TestFive',
+ 'BazTest.DISABLED_TestC',
+ 'DISABLED_FoobarTest.DISABLED_Test2',
+ ],
+ )
self.RunAndVerify('DISABLED_*', [])
- self.RunAndVerifyAllowingDisabled('DISABLED_*', [
- 'DISABLED_FoobarTest.Test1',
- 'DISABLED_FoobarTest.DISABLED_Test2',
- 'DISABLED_FoobarbazTest.TestA',
- ])
+ self.RunAndVerifyAllowingDisabled(
+ 'DISABLED_*',
+ [
+ 'DISABLED_FoobarTest.Test1',
+ 'DISABLED_FoobarTest.DISABLED_Test2',
+ 'DISABLED_FoobarbazTest.TestA',
+ ],
+ )
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
- self.RunAndVerify('*a*.*', [
- 'BarTest.TestOne',
- 'BarTest.TestTwo',
- 'BarTest.TestThree',
-
- 'BazTest.TestOne',
- 'BazTest.TestA',
- 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
+ self.RunAndVerify(
+ '*a*.*',
+ [
+ 'BarTest.TestOne',
+ 'BarTest.TestTwo',
+ 'BarTest.TestThree',
+ 'BazTest.TestOne',
+ 'BazTest.TestA',
+ 'BazTest.TestB',
+ ]
+ + DEATH_TESTS
+ + PARAM_TESTS,
+ )
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
@@ -478,23 +508,27 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
- self.RunAndVerify('*z*', [
- 'FooTest.Xyz',
-
- 'BazTest.TestOne',
- 'BazTest.TestA',
- 'BazTest.TestB',
- ])
+ self.RunAndVerify(
+ '*z*',
+ [
+ 'FooTest.Xyz',
+ 'BazTest.TestOne',
+ 'BazTest.TestA',
+ 'BazTest.TestB',
+ ],
+ )
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
- self.RunAndVerify('Foo*.*:*A*', [
- 'FooTest.Abc',
- 'FooTest.Xyz',
-
- 'BazTest.TestA',
- ])
+ self.RunAndVerify(
+ 'Foo*.*:*A*',
+ [
+ 'FooTest.Abc',
+ 'FooTest.Xyz',
+ 'BazTest.TestA',
+ ],
+ )
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
@@ -502,83 +536,109 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
- self.RunAndVerify('*oo*:*A*:*One', [
- 'FooTest.Abc',
- 'FooTest.Xyz',
-
- 'BarTest.TestOne',
-
- 'BazTest.TestOne',
- 'BazTest.TestA',
- ])
+ self.RunAndVerify(
+ '*oo*:*A*:*One',
+ [
+ 'FooTest.Abc',
+ 'FooTest.Xyz',
+ 'BarTest.TestOne',
+ 'BazTest.TestOne',
+ 'BazTest.TestA',
+ ],
+ )
# The 2nd pattern is empty.
- self.RunAndVerify('*oo*::*One', [
- 'FooTest.Abc',
- 'FooTest.Xyz',
-
- 'BarTest.TestOne',
-
- 'BazTest.TestOne',
- ])
+ self.RunAndVerify(
+ '*oo*::*One',
+ [
+ 'FooTest.Abc',
+ 'FooTest.Xyz',
+ 'BarTest.TestOne',
+ 'BazTest.TestOne',
+ ],
+ )
# The last 2 patterns are empty.
- self.RunAndVerify('*oo*::', [
- 'FooTest.Abc',
- 'FooTest.Xyz',
- ])
+ self.RunAndVerify(
+ '*oo*::',
+ [
+ 'FooTest.Abc',
+ 'FooTest.Xyz',
+ ],
+ )
def testNegativeFilters(self):
- self.RunAndVerify('*-BazTest.TestOne', [
- 'FooTest.Abc',
- 'FooTest.Xyz',
-
- 'BarTest.TestOne',
- 'BarTest.TestTwo',
- 'BarTest.TestThree',
-
- 'BazTest.TestA',
- 'BazTest.TestB',
- ] + DEATH_TESTS + PARAM_TESTS)
-
- self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
- 'FooTest.Xyz',
-
- 'BarTest.TestOne',
- 'BarTest.TestTwo',
- 'BarTest.TestThree',
- ] + DEATH_TESTS + PARAM_TESTS)
-
- self.RunAndVerify('BarTest.*-BarTest.TestOne', [
- 'BarTest.TestTwo',
- 'BarTest.TestThree',
- ])
+ self.RunAndVerify(
+ '*-BazTest.TestOne',
+ [
+ 'FooTest.Abc',
+ 'FooTest.Xyz',
+ 'BarTest.TestOne',
+ 'BarTest.TestTwo',
+ 'BarTest.TestThree',
+ 'BazTest.TestA',
+ 'BazTest.TestB',
+ ]
+ + DEATH_TESTS
+ + PARAM_TESTS,
+ )
+
+ self.RunAndVerify(
+ '*-FooTest.Abc:BazTest.*',
+ [
+ 'FooTest.Xyz',
+ 'BarTest.TestOne',
+ 'BarTest.TestTwo',
+ 'BarTest.TestThree',
+ ]
+ + DEATH_TESTS
+ + PARAM_TESTS,
+ )
+
+ self.RunAndVerify(
+ 'BarTest.*-BarTest.TestOne',
+ [
+ 'BarTest.TestTwo',
+ 'BarTest.TestThree',
+ ],
+ )
# Tests without leading '*'.
- self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
- 'BarTest.TestOne',
- 'BarTest.TestTwo',
- 'BarTest.TestThree',
- ] + DEATH_TESTS + PARAM_TESTS)
+ self.RunAndVerify(
+ '-FooTest.Abc:FooTest.Xyz:BazTest.*',
+ [
+ 'BarTest.TestOne',
+ 'BarTest.TestTwo',
+ 'BarTest.TestThree',
+ ]
+ + DEATH_TESTS
+ + PARAM_TESTS,
+ )
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
- self.RunAndVerify('SeqP/*', [
- 'SeqP/ParamTest.TestX/0',
- 'SeqP/ParamTest.TestX/1',
- 'SeqP/ParamTest.TestY/0',
- 'SeqP/ParamTest.TestY/1',
- ])
+ self.RunAndVerify(
+ 'SeqP/*',
+ [
+ 'SeqP/ParamTest.TestX/0',
+ 'SeqP/ParamTest.TestX/1',
+ 'SeqP/ParamTest.TestY/0',
+ 'SeqP/ParamTest.TestY/1',
+ ],
+ )
# Value parameterized tests filtering by the test name.
- self.RunAndVerify('*/0', [
- 'SeqP/ParamTest.TestX/0',
- 'SeqP/ParamTest.TestY/0',
- 'SeqQ/ParamTest.TestX/0',
- 'SeqQ/ParamTest.TestY/0',
- ])
+ self.RunAndVerify(
+ '*/0',
+ [
+ 'SeqP/ParamTest.TestX/0',
+ 'SeqP/ParamTest.TestY/0',
+ 'SeqQ/ParamTest.TestX/0',
+ 'SeqQ/ParamTest.TestY/0',
+ ],
+ )
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
@@ -593,8 +653,9 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
- shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
- 'shard_status_file')
+ shard_status_file = os.path.join(
+ gtest_test_utils.GetTempDir(), 'shard_status_file'
+ )
self.assertTrue(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
@@ -607,15 +668,16 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
- shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
- 'shard_status_file2')
+ shard_status_file = os.path.join(
+ gtest_test_utils.GetTempDir(), 'shard_status_file2'
+ )
self.assertTrue(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
- output = InvokeWithModifiedEnv(extra_env,
- RunAndReturnOutput,
- [LIST_TESTS_FLAG])
+ output = InvokeWithModifiedEnv(
+ extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]
+ )
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
@@ -636,19 +698,25 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
make_filter = lambda s: ['--%s=%s' % (FILTER_FLAG, s)]
banners = RunAndExtractDisabledBannerList(make_filter('*'))
- self.AssertSetEqual(banners, [
- 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive',
- 'BazTest.DISABLED_TestC'
- ])
+ self.AssertSetEqual(
+ banners,
+ [
+ 'BarTest.DISABLED_TestFour',
+ 'BarTest.DISABLED_TestFive',
+ 'BazTest.DISABLED_TestC',
+ ],
+ )
banners = RunAndExtractDisabledBannerList(make_filter('Bar*'))
self.AssertSetEqual(
- banners, ['BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive'])
+ banners, ['BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive']
+ )
banners = RunAndExtractDisabledBannerList(make_filter('*-Bar*'))
self.AssertSetEqual(banners, ['BazTest.DISABLED_TestC'])
if SUPPORTS_DEATH_TESTS:
+
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
@@ -656,19 +724,23 @@ class GTestFilterUnitTest(gtest_test_utils.TestCase):
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
-
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
- ]
-
- for flag in ['--gtest_death_test_style=threadsafe',
- '--gtest_death_test_style=fast']:
- self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
- check_exit_0=True, args=[flag])
- self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
- check_exit_0=True, args=[flag])
+ ]
+
+ for flag in [
+ '--gtest_death_test_style=threadsafe',
+ '--gtest_death_test_style=fast',
+ ]:
+ self.RunAndVerifyWithSharding(
+ gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]
+ )
+ self.RunAndVerifyWithSharding(
+ gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]
+ )
+
if __name__ == '__main__':
gtest_test_utils.Main()
diff --git a/googletest/test/googletest-global-environment-unittest.py b/googletest/test/googletest-global-environment-unittest.py
index 2657934..bd73a2e 100644
--- a/googletest/test/googletest-global-environment-unittest.py
+++ b/googletest/test/googletest-global-environment-unittest.py
@@ -42,10 +42,14 @@ from googletest.test import gtest_test_utils
def RunAndReturnOutput(args=None):
"""Runs the test program and returns its output."""
- return gtest_test_utils.Subprocess([
- gtest_test_utils.GetTestExecutablePath(
- 'googletest-global-environment-unittest_')
- ] + (args or [])).output
+ return gtest_test_utils.Subprocess(
+ [
+ gtest_test_utils.GetTestExecutablePath(
+ 'googletest-global-environment-unittest_'
+ )
+ ]
+ + (args or [])
+ ).output
class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase):
@@ -78,23 +82,25 @@ class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase):
'--gtest_recreate_environments_when_repeating=true',
])
- expected_pattern = ('(.|\n)*'
- r'Repeating all tests \(iteration 1\)'
- '(.|\n)*'
- 'Global test environment set-up.'
- '(.|\n)*'
- 'SomeTest.DoesFoo'
- '(.|\n)*'
- 'Global test environment tear-down'
- '(.|\n)*'
- r'Repeating all tests \(iteration 2\)'
- '(.|\n)*'
- 'Global test environment set-up.'
- '(.|\n)*'
- 'SomeTest.DoesFoo'
- '(.|\n)*'
- 'Global test environment tear-down'
- '(.|\n)*')
+ expected_pattern = (
+ '(.|\n)*'
+ r'Repeating all tests \(iteration 1\)'
+ '(.|\n)*'
+ 'Global test environment set-up.'
+ '(.|\n)*'
+ 'SomeTest.DoesFoo'
+ '(.|\n)*'
+ 'Global test environment tear-down'
+ '(.|\n)*'
+ r'Repeating all tests \(iteration 2\)'
+ '(.|\n)*'
+ 'Global test environment set-up.'
+ '(.|\n)*'
+ 'SomeTest.DoesFoo'
+ '(.|\n)*'
+ 'Global test environment tear-down'
+ '(.|\n)*'
+ )
self.assertRegex(txt, expected_pattern)
def testEnvironmentSetUpAndTornDownOnce(self):
@@ -102,28 +108,33 @@ class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase):
# By default the environment should only be set up and torn down once, at
# the start and end of the test respectively.
- txt = RunAndReturnOutput([
- '--gtest_repeat=2',
- ])
-
- expected_pattern = ('(.|\n)*'
- r'Repeating all tests \(iteration 1\)'
- '(.|\n)*'
- 'Global test environment set-up.'
- '(.|\n)*'
- 'SomeTest.DoesFoo'
- '(.|\n)*'
- r'Repeating all tests \(iteration 2\)'
- '(.|\n)*'
- 'SomeTest.DoesFoo'
- '(.|\n)*'
- 'Global test environment tear-down'
- '(.|\n)*')
+ txt = RunAndReturnOutput(
+ [
+ '--gtest_repeat=2',
+ ]
+ )
+
+ expected_pattern = (
+ '(.|\n)*'
+ r'Repeating all tests \(iteration 1\)'
+ '(.|\n)*'
+ 'Global test environment set-up.'
+ '(.|\n)*'
+ 'SomeTest.DoesFoo'
+ '(.|\n)*'
+ r'Repeating all tests \(iteration 2\)'
+ '(.|\n)*'
+ 'SomeTest.DoesFoo'
+ '(.|\n)*'
+ 'Global test environment tear-down'
+ '(.|\n)*'
+ )
self.assertRegex(txt, expected_pattern)
self.assertEqual(len(re.findall('Global test environment set-up', txt)), 1)
self.assertEqual(
- len(re.findall('Global test environment tear-down', txt)), 1)
+ len(re.findall('Global test environment tear-down', txt)), 1
+ )
if __name__ == '__main__':
diff --git a/googletest/test/googletest-json-outfiles-test.py b/googletest/test/googletest-json-outfiles-test.py
index 3abf355..bc17fe2 100644
--- a/googletest/test/googletest-json-outfiles-test.py
+++ b/googletest/test/googletest-json-outfiles-test.py
@@ -40,93 +40,65 @@ GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'
EXPECTED_1 = {
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'name':
- u'AllTests',
- u'testsuites': [{
- u'name':
- u'PropertyOne',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'TestSomeProperties',
- u'file': u'gtest_xml_outfile1_test_.cc',
- u'line': 41,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'PropertyOne',
- u'SetUpProp': u'1',
- u'TestSomeProperty': u'1',
- u'TearDownProp': u'1',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'name': 'AllTests',
+ 'testsuites': [{
+ 'name': 'PropertyOne',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'TestSomeProperties',
+ 'file': 'gtest_xml_outfile1_test_.cc',
+ 'line': 41,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'PropertyOne',
+ 'SetUpProp': '1',
+ 'TestSomeProperty': '1',
+ 'TearDownProp': '1',
}],
}],
}
EXPECTED_2 = {
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'name':
- u'AllTests',
- u'testsuites': [{
- u'name':
- u'PropertyTwo',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'TestSomeProperties',
- u'file': u'gtest_xml_outfile2_test_.cc',
- u'line': 41,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'timestamp': u'*',
- u'time': u'*',
- u'classname': u'PropertyTwo',
- u'SetUpProp': u'2',
- u'TestSomeProperty': u'2',
- u'TearDownProp': u'2',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'name': 'AllTests',
+ 'testsuites': [{
+ 'name': 'PropertyTwo',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'TestSomeProperties',
+ 'file': 'gtest_xml_outfile2_test_.cc',
+ 'line': 41,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'timestamp': '*',
+ 'time': '*',
+ 'classname': 'PropertyTwo',
+ 'SetUpProp': '2',
+ 'TestSomeProperty': '2',
+ 'TearDownProp': '2',
}],
}],
}
@@ -139,8 +111,9 @@ class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
- self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
- GTEST_OUTPUT_SUBDIR, '')
+ self.output_dir_ = os.path.join(
+ gtest_test_utils.GetTempDir(), GTEST_OUTPUT_SUBDIR, ''
+ )
self.DeleteFilesAndDir()
def tearDown(self):
@@ -169,8 +142,9 @@ class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
def _TestOutFile(self, test_name, expected):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
- p = gtest_test_utils.Subprocess(command,
- working_dir=gtest_test_utils.GetTempDir())
+ p = gtest_test_utils.Subprocess(
+ command, working_dir=gtest_test_utils.GetTempDir()
+ )
self.assertTrue(p.exited)
self.assertEqual(0, p.exit_code)
diff --git a/googletest/test/googletest-json-output-unittest.py b/googletest/test/googletest-json-output-unittest.py
index c8915b2..b3a08de 100644
--- a/googletest/test/googletest-json-output-unittest.py
+++ b/googletest/test/googletest-json-output-unittest.py
@@ -57,689 +57,570 @@ else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY = {
- u'tests':
- 26,
- u'failures':
- 5,
- u'disabled':
- 2,
- u'errors':
- 0,
- u'timestamp':
- u'*',
- u'time':
- u'*',
- u'ad_hoc_property':
- u'42',
- u'name':
- u'AllTests',
- u'testsuites': [{
- u'name':
- u'SuccessfulTest',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'Succeeds',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 51,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'SuccessfulTest'
- }]
- }, {
- u'name':
- u'FailedTest',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name':
- u'Fails',
- u'file':
- u'gtest_xml_output_unittest_.cc',
- u'line':
- 59,
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'classname':
- u'FailedTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 1\n 2' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- }]
- }, {
- u'name':
- u'DisabledTest',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 1,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'DISABLED_test_not_run',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 66,
- u'status': u'NOTRUN',
- u'result': u'SUPPRESSED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'DisabledTest'
- }]
- }, {
- u'name':
- u'SkippedTest',
- u'tests':
- 3,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'Skipped',
- u'file': 'gtest_xml_output_unittest_.cc',
- u'line': 73,
- u'status': u'RUN',
- u'result': u'SKIPPED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'SkippedTest'
- }, {
- u'name': u'SkippedWithMessage',
- u'file': 'gtest_xml_output_unittest_.cc',
- u'line': 77,
- u'status': u'RUN',
- u'result': u'SKIPPED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'SkippedTest'
- }, {
- u'name':
- u'SkippedAfterFailure',
- u'file':
- 'gtest_xml_output_unittest_.cc',
- u'line':
- 81,
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'classname':
- u'SkippedTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 1\n 2' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- }]
- }, {
- u'name':
- u'MixedResultTest',
- u'tests':
- 3,
- u'failures':
- 1,
- u'disabled':
- 1,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'Succeeds',
- u'file': 'gtest_xml_output_unittest_.cc',
- u'line': 86,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'MixedResultTest'
- }, {
- u'name':
- u'Fails',
- u'file':
- u'gtest_xml_output_unittest_.cc',
- u'line':
- 91,
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'classname':
- u'MixedResultTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 1\n 2' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }, {
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 2\n 3' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- }, {
- u'name': u'DISABLED_test',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 96,
- u'status': u'NOTRUN',
- u'result': u'SUPPRESSED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'MixedResultTest'
- }]
- }, {
- u'name':
- u'XmlQuotingTest',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name':
- u'OutputsCData',
- u'file':
- u'gtest_xml_output_unittest_.cc',
- u'line':
- 100,
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'classname':
- u'XmlQuotingTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Failed\nXML output: <?xml encoding="utf-8">'
- u'<top><![CDATA[cdata text]]></top>' +
- STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- }]
- }, {
- u'name':
- u'InvalidCharactersTest',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name':
- u'InvalidCharactersInMessage',
- u'file':
- u'gtest_xml_output_unittest_.cc',
- u'line':
- 107,
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'classname':
- u'InvalidCharactersTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Failed\nInvalid characters in brackets'
- u' [\x01\x02]' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- }]
- }, {
- u'name':
- u'PropertyRecordingTest',
- u'tests':
- 4,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'SetUpTestSuite':
- u'yes',
- u'TearDownTestSuite':
- u'aye',
- u'testsuite': [{
- u'name': u'OneProperty',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 119,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_1': u'1'
- }, {
- u'name': u'IntValuedProperty',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 123,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_int': u'1'
- }, {
- u'name': u'ThreeProperties',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 127,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_1': u'1',
- u'key_2': u'2',
- u'key_3': u'3'
- }, {
- u'name': u'TwoValuesForOneKeyUsesLastValue',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 133,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_1': u'2'
- }]
- }, {
- u'name':
- u'NoFixtureTest',
- u'tests':
- 3,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'RecordProperty',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 138,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'NoFixtureTest',
- u'key': u'1'
- }, {
- u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 151,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'NoFixtureTest',
- u'key_for_utility_int': u'1'
- }, {
- u'name': u'ExternalUtilityThatCallsRecordStringValuedProperty',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 155,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'NoFixtureTest',
- u'key_for_utility_string': u'1'
- }]
- }, {
- u'name':
- u'TypedTest/0',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'int',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 171,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'TypedTest/0'
- }]
- }, {
- u'name':
- u'TypedTest/1',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'long',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 171,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'TypedTest/1'
- }]
- }, {
- u'name':
- u'Single/TypeParameterizedTestSuite/0',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'int',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 178,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'Single/TypeParameterizedTestSuite/0'
- }]
- }, {
- u'name':
- u'Single/TypeParameterizedTestSuite/1',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'long',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 178,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'Single/TypeParameterizedTestSuite/1'
- }]
- }, {
- u'name':
- u'Single/ValueParamTest',
- u'tests':
- 4,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'HasValueParamAttribute/0',
- u'value_param': u'33',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 162,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'Single/ValueParamTest'
- }, {
- u'name': u'HasValueParamAttribute/1',
- u'value_param': u'42',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 162,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'Single/ValueParamTest'
- }, {
- u'name': u'AnotherTestThatHasValueParamAttribute/0',
- u'value_param': u'33',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 163,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'Single/ValueParamTest'
- }, {
- u'name': u'AnotherTestThatHasValueParamAttribute/1',
- u'value_param': u'42',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 163,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'Single/ValueParamTest'
- }]
- }]
+ 'tests': 26,
+ 'failures': 5,
+ 'disabled': 2,
+ 'errors': 0,
+ 'timestamp': '*',
+ 'time': '*',
+ 'ad_hoc_property': '42',
+ 'name': 'AllTests',
+ 'testsuites': [
+ {
+ 'name': 'SuccessfulTest',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'Succeeds',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 51,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'SuccessfulTest',
+ }],
+ },
+ {
+ 'name': 'FailedTest',
+ 'tests': 1,
+ 'failures': 1,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'Fails',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 59,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'FailedTest',
+ 'failures': [{
+ 'failure': (
+ 'gtest_xml_output_unittest_.cc:*\n'
+ 'Expected equality of these values:\n'
+ ' 1\n 2'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ }],
+ }],
+ },
+ {
+ 'name': 'DisabledTest',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 1,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'DISABLED_test_not_run',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 66,
+ 'status': 'NOTRUN',
+ 'result': 'SUPPRESSED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'DisabledTest',
+ }],
+ },
+ {
+ 'name': 'SkippedTest',
+ 'tests': 3,
+ 'failures': 1,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [
+ {
+ 'name': 'Skipped',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 73,
+ 'status': 'RUN',
+ 'result': 'SKIPPED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'SkippedTest',
+ },
+ {
+ 'name': 'SkippedWithMessage',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 77,
+ 'status': 'RUN',
+ 'result': 'SKIPPED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'SkippedTest',
+ },
+ {
+ 'name': 'SkippedAfterFailure',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 81,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'SkippedTest',
+ 'failures': [{
+ 'failure': (
+ 'gtest_xml_output_unittest_.cc:*\n'
+ 'Expected equality of these values:\n'
+ ' 1\n 2'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ }],
+ },
+ ],
+ },
+ {
+ 'name': 'MixedResultTest',
+ 'tests': 3,
+ 'failures': 1,
+ 'disabled': 1,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [
+ {
+ 'name': 'Succeeds',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 86,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'MixedResultTest',
+ },
+ {
+ 'name': 'Fails',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 91,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'MixedResultTest',
+ 'failures': [
+ {
+ 'failure': (
+ 'gtest_xml_output_unittest_.cc:*\n'
+ 'Expected equality of these values:\n'
+ ' 1\n 2'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ },
+ {
+ 'failure': (
+ 'gtest_xml_output_unittest_.cc:*\n'
+ 'Expected equality of these values:\n'
+ ' 2\n 3'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ },
+ ],
+ },
+ {
+ 'name': 'DISABLED_test',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 96,
+ 'status': 'NOTRUN',
+ 'result': 'SUPPRESSED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'MixedResultTest',
+ },
+ ],
+ },
+ {
+ 'name': 'XmlQuotingTest',
+ 'tests': 1,
+ 'failures': 1,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'OutputsCData',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 100,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'XmlQuotingTest',
+ 'failures': [{
+ 'failure': (
+ 'gtest_xml_output_unittest_.cc:*\n'
+ 'Failed\nXML output: <?xml encoding="utf-8">'
+ '<top><![CDATA[cdata text]]></top>'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ }],
+ }],
+ },
+ {
+ 'name': 'InvalidCharactersTest',
+ 'tests': 1,
+ 'failures': 1,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'InvalidCharactersInMessage',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 107,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'InvalidCharactersTest',
+ 'failures': [{
+ 'failure': (
+ 'gtest_xml_output_unittest_.cc:*\n'
+ 'Failed\nInvalid characters in brackets'
+ ' [\x01\x02]'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ }],
+ }],
+ },
+ {
+ 'name': 'PropertyRecordingTest',
+ 'tests': 4,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'SetUpTestSuite': 'yes',
+ 'TearDownTestSuite': 'aye',
+ 'testsuite': [
+ {
+ 'name': 'OneProperty',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 119,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'PropertyRecordingTest',
+ 'key_1': '1',
+ },
+ {
+ 'name': 'IntValuedProperty',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 123,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'PropertyRecordingTest',
+ 'key_int': '1',
+ },
+ {
+ 'name': 'ThreeProperties',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 127,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'PropertyRecordingTest',
+ 'key_1': '1',
+ 'key_2': '2',
+ 'key_3': '3',
+ },
+ {
+ 'name': 'TwoValuesForOneKeyUsesLastValue',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 133,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'PropertyRecordingTest',
+ 'key_1': '2',
+ },
+ ],
+ },
+ {
+ 'name': 'NoFixtureTest',
+ 'tests': 3,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [
+ {
+ 'name': 'RecordProperty',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 138,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'NoFixtureTest',
+ 'key': '1',
+ },
+ {
+ 'name': 'ExternalUtilityThatCallsRecordIntValuedProperty',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 151,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'NoFixtureTest',
+ 'key_for_utility_int': '1',
+ },
+ {
+ 'name': (
+ 'ExternalUtilityThatCallsRecordStringValuedProperty'
+ ),
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 155,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'NoFixtureTest',
+ 'key_for_utility_string': '1',
+ },
+ ],
+ },
+ {
+ 'name': 'TypedTest/0',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'HasTypeParamAttribute',
+ 'type_param': 'int',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 171,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'TypedTest/0',
+ }],
+ },
+ {
+ 'name': 'TypedTest/1',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'HasTypeParamAttribute',
+ 'type_param': 'long',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 171,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'TypedTest/1',
+ }],
+ },
+ {
+ 'name': 'Single/TypeParameterizedTestSuite/0',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'HasTypeParamAttribute',
+ 'type_param': 'int',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 178,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'Single/TypeParameterizedTestSuite/0',
+ }],
+ },
+ {
+ 'name': 'Single/TypeParameterizedTestSuite/1',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'HasTypeParamAttribute',
+ 'type_param': 'long',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 178,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'Single/TypeParameterizedTestSuite/1',
+ }],
+ },
+ {
+ 'name': 'Single/ValueParamTest',
+ 'tests': 4,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [
+ {
+ 'name': 'HasValueParamAttribute/0',
+ 'value_param': '33',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 162,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'Single/ValueParamTest',
+ },
+ {
+ 'name': 'HasValueParamAttribute/1',
+ 'value_param': '42',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 162,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'Single/ValueParamTest',
+ },
+ {
+ 'name': 'AnotherTestThatHasValueParamAttribute/0',
+ 'value_param': '33',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 163,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'Single/ValueParamTest',
+ },
+ {
+ 'name': 'AnotherTestThatHasValueParamAttribute/1',
+ 'value_param': '42',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 163,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'Single/ValueParamTest',
+ },
+ ],
+ },
+ ],
}
EXPECTED_FILTERED = {
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'name':
- u'AllTests',
- u'ad_hoc_property':
- u'42',
- u'testsuites': [{
- u'name':
- u'SuccessfulTest',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name': u'Succeeds',
- u'file': u'gtest_xml_output_unittest_.cc',
- u'line': 51,
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'timestamp': u'*',
- u'classname': u'SuccessfulTest',
- }]
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'name': 'AllTests',
+ 'ad_hoc_property': '42',
+ 'testsuites': [{
+ 'name': 'SuccessfulTest',
+ 'tests': 1,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': 'Succeeds',
+ 'file': 'gtest_xml_output_unittest_.cc',
+ 'line': 51,
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': 'SuccessfulTest',
+ }],
}],
}
EXPECTED_NO_TEST = {
- u'tests':
- 0,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'name':
- u'AllTests',
- u'testsuites': [{
- u'name':
- u'NonTestSuiteFailure',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'skipped':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'testsuite': [{
- u'name':
- u'',
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'timestamp':
- u'*',
- u'classname':
- u'',
- u'failures': [{
- u'failure': u'gtest_no_test_unittest.cc:*\n'
- u'Expected equality of these values:\n'
- u' 1\n 2' + STACK_TRACE_TEMPLATE,
- u'type': u'',
- }]
- }]
+ 'tests': 0,
+ 'failures': 0,
+ 'disabled': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'name': 'AllTests',
+ 'testsuites': [{
+ 'name': 'NonTestSuiteFailure',
+ 'tests': 1,
+ 'failures': 1,
+ 'disabled': 0,
+ 'skipped': 0,
+ 'errors': 0,
+ 'time': '*',
+ 'timestamp': '*',
+ 'testsuite': [{
+ 'name': '',
+ 'status': 'RUN',
+ 'result': 'COMPLETED',
+ 'time': '*',
+ 'timestamp': '*',
+ 'classname': '',
+ 'failures': [{
+ 'failure': (
+ 'gtest_no_test_unittest.cc:*\n'
+ 'Expected equality of these values:\n'
+ ' 1\n 2'
+ + STACK_TRACE_TEMPLATE
+ ),
+ 'type': '',
+ }],
+ }],
}],
}
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
-SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
- [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
+SUPPORTS_TYPED_TESTS = (
+ 'TypedTest'
+ in gtest_test_utils.Subprocess(
+ [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False
+ ).output
+)
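
The membership test above is a feature probe: the harness runs the binary with --gtest_list_tests and checks whether a suite name appears in the listing before deciding to exercise it. A minimal standalone sketch of that pattern, using plain subprocess instead of the repo's gtest_test_utils wrapper; the binary path in the usage comment is a placeholder:

    import subprocess

    def binary_supports_suite(binary_path, suite_name):
        # Ask the googletest binary to list its tests instead of running them.
        result = subprocess.run(
            [binary_path, '--gtest_list_tests'],
            capture_output=True, text=True, check=True,
        )
        # A suite counts as supported if its name appears in the listing.
        return suite_name in result.stdout

    # Hypothetical usage:
    # if binary_supports_suite('./gtest_json_output_unittest_', 'TypedTest'): ...
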
class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
- """Unit test for Google Test's JSON output functionality.
- """
+ """Unit test for Google Test's JSON output functionality."""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
@@ -775,16 +656,23 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
        match,
- 'JSON datettime string %s has incorrect format' % date_time_str)
+      'JSON datetime string %s has incorrect format' % date_time_str,
+ )
date_time_from_json = datetime.datetime(
- year=int(match.group(1)), month=int(match.group(2)),
- day=int(match.group(3)), hour=int(match.group(4)),
- minute=int(match.group(5)), second=int(match.group(6)))
+ year=int(match.group(1)),
+ month=int(match.group(2)),
+ day=int(match.group(3)),
+ hour=int(match.group(4)),
+ minute=int(match.group(5)),
+ second=int(match.group(6)),
+ )
time_delta = abs(datetime.datetime.now() - date_time_from_json)
# timestamp value should be near the current local time
- self.assertTrue(time_delta < datetime.timedelta(seconds=600),
- 'time_delta is %s' % time_delta)
+ self.assertTrue(
+ time_delta < datetime.timedelta(seconds=600),
+ 'time_delta is %s' % time_delta,
+ )
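
The hunk above only reflows a freshness check, but the underlying logic is worth spelling out: the test regex-parses the report's ISO-like timestamp and asserts it falls within 600 seconds of the current local time. A self-contained sketch of the same check (the window size comes from the diff; the helper name is invented):

    import datetime
    import re

    def assert_timestamp_is_fresh(date_time_str, max_skew_seconds=600):
        # Matches strings such as '2023-01-25T17:13:35'.
        match = re.match(
            r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
        if not match:
            raise AssertionError(
                'JSON datetime string %s has incorrect format' % date_time_str)
        parsed = datetime.datetime(*(int(g) for g in match.groups()))
        delta = abs(datetime.datetime.now() - parsed)
        if delta >= datetime.timedelta(seconds=max_skew_seconds):
            raise AssertionError('time_delta is %s' % delta)

    assert_timestamp_is_fresh(
        datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
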
def testDefaultOutputFile(self):
"""Verifies the default output file name.
@@ -792,10 +680,12 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
Confirms that Google Test produces a JSON output file with the expected
default name if no name is explicitly specified.
"""
- output_file = os.path.join(gtest_test_utils.GetTempDir(),
- GTEST_DEFAULT_OUTPUT_FILE)
+ output_file = os.path.join(
+ gtest_test_utils.GetTempDir(), GTEST_DEFAULT_OUTPUT_FILE
+ )
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
- 'gtest_no_test_unittest')
+ 'gtest_no_test_unittest'
+ )
try:
os.remove(output_file)
except OSError:
@@ -805,7 +695,8 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=json' % GTEST_OUTPUT_FLAG],
- working_dir=gtest_test_utils.GetTempDir())
+ working_dir=gtest_test_utils.GetTempDir(),
+ )
self.assertTrue(p.exited)
self.assertEqual(0, p.exit_code)
self.assertTrue(os.path.isfile(output_file))
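
For readers skimming the reformatting: this test's substance is that --gtest_output=json with no explicit path makes googletest write its report to a default file name in the working directory (GTEST_DEFAULT_OUTPUT_FILE in this script; upstream docs give test_detail.json as the JSON default). A hedged sketch of that behavior:

    import os
    import subprocess
    import tempfile

    def writes_default_json_report(binary_path):
        # Run in a scratch directory so the default report lands somewhere known.
        workdir = tempfile.mkdtemp()
        subprocess.run([binary_path, '--gtest_output=json'],
                       cwd=workdir, check=False)
        # 'test_detail.json' is googletest's documented default report name.
        return os.path.isfile(os.path.join(workdir, 'test_detail.json'))
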
@@ -817,20 +708,24 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
shut down before RUN_ALL_TESTS is invoked.
"""
- json_path = os.path.join(gtest_test_utils.GetTempDir(),
- GTEST_PROGRAM_NAME + 'out.json')
+ json_path = os.path.join(
+ gtest_test_utils.GetTempDir(), GTEST_PROGRAM_NAME + 'out.json'
+ )
if os.path.isfile(json_path):
os.remove(json_path)
- command = [GTEST_PROGRAM_PATH,
- '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path),
- '--shut_down_xml']
+ command = [
+ GTEST_PROGRAM_PATH,
+ '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path),
+ '--shut_down_xml',
+ ]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
- '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
+ '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal),
+ )
else:
self.assertTrue(p.exited)
self.assertEqual(
@@ -849,8 +744,12 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
non-selected tests do not show up in the JSON output.
"""
- self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED, 0,
- extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
+ self._TestJsonOutput(
+ GTEST_PROGRAM_NAME,
+ EXPECTED_FILTERED,
+ 0,
+ extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG],
+ )
def _GetJsonOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""Returns the JSON output generated by running the program gtest_prog_name.
@@ -862,14 +761,15 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
extra_args: extra arguments to binary invocation.
expected_exit_code: program's exit code.
"""
- json_path = os.path.join(gtest_test_utils.GetTempDir(),
- gtest_prog_name + 'out.json')
+ json_path = os.path.join(
+ gtest_test_utils.GetTempDir(), gtest_prog_name + 'out.json'
+ )
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
- command = (
- [gtest_prog_path, '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path)] +
- extra_args
- )
+ command = [
+ gtest_prog_path,
+ '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path),
+ ] + extra_args
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assertTrue(
@@ -888,8 +788,9 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
actual = json.load(f)
return actual
- def _TestJsonOutput(self, gtest_prog_name, expected,
- expected_exit_code, extra_args=None):
+ def _TestJsonOutput(
+ self, gtest_prog_name, expected, expected_exit_code, extra_args=None
+ ):
"""Checks the JSON output generated by the Google Test binary.
Asserts that the JSON document generated by running the program
@@ -904,8 +805,9 @@ class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
extra_args: extra arguments to binary invocation.
"""
- actual = self._GetJsonOutput(gtest_prog_name, extra_args or [],
- expected_exit_code)
+ actual = self._GetJsonOutput(
+ gtest_prog_name, extra_args or [], expected_exit_code
+ )
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
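
Setting the mechanical rewraps aside, the flow these helpers implement is: run the binary with --gtest_output=json:<path>, verify the exit status, load the file, normalize run-specific fields, and compare the result against an expected dict whose volatile values are the wildcard '*'. A condensed end-to-end sketch under those assumptions (normalize stands in for gtest_json_test_utils.normalize):

    import json
    import os
    import subprocess
    import tempfile

    def run_and_load_json(binary_path, extra_args=()):
        # Direct googletest's JSON report to a temp file, then parse it.
        json_path = os.path.join(tempfile.mkdtemp(), 'out.json')
        subprocess.run(
            [binary_path, '--gtest_output=json:%s' % json_path, *extra_args],
            check=False,  # a failing test run still writes the report
        )
        with open(json_path) as f:
            return json.load(f)

    # Hypothetical comparison, mirroring the test's final assertion:
    # actual = run_and_load_json('./gtest_xml_output_unittest_')
    # assert EXPECTED == normalize(actual)
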
diff --git a/googletest/test/googletest-list-tests-unittest.py b/googletest/test/googletest-list-tests-unittest.py
index 485150d..f59fca0 100755
--- a/googletest/test/googletest-list-tests-unittest.py
+++ b/googletest/test/googletest-list-tests-unittest.py
@@ -46,11 +46,14 @@ from googletest.test import gtest_test_utils
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the googletest-list-tests-unittest_ program.
-EXE_PATH = gtest_test_utils.GetTestExecutablePath('googletest-list-tests-unittest_')
+EXE_PATH = gtest_test_utils.GetTestExecutablePath(
+ 'googletest-list-tests-unittest_'
+)
# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests
-EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
+EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(
+ r"""FooDeathTest\.
Test1
Foo\.
Bar1
@@ -90,11 +93,13 @@ MyInstantiation/ValueParamTest\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
-""")
+"""
+)
# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
-EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
+EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(
+ r"""FooDeathTest\.
Test1
Foo\.
Bar1
@@ -106,7 +111,8 @@ FooTest\.
Test1
DISABLED_Test2
Test3
-""")
+"""
+)
# Utilities.
@@ -114,8 +120,9 @@ FooTest\.
def Run(args):
"""Runs googletest-list-tests-unittest_ and returns the list of tests printed."""
- return gtest_test_utils.Subprocess([EXE_PATH] + args,
- capture_stderr=False).output
+ return gtest_test_utils.Subprocess(
+ [EXE_PATH] + args, capture_stderr=False
+ ).output
# The unit test.
@@ -129,13 +136,12 @@ class GTestListTestsUnitTest(gtest_test_utils.TestCase):
the correct tests.
Args:
- flag_value: value of the --gtest_list_tests flag;
- None if the flag should not be present.
- expected_output_re: regular expression that matches the expected
- output after running command;
- other_flag: a different flag to be passed to command
- along with gtest_list_tests;
- None if the flag should not be present.
+ flag_value: value of the --gtest_list_tests flag; None if the flag
+ should not be present.
+ expected_output_re: regular expression that matches the expected output
+ after running command;
+ other_flag: a different flag to be passed to command along with
+ gtest_list_tests; None if the flag should not be present.
"""
if flag_value is None:
@@ -178,34 +184,37 @@ class GTestListTestsUnitTest(gtest_test_utils.TestCase):
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
- self.RunAndVerify(flag_value=None,
- expected_output_re=None,
- other_flag=None)
+ self.RunAndVerify(flag_value=None, expected_output_re=None, other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
- self.RunAndVerify(flag_value='0',
- expected_output_re=None,
- other_flag=None)
- self.RunAndVerify(flag_value='1',
- expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
- other_flag=None)
+ self.RunAndVerify(flag_value='0', expected_output_re=None, other_flag=None)
+ self.RunAndVerify(
+ flag_value='1',
+ expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
+ other_flag=None,
+ )
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
- self.RunAndVerify(flag_value='1',
- expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
- other_flag='--gtest_break_on_failure')
+ self.RunAndVerify(
+ flag_value='1',
+ expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
+ other_flag='--gtest_break_on_failure',
+ )
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
- --gtest_filter flag."""
+ --gtest_filter flag.
+ """
- self.RunAndVerify(flag_value='1',
- expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
- other_flag='--gtest_filter=Foo*')
+ self.RunAndVerify(
+ flag_value='1',
+ expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
+ other_flag='--gtest_filter=Foo*',
+ )
if __name__ == '__main__':
diff --git a/googletest/test/googletest-output-test.py b/googletest/test/googletest-output-test.py
index ff44483..5185ed9 100755
--- a/googletest/test/googletest-output-test.py
+++ b/googletest/test/googletest-output-test.py
@@ -63,20 +63,32 @@ PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('googletest-output-test_')
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
-COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
- '--gtest_print_time',
- 'internal_skip_environment_and_ad_hoc_tests',
- '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
+COMMAND_WITH_TIME = (
+ {},
+ [
+ PROGRAM_PATH,
+ '--gtest_print_time',
+ 'internal_skip_environment_and_ad_hoc_tests',
+ '--gtest_filter=FatalFailureTest.*:LoggingTest.*',
+ ],
+)
COMMAND_WITH_DISABLED = (
- {}, [PROGRAM_PATH,
- '--gtest_also_run_disabled_tests',
- 'internal_skip_environment_and_ad_hoc_tests',
- '--gtest_filter=*DISABLED_*'])
+ {},
+ [
+ PROGRAM_PATH,
+ '--gtest_also_run_disabled_tests',
+ 'internal_skip_environment_and_ad_hoc_tests',
+ '--gtest_filter=*DISABLED_*',
+ ],
+)
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
- [PROGRAM_PATH,
- 'internal_skip_environment_and_ad_hoc_tests',
- '--gtest_filter=PassingTest.*'])
+ [
+ PROGRAM_PATH,
+ 'internal_skip_environment_and_ad_hoc_tests',
+ '--gtest_filter=PassingTest.*',
+ ],
+)
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
@@ -100,16 +112,20 @@ def RemoveLocations(test_output):
'FILE_NAME:#: '.
"""
- return re.sub(r'.*[/\\]((googletest-output-test_|gtest).cc)(\:\d+|\(\d+\))\: ',
- r'\1:#: ', test_output)
+ return re.sub(
+ r'.*[/\\]((googletest-output-test_|gtest).cc)(\:\d+|\(\d+\))\: ',
+ r'\1:#: ',
+ test_output,
+ )
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
- return re.sub(r'Stack trace:(.|\n)*?\n\n',
- 'Stack trace: (omitted)\n\n', output)
+ return re.sub(
+ r'Stack trace:(.|\n)*?\n\n', 'Stack trace: (omitted)\n\n', output
+ )
def RemoveStackTraces(output):
@@ -156,14 +172,12 @@ def NormalizeToCurrentPlatform(test_output):
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
- output = re.sub(r'\d+ tests?, listed below',
- '? tests, listed below', output)
- output = re.sub(r'\d+ FAILED TESTS',
- '? FAILED TESTS', output)
- output = re.sub(r'\d+ tests? from \d+ test cases?',
- '? tests from ? test cases', output)
- output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
- r'? tests from \1', output)
+ output = re.sub(r'\d+ tests?, listed below', '? tests, listed below', output)
+ output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output)
+ output = re.sub(
+ r'\d+ tests? from \d+ test cases?', '? tests from ? test cases', output
+ )
+ output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
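
The rewrapped RemoveTestCounts above is a golden-file normalizer: it replaces any run-dependent counts with '?' so byte-level comparison survives platform differences. A short round trip showing the effect (the sample log line is invented):

    import re

    def remove_test_counts(output):
        output = re.sub(
            r'\d+ tests?, listed below', '? tests, listed below', output)
        output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output)
        output = re.sub(
            r'\d+ tests? from \d+ test cases?', '? tests from ? test cases',
            output)
        output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1',
                        output)
        return re.sub(r'\d+ tests?\.', '? tests.', output)

    print(remove_test_counts('[==========] 12 tests from 3 test cases ran.'))
    # -> '[==========] ? tests from ? test cases ran.'
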
@@ -175,18 +189,19 @@ def RemoveMatchingTests(test_output, pattern):
Args:
test_output: A string containing the test output.
- pattern: A regex string that matches names of test cases or
- tests to remove.
+ pattern: A regex string that matches names of test cases or tests
+ to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
- r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
- pattern, pattern),
+ r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
+ % (pattern, pattern),
'',
- test_output)
+ test_output,
+ )
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
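
The key idiom in RemoveMatchingTests is (.|\n)*?, a non-greedy match that spans newlines, so the substitution deletes a whole [ RUN ] ... [ OK ]/[ FAILED ] block for each matching test and nothing more. A toy demonstration with fabricated log lines (spacing follows the pattern as rendered in this diff):

    import re

    log = ('[ RUN ] FooDeathTest.Works\n'
           'death-test output\n'
           '[ OK ] FooDeathTest.Works\n'
           '[ RUN ] Bar.Stays\n'
           '[ OK ] Bar.Stays\n')
    pattern = 'FooDeathTest'
    scrubbed = re.sub(
        r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
        % (pattern, pattern),
        '', log)
    print(scrubbed)  # only the Bar.Stays block survives
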
@@ -205,8 +220,8 @@ def GetShellCommandOutput(env_cmd):
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
- environment variables to set, and element 1 is a string with
- the command and any flags.
+ environment variables to set, and element 1 is a string with the command
+ and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
@@ -227,8 +242,8 @@ def GetCommandOutput(env_cmd):
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
- environment variables to set, and element 1 is a string with
- the command and any flags.
+ environment variables to set, and element 1 is a string with the command
+ and any flags.
"""
# Disables exception pop-ups on Windows.
@@ -241,10 +256,12 @@ def GetCommandOutput(env_cmd):
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
- return (GetCommandOutput(COMMAND_WITH_COLOR) +
- GetCommandOutput(COMMAND_WITH_TIME) +
- GetCommandOutput(COMMAND_WITH_DISABLED) +
- GetCommandOutput(COMMAND_WITH_SHARDING))
+ return (
+ GetCommandOutput(COMMAND_WITH_COLOR)
+ + GetCommandOutput(COMMAND_WITH_TIME)
+ + GetCommandOutput(COMMAND_WITH_DISABLED)
+ + GetCommandOutput(COMMAND_WITH_SHARDING)
+ )
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
@@ -253,12 +270,16 @@ SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
-CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
- SUPPORTS_TYPED_TESTS and
- SUPPORTS_THREADS and
- SUPPORTS_STACK_TRACES)
+CAN_GENERATE_GOLDEN_FILE = (
+ SUPPORTS_DEATH_TESTS
+ and SUPPORTS_TYPED_TESTS
+ and SUPPORTS_THREADS
+ and SUPPORTS_STACK_TRACES
+)
+
class GTestOutputTest(gtest_test_utils.TestCase):
+
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
@@ -267,12 +288,13 @@ class GTestOutputTest(gtest_test_utils.TestCase):
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
- test_output = RemoveMatchingTests(test_output,
- 'ExpectFailureWithThreadsTest')
- test_output = RemoveMatchingTests(test_output,
- 'ScopedFakeTestPartResultReporterTest')
- test_output = RemoveMatchingTests(test_output,
- 'WorksConcurrently')
+ test_output = RemoveMatchingTests(
+ test_output, 'ExpectFailureWithThreadsTest'
+ )
+ test_output = RemoveMatchingTests(
+ test_output, 'ScopedFakeTestPartResultReporterTest'
+ )
+ test_output = RemoveMatchingTests(test_output, 'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
@@ -297,27 +319,42 @@ class GTestOutputTest(gtest_test_utils.TestCase):
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
- self.assertEqual(normalized_golden, normalized_actual,
- '\n'.join(difflib.unified_diff(
- normalized_golden.split('\n'),
- normalized_actual.split('\n'),
- 'golden', 'actual')))
+ self.assertEqual(
+ normalized_golden,
+ normalized_actual,
+ '\n'.join(
+ difflib.unified_diff(
+ normalized_golden.split('\n'),
+ normalized_actual.split('\n'),
+ 'golden',
+ 'actual',
+ )
+ ),
+ )
else:
normalized_actual = NormalizeToCurrentPlatform(
- RemoveTestCounts(normalized_actual))
+ RemoveTestCounts(normalized_actual)
+ )
normalized_golden = NormalizeToCurrentPlatform(
- RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
+ RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden))
+ )
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
- open(os.path.join(
- gtest_test_utils.GetSourceDir(),
- '_googletest-output-test_normalized_actual.txt'), 'wb').write(
- normalized_actual)
- open(os.path.join(
- gtest_test_utils.GetSourceDir(),
- '_googletest-output-test_normalized_golden.txt'), 'wb').write(
- normalized_golden)
+ open(
+ os.path.join(
+ gtest_test_utils.GetSourceDir(),
+ '_googletest-output-test_normalized_actual.txt',
+ ),
+ 'wb',
+ ).write(normalized_actual)
+ open(
+ os.path.join(
+ gtest_test_utils.GetSourceDir(),
+ '_googletest-output-test_normalized_golden.txt',
+ ),
+ 'wb',
+ ).write(normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
@@ -334,11 +371,10 @@ if __name__ == '__main__':
golden_file.write(output.encode())
golden_file.close()
else:
- message = (
- """Unable to write a golden file when compiled in an environment
+ message = """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests,
typed tests, stack traces, and multiple threads).
-Please build this test and generate the golden file using Blaze on Linux.""")
+Please build this test and generate the golden file using Blaze on Linux."""
sys.stderr.write(message)
sys.exit(1)
diff --git a/googletest/test/googletest-param-test-invalid-name1-test.py b/googletest/test/googletest-param-test-invalid-name1-test.py
index b8d609a..4886e49 100644
--- a/googletest/test/googletest-param-test-invalid-name1-test.py
+++ b/googletest/test/googletest-param-test-invalid-name1-test.py
@@ -44,7 +44,7 @@ def Assert(condition):
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
- err = ('Parameterized test name \'"InvalidWithQuotes"\' is invalid')
+ err = 'Parameterized test name \'"InvalidWithQuotes"\' is invalid'
p = gtest_test_utils.Subprocess(command)
Assert(p.terminated_by_signal)
diff --git a/googletest/test/googletest-param-test-invalid-name2-test.py b/googletest/test/googletest-param-test-invalid-name2-test.py
index d92fa06..bcd8ddf 100644
--- a/googletest/test/googletest-param-test-invalid-name2-test.py
+++ b/googletest/test/googletest-param-test-invalid-name2-test.py
@@ -44,7 +44,7 @@ def Assert(condition):
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
- err = ('Duplicate parameterized test name \'a\'')
+ err = "Duplicate parameterized test name 'a'"
p = gtest_test_utils.Subprocess(command)
Assert(p.terminated_by_signal)
@@ -58,5 +58,6 @@ class GTestParamTestInvalidName2Test(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
+
if __name__ == '__main__':
gtest_test_utils.Main()
diff --git a/googletest/test/googletest-setuptestsuite-test.py b/googletest/test/googletest-setuptestsuite-test.py
index 9d1fd02..899531f 100755
--- a/googletest/test/googletest-setuptestsuite-test.py
+++ b/googletest/test/googletest-setuptestsuite-test.py
@@ -34,7 +34,8 @@
from googletest.test import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath(
- 'googletest-setuptestsuite-test_')
+ 'googletest-setuptestsuite-test_'
+)
class GTestSetUpTestSuiteTest(gtest_test_utils.TestCase):
@@ -44,11 +45,14 @@ class GTestSetUpTestSuiteTest(gtest_test_utils.TestCase):
self.assertNotEqual(p.exit_code, 0, msg=p.output)
self.assertIn(
- '[ FAILED ] SetupFailTest: SetUpTestSuite or TearDownTestSuite\n'
- '[ FAILED ] TearDownFailTest: SetUpTestSuite or TearDownTestSuite\n'
- '\n'
- ' 2 FAILED TEST SUITES\n',
- p.output)
+ (
+ '[ FAILED ] SetupFailTest: SetUpTestSuite or TearDownTestSuite\n['
+ ' FAILED ] TearDownFailTest: SetUpTestSuite or'
+ ' TearDownTestSuite\n\n 2 FAILED TEST SUITES\n'
+ ),
+ p.output,
+ )
+
if __name__ == '__main__':
gtest_test_utils.Main()
diff --git a/googletest/test/googletest-throw-on-failure-test.py b/googletest/test/googletest-throw-on-failure-test.py
index 8fc4f54..282163b 100755
--- a/googletest/test/googletest-throw-on-failure-test.py
+++ b/googletest/test/googletest-throw-on-failure-test.py
@@ -47,7 +47,8 @@ THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the googletest-throw-on-failure-test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
- 'googletest-throw-on-failure-test_')
+ 'googletest-throw-on-failure-test_'
+)
# Utilities.
@@ -83,9 +84,9 @@ class ThrowOnFailureTest(gtest_test_utils.TestCase):
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
- variable; None if the variable should be unset.
- flag_value: value of the --gtest_break_on_failure flag;
- None if the flag should not be present.
+ variable; None if the variable should be unset.
+ flag_value: value of the --gtest_break_on_failure flag; None if the
+ flag should not be present.
should_fail: True if and only if the program is expected to fail.
"""
@@ -116,10 +117,15 @@ class ThrowOnFailureTest(gtest_test_utils.TestCase):
SetEnvVar(THROW_ON_FAILURE, None)
- msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
- 'exit code.' %
- (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
- should_or_not))
+ msg = (
+ 'when %s%s, an assertion failure in "%s" %s cause a non-zero exit code.'
+ % (
+ THROW_ON_FAILURE,
+ env_var_value_msg,
+ ' '.join(command),
+ should_or_not,
+ )
+ )
self.assertTrue(failed == should_fail, msg)
def testDefaultBehavior(self):
@@ -130,38 +136,22 @@ class ThrowOnFailureTest(gtest_test_utils.TestCase):
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
- self.RunAndVerify(env_var_value='0',
- flag_value=None,
- should_fail=False)
- self.RunAndVerify(env_var_value='1',
- flag_value=None,
- should_fail=True)
+ self.RunAndVerify(env_var_value='0', flag_value=None, should_fail=False)
+ self.RunAndVerify(env_var_value='1', flag_value=None, should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
- self.RunAndVerify(env_var_value=None,
- flag_value='0',
- should_fail=False)
- self.RunAndVerify(env_var_value=None,
- flag_value='1',
- should_fail=True)
+ self.RunAndVerify(env_var_value=None, flag_value='0', should_fail=False)
+ self.RunAndVerify(env_var_value=None, flag_value='1', should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
- self.RunAndVerify(env_var_value='0',
- flag_value='0',
- should_fail=False)
- self.RunAndVerify(env_var_value='0',
- flag_value='1',
- should_fail=True)
- self.RunAndVerify(env_var_value='1',
- flag_value='0',
- should_fail=False)
- self.RunAndVerify(env_var_value='1',
- flag_value='1',
- should_fail=True)
+ self.RunAndVerify(env_var_value='0', flag_value='0', should_fail=False)
+ self.RunAndVerify(env_var_value='0', flag_value='1', should_fail=True)
+ self.RunAndVerify(env_var_value='1', flag_value='0', should_fail=False)
+ self.RunAndVerify(env_var_value='1', flag_value='1', should_fail=True)
if __name__ == '__main__':
diff --git a/googletest/test/googletest-uninitialized-test.py b/googletest/test/googletest-uninitialized-test.py
index 73c9176..e5af7c8 100755
--- a/googletest/test/googletest-uninitialized-test.py
+++ b/googletest/test/googletest-uninitialized-test.py
@@ -33,7 +33,9 @@
from googletest.test import gtest_test_utils
-COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-uninitialized-test_')
+COMMAND = gtest_test_utils.GetTestExecutablePath(
+ 'googletest-uninitialized-test_'
+)
def Assert(condition):
@@ -54,11 +56,12 @@ def TestExitCodeAndOutput(command):
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
if p.exited and p.exit_code == 0:
- Assert('IMPORTANT NOTICE' in p.output);
+ Assert('IMPORTANT NOTICE' in p.output)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
+
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
diff --git a/googletest/test/gtest_help_test.py b/googletest/test/gtest_help_test.py
index 642ab86..1d67415 100755
--- a/googletest/test/gtest_help_test.py
+++ b/googletest/test/gtest_help_test.py
@@ -57,27 +57,43 @@ UNKNOWN_GTEST_PREFIXED_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
-SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
- [PROGRAM_PATH, LIST_TESTS_FLAG]).output
+SUPPORTS_DEATH_TESTS = (
+ 'DeathTest'
+ in gtest_test_utils.Subprocess([PROGRAM_PATH, LIST_TESTS_FLAG]).output
+)
HAS_ABSL_FLAGS = '--has_absl_flags' in sys.argv
# The help message must match this regex.
HELP_REGEX = re.compile(
- FLAG_PREFIX + r'list_tests.*' +
- FLAG_PREFIX + r'filter=.*' +
- FLAG_PREFIX + r'also_run_disabled_tests.*' +
- FLAG_PREFIX + r'repeat=.*' +
- FLAG_PREFIX + r'shuffle.*' +
- FLAG_PREFIX + r'random_seed=.*' +
- FLAG_PREFIX + r'color=.*' +
- FLAG_PREFIX + r'brief.*' +
- FLAG_PREFIX + r'print_time.*' +
- FLAG_PREFIX + r'output=.*' +
- FLAG_PREFIX + r'break_on_failure.*' +
- FLAG_PREFIX + r'throw_on_failure.*' +
- FLAG_PREFIX + r'catch_exceptions=0.*',
- re.DOTALL)
+ FLAG_PREFIX
+ + r'list_tests.*'
+ + FLAG_PREFIX
+ + r'filter=.*'
+ + FLAG_PREFIX
+ + r'also_run_disabled_tests.*'
+ + FLAG_PREFIX
+ + r'repeat=.*'
+ + FLAG_PREFIX
+ + r'shuffle.*'
+ + FLAG_PREFIX
+ + r'random_seed=.*'
+ + FLAG_PREFIX
+ + r'color=.*'
+ + FLAG_PREFIX
+ + r'brief.*'
+ + FLAG_PREFIX
+ + r'print_time.*'
+ + FLAG_PREFIX
+ + r'output=.*'
+ + FLAG_PREFIX
+ + r'break_on_failure.*'
+ + FLAG_PREFIX
+ + r'throw_on_failure.*'
+ + FLAG_PREFIX
+ + r'catch_exceptions=0.*',
+ re.DOTALL,
+)
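
HELP_REGEX above is nothing more than the flag names joined by '.*' under re.DOTALL, which asserts that each flag is documented and that the flags appear in this order in the help text. A compact sketch that builds the same kind of pattern programmatically (flag list abbreviated, prefix assumed to be '--gtest_'):

    import re

    FLAG_PREFIX = '--gtest_'
    flags_in_order = ['list_tests', 'filter=', 'repeat=', 'output=']
    help_regex = re.compile(
        '.*'.join(re.escape(FLAG_PREFIX + f) for f in flags_in_order),
        re.DOTALL)

    fake_help_text = ('--gtest_list_tests ...\n--gtest_filter= ...\n'
                      '--gtest_repeat= ...\n--gtest_output= ...\n')
    assert help_regex.search(fake_help_text)
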
def RunWithFlag(flag):
@@ -172,13 +188,15 @@ class GTestHelpTest(gtest_test_utils.TestCase):
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
- and the help message is not printed."""
+ and the help message is not printed.
+ """
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
- a flag starting with Google Test prefix and 'internal_' is supplied."""
+ a flag starting with Google Test prefix and 'internal_' is supplied.
+ """
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
diff --git a/googletest/test/gtest_json_test_utils.py b/googletest/test/gtest_json_test_utils.py
index f62896c..86a5925 100644
--- a/googletest/test/gtest_json_test_utils.py
+++ b/googletest/test/gtest_json_test_utils.py
@@ -42,6 +42,7 @@ def normalize(obj):
Normalized output without any references to transient information that may
change from run to run.
"""
+
def _normalize(key, value):
if key == 'time':
return re.sub(r'^\d+(\.\d+)?s$', '*', value)
@@ -54,6 +55,7 @@ def normalize(obj):
return re.sub(r'^.*[/\\](.*)', '\\1', value)
else:
return normalize(value)
+
if isinstance(obj, dict):
return {k: _normalize(k, v) for k, v in obj.items()}
if isinstance(obj, list):
diff --git a/googletest/test/gtest_list_output_unittest.py b/googletest/test/gtest_list_output_unittest.py
index faacf10..afd521d 100644
--- a/googletest/test/gtest_list_output_unittest.py
+++ b/googletest/test/gtest_list_output_unittest.py
@@ -224,8 +224,7 @@ EXPECTED_JSON = """{
class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
- """Unit test for Google Test's list tests with output to file functionality.
- """
+ """Unit test for Google Test's list tests with output to file functionality."""
def testXml(self):
"""Verifies XML output for listing tests in a Google Test binary.
@@ -244,19 +243,22 @@ class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
self._TestOutput('json', EXPECTED_JSON)
def _GetOutput(self, out_format):
- file_path = os.path.join(gtest_test_utils.GetTempDir(),
- 'test_out.' + out_format)
+ file_path = os.path.join(
+ gtest_test_utils.GetTempDir(), 'test_out.' + out_format
+ )
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
- 'gtest_list_output_unittest_')
+ 'gtest_list_output_unittest_'
+ )
- command = ([
+ command = [
gtest_prog_path,
'%s=%s:%s' % (GTEST_OUTPUT_FLAG, out_format, file_path),
- '--gtest_list_tests'
- ])
+ '--gtest_list_tests',
+ ]
environ_copy = os.environ.copy()
p = gtest_test_utils.Subprocess(
- command, env=environ_copy, working_dir=gtest_test_utils.GetTempDir())
+ command, env=environ_copy, working_dir=gtest_test_utils.GetTempDir()
+ )
self.assertTrue(p.exited)
self.assertEqual(0, p.exit_code)
@@ -275,9 +277,10 @@ class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
expected_line_re = re.compile(expected_line.strip())
self.assertTrue(
expected_line_re.match(actual_line.strip()),
- ('actual output of "%s",\n'
- 'which does not match expected regex of "%s"\n'
- 'on line %d' % (actual, expected_output, line_count)))
+ 'actual output of "%s",\n'
+ 'which does not match expected regex of "%s"\n'
+ 'on line %d' % (actual, expected_output, line_count),
+ )
line_count = line_count + 1
diff --git a/googletest/test/gtest_skip_check_output_test.py b/googletest/test/gtest_skip_check_output_test.py
index 1c87b44..b30a165 100755
--- a/googletest/test/gtest_skip_check_output_test.py
+++ b/googletest/test/gtest_skip_check_output_test.py
@@ -51,7 +51,8 @@ class SkipEntireEnvironmentTest(gtest_test_utils.TestCase):
skip_fixture = 'Skipped\nskipping all tests for this fixture\n'
self.assertIsNotNone(
re.search(skip_fixture + '.*' + skip_fixture, OUTPUT, flags=re.DOTALL),
- repr(OUTPUT))
+ repr(OUTPUT),
+ )
self.assertNotIn('FAILED', OUTPUT)
diff --git a/googletest/test/gtest_skip_environment_check_output_test.py b/googletest/test/gtest_skip_environment_check_output_test.py
index 6960b11..388a4e9 100755
--- a/googletest/test/gtest_skip_environment_check_output_test.py
+++ b/googletest/test/gtest_skip_environment_check_output_test.py
@@ -37,7 +37,8 @@ from googletest.test import gtest_test_utils
# Path to the gtest_skip_in_environment_setup_test binary
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
- 'gtest_skip_in_environment_setup_test')
+ 'gtest_skip_in_environment_setup_test'
+)
OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
diff --git a/googletest/test/gtest_xml_outfiles_test.py b/googletest/test/gtest_xml_outfiles_test.py
index 73fb2b3..b482f02 100755
--- a/googletest/test/gtest_xml_outfiles_test.py
+++ b/googletest/test/gtest_xml_outfiles_test.py
@@ -76,8 +76,9 @@ class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
- self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
- GTEST_OUTPUT_SUBDIR, "")
+ self.output_dir_ = os.path.join(
+ gtest_test_utils.GetTempDir(), GTEST_OUTPUT_SUBDIR, ""
+ )
self.DeleteFilesAndDir()
def tearDown(self):
@@ -106,14 +107,15 @@ class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
- p = gtest_test_utils.Subprocess(command,
- working_dir=gtest_test_utils.GetTempDir())
+ p = gtest_test_utils.Subprocess(
+ command, working_dir=gtest_test_utils.GetTempDir()
+ )
self.assertTrue(p.exited)
self.assertEqual(0, p.exit_code)
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
- output_file_name2 = 'lt-' + output_file_name1
+ output_file_name2 = "lt-" + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assertTrue(
os.path.isfile(output_file1) or os.path.isfile(output_file2),
@@ -126,8 +128,7 @@ class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
- self.AssertEquivalentNodes(expected.documentElement,
- actual.documentElement)
+ self.AssertEquivalentNodes(expected.documentElement, actual.documentElement)
expected.unlink()
actual.unlink()