author     zhanyong.wan <zhanyong.wan@861a406c-534a-0410-8894-cb66d6ee9925>  2010-08-19 22:16:00 (GMT)
committer  zhanyong.wan <zhanyong.wan@861a406c-534a-0410-8894-cb66d6ee9925>  2010-08-19 22:16:00 (GMT)
commit     a9f380f5c7ff75cd715c58e11885dddc54baef02 (patch)
tree       8cf27e0b642c7ad8eeb97858369d237fe104eb41
parent     b83585c4de97ee0b6e797836b17b701947dfea93 (diff)
Removes the Windows golden file (by Vlad Losev); implements test result streaming (by Nikhil Jindal and cleaned up by Zhanyong Wan).
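The new StreamingListener added below reports each test event as one newline-terminated line of '&'-separated key=value pairs, written to a TCP connection opened to the host:port named by --gtest_stream_result_to. As an illustration only (not output produced by this commit's tests), a run with a single passing test FooTest.Bar would stream approximately:

  gtest_streaming_protocol_version=1.0
  event=TestProgramStart
  event=TestIterationStart&iteration=0
  event=TestCaseStart&name=FooTest
  event=TestStart&name=Bar
  event=TestEnd&passed=1&elapsed_time=0ms
  event=TestCaseEnd&passed=1&elapsed_time=0ms
  event=TestIterationEnd&passed=1&elapsed_time=0ms
  event=TestProgramEnd&passed=1

The version line is the handshake sent from the listener's constructor; closing the socket signals the end of the test program.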
-rw-r--r--  Makefile.am                              1
-rw-r--r--  include/gtest/gtest.h                    7
-rw-r--r--  include/gtest/internal/gtest-port.h      5
-rw-r--r--  src/gtest-internal-inl.h                10
-rw-r--r--  src/gtest.cc                           248
-rwxr-xr-x  test/gtest_help_test.py                 13
-rwxr-xr-x  test/gtest_output_test.py               40
-rw-r--r--  test/gtest_output_test_golden_win.txt  577
-rw-r--r--  test/gtest_unittest.cc                  36
9 files changed, 319 insertions(+), 618 deletions(-)
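This commit implements only the client side of the stream; any TCP server can consume it. The following receiver is an illustrative sketch, not part of the commit (the file name and port number are made up): it accepts one connection and prints each event line.

  # stream_server.py - toy receiver for --gtest_stream_result_to (illustrative).
  import socket

  def main(port=9000):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('', port))
    server.listen(1)
    conn, _ = server.accept()        # one test binary connects per run
    buffered = b''
    while True:
      chunk = conn.recv(4096)
      if not chunk:                  # the listener closed the socket
        break
      buffered += chunk
      while b'\n' in buffered:
        line, buffered = buffered.split(b'\n', 1)
        print(line.decode())         # e.g. event=TestStart&name=Bar
    conn.close()
    server.close()

  if __name__ == '__main__':
    main()

With the server running, a test binary built from this revision would be pointed at it with, for example, ./gtest_unittest --gtest_stream_result_to=localhost:9000.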
diff --git a/Makefile.am b/Makefile.am
index 112e235..e4628aa 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -102,7 +102,6 @@ EXTRA_DIST += \
   test/gtest_list_tests_unittest.py \
   test/gtest_output_test.py \
   test/gtest_output_test_golden_lin.txt \
-  test/gtest_output_test_golden_win.txt \
   test/gtest_shuffle_test.py \
   test/gtest_test_utils.py \
   test/gtest_throw_on_failure_test.py \
diff --git a/include/gtest/gtest.h b/include/gtest/gtest.h
index 20028a1..3efbece 100644
--- a/include/gtest/gtest.h
+++ b/include/gtest/gtest.h
@@ -137,6 +137,11 @@ GTEST_DECLARE_int32_(stack_trace_depth);
 // non-zero code otherwise.
 GTEST_DECLARE_bool_(throw_on_failure);
 
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
 // The upper limit for valid stack trace depths.
 const int kMaxStackTraceDepth = 100;
 
@@ -155,8 +160,6 @@ class WindowsDeathTest;
 class UnitTestImpl* GetUnitTestImpl();
 void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
                                     const String& message);
-class PrettyUnitTestResultPrinter;
-class XmlUnitTestResultPrinter;
 
 // Converts a streamable value to a String. A NULL pointer is
 // converted to "(null)". When the input value is a ::string,
diff --git a/include/gtest/internal/gtest-port.h b/include/gtest/internal/gtest-port.h
index 733dae5..75c3f20 100644
--- a/include/gtest/internal/gtest-port.h
+++ b/include/gtest/internal/gtest-port.h
@@ -537,6 +537,11 @@
 #define GTEST_WIDE_STRING_USES_UTF16_ \
     (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
 
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+#define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
 // Defines some utility macros.
 
 // The GNU compiler emits a warning if nested "if" statements are followed by
diff --git a/src/gtest-internal-inl.h b/src/gtest-internal-inl.h
index d1e0b59..73920e4 100644
--- a/src/gtest-internal-inl.h
+++ b/src/gtest-internal-inl.h
@@ -93,6 +93,7 @@ const char kRandomSeedFlag[] = "random_seed";
 const char kRepeatFlag[] = "repeat";
 const char kShuffleFlag[] = "shuffle";
 const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
 const char kThrowOnFailureFlag[] = "throw_on_failure";
 
 // A valid random seed must be in [1, kMaxRandomSeed].
@@ -165,6 +166,7 @@ class GTestFlagSaver {
     repeat_ = GTEST_FLAG(repeat);
     shuffle_ = GTEST_FLAG(shuffle);
     stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+    stream_result_to_ = GTEST_FLAG(stream_result_to);
     throw_on_failure_ = GTEST_FLAG(throw_on_failure);
   }
 
@@ -185,6 +187,7 @@ class GTestFlagSaver {
     GTEST_FLAG(repeat) = repeat_;
     GTEST_FLAG(shuffle) = shuffle_;
     GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+    GTEST_FLAG(stream_result_to) = stream_result_to_;
     GTEST_FLAG(throw_on_failure) = throw_on_failure_;
   }
 private:
@@ -205,6 +208,7 @@ class GTestFlagSaver {
   internal::Int32 repeat_;
   bool shuffle_;
   internal::Int32 stack_trace_depth_;
+  String stream_result_to_;
   bool throw_on_failure_;
 } GTEST_ATTRIBUTE_UNUSED_;
 
@@ -741,6 +745,12 @@ class GTEST_API_ UnitTestImpl {
   // UnitTestOptions. Must not be called before InitGoogleTest.
   void ConfigureXmlOutput();
 
+#if GTEST_CAN_STREAM_RESULTS_
+  // Initializes the event listener for streaming test results to a socket.
+  // Must not be called before InitGoogleTest.
+  void ConfigureStreamingOutput();
+#endif
+
   // Performs initialization dependent upon flag values obtained in
   // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
   // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
diff --git a/src/gtest.cc b/src/gtest.cc
index f3768e3..93ab6a8 100644
--- a/src/gtest.cc
+++ b/src/gtest.cc
@@ -43,7 +43,7 @@
 #include <wctype.h>
 
 #include <algorithm>
-#include <ostream>
+#include <ostream>  // NOLINT
 #include <sstream>
 #include <vector>
 
@@ -53,16 +53,15 @@
 // gettimeofday().
 #define GTEST_HAS_GETTIMEOFDAY_ 1
 
-#include <fcntl.h>
-#include <limits.h>
-#include <sched.h>
+#include <fcntl.h>  // NOLINT
+#include <limits.h>  // NOLINT
+#include <sched.h>  // NOLINT
 // Declares vsnprintf(). This header is not available on Windows.
-#include <strings.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <unistd.h>
+#include <strings.h>  // NOLINT
+#include <sys/mman.h>  // NOLINT
+#include <sys/time.h>  // NOLINT
+#include <unistd.h>  // NOLINT
 #include <string>
-#include <vector>
 
 #elif GTEST_OS_SYMBIAN
 #define GTEST_HAS_GETTIMEOFDAY_ 1
@@ -119,6 +118,11 @@
 #include <stdexcept>
 #endif
 
+#if GTEST_CAN_STREAM_RESULTS_
+#include <arpa/inet.h>  // NOLINT
+#include <netdb.h>  // NOLINT
+#endif
+
 // Indicates that this translation unit is part of Google Test's
 // implementation. It must come before gtest-internal-inl.h is
 // included, or there will be a compiler error. This trick is to
@@ -258,6 +262,13 @@ GTEST_DEFINE_int32_(
     "The maximum number of stack frames to print when an "
     "assertion fails. The valid range is 0 through 100, inclusive.");
 
+GTEST_DEFINE_string_(
+    stream_result_to,
+    internal::StringFromGTestEnv("stream_result_to", ""),
+    "This flag specifies the host name and the port number on which to stream "
+    "test results. Example: \"localhost:555\". The flag is effective only on "
+    "Linux.");
+
 GTEST_DEFINE_bool_(
     throw_on_failure,
     internal::BoolFromGTestEnv("throw_on_failure", false),
@@ -2286,8 +2297,8 @@ void TestInfo::Run() {
       factory_, &internal::TestFactoryBase::CreateTest,
       "the test fixture's constructor");
 
-  // Runs the test only if the test object was created and its constructor didn't
-  // generate a fatal failure.
+  // Runs the test only if the test object was created and its
+  // constructor didn't generate a fatal failure.
   if ((test != NULL) && !Test::HasFatalFailure()) {
     // This doesn't throw as all user code that can throw are wrapped into
     // exception handling code.
@@ -2800,8 +2811,8 @@ void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
   }
 }
 
-  void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
-                                                       int /*iteration*/) {
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+                                                     int /*iteration*/) {
   ColoredPrintf(COLOR_GREEN, "[==========] ");
   printf("%s from %s ran.",
          FormatTestCount(unit_test.test_to_run_count()).c_str(),
@@ -3266,6 +3277,182 @@ String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
 
 // End XmlUnitTestResultPrinter
 
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+class StreamingListener : public EmptyTestEventListener {
+ public:
+  // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
+  static string UrlEncode(const char* str);
+
+  StreamingListener(const string& host, const string& port)
+      : sockfd_(-1), host_name_(host), port_num_(port) {
+    MakeConnection();
+    Send("gtest_streaming_protocol_version=1.0\n");
+  }
+
+  virtual ~StreamingListener() {
+    if (sockfd_ != -1)
+      CloseConnection();
+  }
+
+  void OnTestProgramStart(const UnitTest& /* unit_test */) {
+    Send("event=TestProgramStart\n");
+  }
+
+  void OnTestProgramEnd(const UnitTest& unit_test) {
+    // Note that Google Test currently only reports elapsed time for each
+    // test iteration, not for the entire test program.
+    Send(String::Format("event=TestProgramEnd&passed=%d\n",
+                        unit_test.Passed()));
+
+    // Notify the streaming server to stop.
+    CloseConnection();
+  }
+
+  void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {
+    Send(String::Format("event=TestIterationStart&iteration=%d\n",
+                        iteration));
+  }
+
+  void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {
+    Send(String::Format("event=TestIterationEnd&passed=%d&elapsed_time=%sms\n",
+                        unit_test.Passed(),
+                        StreamableToString(unit_test.elapsed_time()).c_str()));
+  }
+
+  void OnTestCaseStart(const TestCase& test_case) {
+    Send(String::Format("event=TestCaseStart&name=%s\n", test_case.name()));
+  }
+
+  void OnTestCaseEnd(const TestCase& test_case) {
+    Send(String::Format("event=TestCaseEnd&passed=%d&elapsed_time=%sms\n",
+                        test_case.Passed(),
+                        StreamableToString(test_case.elapsed_time()).c_str()));
+  }
+
+  void OnTestStart(const TestInfo& test_info) {
+    Send(String::Format("event=TestStart&name=%s\n", test_info.name()));
+  }
+
+  void OnTestEnd(const TestInfo& test_info) {
+    Send(String::Format(
+        "event=TestEnd&passed=%d&elapsed_time=%sms\n",
+        (test_info.result())->Passed(),
+        StreamableToString((test_info.result())->elapsed_time()).c_str()));
+  }
+
+  void OnTestPartResult(const TestPartResult& test_part_result) {
+    const char* file_name = test_part_result.file_name();
+    if (file_name == NULL)
+      file_name = "";
+    Send(String::Format("event=TestPartResult&file=%s&line=%d&message=",
+                        UrlEncode(file_name).c_str(),
+                        test_part_result.line_number()));
+    Send(UrlEncode(test_part_result.message()) + "\n");
+  }
+
+ private:
+  // Creates a client socket and connects to the server.
+  void MakeConnection();
+
+  // Closes the socket.
+  void CloseConnection() {
+    GTEST_CHECK_(sockfd_ != -1)
+        << "CloseConnection() can be called only when there is a connection.";
+
+    close(sockfd_);
+    sockfd_ = -1;
+  }
+
+  // Sends a string to the socket.
+  void Send(const string& message) {
+    GTEST_CHECK_(sockfd_ != -1)
+        << "Send() can be called only when there is a connection.";
+
+    const int len = static_cast<int>(message.length());
+    if (write(sockfd_, message.c_str(), len) != len) {
+      GTEST_LOG_(WARNING)
+          << "stream_result_to: failed to stream to "
+          << host_name_ << ":" << port_num_;
+    }
+  }
+
+  int sockfd_;  // socket file descriptor
+  const string host_name_;
+  const string port_num_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
+};  // class StreamingListener
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3D". This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
+string StreamingListener::UrlEncode(const char* str) {
+  string result;
+  result.reserve(strlen(str) + 1);
+  for (char ch = *str; ch != '\0'; ch = *++str) {
+    switch (ch) {
+      case '%':
+      case '=':
+      case '&':
+      case '\n':
+        result.append(String::Format("%%%02x", static_cast<unsigned char>(ch)));
+        break;
+      default:
+        result.push_back(ch);
+        break;
+    }
+  }
+  return result;
+}
+
+void StreamingListener::MakeConnection() {
+  GTEST_CHECK_(sockfd_ == -1)
+      << "MakeConnection() can't be called when there is already a connection.";
+
+  addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_UNSPEC;    // To allow both IPv4 and IPv6 addresses.
+  hints.ai_socktype = SOCK_STREAM;
+  addrinfo* servinfo = NULL;
+
+  // Use getaddrinfo() to get a linked list of IP addresses for
+  // the given host name.
+  const int error_num = getaddrinfo(
+      host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+  if (error_num != 0) {
+    GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+                        << gai_strerror(error_num);
+  }
+
+  // Loop through all the results and connect to the first we can.
+  for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
+       cur_addr = cur_addr->ai_next) {
+    sockfd_ = socket(
+        cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
+    if (sockfd_ != -1) {
+      // Connect the client socket to the server socket.
+      if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
+        close(sockfd_);
+        sockfd_ = -1;
+      }
+    }
+  }
+
+  freeaddrinfo(servinfo);  // all done with this structure
+
+  if (sockfd_ == -1) {
+    GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
+                        << host_name_ << ":" << port_num_;
+  }
+}
+
+// End of class StreamingListener
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
 // Class ScopedTrace
 
 // Pushes the given source file location and message onto a per-thread
@@ -3770,6 +3957,25 @@ void UnitTestImpl::ConfigureXmlOutput() {
   }
 }
 
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in String form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+  const string& target = GTEST_FLAG(stream_result_to);
+  if (!target.empty()) {
+    const size_t pos = target.find(':');
+    if (pos != string::npos) {
+      listeners()->Append(new StreamingListener(target.substr(0, pos),
+                                                target.substr(pos+1)));
+    } else {
+      printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+             target.c_str());
+      fflush(stdout);
+    }
+  }
+}
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
 // Performs initialization dependent upon flag values obtained in
 // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
 // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
@@ -3793,6 +3999,11 @@ void UnitTestImpl::PostFlagParsingInit() {
     // Configures listeners for XML output. This makes it possible for users
     // to shut down the default XML output before invoking RUN_ALL_TESTS.
     ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+    // Configures listeners for streaming test results to the specified server.
+    ConfigureStreamingOutput();
+#endif  // GTEST_CAN_STREAM_RESULTS_
   }
 }
 
@@ -4471,6 +4682,10 @@ static const char kColorEncodedHelpMessage[] =
     GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
 "      Generate an XML report in the given directory or with the given file\n"
 "      name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+#if GTEST_CAN_STREAM_RESULTS_
+"  @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
+"      Stream test results to the given server.\n"
+#endif  // GTEST_CAN_STREAM_RESULTS_
 "\n"
 "Assertion Behavior:\n"
 #if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
@@ -4481,10 +4696,8 @@ static const char kColorEncodedHelpMessage[] =
 "      Turn assertion failures into debugger break-points.\n"
 "  @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
 "      Turn assertion failures into C++ exceptions.\n"
-#if GTEST_OS_WINDOWS
 "  @G--" GTEST_FLAG_PREFIX_ "catch_exceptions@D\n"
 "      Suppress pop-ups caused by exceptions.\n"
-#endif  // GTEST_OS_WINDOWS
 "\n"
 "Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
 "the corresponding\n"
@@ -4534,7 +4747,10 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
         ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
         ParseInt32Flag(arg, kStackTraceDepthFlag,
                        &GTEST_FLAG(stack_trace_depth)) ||
-        ParseBoolFlag(arg, kThrowOnFailureFlag, &GTEST_FLAG(throw_on_failure))
+        ParseStringFlag(arg, kStreamResultToFlag,
+                        &GTEST_FLAG(stream_result_to)) ||
+        ParseBoolFlag(arg, kThrowOnFailureFlag,
+                      &GTEST_FLAG(throw_on_failure))
         ) {
       // Yes. Shift the remainder of the argv list left by one. Note
       // that argv has (*argc + 1) elements, the last one always being
diff --git a/test/gtest_help_test.py b/test/gtest_help_test.py
index dc67ed3..0777106 100755
--- a/test/gtest_help_test.py
+++ b/test/gtest_help_test.py
@@ -44,12 +44,13 @@ import re
 import gtest_test_utils
 
+IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
 IS_WINDOWS = os.name == 'nt'
 
 PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
 FLAG_PREFIX = '--gtest_'
-CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions'
 DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
+STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
 UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
 LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
 INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
@@ -72,7 +73,8 @@ HELP_REGEX = re.compile(
     FLAG_PREFIX + r'print_time.*' +
     FLAG_PREFIX + r'output=.*' +
     FLAG_PREFIX + r'break_on_failure.*' +
-    FLAG_PREFIX + r'throw_on_failure.*',
+    FLAG_PREFIX + r'throw_on_failure.*' +
+    FLAG_PREFIX + r'catch_exceptions.*',
     re.DOTALL)
 
@@ -109,10 +111,11 @@ class GTestHelpTest(gtest_test_utils.TestCase):
     exit_code, output = RunWithFlag(flag)
     self.assertEquals(0, exit_code)
     self.assert_(HELP_REGEX.search(output), output)
-    if IS_WINDOWS:
-      self.assert_(CATCH_EXCEPTIONS_FLAG in output, output)
+
+    if IS_LINUX:
+      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
     else:
-      self.assert_(CATCH_EXCEPTIONS_FLAG not in output, output)
+      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
 
     if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
       self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
diff --git a/test/gtest_output_test.py b/test/gtest_output_test.py
index 425d9da..f409e2a 100755
--- a/test/gtest_output_test.py
+++ b/test/gtest_output_test.py
@@ -52,10 +52,8 @@ CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
 
 IS_WINDOWS = os.name == 'nt'
 
-if IS_WINDOWS:
-  GOLDEN_NAME = 'gtest_output_test_golden_win.txt'
-else:
-  GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
+# TODO(vladl@google.com): remove the _lin suffix.
+GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
 
 PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
 
@@ -138,6 +136,20 @@ def RemoveTypeInfoDetails(test_output):
   return re.sub(r'unsigned int', 'unsigned', test_output)
 
 
+def NormalizeToCurrentPlatform(test_output):
+  """Normalizes platform specific output details for easier comparison."""
+
+  if IS_WINDOWS:
+    # Removes the color information that is not present on Windows.
+    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
+    # Changes failure message headers into the Windows format.
+    test_output = re.sub(r': Failure\n', r': error: ', test_output)
+    # Changes file(line_number) to file:line_number.
+    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
+
+  return test_output
+
+
 def RemoveTestCounts(output):
   """Removes test counts from a Google Test program's output."""
 
@@ -240,7 +252,7 @@ SUPPORTS_STACK_TRACES = False
 
 CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                             SUPPORTS_TYPED_TESTS and
-                            (SUPPORTS_THREADS or IS_WINDOWS))
+                            SUPPORTS_THREADS)
 
 class GTestOutputTest(gtest_test_utils.TestCase):
@@ -284,9 +296,10 @@ class GTestOutputTest(gtest_test_utils.TestCase):
     if CAN_GENERATE_GOLDEN_FILE:
       self.assertEqual(normalized_golden, normalized_actual)
     else:
-      normalized_actual = RemoveTestCounts(normalized_actual)
-      normalized_golden = RemoveTestCounts(self.RemoveUnsupportedTests(
-          normalized_golden))
+      normalized_actual = NormalizeToCurrentPlatform(
+          RemoveTestCounts(normalized_actual))
+      normalized_golden = NormalizeToCurrentPlatform(
+          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
 
       # This code is very handy when debugging golden file differences:
       if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
@@ -312,14 +325,9 @@ if __name__ == '__main__':
   else:
     message = (
         """Unable to write a golden file when compiled in an environment
-that does not support all the required features (death tests""")
-    if IS_WINDOWS:
-      message += (
-          """\nand typed tests). Please check that you are using VC++ 8.0 SP1
-or higher as your compiler.""")
-    else:
-      message += """\ntyped tests, and threads). Please generate the
-golden file using a binary built with those features enabled."""
+that does not support all the required features (death tests, typed tests,
+and multiple threads). Please generate the golden file using a binary built
+with those features enabled.""")
 
     sys.stderr.write(message)
     sys.exit(1)
diff --git a/test/gtest_output_test_golden_win.txt b/test/gtest_output_test_golden_win.txt
deleted file mode 100644
index 8251c23..0000000
--- a/test/gtest_output_test_golden_win.txt
+++ /dev/null
@@ -1,577 +0,0 @@
-The non-test part of the code is expected to have 2 failures.
-
-gtest_output_test_.cc:#: error: Value of: false
-  Actual: false
-Expected: true
-gtest_output_test_.cc:#: error: Value of: 3
-Expected: 2
-[==========] Running 58 tests from 25 test cases.
-[----------] Global test environment set-up.
-FooEnvironment::SetUp() called.
-BarEnvironment::SetUp() called.
-[----------] 1 test from ADeathTest
-[ RUN      ] ADeathTest.ShouldRunFirst
-[       OK ] ADeathTest.ShouldRunFirst
-[----------] 1 test from ATypedDeathTest/0, where TypeParam = int
-[ RUN      ] ATypedDeathTest/0.ShouldRunFirst
-[       OK ] ATypedDeathTest/0.ShouldRunFirst
-[----------] 1 test from ATypedDeathTest/1, where TypeParam = double
-[ RUN      ] ATypedDeathTest/1.ShouldRunFirst
-[       OK ] ATypedDeathTest/1.ShouldRunFirst
-[----------] 1 test from My/ATypeParamDeathTest/0, where TypeParam = int
-[ RUN      ] My/ATypeParamDeathTest/0.ShouldRunFirst
-[       OK ] My/ATypeParamDeathTest/0.ShouldRunFirst
-[----------] 1 test from My/ATypeParamDeathTest/1, where TypeParam = double
-[ RUN      ] My/ATypeParamDeathTest/1.ShouldRunFirst
-[       OK ] My/ATypeParamDeathTest/1.ShouldRunFirst
-[----------] 2 tests from PassingTest
-[ RUN      ] PassingTest.PassingTest1
-[       OK ] PassingTest.PassingTest1
-[ RUN      ] PassingTest.PassingTest2
-[       OK ] PassingTest.PassingTest2
-[----------] 3 tests from FatalFailureTest
-[ RUN      ] FatalFailureTest.FatalFailureInSubroutine
-(expecting a failure that x should be 1)
-gtest_output_test_.cc:#: error: Value of: x
-  Actual: 2
-Expected: 1
-[  FAILED  ] FatalFailureTest.FatalFailureInSubroutine
-[ RUN      ] FatalFailureTest.FatalFailureInNestedSubroutine
-(expecting a failure that x should be 1)
-gtest_output_test_.cc:#: error: Value of: x
-  Actual: 2
-Expected: 1
-[  FAILED  ] FatalFailureTest.FatalFailureInNestedSubroutine
-[ RUN      ] FatalFailureTest.NonfatalFailureInSubroutine
-(expecting a failure on false)
-gtest_output_test_.cc:#: error: Value of: false
-  Actual: false
-Expected: true
-[  FAILED  ] FatalFailureTest.NonfatalFailureInSubroutine
-[----------] 1 test from LoggingTest
-[ RUN      ] LoggingTest.InterleavingLoggingAndAssertions
-(expecting 2 failures on (3) >= (a[i]))
-i == 0
-i == 1
-gtest_output_test_.cc:#: error: Expected: (3) >= (a[i]), actual: 3 vs 9
-i == 2
-i == 3
-gtest_output_test_.cc:#: error: Expected: (3) >= (a[i]), actual: 3 vs 6
-[  FAILED  ] LoggingTest.InterleavingLoggingAndAssertions
-[----------] 5 tests from SCOPED_TRACETest
-[ RUN      ] SCOPED_TRACETest.ObeysScopes
-(expected to fail)
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and shouldn't have a trace.
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and should have a trace.
-Google Test trace:
-gtest_output_test_.cc:#: Expected trace
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and shouldn't have a trace.
-[  FAILED  ] SCOPED_TRACETest.ObeysScopes
-[ RUN      ] SCOPED_TRACETest.WorksInLoop
-(expected to fail)
-gtest_output_test_.cc:#: error: Value of: n
-  Actual: 1
-Expected: 2
-Google Test trace:
-gtest_output_test_.cc:#: i = 1
-gtest_output_test_.cc:#: error: Value of: n
-  Actual: 2
-Expected: 1
-Google Test trace:
-gtest_output_test_.cc:#: i = 2
-[  FAILED  ] SCOPED_TRACETest.WorksInLoop
-[ RUN      ] SCOPED_TRACETest.WorksInSubroutine
-(expected to fail)
-gtest_output_test_.cc:#: error: Value of: n
-  Actual: 1
-Expected: 2
-Google Test trace:
-gtest_output_test_.cc:#: n = 1
-gtest_output_test_.cc:#: error: Value of: n
-  Actual: 2
-Expected: 1
-Google Test trace:
-gtest_output_test_.cc:#: n = 2
-[  FAILED  ] SCOPED_TRACETest.WorksInSubroutine
-[ RUN      ] SCOPED_TRACETest.CanBeNested
-(expected to fail)
-gtest_output_test_.cc:#: error: Value of: n
-  Actual: 2
-Expected: 1
-Google Test trace:
-gtest_output_test_.cc:#: n = 2
-gtest_output_test_.cc:#:
-[  FAILED  ] SCOPED_TRACETest.CanBeNested
-[ RUN      ] SCOPED_TRACETest.CanBeRepeated
-(expected to fail)
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and should contain trace point A.
-Google Test trace:
-gtest_output_test_.cc:#: A
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and should contain trace point A and B.
-Google Test trace:
-gtest_output_test_.cc:#: B
-gtest_output_test_.cc:#: A
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and should contain trace point A, B, and C.
-Google Test trace:
-gtest_output_test_.cc:#: C
-gtest_output_test_.cc:#: B
-gtest_output_test_.cc:#: A
-gtest_output_test_.cc:#: error: Failed
-This failure is expected, and should contain trace point A, B, and D.
-Google Test trace:
-gtest_output_test_.cc:#: D
-gtest_output_test_.cc:#: B
-gtest_output_test_.cc:#: A
-[  FAILED  ] SCOPED_TRACETest.CanBeRepeated
-[----------] 1 test from NonFatalFailureInFixtureConstructorTest
-[ RUN      ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor
-(expecting 5 failures)
-gtest_output_test_.cc:#: error: Failed
-Expected failure #1, in the test fixture c'tor.
-gtest_output_test_.cc:#: error: Failed
-Expected failure #2, in SetUp().
-gtest_output_test_.cc:#: error: Failed
-Expected failure #3, in the test body.
-gtest_output_test_.cc:#: error: Failed
-Expected failure #4, in TearDown.
-gtest_output_test_.cc:#: error: Failed
-Expected failure #5, in the test fixture d'tor.
-[  FAILED  ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor
-[----------] 1 test from FatalFailureInFixtureConstructorTest
-[ RUN      ] FatalFailureInFixtureConstructorTest.FailureInConstructor
-(expecting 2 failures)
-gtest_output_test_.cc:#: error: Failed
-Expected failure #1, in the test fixture c'tor.
-gtest_output_test_.cc:#: error: Failed
-Expected failure #2, in the test fixture d'tor.
-[  FAILED  ] FatalFailureInFixtureConstructorTest.FailureInConstructor
-[----------] 1 test from NonFatalFailureInSetUpTest
-[ RUN      ] NonFatalFailureInSetUpTest.FailureInSetUp
-(expecting 4 failures)
-gtest_output_test_.cc:#: error: Failed
-Expected failure #1, in SetUp().
-gtest_output_test_.cc:#: error: Failed
-Expected failure #2, in the test function.
-gtest_output_test_.cc:#: error: Failed
-Expected failure #3, in TearDown().
-gtest_output_test_.cc:#: error: Failed
-Expected failure #4, in the test fixture d'tor.
-[  FAILED  ] NonFatalFailureInSetUpTest.FailureInSetUp
-[----------] 1 test from FatalFailureInSetUpTest
-[ RUN      ] FatalFailureInSetUpTest.FailureInSetUp
-(expecting 3 failures)
-gtest_output_test_.cc:#: error: Failed
-Expected failure #1, in SetUp().
-gtest_output_test_.cc:#: error: Failed
-Expected failure #2, in TearDown().
-gtest_output_test_.cc:#: error: Failed
-Expected failure #3, in the test fixture d'tor.
-[  FAILED  ] FatalFailureInSetUpTest.FailureInSetUp
-[----------] 1 test from AddFailureAtTest
-[ RUN      ] AddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber
-foo.cc(42): error: Failed
-Expected failure in foo.cc
-[  FAILED  ] AddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber
-[----------] 4 tests from MixedUpTestCaseTest
-[ RUN      ] MixedUpTestCaseTest.FirstTestFromNamespaceFoo
-[       OK ] MixedUpTestCaseTest.FirstTestFromNamespaceFoo
-[ RUN      ] MixedUpTestCaseTest.SecondTestFromNamespaceFoo
-[       OK ] MixedUpTestCaseTest.SecondTestFromNamespaceFoo
-[ RUN      ] MixedUpTestCaseTest.ThisShouldFail
-gtest.cc:#: error: Failed
-All tests in the same test case must use the same test fixture
-class. However, in test case MixedUpTestCaseTest,
-you defined test FirstTestFromNamespaceFoo and test ThisShouldFail
-using two different test fixture classes. This can happen if
-the two classes are from different namespaces or translation
-units and have the same name. You should probably rename one
-of the classes to put the tests into different test cases.
-[  FAILED  ] MixedUpTestCaseTest.ThisShouldFail
-[ RUN      ] MixedUpTestCaseTest.ThisShouldFailToo
-gtest.cc:#: error: Failed
-All tests in the same test case must use the same test fixture
-class. However, in test case MixedUpTestCaseTest,
-you defined test FirstTestFromNamespaceFoo and test ThisShouldFailToo
-using two different test fixture classes. This can happen if
-the two classes are from different namespaces or translation
-units and have the same name. You should probably rename one
-of the classes to put the tests into different test cases.
-[  FAILED  ] MixedUpTestCaseTest.ThisShouldFailToo
-[----------] 2 tests from MixedUpTestCaseWithSameTestNameTest
-[ RUN      ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail
-[       OK ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail
-[ RUN      ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail
-gtest.cc:#: error: Failed
-All tests in the same test case must use the same test fixture
-class. However, in test case MixedUpTestCaseWithSameTestNameTest,
-you defined test TheSecondTestWithThisNameShouldFail and test TheSecondTestWithThisNameShouldFail
-using two different test fixture classes. This can happen if
-the two classes are from different namespaces or translation
-units and have the same name. You should probably rename one
-of the classes to put the tests into different test cases.
-[  FAILED  ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail
-[----------] 2 tests from TEST_F_before_TEST_in_same_test_case
-[ RUN      ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTEST_F
-[       OK ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTEST_F
-[ RUN      ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail
-gtest.cc:#: error: Failed
-All tests in the same test case must use the same test fixture
-class, so mixing TEST_F and TEST in the same test case is
-illegal. In test case TEST_F_before_TEST_in_same_test_case,
-test DefinedUsingTEST_F is defined using TEST_F but
-test DefinedUsingTESTAndShouldFail is defined using TEST. You probably
-want to change the TEST to TEST_F or move it to another test
-case.
-[  FAILED  ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail
-[----------] 2 tests from TEST_before_TEST_F_in_same_test_case
-[ RUN      ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST
-[       OK ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST
-[ RUN      ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail
-gtest.cc:#: error: Failed
-All tests in the same test case must use the same test fixture
-class, so mixing TEST_F and TEST in the same test case is
-illegal. In test case TEST_before_TEST_F_in_same_test_case,
-test DefinedUsingTEST_FAndShouldFail is defined using TEST_F but
-test DefinedUsingTEST is defined using TEST. You probably
-want to change the TEST to TEST_F or move it to another test
-case.
-[  FAILED  ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail
-[----------] 8 tests from ExpectNonfatalFailureTest
-[ RUN      ] ExpectNonfatalFailureTest.CanReferenceGlobalVariables
-[       OK ] ExpectNonfatalFailureTest.CanReferenceGlobalVariables
-[ RUN      ] ExpectNonfatalFailureTest.CanReferenceLocalVariables
-[       OK ] ExpectNonfatalFailureTest.CanReferenceLocalVariables
-[ RUN      ] ExpectNonfatalFailureTest.SucceedsWhenThereIsOneNonfatalFailure
-[       OK ] ExpectNonfatalFailureTest.SucceedsWhenThereIsOneNonfatalFailure
-[ RUN      ] ExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual: 0 failures
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure
-[ RUN      ] ExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual: 2 failures
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure 1.
-
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure 2.
-
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures
-[ RUN      ] ExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual:
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure
-[ RUN      ] ExpectNonfatalFailureTest.FailsWhenStatementReturns
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual: 0 failures
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenStatementReturns
-[ RUN      ] ExpectNonfatalFailureTest.FailsWhenStatementThrows
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual: 0 failures
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenStatementThrows
-[----------] 8 tests from ExpectFatalFailureTest
-[ RUN      ] ExpectFatalFailureTest.CanReferenceGlobalVariables
-[       OK ] ExpectFatalFailureTest.CanReferenceGlobalVariables
-[ RUN      ] ExpectFatalFailureTest.CanReferenceLocalStaticVariables
-[       OK ] ExpectFatalFailureTest.CanReferenceLocalStaticVariables
-[ RUN      ] ExpectFatalFailureTest.SucceedsWhenThereIsOneFatalFailure
-[       OK ] ExpectFatalFailureTest.SucceedsWhenThereIsOneFatalFailure
-[ RUN      ] ExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual: 0 failures
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure
-[ RUN      ] ExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual: 2 failures
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures
-[ RUN      ] ExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual:
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure.
-
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure
-[ RUN      ] ExpectFatalFailureTest.FailsWhenStatementReturns
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual: 0 failures
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenStatementReturns
-[ RUN      ] ExpectFatalFailureTest.FailsWhenStatementThrows
-(expecting a failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual: 0 failures
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenStatementThrows
-[----------] 2 tests from TypedTest/0, where TypeParam = int
-[ RUN      ] TypedTest/0.Success
-[       OK ] TypedTest/0.Success
-[ RUN      ] TypedTest/0.Failure
-gtest_output_test_.cc:#: error: Value of: TypeParam()
-  Actual: 0
-Expected: 1
-Expected failure
-[  FAILED  ] TypedTest/0.Failure, where TypeParam = int
-[----------] 2 tests from Unsigned/TypedTestP/0, where TypeParam = unsigned char
-[ RUN      ] Unsigned/TypedTestP/0.Success
-[       OK ] Unsigned/TypedTestP/0.Success
-[ RUN      ] Unsigned/TypedTestP/0.Failure
-gtest_output_test_.cc:#: error: Value of: TypeParam()
-  Actual: '\0'
-Expected: 1U
-Which is: 1
-Expected failure
-[  FAILED  ] Unsigned/TypedTestP/0.Failure, where TypeParam = unsigned char
-[----------] 2 tests from Unsigned/TypedTestP/1, where TypeParam = unsigned int
-[ RUN      ] Unsigned/TypedTestP/1.Success
-[       OK ] Unsigned/TypedTestP/1.Success
-[ RUN      ] Unsigned/TypedTestP/1.Failure
-gtest_output_test_.cc:#: error: Value of: TypeParam()
-  Actual: 0
-Expected: 1U
-Which is: 1
-Expected failure
-[  FAILED  ] Unsigned/TypedTestP/1.Failure, where TypeParam = unsigned int
-[----------] 4 tests from ExpectFailureTest
-[ RUN      ] ExpectFailureTest.ExpectFatalFailure
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual:
-gtest_output_test_.cc:#: Success:
-Succeeded
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual:
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure.
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 fatal failure containing "Some other fatal failure expected."
-  Actual:
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-[  FAILED  ] ExpectFailureTest.ExpectFatalFailure
-[ RUN      ] ExpectFailureTest.ExpectNonFatalFailure
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual:
-gtest_output_test_.cc:#: Success:
-Succeeded
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual:
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure containing "Some other non-fatal failure."
-  Actual:
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure.
-
-[  FAILED  ] ExpectFailureTest.ExpectNonFatalFailure
-[ RUN      ] ExpectFailureTest.ExpectFatalFailureOnAllThreads
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual:
-gtest_output_test_.cc:#: Success:
-Succeeded
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 fatal failure
-  Actual:
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure.
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 fatal failure containing "Some other fatal failure expected."
-  Actual:
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-[  FAILED  ] ExpectFailureTest.ExpectFatalFailureOnAllThreads
-[ RUN      ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual:
-gtest_output_test_.cc:#: Success:
-Succeeded
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure
-  Actual:
-gtest_output_test_.cc:#: Fatal failure:
-Failed
-Expected fatal failure.
-
-(expecting 1 failure)
-gtest.cc:#: error: Expected: 1 non-fatal failure containing "Some other non-fatal failure."
-  Actual:
-gtest_output_test_.cc:#: Non-fatal failure:
-Failed
-Expected non-fatal failure.
-
-[  FAILED  ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads
-[----------] 1 test from PrintingFailingParams/FailingParamTest
-[ RUN      ] PrintingFailingParams/FailingParamTest.Fails/0
-gtest_output_test_.cc:#: error: Value of: GetParam()
-  Actual: 2
-Expected: 1
-[  FAILED  ] PrintingFailingParams/FailingParamTest.Fails/0, where GetParam() = 2
-[----------] Global test environment tear-down
-BarEnvironment::TearDown() called.
-gtest_output_test_.cc:#: error: Failed
-Expected non-fatal failure.
-FooEnvironment::TearDown() called.
-gtest_output_test_.cc:#: error: Failed
-Expected fatal failure.
-[==========] 58 tests from 25 test cases ran.
-[  PASSED  ] 21 tests.
-[  FAILED  ] 37 tests, listed below:
-[  FAILED  ] FatalFailureTest.FatalFailureInSubroutine
-[  FAILED  ] FatalFailureTest.FatalFailureInNestedSubroutine
-[  FAILED  ] FatalFailureTest.NonfatalFailureInSubroutine
-[  FAILED  ] LoggingTest.InterleavingLoggingAndAssertions
-[  FAILED  ] SCOPED_TRACETest.ObeysScopes
-[  FAILED  ] SCOPED_TRACETest.WorksInLoop
-[  FAILED  ] SCOPED_TRACETest.WorksInSubroutine
-[  FAILED  ] SCOPED_TRACETest.CanBeNested
-[  FAILED  ] SCOPED_TRACETest.CanBeRepeated
-[  FAILED  ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor
-[  FAILED  ] FatalFailureInFixtureConstructorTest.FailureInConstructor
-[  FAILED  ] NonFatalFailureInSetUpTest.FailureInSetUp
-[  FAILED  ] FatalFailureInSetUpTest.FailureInSetUp
-[  FAILED  ] AddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber
-[  FAILED  ] MixedUpTestCaseTest.ThisShouldFail
-[  FAILED  ] MixedUpTestCaseTest.ThisShouldFailToo
-[  FAILED  ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail
-[  FAILED  ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail
-[  FAILED  ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenStatementReturns
-[  FAILED  ] ExpectNonfatalFailureTest.FailsWhenStatementThrows
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenStatementReturns
-[  FAILED  ] ExpectFatalFailureTest.FailsWhenStatementThrows
-[  FAILED  ] TypedTest/0.Failure, where TypeParam = int
-[  FAILED  ] Unsigned/TypedTestP/0.Failure, where TypeParam = unsigned char
-[  FAILED  ] Unsigned/TypedTestP/1.Failure, where TypeParam = unsigned int
-[  FAILED  ] ExpectFailureTest.ExpectFatalFailure
-[  FAILED  ] ExpectFailureTest.ExpectNonFatalFailure
-[  FAILED  ] ExpectFailureTest.ExpectFatalFailureOnAllThreads
-[  FAILED  ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads
-[  FAILED  ] PrintingFailingParams/FailingParamTest.Fails/0, where GetParam() = 2
-
-37 FAILED TESTS
-  YOU HAVE 1 DISABLED TEST
-
-Note: Google Test filter = FatalFailureTest.*:LoggingTest.*
-[==========] Running 4 tests from 2 test cases.
-[----------] Global test environment set-up.
-[----------] 3 tests from FatalFailureTest
-[ RUN      ] FatalFailureTest.FatalFailureInSubroutine
-(expecting a failure that x should be 1)
-gtest_output_test_.cc:#: error: Value of: x
-  Actual: 2
-Expected: 1
-[  FAILED  ] FatalFailureTest.FatalFailureInSubroutine (? ms)
-[ RUN      ] FatalFailureTest.FatalFailureInNestedSubroutine
-(expecting a failure that x should be 1)
-gtest_output_test_.cc:#: error: Value of: x
-  Actual: 2
-Expected: 1
-[  FAILED  ] FatalFailureTest.FatalFailureInNestedSubroutine (? ms)
-[ RUN      ] FatalFailureTest.NonfatalFailureInSubroutine
-(expecting a failure on false)
-gtest_output_test_.cc:#: error: Value of: false
-  Actual: false
-Expected: true
-[  FAILED  ] FatalFailureTest.NonfatalFailureInSubroutine (? ms)
-[----------] 3 tests from FatalFailureTest (? ms total)
-
-[----------] 1 test from LoggingTest
-[ RUN      ] LoggingTest.InterleavingLoggingAndAssertions
-(expecting 2 failures on (3) >= (a[i]))
-i == 0
-i == 1
-gtest_output_test_.cc:#: error: Expected: (3) >= (a[i]), actual: 3 vs 9
-i == 2
-i == 3
-gtest_output_test_.cc:#: error: Expected: (3) >= (a[i]), actual: 3 vs 6
-[  FAILED  ] LoggingTest.InterleavingLoggingAndAssertions (? ms)
-[----------] 1 test from LoggingTest (? ms total)
-
-[----------] Global test environment tear-down
-[==========] 4 tests from 2 test cases ran. (? ms total)
-[  PASSED  ] 0 tests.
-[  FAILED  ] 4 tests, listed below:
-[  FAILED  ] FatalFailureTest.FatalFailureInSubroutine
-[  FAILED  ] FatalFailureTest.FatalFailureInNestedSubroutine
-[  FAILED  ] FatalFailureTest.NonfatalFailureInSubroutine
-[  FAILED  ] LoggingTest.InterleavingLoggingAndAssertions
-
- 4 FAILED TESTS
-  YOU HAVE 1 DISABLED TEST
-
-Note: Google Test filter = *DISABLED_*
-[==========] Running 1 test from 1 test case.
-[----------] Global test environment set-up.
-[----------] 1 test from DisabledTestsWarningTest
-[ RUN      ] DisabledTestsWarningTest.DISABLED_AlsoRunDisabledTestsFlagSuppressesWarning
-[       OK ] DisabledTestsWarningTest.DISABLED_AlsoRunDisabledTestsFlagSuppressesWarning
-[----------] Global test environment tear-down
-[==========] 1 test from 1 test case ran.
-[  PASSED  ] 1 test.
-Note: Google Test filter = PassingTest.*
-Note: This is test shard 1 of 2.
-[==========] Running 1 test from 1 test case.
-[----------] Global test environment set-up.
-[----------] 1 test from PassingTest
-[ RUN      ] PassingTest.PassingTest2
-[       OK ] PassingTest.PassingTest2
-[----------] Global test environment tear-down
-[==========] 1 test from 1 test case ran.
-[  PASSED  ] 1 test.
-
-  YOU HAVE 1 DISABLED TEST
-
diff --git a/test/gtest_unittest.cc b/test/gtest_unittest.cc
index 9b95b13..d993c04 100644
--- a/test/gtest_unittest.cc
+++ b/test/gtest_unittest.cc
@@ -52,6 +52,7 @@ TEST(CommandLineFlagsTest, CanBeAccessedInCodeOnceGTestHIsIncluded) {
       || testing::GTEST_FLAG(show_internal_stack_frames)
       || testing::GTEST_FLAG(shuffle)
       || testing::GTEST_FLAG(stack_trace_depth) > 0
+      || testing::GTEST_FLAG(stream_result_to) != "unknown"
       || testing::GTEST_FLAG(throw_on_failure);
   EXPECT_TRUE(dummy || !dummy);  // Suppresses warning that dummy is unused.
 }
@@ -125,6 +126,7 @@ using testing::GTEST_FLAG(repeat);
 using testing::GTEST_FLAG(show_internal_stack_frames);
 using testing::GTEST_FLAG(shuffle);
 using testing::GTEST_FLAG(stack_trace_depth);
+using testing::GTEST_FLAG(stream_result_to);
 using testing::GTEST_FLAG(throw_on_failure);
 using testing::IsNotSubstring;
 using testing::IsSubstring;
@@ -1718,6 +1720,7 @@ class GTestFlagSaverTest : public Test {
     GTEST_FLAG(repeat) = 1;
     GTEST_FLAG(shuffle) = false;
     GTEST_FLAG(stack_trace_depth) = kMaxStackTraceDepth;
+    GTEST_FLAG(stream_result_to) = "";
     GTEST_FLAG(throw_on_failure) = false;
   }
 
@@ -1744,6 +1747,7 @@ class GTestFlagSaverTest : public Test {
     EXPECT_EQ(1, GTEST_FLAG(repeat));
     EXPECT_FALSE(GTEST_FLAG(shuffle));
     EXPECT_EQ(kMaxStackTraceDepth, GTEST_FLAG(stack_trace_depth));
+    EXPECT_STREQ("", GTEST_FLAG(stream_result_to).c_str());
     EXPECT_FALSE(GTEST_FLAG(throw_on_failure));
 
     GTEST_FLAG(also_run_disabled_tests) = true;
@@ -1759,6 +1763,7 @@ class GTestFlagSaverTest : public Test {
     GTEST_FLAG(repeat) = 100;
     GTEST_FLAG(shuffle) = true;
     GTEST_FLAG(stack_trace_depth) = 1;
+    GTEST_FLAG(stream_result_to) = "localhost:1234";
     GTEST_FLAG(throw_on_failure) = true;
   }
 private:
@@ -5142,6 +5147,7 @@ struct Flags {
       repeat(1),
       shuffle(false),
       stack_trace_depth(kMaxStackTraceDepth),
+      stream_result_to(""),
       throw_on_failure(false) {}
 
   // Factory methods.
@@ -5242,6 +5248,14 @@ struct Flags {
     return flags;
   }
 
+  // Creates a Flags struct where the GTEST_FLAG(stream_result_to) flag has
+  // the given value.
+  static Flags StreamResultTo(const char* stream_result_to) {
+    Flags flags;
+    flags.stream_result_to = stream_result_to;
+    return flags;
+  }
+
   // Creates a Flags struct where the gtest_throw_on_failure flag has
   // the given value.
   static Flags ThrowOnFailure(bool throw_on_failure) {
@@ -5263,6 +5277,7 @@ struct Flags {
   Int32 repeat;
   bool shuffle;
   Int32 stack_trace_depth;
+  const char* stream_result_to;
   bool throw_on_failure;
 };
 
@@ -5283,6 +5298,7 @@ class InitGoogleTestTest : public Test {
    GTEST_FLAG(repeat) = 1;
    GTEST_FLAG(shuffle) = false;
    GTEST_FLAG(stack_trace_depth) = kMaxStackTraceDepth;
+   GTEST_FLAG(stream_result_to) = "";
    GTEST_FLAG(throw_on_failure) = false;
  }
 
@@ -5311,8 +5327,10 @@ class InitGoogleTestTest : public Test {
     EXPECT_EQ(expected.random_seed, GTEST_FLAG(random_seed));
     EXPECT_EQ(expected.repeat, GTEST_FLAG(repeat));
     EXPECT_EQ(expected.shuffle, GTEST_FLAG(shuffle));
-    EXPECT_EQ(expected.throw_on_failure, GTEST_FLAG(throw_on_failure));
     EXPECT_EQ(expected.stack_trace_depth, GTEST_FLAG(stack_trace_depth));
+    EXPECT_STREQ(expected.stream_result_to,
+                 GTEST_FLAG(stream_result_to).c_str());
+    EXPECT_EQ(expected.throw_on_failure, GTEST_FLAG(throw_on_failure));
   }
 
   // Parses a command line (specified by argc1 and argv1), then
@@ -5973,6 +5991,22 @@ TEST_F(InitGoogleTestTest, StackTraceDepth) {
   GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::StackTraceDepth(5), false);
 }
 
+TEST_F(InitGoogleTestTest, StreamResultTo) {
+  const char* argv[] = {
+    "foo.exe",
+    "--gtest_stream_result_to=localhost:1234",
+    NULL
+  };
+
+  const char* argv2[] = {
+    "foo.exe",
+    NULL
+  };
+
+  GTEST_TEST_PARSING_FLAGS_(
+      argv, argv2, Flags::StreamResultTo("localhost:1234"), false);
+}
+
 // Tests parsing --gtest_throw_on_failure.
 TEST_F(InitGoogleTestTest, ThrowOnFailureWithoutValue) {
   const char* argv[] = {
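Since StreamingListener::UrlEncode above escapes only '=', '&', '%', and '\n' (as lowercase "%xx"), a receiver can split an event line on '&', then on the first '=', before unescaping each value. A sketch of the inverse transform follows; the helper names are hypothetical, not part of Google Test.

  import re

  def unescape(value):
    # Reverses UrlEncode: "%3d" -> "=", "%0a" -> "\n", "%26" -> "&", "%25" -> "%".
    return re.sub(r'%([0-9a-fA-F]{2})',
                  lambda m: chr(int(m.group(1), 16)), value)

  def parse_event(line):
    # "event=TestEnd&passed=1&elapsed_time=0ms" -> dict of attributes.
    attrs = {}
    for pair in line.split('&'):
      key, _, value = pair.partition('=')
      attrs[key] = unescape(value)
    return attrs

  print(parse_event('event=TestPartResult&file=foo.cc&line=42&message=x %3d 1'))

Splitting before unescaping is what makes the encoding unambiguous: a literal '&' or '=' inside a failure message arrives as %26 or %3d and cannot be mistaken for a field separator.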