Diffstat (limited to 'googletest')
-rw-r--r--  googletest/CMakeLists.txt | 2
-rw-r--r--  googletest/README.md | 6
-rw-r--r--  googletest/docs/advanced.md | 558
-rw-r--r--  googletest/docs/faq.md | 18
-rw-r--r--  googletest/docs/pkgconfig.md | 53
-rw-r--r--  googletest/docs/primer.md | 93
-rw-r--r--  googletest/docs/pump_manual.md | 169
-rw-r--r--  googletest/docs/samples.md | 6
-rw-r--r--  googletest/include/gtest/gtest-death-test.h | 4
-rw-r--r--  googletest/include/gtest/gtest-matchers.h | 8
-rw-r--r--  googletest/include/gtest/gtest-test-part.h | 10
-rw-r--r--  googletest/include/gtest/gtest.h | 71
-rw-r--r--  googletest/include/gtest/internal/gtest-filepath.h | 2
-rw-r--r--  googletest/include/gtest/internal/gtest-internal.h | 10
-rw-r--r--  googletest/include/gtest/internal/gtest-port.h | 24
-rw-r--r--  googletest/include/gtest/internal/gtest-string.h | 10
-rw-r--r--  googletest/include/gtest/internal/gtest-type-util.h | 2
-rw-r--r--  googletest/include/gtest/internal/gtest-type-util.h.pump | 2
-rw-r--r--  googletest/samples/prime_tables.h | 2
-rw-r--r--  googletest/samples/sample1.cc | 2
-rw-r--r--  googletest/samples/sample1.h | 2
-rw-r--r--  googletest/samples/sample9_unittest.cc | 8
-rw-r--r--  googletest/src/gtest-death-test.cc | 60
-rw-r--r--  googletest/src/gtest-internal-inl.h | 18
-rw-r--r--  googletest/src/gtest-port.cc | 24
-rw-r--r--  googletest/src/gtest.cc | 181
-rw-r--r--  googletest/test/googletest-death-test-test.cc | 6
-rw-r--r--  googletest/test/googletest-json-outfiles-test.py | 6
-rw-r--r--  googletest/test/googletest-json-output-unittest.py | 880
-rw-r--r--  googletest/test/googletest-listener-test.cc | 113
-rwxr-xr-x  googletest/test/googletest-throw-on-failure-test.py | 2
-rw-r--r--  googletest/test/gtest_environment_test.cc | 2
-rw-r--r--  googletest/test/gtest_pred_impl_unittest.cc | 20
-rw-r--r--  googletest/test/gtest_premature_exit_test.cc | 4
-rwxr-xr-x  googletest/test/gtest_test_utils.py | 4
-rw-r--r--  googletest/test/gtest_unittest.cc | 19
-rwxr-xr-x  googletest/test/gtest_xml_outfiles_test.py | 8
-rwxr-xr-x  googletest/test/gtest_xml_output_unittest.py | 92
-rwxr-xr-x  googletest/test/gtest_xml_test_utils.py | 2
39 files changed, 1357 insertions, 1146 deletions
diff --git a/googletest/CMakeLists.txt b/googletest/CMakeLists.txt
index 52a615d..db29294 100644
--- a/googletest/CMakeLists.txt
+++ b/googletest/CMakeLists.txt
@@ -188,7 +188,7 @@ if (gtest_build_tests)
"$project_bin = \"${CMAKE_BINARY_DIR}/bin/$<CONFIG>\"
$env:Path = \"$project_bin;$env:Path\"
& $args")
- elseif (MINGW)
+ elseif (MINGW OR CYGWIN)
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/RunTest.ps1"
CONTENT
"$project_bin = (cygpath --windows ${CMAKE_BINARY_DIR}/bin)
diff --git a/googletest/README.md b/googletest/README.md
index 6992f3c..766ddc1 100644
--- a/googletest/README.md
+++ b/googletest/README.md
@@ -174,9 +174,9 @@ We list the most frequently used macros below. For a complete list, see file
### Multi-threaded Tests
Google Test is thread-safe where the pthread library is available. After
-`#include "gtest/gtest.h"`, you can check the `GTEST_IS_THREADSAFE` macro to see
-whether this is the case (yes if the macro is `#defined` to 1, no if it's
-undefined.).
+`#include "gtest/gtest.h"`, you can check the
+`GTEST_IS_THREADSAFE` macro to see whether this is the case (yes if the macro is
+`#defined` to 1, no if it's undefined).
If Google Test doesn't correctly detect whether pthread is available in your
environment, you can force it with
diff --git a/googletest/docs/advanced.md b/googletest/docs/advanced.md
index 603777c..e05d317 100644
--- a/googletest/docs/advanced.md
+++ b/googletest/docs/advanced.md
@@ -1,6 +1,6 @@
# Advanced googletest Topics
-<!-- GOOGLETEST_CM0015 DO NOT DELETE -->
+<!-- GOOGLETEST_CM0016 DO NOT DELETE -->
## Introduction
@@ -57,8 +57,6 @@ switch(expression) {
NOTE: you can only use `FAIL()` in functions that return `void`. See the
[Assertion Placement section](#assertion-placement) for more information.
-**Availability**: Linux, Windows, Mac.
-
### Exception Assertions
These are for verifying that a piece of code throws (or does not throw) an
@@ -81,8 +79,7 @@ EXPECT_NO_THROW({
});
```
-**Availability**: Linux, Windows, Mac; requires exceptions to be enabled in the
-build environment (note that `google3` **disables** exceptions).
+**Availability**: requires exceptions to be enabled in the build environment.
### Predicate Assertions for Better Error Messages
@@ -104,14 +101,15 @@ If you already have a function or functor that returns `bool` (or a type that
can be implicitly converted to `bool`), you can use it in a *predicate
assertion* to get the function arguments printed for free:
-| Fatal assertion | Nonfatal assertion | Verifies |
-| -------------------- | -------------------- | --------------------------- |
-| `ASSERT_PRED1(pred1, | `EXPECT_PRED1(pred1, | `pred1(val1)` is true |
-: val1);` : val1);` : :
-| `ASSERT_PRED2(pred2, | `EXPECT_PRED2(pred2, | `pred2(val1, val2)` is true |
-: val1, val2);` : val1, val2);` : :
-| `...` | `...` | ... |
+<!-- mdformat off(github rendering does not support multiline tables) -->
+
+| Fatal assertion | Nonfatal assertion | Verifies |
+| --------------------------------- | --------------------------------- | --------------------------- |
+| `ASSERT_PRED1(pred1, val1)` | `EXPECT_PRED1(pred1, val1)` | `pred1(val1)` is true |
+| `ASSERT_PRED2(pred2, val1, val2)` | `EXPECT_PRED2(pred2, val1, val2)` | `pred2(val1, val2)` is true |
+| `...` | `...` | `...` |
+<!-- mdformat on-->
In the above, `predn` is an `n`-ary predicate function or functor, where `val1`,
`val2`, ..., and `valn` are its arguments. The assertion succeeds if the
predicate returns `true` when applied to the given arguments, and fails
@@ -153,11 +151,8 @@ c is 10
>
> 1. If you see a compiler error "no matching function to call" when using
> `ASSERT_PRED*` or `EXPECT_PRED*`, please see
-> [this](faq.md#the-compiler-complains-no-matching-function-to-call-when-i-use-assert_pred-how-do-i-fix-it) for how to resolve it.
-> 1. Currently we only provide predicate assertions of arity <= 5. If you need
-> a higher-arity assertion, let [us](https://github.com/google/googletest/issues) know.
-
-**Availability**: Linux, Windows, Mac.
+> [this](faq.md#the-compiler-complains-no-matching-function-to-call-when-i-use-assert-pred-how-do-i-fix-it)
+> for how to resolve it.
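
For illustration, a minimal sketch of a predicate assertion in use (editor's example, not part of this diff; `MutuallyPrime` follows the doc's own running example):

```c++
#include "gtest/gtest.h"

namespace {
int Gcd(int m, int n) { return n == 0 ? m : Gcd(n, m % n); }
bool MutuallyPrime(int m, int n) { return Gcd(m, n) == 1; }
}  // namespace

TEST(PredicateAssertionTest, Demo) {
  const int a = 3;
  const int b = 4;
  // On failure, the message names and prints both a and b.
  EXPECT_PRED2(MutuallyPrime, a, b);
}
```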
#### Using a Function That Returns an AssertionResult
@@ -246,8 +241,6 @@ Then the statement `EXPECT_FALSE(IsEven(Fib(6)))` will print
Expected: false
```
-**Availability**: Linux, Windows, Mac.
-
#### Using a Predicate-Formatter
If you find the default message generated by `(ASSERT|EXPECT)_PRED*` and
@@ -320,8 +313,6 @@ As you may have realized, many of the built-in assertions we introduced earlier
are special cases of `(EXPECT|ASSERT)_PRED_FORMAT*`. In fact, most of them are
indeed defined using `(EXPECT|ASSERT)_PRED_FORMAT*`.
-**Availability**: Linux, Windows, Mac.
-
### Floating-Point Comparison
Comparing floating-point numbers is tricky. Due to round-off errors, it is very
@@ -340,25 +331,26 @@ want to learn more, see
#### Floating-Point Macros
-| Fatal assertion | Nonfatal assertion | Verifies |
-| ----------------------- | ----------------------- | ----------------------- |
-| `ASSERT_FLOAT_EQ(val1, | `EXPECT_FLOAT_EQ(val1, | the two `float` values |
-: val2);` : val2);` : are almost equal :
-| `ASSERT_DOUBLE_EQ(val1, | `EXPECT_DOUBLE_EQ(val1, | the two `double` values |
-: val2);` : val2);` : are almost equal :
+<!-- mdformat off(github rendering does not support multiline tables) -->
+
+| Fatal assertion | Nonfatal assertion | Verifies |
+| ------------------------------- | ------------------------------- | ---------------------------------------- |
+| `ASSERT_FLOAT_EQ(val1, val2);` | `EXPECT_FLOAT_EQ(val1, val2);` | the two `float` values are almost equal |
+| `ASSERT_DOUBLE_EQ(val1, val2);` | `EXPECT_DOUBLE_EQ(val1, val2);` | the two `double` values are almost equal |
+
+<!-- mdformat on-->
By "almost equal" we mean the values are within 4 ULP's from each other.
The following assertions allow you to choose the acceptable error bound:
-| Fatal assertion | Nonfatal assertion | Verifies |
-| ------------------ | ------------------------ | ------------------------- |
-| `ASSERT_NEAR(val1, | `EXPECT_NEAR(val1, val2, | the difference between |
-: val2, abs_error);` : abs_error);` : `val1` and `val2` doesn't :
-: : : exceed the given absolute :
-: : : error :
+<!-- mdformat off(github rendering does not support multiline tables) -->
+
+| Fatal assertion | Nonfatal assertion | Verifies |
+| ------------------------------------- | ------------------------------------- | -------------------------------------------------------------------------------- |
+| `ASSERT_NEAR(val1, val2, abs_error);` | `EXPECT_NEAR(val1, val2, abs_error);` | the difference between `val1` and `val2` doesn't exceed the given absolute error |
-**Availability**: Linux, Windows, Mac.
+<!-- mdformat on-->
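
A short sketch contrasting the two styles above (editor's example, not part of this diff):

```c++
#include "gtest/gtest.h"

TEST(FloatingPointTest, Demo) {
  const double sum = 0.1 + 0.2;  // Not exactly 0.3 in binary floating point.
  EXPECT_DOUBLE_EQ(sum, 0.3);    // "Almost equal": within 4 ULPs of 0.3.
  EXPECT_NEAR(sum, 0.3, 1e-9);   // Explicit bound: |sum - 0.3| <= 1e-9.
}
```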
#### Floating-Point Predicate-Format Functions
@@ -375,19 +367,20 @@ EXPECT_PRED_FORMAT2(::testing::DoubleLE, val1, val2);
Verifies that `val1` is less than, or almost equal to, `val2`. You can replace
`EXPECT_PRED_FORMAT2` in the above table with `ASSERT_PRED_FORMAT2`.
-**Availability**: Linux, Windows, Mac.
-
### Asserting Using gMock Matchers
-Google-developed C++ mocking framework [gMock](../../googlemock) comes with a
-library of matchers for validating arguments passed to mock objects. A gMock
-*matcher* is basically a predicate that knows how to describe itself. It can be
-used in these assertion macros:
+[gMock](../../googlemock) comes with a library of matchers for validating
+arguments passed to mock objects. A gMock *matcher* is basically a predicate
+that knows how to describe itself. It can be used in these assertion macros:
+
+<!-- mdformat off(github rendering does not support multiline tables) -->
| Fatal assertion | Nonfatal assertion | Verifies |
| ------------------------------ | ------------------------------ | --------------------- |
| `ASSERT_THAT(value, matcher);` | `EXPECT_THAT(value, matcher);` | value matches matcher |
+<!-- mdformat on-->
+
For example, `StartsWith(prefix)` is a matcher that matches a string starting
with `prefix`, and you can write:
@@ -398,39 +391,27 @@ using ::testing::StartsWith;
EXPECT_THAT(Foo(), StartsWith("Hello"));
```
-Read this [recipe](../../googlemock/docs/cook_book.md#using-matchers-in-google-test-assertions) in
-the gMock Cookbook for more details.
+Read this
+[recipe](../../googlemock/docs/cook_book.md#using-matchers-in-googletest-assertions)
+in the gMock Cookbook for more details.
gMock has a rich set of matchers. You can do many things googletest cannot do
alone with them. For a list of matchers gMock provides, read
-[this](../../googlemock/docs/cook_book.md#using-matchers). Especially useful among them are
-some [protocol buffer matchers](https://github.com/google/nucleus/blob/master/nucleus/testing/protocol-buffer-matchers.h). It's easy to write
-your [own matchers](../../googlemock/docs/cook_book.md#writing-new-matchers-quickly) too.
-
-For example, you can use gMock's
-[EqualsProto](https://github.com/google/nucleus/blob/master/nucleus/testing/protocol-buffer-matchers.h)
-to compare protos in your tests:
-
-```c++
-#include "testing/base/public/gmock.h"
-using ::testing::EqualsProto;
-...
- EXPECT_THAT(actual_proto, EqualsProto("foo: 123 bar: 'xyz'"));
- EXPECT_THAT(*actual_proto_ptr, EqualsProto(expected_proto));
-```
+[this](../../googlemock/docs/cook_book.md#using-matchers). It's easy to write
+your [own matchers](../../googlemock/docs/cook_book.md#NewMatchers) too.
gMock is bundled with googletest, so you don't need to add any build dependency
in order to take advantage of this. Just include `"testing/base/public/gmock.h"`
and you're ready to go.
-**Availability**: Linux, Windows, and Mac.
-
### More String Assertions
-(Please read the [previous](#asserting-using-gmock-matchers) section first if you haven't.)
+(Please read the [previous](#asserting-using-gmock-matchers) section first if
+you haven't.)
-You can use the gMock [string matchers](../../googlemock/docs/CheatSheet.md#string-matchers)
-with `EXPECT_THAT()` or `ASSERT_THAT()` to do more string comparison tricks
+You can use the gMock
+[string matchers](../../googlemock/docs/cheat_sheet.md#string-matchers) with
+`EXPECT_THAT()` or `ASSERT_THAT()` to do more string comparison tricks
(sub-string, prefix, suffix, regular expression, etc.). For example,
```c++
@@ -441,11 +422,9 @@ using ::testing::MatchesRegex;
EXPECT_THAT(bar_string, MatchesRegex("\\w*\\d+"));
```
-**Availability**: Linux, Windows, Mac.
-
If the string contains a well-formed HTML or XML document, you can check whether
-its DOM tree matches an [XPath
-expression](http://www.w3.org/TR/xpath/#contents):
+its DOM tree matches an
+[XPath expression](http://www.w3.org/TR/xpath/#contents):
```c++
// Currently still in //template/prototemplate/testing:xpath_matcher
@@ -454,8 +433,6 @@ using prototemplate::testing::MatchesXPath;
EXPECT_THAT(html_string, MatchesXPath("//a[text()='click here']"));
```
-**Availability**: Linux.
-
### Windows HRESULT assertions
These assertions test for `HRESULT` success or failure.
@@ -477,8 +454,6 @@ CComVariant empty;
ASSERT_HRESULT_SUCCEEDED(shell->ShellExecute(CComBSTR(url), empty, empty, empty, empty));
```
-**Availability**: Windows.
-
### Type Assertions
You can call the function
@@ -519,8 +494,6 @@ void Test2() { Foo<bool> foo; foo.Bar(); }
to cause a compiler error.
-**Availability**: Linux, Windows, Mac.
-
### Assertion Placement
You can use assertions in any C++ function. In particular, it doesn't have to be
@@ -544,14 +517,17 @@ that generate non-fatal failures, such as `ADD_FAILURE*` and `EXPECT_*`.
NOTE: Constructors and destructors are not considered void-returning functions,
according to the C++ language specification, and so you may not use fatal
-assertions in them. You'll get a compilation error if you try. A simple
-workaround is to transfer the entire body of the constructor or destructor to a
-private void-returning method. However, you should be aware that a fatal
-assertion failure in a constructor does not terminate the current test, as your
-intuition might suggest; it merely returns from the constructor early, possibly
-leaving your object in a partially-constructed state. Likewise, a fatal
-assertion failure in a destructor may leave your object in a
-partially-destructed state. Use assertions carefully in these situations!
+assertions in them; you'll get a compilation error if you try. Instead, either
+call `abort` and crash the entire test executable, or put the fatal assertion in
+a `SetUp`/`TearDown` function; see
+[constructor/destructor vs. `SetUp`/`TearDown`](faq.md#CtorVsSetUp).
+
+WARNING: A fatal assertion in a helper function (private void-returning method)
+called from a constructor or destructor does not terminate the current
+test, as your intuition might suggest: it merely returns from the constructor or
+destructor early, possibly leaving your object in a partially-constructed or
+partially-destructed state! You almost certainly want to `abort` or use
+`SetUp`/`TearDown` instead.
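
A minimal sketch of the recommended pattern (editor's example, not part of this diff; `Acquire()` is a hypothetical helper):

```c++
#include "gtest/gtest.h"

class ResourceTest : public testing::Test {
 protected:
  void SetUp() override {
    resource_ = Acquire();
    // A fatal assertion is fine here: on failure, the test body is skipped.
    ASSERT_NE(resource_, nullptr);
  }
  int* Acquire() { return &value_; }  // Stand-in so the sketch is runnable.
  int value_ = 0;
  int* resource_ = nullptr;
};

TEST_F(ResourceTest, UsesResource) { EXPECT_EQ(*resource_, 0); }
```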
## Teaching googletest How to Print Your Values
@@ -649,11 +625,10 @@ Since these precondition checks cause the processes to die, we call such tests
_death tests_. More generally, any test that checks that a program terminates
(except by throwing an exception) in an expected fashion is also a death test.
-
Note that if a piece of code throws an exception, we don't consider it "death"
for the purpose of death tests, as the caller of the code could catch the
exception and avoid the crash. If you want to verify exceptions thrown by your
-code, see [Exception Assertions](#exception-assertions).
+code, see [Exception Assertions](#ExceptionAssertions).
If you want to test `EXPECT_*()/ASSERT_*()` failures in your test code, see
Catching Failures
@@ -662,19 +637,20 @@ Catching Failures
googletest has the following macros to support death tests:
-Fatal assertion | Nonfatal assertion | Verifies
----------------------------------------------- | ---------------------------------------------- | --------
-`ASSERT_DEATH(statement, regex);` | `EXPECT_DEATH(statement, regex);` | `statement` crashes with the given error
-`ASSERT_DEATH_IF_SUPPORTED(statement, regex);` | `EXPECT_DEATH_IF_SUPPORTED(statement, regex);` | if death tests are supported, verifies that `statement` crashes with the given error; otherwise verifies nothing
-`ASSERT_EXIT(statement, predicate, regex);` | `EXPECT_EXIT(statement, predicate, regex);` | `statement` exits with the given error, and its exit code matches `predicate`
+Fatal assertion | Nonfatal assertion | Verifies
+------------------------------------------------ | ------------------------------------------------ | --------
+`ASSERT_DEATH(statement, matcher);` | `EXPECT_DEATH(statement, matcher);` | `statement` crashes with the given error
+`ASSERT_DEATH_IF_SUPPORTED(statement, matcher);` | `EXPECT_DEATH_IF_SUPPORTED(statement, matcher);` | if death tests are supported, verifies that `statement` crashes with the given error; otherwise verifies nothing
+`ASSERT_EXIT(statement, predicate, matcher);` | `EXPECT_EXIT(statement, predicate, matcher);` | `statement` exits with the given error, and its exit code matches `predicate`
where `statement` is a statement that is expected to cause the process to die,
`predicate` is a function or function object that evaluates an integer exit
-status, and `regex` is a (Perl) regular expression that the stderr output of
-`statement` is expected to match. Note that `statement` can be *any valid
-statement* (including *compound statement*) and doesn't have to be an
-expression.
-
+status, and `matcher` is either a gMock matcher matching a `const std::string&`
+or a (Perl) regular expression - either of which is matched against the stderr
+output of `statement`. For legacy reasons, a bare string (i.e. with no matcher)
+is interpreted as `ContainsRegex(str)`, **not** `Eq(str)`. Note that `statement`
+can be *any valid statement* (including *compound statement*) and doesn't have
+to be an expression.
As usual, the `ASSERT` variants abort the current test function, while the
`EXPECT` variants do not.
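
A sketch of both matcher forms (editor's example, not part of this diff; `CheckPositive` is hypothetical):

```c++
#include <cstdio>
#include <cstdlib>

#include "gmock/gmock.h"  // Only needed for the HasSubstr form below.
#include "gtest/gtest.h"

void CheckPositive(int n) {
  if (n <= 0) {
    std::fprintf(stderr, "n must be positive");
    std::abort();
  }
}

TEST(CheckPositiveDeathTest, RejectsNonPositive) {
  // A bare string is interpreted as ContainsRegex, not Eq.
  EXPECT_DEATH(CheckPositive(0), "must be positive");
  // An explicit gMock matcher against the stderr output.
  EXPECT_DEATH(CheckPositive(-1), testing::HasSubstr("positive"));
}
```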
@@ -756,8 +732,8 @@ necessary.
IMPORTANT: We strongly recommend you to follow the convention of naming your
**test suite** (not test) `*DeathTest` when it contains a death test, as
-demonstrated in the above example. The [Death Tests And
-Threads](#death-tests-and-threads) section below explains why.
+demonstrated in the above example. The
+[Death Tests And Threads](#death-tests-and-threads) section below explains why.
If a test fixture class is shared by normal tests and death tests, you can use
`using` or `typedef` to introduce an alias for the fixture class and avoid
@@ -777,11 +753,8 @@ TEST_F(FooDeathTest, DoesThat) {
}
```
-**Availability**: Linux, Windows, Cygwin, and Mac
-
### Regular Expression Syntax
-
On POSIX systems (e.g. Linux, Cygwin, and Mac), googletest uses the
[POSIX extended regular expression](http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap09.html#tag_09_04)
syntax. To learn about this syntax, you may want to read this
@@ -819,10 +792,9 @@ Expression | Meaning
To help you determine which capability is available on your system, googletest
defines macros to govern which regular expression it is using. The macros are:
-<!--absl:google3-begin(google3-only)-->`GTEST_USES_PCRE=1`, or
-<!--absl:google3-end--> `GTEST_USES_SIMPLE_RE=1` or `GTEST_USES_POSIX_RE=1`. If
-you want your death tests to work in all cases, you can either `#if` on these
-macros or use the more limited syntax only.
+`GTEST_USES_SIMPLE_RE=1` or `GTEST_USES_POSIX_RE=1`. If you want your death
+tests to work in all cases, you can either `#if` on these macros or use the more
+limited syntax only.
### How It Works
@@ -844,11 +816,7 @@ initialized from the command-line flag `--gtest_death_test_style`).
consideration to be run - much like the `threadsafe` mode on POSIX.
Other values for the variable are illegal and will cause the death test to fail.
-Currently, the flag's default value is
-"fast". However, we reserve
-the right to change it in the future. Therefore, your tests should not depend on
-this. In either case, the parent process waits for the child process to
-complete, and checks that
+Currently, the flag's default value is **"fast"**. In either case, the parent
+process waits for the child process to complete, and checks that
1. the child's exit status satisfies the predicate, and
2. the child's stderr matches the regular expression.
@@ -869,7 +837,8 @@ googletest has three features intended to raise awareness of threading issues.
1. A warning is emitted if multiple threads are running when a death test is
encountered.
-2. Test suites with a name ending in "DeathTest" are run before all other tests.
+2. Test suites with a name ending in "DeathTest" are run before all other
+ tests.
3. It uses `clone()` instead of `fork()` to spawn the child process on Linux
(`clone()` is not available on Cygwin and Mac), as `fork()` is more likely
to cause the child to hang when the parent process has multiple threads.
@@ -879,7 +848,6 @@ executed in a separate process and cannot affect the parent.
### Death Test Styles
-
The "threadsafe" death test style was introduced in order to help mitigate the
risks of testing in a possibly multithreaded environment. It trades increased
test execution time (potentially dramatically so) for improved thread safety.
@@ -914,7 +882,6 @@ TEST(MyDeathTest, TestTwo) {
}
```
-
### Caveats
The `statement` argument of `ASSERT_EXIT()` can be any valid C++ statement. If
@@ -948,10 +915,9 @@ handlers registered with `pthread_atfork(3)`.
If a test sub-routine is called from several places, when an assertion inside it
fails, it can be hard to tell which invocation of the sub-routine the failure is
-from.
-You can alleviate this problem using extra logging or custom failure messages,
-but that usually clutters up your tests. A better solution is to use the
-`SCOPED_TRACE` macro or the `ScopedTrace` utility:
+from. You can alleviate this problem using extra logging or custom failure
+messages, but that usually clutters up your tests. A better solution is to use
+the `SCOPED_TRACE` macro or the `ScopedTrace` utility:
```c++
SCOPED_TRACE(message);
@@ -968,8 +934,8 @@ For example,
```c++
10: void Sub1(int n) {
-11: EXPECT_EQ(1, Bar(n));
-12: EXPECT_EQ(2, Bar(n + 1));
+11: EXPECT_EQ(Bar(n), 1);
+12: EXPECT_EQ(Bar(n + 1), 2);
13: }
14:
15: TEST(FooTest, Bar) {
@@ -1000,10 +966,9 @@ Expected: 2
```
Without the trace, it would've been difficult to know which invocation of
-`Sub1()` the two failures come from respectively. (You could add
-
-an extra message to each assertion in `Sub1()` to indicate the value of `n`, but
-that's tedious.)
+`Sub1()` the two failures come from respectively. (You could add an extra
+message to each assertion in `Sub1()` to indicate the value of `n`, but that's
+tedious.)
Some tips on using `SCOPED_TRACE`:
@@ -1021,8 +986,6 @@ Some tips on using `SCOPED_TRACE`:
5. The trace dump is clickable in Emacs - hit `return` on a line number and
you'll be taken to that line in the source file!
-**Availability**: Linux, Windows, Mac.
-
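
For the `ScopedTrace` utility named above, a minimal sketch (editor's example, not part of this diff; `Bar()` is hypothetical):

```c++
#include "gtest/gtest.h"

int Bar(int n) { return n; }  // Hypothetical function under test.

void Sub1(int n) { EXPECT_EQ(Bar(n), 1); }

TEST(FooTest, Bar) {
  {
    // Same effect as SCOPED_TRACE("iteration 1"), but the file/line that
    // appears in the trace can be supplied explicitly.
    testing::ScopedTrace trace(__FILE__, __LINE__, "iteration 1");
    Sub1(1);
  }
  Sub1(1);  // Outside the scope: a failure here would carry no trace.
}
```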
### Propagating Fatal Failures
A common pitfall when using `ASSERT_*` and `FAIL*` is not understanding that
@@ -1103,8 +1066,7 @@ EXPECT_NO_FATAL_FAILURE({
});
```
-**Availability**: Linux, Windows, Mac. Assertions from multiple threads are
-currently not supported on Windows.
+Assertions from multiple threads are currently not supported on Windows.
#### Checking for Failures in the Current Test
@@ -1145,14 +1107,13 @@ Similarly, `HasNonfatalFailure()` returns `true` if the current test has at
least one non-fatal failure, and `HasFailure()` returns `true` if the current
test has at least one failure of either kind.
-**Availability**: Linux, Windows, Mac.
-
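
A sketch of `HasFatalFailure()` in use (editor's example, not part of this diff):

```c++
#include "gtest/gtest.h"

void Subroutine(bool ok) {
  ASSERT_TRUE(ok) << "records a fatal failure in the current test";
}

TEST(GuardTest, StopsAfterFatalFailureInSubroutine) {
  Subroutine(true);
  // The fatal failure, if any, happened in Subroutine's stack frame, so it
  // must be checked for explicitly before continuing.
  if (testing::Test::HasFatalFailure()) return;
  SUCCEED();
}
```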
## Logging Additional Information
In your test code, you can call `RecordProperty("key", value)` to log additional
information, where `value` can be either a string or an `int`. The *last* value
-recorded for a key will be emitted to the [XML output](#generating-an-xml-report) if you
-specify one. For example, the test
+recorded for a key will be emitted to the
+[XML output](#generating-an-xml-report) if you specify one. For example, the
+test
```c++
TEST_F(WidgetUsageTest, MinAndMaxWidgets) {
@@ -1179,12 +1140,10 @@ will output XML like this:
> `type_param`, and `value_param`).
> * Calling `RecordProperty()` outside of the lifespan of a test is allowed.
> If it's called outside of a test but between a test suite's
-> `SetUpTestSuite()` and `TearDownTestSuite()` methods, it will be attributed
-> to the XML element for the test suite. If it's called outside of all test
-> suites (e.g. in a test environment), it will be attributed to the top-level
-> XML element.
-
-**Availability**: Linux, Windows, Mac.
+> `SetUpTestSuite()` and `TearDownTestSuite()` methods, it will be
+> attributed to the XML element for the test suite. If it's called outside
+> of all test suites (e.g. in a test environment), it will be attributed to
+> the top-level XML element.
## Sharing Resources Between Tests in the Same Test Suite
@@ -1199,11 +1158,11 @@ also supports per-test-suite set-up/tear-down. To use it:
1. In your test fixture class (say `FooTest` ), declare as `static` some member
variables to hold the shared resources.
-1. Outside your test fixture class (typically just below it), define those
+2. Outside your test fixture class (typically just below it), define those
member variables, optionally giving them initial values.
-1. In the same test fixture class, define a `static void SetUpTestSuite()`
- function (remember not to spell it as **`SetupTestSuite`** with a small `u`!)
- to set up the shared resources and a `static void TearDownTestSuite()`
+3. In the same test fixture class, define a `static void SetUpTestSuite()`
+ function (remember not to spell it as **`SetupTestSuite`** with a small
+ `u`!) to set up the shared resources and a `static void TearDownTestSuite()`
function to tear them down.
That's it! googletest automatically calls `SetUpTestSuite()` before running the
@@ -1262,8 +1221,6 @@ NOTE: Though the above code declares `SetUpTestSuite()` protected, it may
sometimes be necessary to declare it public, such as when using it with
`TEST_P`.
-**Availability**: Linux, Windows, Mac.
-
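
A sketch of the three steps above (editor's example, not part of this diff; the `int` resource stands in for something expensive):

```c++
#include "gtest/gtest.h"

class FooTest : public testing::Test {
 protected:
  // Step 3: per-suite set-up and tear-down.
  static void SetUpTestSuite() { shared_value_ = new int(42); }
  static void TearDownTestSuite() {
    delete shared_value_;
    shared_value_ = nullptr;
  }
  static int* shared_value_;  // Step 1: declare the shared resource.
};

int* FooTest::shared_value_ = nullptr;  // Step 2: define it.

TEST_F(FooTest, SeesSharedResource) { EXPECT_EQ(*shared_value_, 42); }
TEST_F(FooTest, AlsoSeesSharedResource) { EXPECT_NE(shared_value_, nullptr); }
```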
## Global Set-Up and Tear-Down
Just as you can do set-up and tear-down at the test level and the test suite
@@ -1273,15 +1230,15 @@ First, you subclass the `::testing::Environment` class to define a test
environment, which knows how to set-up and tear-down:
```c++
-class Environment {
+class Environment : public ::testing::Environment {
public:
virtual ~Environment() {}
// Override this to define how to set up the environment.
- virtual void SetUp() {}
+ void SetUp() override {}
// Override this to define how to tear down the environment.
- virtual void TearDown() {}
+ void TearDown() override {}
};
```
@@ -1295,10 +1252,10 @@ Environment* AddGlobalTestEnvironment(Environment* env);
Now, when `RUN_ALL_TESTS()` is called, it first calls the `SetUp()` method of
each environment object, then runs the tests if none of the environments
reported fatal failures and `GTEST_SKIP()` was not called. `RUN_ALL_TESTS()`
-always calls `TearDown()` with each environment object, regardless of whether
-or not the tests were run.
+always calls `TearDown()` with each environment object, regardless of whether or
+not the tests were run.
-It's OK to register multiple environment objects. In this case, their `SetUp()`
+It's OK to register multiple environment objects. In this case, their `SetUp()`
will be called in the order they are registered, and their `TearDown()` will be
called in the reverse order.
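
Registration itself is one line in `main()`; a sketch (editor's example, not part of this diff):

```c++
#include "gtest/gtest.h"

class MyEnvironment : public testing::Environment {
 public:
  void SetUp() override { /* acquire process-wide resources */ }
  void TearDown() override { /* release them */ }
};

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  // googletest takes ownership of the heap-allocated environment.
  testing::AddGlobalTestEnvironment(new MyEnvironment);
  return RUN_ALL_TESTS();
}
```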
@@ -1339,13 +1296,13 @@ number of situations, for example:
### How to Write Value-Parameterized Tests
To write value-parameterized tests, first you should define a fixture class. It
-must be derived from both `::testing::Test` and
-`::testing::WithParamInterface<T>` (the latter is a pure interface), where `T`
-is the type of your parameter values. For convenience, you can just derive the
-fixture class from `::testing::TestWithParam<T>`, which itself is derived from
-both `::testing::Test` and `::testing::WithParamInterface<T>`. `T` can be any
-copyable type. If it's a raw pointer, you are responsible for managing the
-lifespan of the pointed values.
+must be derived from both `testing::Test` and `testing::WithParamInterface<T>`
+(the latter is a pure interface), where `T` is the type of your parameter
+values. For convenience, you can just derive the fixture class from
+`testing::TestWithParam<T>`, which itself is derived from both `testing::Test`
+and `testing::WithParamInterface<T>`. `T` can be any copyable type. If it's a
+raw pointer, you are responsible for managing the lifespan of the pointed
+values.
NOTE: If your test fixture defines `SetUpTestSuite()` or `TearDownTestSuite()`
they must be declared **public** rather than **protected** in order to use
@@ -1353,18 +1310,18 @@ they must be declared **public** rather than **protected** in order to use
```c++
class FooTest :
- public ::testing::TestWithParam<const char*> {
+ public testing::TestWithParam<const char*> {
// You can implement all the usual fixture class members here.
// To access the test parameter, call GetParam() from class
// TestWithParam<T>.
};
// Or, when you want to add parameters to a pre-existing fixture class:
-class BaseTest : public ::testing::Test {
+class BaseTest : public testing::Test {
...
};
class BarTest : public BaseTest,
- public ::testing::WithParamInterface<const char*> {
+ public testing::WithParamInterface<const char*> {
...
};
```
@@ -1386,31 +1343,33 @@ TEST_P(FooTest, HasBlahBlah) {
}
```
-Finally, you can use `INSTANTIATE_TEST_SUITE_P` to instantiate the test suite with
-any set of parameters you want. googletest defines a number of functions for
-generating test parameters. They return what we call (surprise!) *parameter
+Finally, you can use `INSTANTIATE_TEST_SUITE_P` to instantiate the test suite
+with any set of parameters you want. googletest defines a number of functions
+for generating test parameters. They return what we call (surprise!) *parameter
generators*. Here is a summary of them, which are all in the `testing`
namespace:
-| Parameter Generator | Behavior |
-| ---------------------------- | ------------------------------------------- |
-| `Range(begin, end [, step])` | Yields values `{begin, begin+step, begin+step+step, ...}`. The values do not include `end`. `step` defaults to 1. |
-| `Values(v1, v2, ..., vN)` | Yields values `{v1, v2, ..., vN}`. |
-| `ValuesIn(container)` and `ValuesIn(begin,end)` | Yields values from a C-style array, an STL-style container, or an iterator range `[begin, end)`. |
-| `Bool()` | Yields sequence `{false, true}`. |
-| `Combine(g1, g2, ..., gN)` | Yields all combinations (Cartesian product) as std\:\:tuples of the values generated by the `N` generators. |
+<!-- mdformat off(github rendering does not support multiline tables) -->
-For more details, see the comments at the definitions of these functions.
+| Parameter Generator | Behavior |
+| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- |
+| `Range(begin, end [, step])` | Yields values `{begin, begin+step, begin+step+step, ...}`. The values do not include `end`. `step` defaults to 1. |
+| `Values(v1, v2, ..., vN)` | Yields values `{v1, v2, ..., vN}`. |
+| `ValuesIn(container)` and `ValuesIn(begin,end)` | Yields values from a C-style array, an STL-style container, or an iterator range `[begin, end)` |
+| `Bool()` | Yields sequence `{false, true}`. |
+| `Combine(g1, g2, ..., gN)` | Yields all combinations (Cartesian product) as std\:\:tuples of the values generated by the `N` generators. |
-NOTE: The `INSTANTIATE_TEST_SUITE_P` keyword is recommended (addressing https://github.com/google/googletest/issues/1085) For 1.8.1 and previous releases the keyword is `INSTANTIATE_TEST_CASE_P`. which has been deprecated in favor of INSTANTIATE_TEST_SUITE_P.
+<!-- mdformat on-->
-The following statement will instantiate tests from the `FooTest` test suite each
-with parameter values `"meeny"`, `"miny"`, and `"moe"`.
+For more details, see the comments at the definitions of these functions.
+
+The following statement will instantiate tests from the `FooTest` test suite
+each with parameter values `"meeny"`, `"miny"`, and `"moe"`.
```c++
INSTANTIATE_TEST_SUITE_P(InstantiationName,
- FooTest,
- ::testing::Values("meeny", "miny", "moe"));
+ FooTest,
+ testing::Values("meeny", "miny", "moe"));
```
NOTE: The code above must be placed at global or namespace scope, not at
@@ -1440,7 +1399,7 @@ parameter values `"cat"` and `"dog"`:
```c++
const char* pets[] = {"cat", "dog"};
INSTANTIATE_TEST_SUITE_P(AnotherInstantiationName, FooTest,
- ::testing::ValuesIn(pets));
+ testing::ValuesIn(pets));
```
The tests from the instantiation above will have these names:
@@ -1454,9 +1413,10 @@ Please note that `INSTANTIATE_TEST_SUITE_P` will instantiate *all* tests in the
given test suite, whether their definitions come before or *after* the
`INSTANTIATE_TEST_SUITE_P` statement.
-You can see sample7_unittest.cc and sample8_unittest.cc for more examples.
+You can see [sample7_unittest.cc] and [sample8_unittest.cc] for more examples.
-**Availability**: Linux, Windows, Mac
+[sample7_unittest.cc]: ../samples/sample7_unittest.cc "Parameterized Test example"
+[sample8_unittest.cc]: ../samples/sample8_unittest.cc "Parameterized Test example with multiple parameters"
### Creating Value-Parameterized Abstract Tests
@@ -1474,7 +1434,7 @@ To define abstract tests, you should organize your code like this:
1. Put the definition of the parameterized test fixture class (e.g. `FooTest`)
in a header file, say `foo_param_test.h`. Think of this as *declaring* your
abstract tests.
-1. Put the `TEST_P` definitions in `foo_param_test.cc`, which includes
+2. Put the `TEST_P` definitions in `foo_param_test.cc`, which includes
`foo_param_test.h`. Think of this as *implementing* your abstract tests.
Once they are defined, you can instantiate them by including `foo_param_test.h`,
@@ -1494,22 +1454,49 @@ returns the value of `testing::PrintToString(GetParam())`. It does not work for
`std::string` or C strings.
NOTE: test names must be non-empty, unique, and may only contain ASCII
-alphanumeric characters. In particular, they [should not contain
-underscores](https://github.com/google/googletest/blob/master/googletest/docs/faq.md#why-should-test-suite-names-and-test-names-not-contain-underscore).
+alphanumeric characters. In particular, they
+[should not contain underscores](faq.md#why-should-test-suite-names-and-test-names-not-contain-underscore)
```c++
-class MyTestsuite : public testing::TestWithParam<int> {};
+class MyTestSuite : public testing::TestWithParam<int> {};
-TEST_P(MyTestsuite, MyTest)
+TEST_P(MyTestSuite, MyTest)
{
std::cout << "Example Test Param: " << GetParam() << std::endl;
}
-INSTANTIATE_TEST_SUITE_P(MyGroup, MyTestsuite, testing::Range(0, 10),
- testing::PrintToStringParamName());
+INSTANTIATE_TEST_SUITE_P(MyGroup, MyTestSuite, testing::Range(0, 10),
+ testing::PrintToStringParamName());
+```
+
+Providing a custom functor allows for more control over test parameter name
+generation, especially for types where the automatic conversion does not
+generate helpful parameter names (e.g. strings as demonstrated above). The
+following example illustrates this for multiple parameters, an enumeration type
+and a string, and also demonstrates how to combine generators. It uses a lambda
+for conciseness:
+
+```c++
+enum class MyType { MY_FOO = 0, MY_BAR = 1 };
+
+class MyTestSuite : public testing::TestWithParam<std::tuple<MyType, std::string>> {
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ MyGroup, MyTestSuite,
+ testing::Combine(
+        testing::Values(MyType::MY_FOO, MyType::MY_BAR),
+        testing::Values("A", "B")),
+ [](const testing::TestParamInfo<MyTestSuite::ParamType>& info) {
+      std::string name = absl::StrCat(
+          std::get<0>(info.param) == MyType::MY_FOO ? "Foo" : "Bar", "_",
+ std::get<1>(info.param));
+ absl::c_replace_if(name, [](char c) { return !std::isalnum(c); }, '_');
+ return name;
+ });
```
-## Typed Tests</id>
+## Typed Tests
Suppose you have multiple implementations of the same interface and want to make
sure that all of them satisfy some common requirements. Or, you may have defined
@@ -1577,9 +1564,9 @@ TYPED_TEST(FooTest, DoesBlah) {
TYPED_TEST(FooTest, HasPropertyA) { ... }
```
-You can see sample6_unittest.cc
+You can see [sample6_unittest.cc] for a complete example.
-**Availability**: Linux, Windows, Mac
+[sample6_unittest.cc]: ../samples/sample6_unittest.cc "Typed Test example"
## Type-Parameterized Tests
@@ -1625,12 +1612,12 @@ TYPED_TEST_P(FooTest, HasPropertyA) { ... }
Now the tricky part: you need to register all test patterns using the
`REGISTER_TYPED_TEST_SUITE_P` macro before you can instantiate them. The first
-argument of the macro is the test suite name; the rest are the names of the tests
-in this test suite:
+argument of the macro is the test suite name; the rest are the names of the
+tests in this test suite:
```c++
REGISTER_TYPED_TEST_SUITE_P(FooTest,
- DoesBlah, HasPropertyA);
+ DoesBlah, HasPropertyA);
```
Finally, you are free to instantiate the pattern with the types you want. If you
@@ -1644,7 +1631,8 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
To distinguish different instances of the pattern, the first argument to the
`INSTANTIATE_TYPED_TEST_SUITE_P` macro is a prefix that will be added to the
-actual test suite name. Remember to pick unique prefixes for different instances.
+actual test suite name. Remember to pick unique prefixes for different
+instances.
In the special case where the type list contains only one type, you can write
that type directly without `::testing::Types<...>`, like this:
@@ -1653,9 +1641,7 @@ that type directly without `::testing::Types<...>`, like this:
INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, int);
```
-You can see `sample6_unittest.cc` for a complete example.
-
-**Availability**: Linux, Windows, Mac
+You can see [sample6_unittest.cc] for a complete example.
## Testing Private Code
@@ -1682,7 +1668,7 @@ To test them, we use the following special techniques:
* Both static functions and definitions/declarations in an unnamed namespace
are only visible within the same translation unit. To test them, you can
`#include` the entire `.cc` file being tested in your `*_test.cc` file.
- (including `.cc` files is not a good way to reuse code - you should not do
+ (#including `.cc` files is not a good way to reuse code - you should not do
this in production code!)
However, a better approach is to move the private code into the
@@ -1712,19 +1698,16 @@ To test them, we use the following special techniques:
this line in the class body:
```c++
- FRIEND_TEST(TestsuiteName, TestName);
+ FRIEND_TEST(TestSuiteName, TestName);
```
For example,
```c++
// foo.h
-
- #include "gtest/gtest_prod.h"
-
class Foo {
...
- private:
+ private:
FRIEND_TEST(FooTest, BarReturnsZeroOnNull);
int Bar(void* x);
@@ -1734,7 +1717,7 @@ To test them, we use the following special techniques:
...
TEST(FooTest, BarReturnsZeroOnNull) {
Foo foo;
- EXPECT_EQ(0, foo.Bar(NULL)); // Uses Foo's private member Bar().
+ EXPECT_EQ(foo.Bar(NULL), 0); // Uses Foo's private member Bar().
}
```
@@ -1772,7 +1755,6 @@ To test them, we use the following special techniques:
} // namespace my_namespace
```
-
## "Catching" Failures
If you are building a testing utility on top of googletest, you'll want to test
@@ -1815,56 +1797,67 @@ For technical reasons, there are some caveats:
1. You cannot stream a failure message to either macro.
-1. `statement` in `EXPECT_FATAL_FAILURE{_ON_ALL_THREADS}()` cannot reference
+2. `statement` in `EXPECT_FATAL_FAILURE{_ON_ALL_THREADS}()` cannot reference
local non-static variables or non-static members of `this` object.
-1. `statement` in `EXPECT_FATAL_FAILURE{_ON_ALL_THREADS}()()` cannot return a
+3. `statement` in `EXPECT_FATAL_FAILURE{_ON_ALL_THREADS}()` cannot return a
value.
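
A sketch that respects the caveats above: the statement is a call to a free function, referencing no locals and returning nothing (editor's example, not part of this diff):

```c++
#include "gtest/gtest-spi.h"  // Defines the two EXPECT_*_FAILURE macros.
#include "gtest/gtest.h"

void HelperThatFails() { FAIL() << "expected fatal failure"; }

TEST(CatchingFailureTest, Demo) {
  // Verifies the statement produces exactly one fatal failure whose
  // message contains the given substring.
  EXPECT_FATAL_FAILURE(HelperThatFails(), "expected fatal failure");
  EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "oops", "oops");
}
```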
## Registering tests programmatically
- The `TEST` macros handle the vast majority of all use cases, but there are few
+The `TEST` macros handle the vast majority of all use cases, but there are a few
where runtime registration logic is required. For those cases, the framework
provides the `::testing::RegisterTest` function that allows callers to register
arbitrary tests dynamically.
- This is an advanced API only to be used when the `TEST` macros are insufficient.
+
+This is an advanced API only to be used when the `TEST` macros are insufficient.
The macros should be preferred when possible, as they avoid most of the
complexity of calling this function.
- It provides the following signature:
- ```c++
+
+It provides the following signature:
+
+```c++
template <typename Factory>
-TestInfo* RegisterTest(const char* test_case_name, const char* test_name,
+TestInfo* RegisterTest(const char* test_suite_name, const char* test_name,
const char* type_param, const char* value_param,
const char* file, int line, Factory factory);
```
- The `factory` argument is a factory callable (move-constructible) object or
+
+The `factory` argument is a factory callable (move-constructible) object or
function pointer that creates a new instance of the Test object. It hands
ownership to the caller. The signature of the callable is `Fixture*()`, where
`Fixture` is the test fixture class for the test. All tests registered with the
-same `test_case_name` must return the same fixture type. This is checked at
+same `test_suite_name` must return the same fixture type. This is checked at
runtime.
- The framework will infer the fixture class from the factory and will call the
-`SetUpTestCase` and `TearDownTestCase` for it.
- Must be called before `RUN_ALL_TESTS()` is invoked, otherwise behavior is
+
+The framework will infer the fixture class from the factory and will call the
+`SetUpTestSuite` and `TearDownTestSuite` for it.
+
+Must be called before `RUN_ALL_TESTS()` is invoked, otherwise behavior is
undefined.
- Use case example:
- ```c++
+
+Use case example:
+
+```c++
class MyFixture : public ::testing::Test {
public:
// All of these optional, just like in regular macro usage.
- static void SetUpTestCase() { ... }
- static void TearDownTestCase() { ... }
+ static void SetUpTestSuite() { ... }
+ static void TearDownTestSuite() { ... }
void SetUp() override { ... }
void TearDown() override { ... }
};
- class MyTest : public MyFixture {
+
+class MyTest : public MyFixture {
public:
explicit MyTest(int data) : data_(data) {}
void TestBody() override { ... }
- private:
+
+ private:
int data_;
};
- void RegisterMyTests(const std::vector<int>& values) {
+
+void RegisterMyTests(const std::vector<int>& values) {
for (int v : values) {
::testing::RegisterTest(
"MyFixture", ("Test" + std::to_string(v)).c_str(), nullptr,
@@ -1882,7 +1875,6 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
```
-
## Getting the Current Test's Name
Sometimes a function may need to know the name of the currently running test.
@@ -1923,12 +1915,10 @@ To obtain a `TestInfo` object for the currently running test, call
```
`current_test_info()` returns a null pointer if no test is running. In
-particular, you cannot find the test suite name in `TestsuiteSetUp()`,
-`TestsuiteTearDown()` (where you know the test suite name implicitly), or
+particular, you cannot find the test suite name in `TestSuiteSetUp()`,
+`TestSuiteTearDown()` (where you know the test suite name implicitly), or
functions called from them.
-**Availability**: Linux, Windows, Mac.
-
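
A sketch of reading the name from a fixture's `SetUp()` (editor's example, not part of this diff; `test_suite_name()` assumes the post-rename API this change documents):

```c++
#include <cstdio>

#include "gtest/gtest.h"

class NamedTest : public testing::Test {
 protected:
  void SetUp() override {
    const testing::TestInfo* info =
        testing::UnitTest::GetInstance()->current_test_info();
    // Non-null here, because a test is running.
    std::printf("Running %s.%s\n", info->test_suite_name(), info->name());
  }
};

TEST_F(NamedTest, KnowsItsOwnName) {}
```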
## Extending googletest by Handling Test Events
googletest provides an **event listener API** to let you receive notifications
@@ -1939,8 +1929,6 @@ console output, replace the XML output, or provide a completely different form
of output, such as a GUI or a database. You can also use test events as
checkpoints to implement a resource leak checker, for example.
-**Availability**: Linux, Windows, Mac.
-
### Defining Event Listeners
To define an event listener, you subclass either testing::TestEventListener or
@@ -1954,7 +1942,7 @@ When an event is fired, its context is passed to the handler function as an
argument. The following argument types are used:
* UnitTest reflects the state of the entire test program,
-* Testsuite has information about a test suite, which can contain one or more
+* TestSuite has information about a test suite, which can contain one or more
tests,
* TestInfo contains the state of a test, and
* TestPartResult represents the result of a test assertion.
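
A minimal listener sketch in the spirit of sample9 (editor's example, not part of this diff):

```c++
#include <cstdio>

#include "gtest/gtest.h"

class MinimalistPrinter : public testing::EmptyTestEventListener {
  // Only override the events of interest; EmptyTestEventListener provides
  // empty implementations for the rest.
  void OnTestStart(const testing::TestInfo& info) override {
    std::printf("*** Test %s.%s starting.\n", info.test_suite_name(),
                info.name());
  }
  void OnTestEnd(const testing::TestInfo& info) override {
    std::printf("*** Test %s.%s ending.\n", info.test_suite_name(),
                info.name());
  }
};
```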
@@ -2021,7 +2009,9 @@ You can do so by adding one line:
```
Now, sit back and enjoy a completely different output from your tests. For more
-details, you can read this sample9_unittest.cc
+details, see [sample9_unittest.cc].
+
+[sample9_unittest.cc]: ../samples/sample9_unittest.cc "Event listener example"
You may append more than one listener to the list. When an `On*Start()` or
`OnTestPartResult()` event is fired, the listeners will receive it in the order
@@ -2038,7 +2028,7 @@ when processing an event. There are some restrictions:
1. You cannot generate any failure in `OnTestPartResult()` (otherwise it will
cause `OnTestPartResult()` to be called recursively).
-1. A listener that handles `OnTestPartResult()` is not allowed to generate any
+2. A listener that handles `OnTestPartResult()` is not allowed to generate any
failure.
When you add listeners to the listener list, you should put listeners that
@@ -2046,7 +2036,9 @@ handle `OnTestPartResult()` *before* listeners that can generate failures. This
ensures that failures generated by the latter are attributed to the right test
by the former.
-We have a sample of failure-raising listener sample10_unittest.cc
+See [sample10_unittest.cc] for an example of a failure-raising listener.
+
+[sample10_unittest.cc]: ../samples/sample10_unittest.cc "Failure-raising listener example"
## Running Test Programs: Advanced Options
@@ -2071,25 +2063,23 @@ running them so that a filter may be applied if needed. Including the flag
format:
```none
-Testsuite1.
+TestSuite1.
TestName1
TestName2
-Testsuite2.
+TestSuite2.
TestName
```
None of the tests listed are actually run if the flag is provided. There is no
corresponding environment variable for this flag.
-**Availability**: Linux, Windows, Mac.
-
#### Running a Subset of the Tests
By default, a googletest program runs all tests the user has defined. Sometimes,
you want to run only a subset of the tests (e.g. for debugging or quickly
verifying a change). If you set the `GTEST_FILTER` environment variable or the
`--gtest_filter` flag to a filter string, googletest will only run the tests
-whose full names (in the form of `TestsuiteName.TestName`) match the filter.
+whose full names (in the form of `TestSuiteName.TestName`) match the filter.
The format of a filter is a '`:`'-separated list of wildcard patterns (called
the *positive patterns*) optionally followed by a '`-`' and another
@@ -2098,17 +2088,16 @@ the filter if and only if it matches any of the positive patterns but does not
match any of the negative patterns.
A pattern may contain `'*'` (matches any string) or `'?'` (matches any single
-character). For convenience, the filter
-
-`'*-NegativePatterns'` can be also written as `'-NegativePatterns'`.
+character). For convenience, the filter `'*-NegativePatterns'` can be also
+written as `'-NegativePatterns'`.
For example:
* `./foo_test` Has no flag, and thus runs all its tests.
* `./foo_test --gtest_filter=*` Also runs everything, due to the single
match-everything `*` value.
-* `./foo_test --gtest_filter=FooTest.*` Runs everything in test suite `FooTest`
- .
+* `./foo_test --gtest_filter=FooTest.*` Runs everything in test suite
+  `FooTest`.
* `./foo_test --gtest_filter=*Null*:*Constructor*` Runs any test whose full
name contains either `"Null"` or `"Constructor"`.
* `./foo_test --gtest_filter=-*DeathTest.*` Runs all non-death tests.
@@ -2150,8 +2139,6 @@ TIP: You can easily count the number of disabled tests you have using `gsearch`
and/or `grep`. This number can be used as a metric for improving your test
quality.
-**Availability**: Linux, Windows, Mac.
-
#### Temporarily Enabling Disabled Tests
To include disabled tests in test execution, just invoke the test program with
@@ -2160,8 +2147,6 @@ the `--gtest_also_run_disabled_tests` flag or set the
You can combine this with the `--gtest_filter` flag to further select which
disabled tests to run.
-**Availability**: Linux, Windows, Mac.
-
### Repeating the Tests
Once in a while you'll run into a test whose result is hit-or-miss. Perhaps it
@@ -2189,12 +2174,10 @@ $ foo_test --gtest_repeat=1000 --gtest_filter=FooBar.*
Repeat the tests whose name matches the filter 1000 times.
```
-If your test program contains [global set-up/tear-down](#global-set-up-and-tear-down) code, it
-will be repeated in each iteration as well, as the flakiness may be in it. You
-can also specify the repeat count by setting the `GTEST_REPEAT` environment
-variable.
-
-**Availability**: Linux, Windows, Mac.
+If your test program contains
+[global set-up/tear-down](#global-set-up-and-tear-down) code, it will be
+repeated in each iteration as well, as the flakiness may be in it. You can also
+specify the repeat count by setting the `GTEST_REPEAT` environment variable.
### Shuffling the Tests
@@ -2214,8 +2197,6 @@ time.
If you combine this with `--gtest_repeat=N`, googletest will pick a different
random seed and re-shuffle the tests in each iteration.
-**Availability**: Linux, Windows, Mac.
-
### Controlling Test Output
#### Colored Terminal Output
@@ -2223,23 +2204,38 @@ random seed and re-shuffle the tests in each iteration.
googletest can use colors in its terminal output to make it easier to spot the
important information:
+<code>
...<br/>
-<span style="color:green">[----------]<span style="color:black"> 1 test from FooTest<br/>
-<span style="color:green">[ RUN ]<span style="color:black"> FooTest.DoesAbc<br/>
-<span style="color:green">[ OK ]<span style="color:black"> FooTest.DoesAbc<br/>
-<span style="color:green">[----------]<span style="color:black"> 2 tests from BarTest<br/>
-<span style="color:green">[ RUN ]<span style="color:black"> BarTest.HasXyzProperty<br/>
-<span style="color:green">[ OK ]<span style="color:black"> BarTest.HasXyzProperty<br/>
-<span style="color:green">[ RUN ]<span style="color:black"> BarTest.ReturnsTrueOnSuccess<br/>
-... some error messages ...<br/>
-<span style="color:red">[ FAILED ] <span style="color:black">BarTest.ReturnsTrueOnSuccess<br/>
-...<br/>
-<span style="color:green">[==========]<span style="color:black"> 30 tests from 14 test suites ran.<br/>
-<span style="color:green">[ PASSED ]<span style="color:black"> 28 tests.<br/>
-<span style="color:red">[ FAILED ]<span style="color:black"> 2 tests, listed below:<br/>
-<span style="color:red">[ FAILED ]<span style="color:black"> BarTest.ReturnsTrueOnSuccess<br/>
-<span style="color:red">[ FAILED ]<span style="color:black"> AnotherTest.DoesXyz<br/>
+ <font color="green">[----------]</font><font color="black"> 1 test from
+ FooTest</font><br/>
+ <font color="green">[ RUN &nbsp; &nbsp; &nbsp;]</font><font color="black">
+ FooTest.DoesAbc</font><br/>
+ <font color="green">[ &nbsp; &nbsp; &nbsp; OK ]</font><font color="black">
+ FooTest.DoesAbc </font><br/>
+ <font color="green">[----------]</font><font color="black">
+ 2 tests from BarTest</font><br/>
+ <font color="green">[ RUN &nbsp; &nbsp; &nbsp;]</font><font color="black">
+ BarTest.HasXyzProperty </font><br/>
+ <font color="green">[ &nbsp; &nbsp; &nbsp; OK ]</font><font color="black">
+ BarTest.HasXyzProperty</font><br/>
+ <font color="green">[ RUN &nbsp; &nbsp; &nbsp;]</font><font color="black">
+ BarTest.ReturnsTrueOnSuccess ... some error messages ...</font><br/>
+ <font color="red">[ &nbsp; FAILED ]</font><font color="black">
+ BarTest.ReturnsTrueOnSuccess ...</font><br/>
+ <font color="green">[==========]</font><font color="black">
+ 30 tests from 14 test suites ran.</font><br/>
+ <font color="green">[ &nbsp; PASSED ]</font><font color="black">
+ 28 tests.</font><br/>
+ <font color="red">[ &nbsp; FAILED ]</font><font color="black">
+ 2 tests, listed below:</font><br/>
+ <font color="red">[ &nbsp; FAILED ]</font><font color="black">
+ BarTest.ReturnsTrueOnSuccess</font><br/>
+ <font color="red">[ &nbsp; FAILED ]</font><font color="black">
+ AnotherTest.DoesXyz<br/>
+<br/>
2 FAILED TESTS
+ </font>
+</code>
You can set the `GTEST_COLOR` environment variable or the `--gtest_color`
command line flag to `yes`, `no`, or `auto` (the default) to enable colors,
@@ -2247,16 +2243,12 @@ disable colors, or let googletest decide. When the value is `auto`, googletest
will use colors if and only if the output goes to a terminal and (on non-Windows
platforms) the `TERM` environment variable is set to `xterm` or `xterm-color`.
- **Availability**: Linux, Windows, Mac.
-
#### Suppressing the Elapsed Time
By default, googletest prints the time it takes to run each test. To disable
that, run the test program with the `--gtest_print_time=0` command line flag, or
set the GTEST_PRINT_TIME environment variable to `0`.
-**Availability**: Linux, Windows, Mac.
-
#### Suppressing UTF-8 Text Output
In case of assertion failures, googletest prints expected and actual values of
@@ -2266,7 +2258,6 @@ text because, for example, you don't have an UTF-8 compatible output medium, run
the test program with `--gtest_print_utf8=0` or set the `GTEST_PRINT_UTF8`
environment variable to `0`.
-**Availability**: Linux, Windows, Mac.
#### Generating an XML Report
@@ -2289,14 +2280,13 @@ program `foo_test` or `foo_test.exe`). If the file already exists (perhaps left
over from a previous run), googletest will pick a different name (e.g.
`foo_test_1.xml`) to avoid overwriting it.
-
The report is based on the `junitreport` Ant task. Since that format was
originally intended for Java, a little interpretation is required to make it
apply to googletest tests, as shown here:
```xml
<testsuites name="AllTests" ...>
- <testsuite name="test_suite_name" ...>
+ <testsuite name="test_case_name" ...>
<testcase name="test_name" ...>
<failure message="..."/>
<failure message="..."/>
@@ -2353,9 +2343,7 @@ Things to note:
* Each `<failure>` element corresponds to a single failed googletest
assertion.
-**Availability**: Linux, Windows, Mac.
-
-#### Generating an JSON Report
+#### Generating a JSON Report
googletest can also emit a JSON report as an alternative format to XML. To
generate the JSON report, set the `GTEST_OUTPUT` environment variable or the
@@ -2371,7 +2359,7 @@ The report format conforms to the following JSON Schema:
"$schema": "http://json-schema.org/schema#",
"type": "object",
"definitions": {
- "Testsuite": {
+ "TestCase": {
"type": "object",
"properties": {
"name": { "type": "string" },
@@ -2427,15 +2415,15 @@ The report format conforms to the following JSON Schema:
"testsuites": {
"type": "array",
"items": {
- "$ref": "#/definitions/Testsuite"
+ "$ref": "#/definitions/TestCase"
}
}
}
}
```
-The report uses the format that conforms to the following Proto3 using the [JSON
-encoding](https://developers.google.com/protocol-buffers/docs/proto3#json):
+The report format conforms to the following Proto3 specification, using the
+[JSON encoding](https://developers.google.com/protocol-buffers/docs/proto3#json):
```proto
syntax = "proto3";
@@ -2453,7 +2441,7 @@ message UnitTest {
google.protobuf.Timestamp timestamp = 5;
google.protobuf.Duration time = 6;
string name = 7;
- repeated Testsuite testsuites = 8;
+ repeated TestCase testsuites = 8;
}
message TestCase {
@@ -2554,8 +2542,6 @@ could generate this report:
IMPORTANT: The exact format of the JSON document is subject to change.
-**Availability**: Linux, Windows, Mac.
-
### Controlling How Failures Are Reported
#### Turning Assertion Failures into Break-Points
@@ -2565,11 +2551,9 @@ debugger can catch an assertion failure and automatically drop into interactive
mode. googletest's *break-on-failure* mode supports this behavior.
To enable it, set the `GTEST_BREAK_ON_FAILURE` environment variable to a value
-other than `0` . Alternatively, you can use the `--gtest_break_on_failure`
+other than `0`. Alternatively, you can use the `--gtest_break_on_failure`
command line flag.
-**Availability**: Linux, Windows, Mac.
-
#### Disabling Catching Test-Thrown Exceptions
googletest can be used either with or without exceptions enabled. If a test
@@ -2584,5 +2568,3 @@ to be handled by the debugger, such that you can examine the call stack when an
exception is thrown. To achieve that, set the `GTEST_CATCH_EXCEPTIONS`
environment variable to `0`, or use the `--gtest_catch_exceptions=0` flag when
running the tests.
-
-**Availability**: Linux, Windows, Mac.
diff --git a/googletest/docs/faq.md b/googletest/docs/faq.md
index 05baf23..9949fec 100644
--- a/googletest/docs/faq.md
+++ b/googletest/docs/faq.md
@@ -1,6 +1,6 @@
# Googletest FAQ
-<!-- GOOGLETEST_CM0013 DO NOT DELETE -->
+<!-- GOOGLETEST_CM0014 DO NOT DELETE -->
## Why should test suite names and test names not contain underscore?
@@ -8,7 +8,7 @@ Underscore (`_`) is special, as C++ reserves the following to be used by the
compiler and the standard library:
1. any identifier that starts with an `_` followed by an upper-case letter, and
-1. any identifier that contains two consecutive underscores (i.e. `__`)
+2. any identifier that contains two consecutive underscores (i.e. `__`)
*anywhere* in its name.
User code is *prohibited* from using such identifiers.
@@ -22,11 +22,11 @@ contains `_`?
1. If `TestSuiteName` starts with an `_` followed by an upper-case letter (say,
`_Foo`), we end up with `_Foo_TestName_Test`, which is reserved and thus
invalid.
-1. If `TestSuiteName` ends with an `_` (say, `Foo_`), we get
+2. If `TestSuiteName` ends with an `_` (say, `Foo_`), we get
`Foo__TestName_Test`, which is invalid.
-1. If `TestName` starts with an `_` (say, `_Bar`), we get
+3. If `TestName` starts with an `_` (say, `_Bar`), we get
`TestSuiteName__Bar_Test`, which is invalid.
-1. If `TestName` ends with an `_` (say, `Bar_`), we get
+4. If `TestName` ends with an `_` (say, `Bar_`), we get
`TestSuiteName_Bar__Test`, which is invalid.
So clearly `TestSuiteName` and `TestName` cannot start or end with `_`
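To make the name-pasting concrete, a sketch of what the macro arguments turn
into:

```cpp
#include "gtest/gtest.h"

// TEST(TestSuiteName, TestName) defines a class named
// TestSuiteName_TestName_Test by pasting the two arguments together.
TEST(FooTest, Bar) {}  // OK: defines class FooTest_Bar_Test.

// TEST(Foo_, Bar) would paste into Foo__Bar_Test, whose double
// underscore makes the identifier reserved -- hence the rule above.
```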
@@ -263,7 +263,7 @@ If necessary, you can continue to derive test fixtures from a derived fixture.
googletest has no limit on how deep the hierarchy can be.
For a complete example using derived test fixtures, see
-[googletest sample](https://github.com/google/googletest/blob/master/googletest/samples/sample5_unittest.cc)
+[sample5_unittest.cc](../samples/sample5_unittest.cc).
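A minimal sketch of such a hierarchy (the class and member names here are
invented for illustration):

```cpp
#include "gtest/gtest.h"

class BaseTest : public ::testing::Test {
 protected:
  void SetUp() override { shared_value_ = 42; }
  int shared_value_ = 0;
};

// The derived fixture inherits BaseTest's members and set-up logic.
class DerivedTest : public BaseTest {
 protected:
  void SetUp() override {
    BaseTest::SetUp();  // Chain to the base fixture explicitly.
    doubled_ = shared_value_ * 2;
  }
  int doubled_ = 0;
};

TEST_F(DerivedTest, SeesBothFixtures) {
  EXPECT_EQ(42, shared_value_);
  EXPECT_EQ(84, doubled_);
}
```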
## My compiler complains "void value not ignored as it ought to be." What does this mean?
@@ -332,7 +332,7 @@ You may still want to use `SetUp()/TearDown()` in the following cases:
* In the body of a constructor (or destructor), it's not possible to use the
`ASSERT_xx` macros. Therefore, if the set-up operation could cause a fatal
test failure that should prevent the test from running, it's necessary to
- use `abort` <!-- GOOGLETEST_CM0014 DO NOT DELETE --> and abort the whole test executable,
+ use `abort` <!-- GOOGLETEST_CM0015 DO NOT DELETE --> and abort the whole test executable,
or to use `SetUp()` instead of a constructor.
* If the tear-down operation could throw an exception, you must use
`TearDown()` as opposed to the destructor, as throwing in a destructor leads
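For instance, a fatal set-up failure is straightforward to express in
`SetUp()` but, per the first bullet, impossible in a constructor; a sketch
with an invented file name:

```cpp
#include <cstdio>

#include "gtest/gtest.h"

class FileTest : public ::testing::Test {
 protected:
  void SetUp() override {
    file_ = std::fopen("testdata.txt", "r");
    // A fatal assertion here aborts SetUp() and skips the test body;
    // as noted above, ASSERT_* cannot be used in a constructor.
    ASSERT_NE(file_, nullptr);
  }
  void TearDown() override {
    if (file_ != nullptr) std::fclose(file_);
  }
  FILE* file_ = nullptr;
};
```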
@@ -524,8 +524,8 @@ There are several good reasons:
contaminating others, making debugging difficult. By using fixtures, each
test has a fresh set of variables that's different (but with the same
names). Thus, tests are kept independent of each other.
-1. Global variables pollute the global namespace.
-1. Test fixtures can be reused via subclassing, which cannot be done easily
+2. Global variables pollute the global namespace.
+3. Test fixtures can be reused via subclassing, which cannot be done easily
with global variables. This is useful if many test suites have something in
common.
diff --git a/googletest/docs/pkgconfig.md b/googletest/docs/pkgconfig.md
index 5ad1f01..b775873 100644
--- a/googletest/docs/pkgconfig.md
+++ b/googletest/docs/pkgconfig.md
@@ -1,25 +1,24 @@
-## Using GoogleTest from various build systems ##
+## Using GoogleTest from various build systems
GoogleTest comes with pkg-config files that can be used to determine all
necessary flags for compiling and linking to GoogleTest (and GoogleMock).
Pkg-config is a standardised plain-text format containing
- * the includedir (-I) path
- * necessary macro (-D) definitions
- * further required flags (-pthread)
- * the library (-L) path
- * the library (-l) to link to
+* the includedir (-I) path
+* necessary macro (-D) definitions
+* further required flags (-pthread)
+* the library (-L) path
+* the library (-l) to link to
-All current build systems support pkg-config in one way or another. For
-all examples here we assume you want to compile the sample
+All current build systems support pkg-config in one way or another. For all
+examples here we assume you want to compile the sample
`samples/sample3_unittest.cc`.
-
-### CMake ###
+### CMake
Using `pkg-config` in CMake is fairly easy:
-``` cmake
+```cmake
cmake_minimum_required(VERSION 3.0)
cmake_policy(SET CMP0048 NEW)
@@ -43,11 +42,10 @@ that all libraries have been compiled with threading enabled. In addition,
GoogleTest might also require `-pthread` in the compiling step, and as such
splitting the pkg-config `Cflags` variable into include dirs and macros for
`target_compile_definitions()` might still miss this). The same recommendation
-goes for using `_LDFLAGS` over the more commonplace `_LIBRARIES`, which
-happens to discard `-L` flags and `-pthread`.
-
+goes for using `_LDFLAGS` over the more commonplace `_LIBRARIES`, which happens
+to discard `-L` flags and `-pthread`.
-### Autotools ###
+### Autotools
Finding GoogleTest in Autoconf and using it from Automake is also fairly easy:
@@ -77,8 +75,7 @@ testapp_CXXFLAGS = $(GTEST_CFLAGS)
testapp_LDADD = $(GTEST_LIBS)
```
-
-### Meson ###
+### Meson
Meson natively uses pkgconfig to query dependencies:
@@ -96,13 +93,12 @@ testapp = executable(
test('first_and_only_test', testapp)
```
+### Plain Makefiles
-### Plain Makefiles ###
+Since `pkg-config` is a small Unix command-line utility, it can be used in
+handwritten `Makefile`s too:
-Since `pkg-config` is a small Unix command-line utility, it can be used
-in handwritten `Makefile`s too:
-
-``` Makefile
+```Makefile
GTEST_CFLAGS = `pkg-config --cflags gtest_main`
GTEST_LIBS = `pkg-config --libs gtest_main`
@@ -120,12 +116,11 @@ testapp.o: samples/sample3_unittest.cc
$(CXX) $(CPPFLAGS) $(CXXFLAGS) $< -c -o $@ $(GTEST_CFLAGS)
```
-
-### Help! pkg-config can't find GoogleTest! ###
+### Help! pkg-config can't find GoogleTest!
Let's say you have a `CMakeLists.txt` along the lines of the one in this
-tutorial and you try to run `cmake`. It is very possible that you get a
-failure along the lines of:
+tutorial and you try to run `cmake`. It is very possible that you get a failure
+along the lines of:
```
-- Checking for one of the modules 'gtest_main'
@@ -135,9 +130,9 @@ CMake Error at /usr/share/cmake/Modules/FindPkgConfig.cmake:640 (message):
These failures are common if you installed GoogleTest yourself and have not
sourced it from a distro or other package manager. If so, you need to tell
-pkg-config where it can find the `.pc` files containing the information.
-Say you installed GoogleTest to `/usr/local`, then it might be that the
-`.pc` files are installed under `/usr/local/lib64/pkgconfig`. If you set
+pkg-config where it can find the `.pc` files containing the information. Say you
+installed GoogleTest to `/usr/local`, then it might be that the `.pc` files are
+installed under `/usr/local/lib64/pkgconfig`. If you set
```
export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig
diff --git a/googletest/docs/primer.md b/googletest/docs/primer.md
index ba17ce8..30d86a5 100644
--- a/googletest/docs/primer.md
+++ b/googletest/docs/primer.md
@@ -15,26 +15,26 @@ So what makes a good test, and how does googletest fit in? We believe:
that succeeds or fails as a result of other tests. googletest isolates the
tests by running each of them on a different object. When a test fails,
googletest allows you to run it in isolation for quick debugging.
-1. Tests should be well *organized* and reflect the structure of the tested
+2. Tests should be well *organized* and reflect the structure of the tested
code. googletest groups related tests into test suites that can share data
and subroutines. This common pattern is easy to recognize and makes tests
easy to maintain. Such consistency is especially helpful when people switch
projects and start to work on a new code base.
-1. Tests should be *portable* and *reusable*. Google has a lot of code that is
+3. Tests should be *portable* and *reusable*. Google has a lot of code that is
   platform-neutral; its tests should also be platform-neutral. googletest
works on different OSes, with different compilers, with or without
exceptions, so googletest tests can work with a variety of configurations.
-1. When tests fail, they should provide as much *information* about the problem
+4. When tests fail, they should provide as much *information* about the problem
as possible. googletest doesn't stop at the first test failure. Instead, it
only stops the current test and continues with the next. You can also set up
tests that report non-fatal failures after which the current test continues.
Thus, you can detect and fix multiple bugs in a single run-edit-compile
cycle.
-1. The testing framework should liberate test writers from housekeeping chores
+5. The testing framework should liberate test writers from housekeeping chores
and let them focus on the test *content*. googletest automatically keeps
track of all tests defined, and doesn't require the user to enumerate them
in order to run them.
-1. Tests should be *fast*. With googletest, you can reuse shared resources
+6. Tests should be *fast*. With googletest, you can reuse shared resources
across tests and pay for the set-up/tear-down only once, without making
tests depend on each other.
@@ -51,26 +51,34 @@ of misunderstanding these.
Historically, googletest started to use the term _Test Case_ for grouping
related tests, whereas current publications including the International Software
Testing Qualifications Board ([ISTQB](http://www.istqb.org/)) and various
-textbooks on Software Quality use the term _[Test
-Suite](http://glossary.istqb.org/search/test%20suite)_ for this.
+textbooks on Software Quality use the term _[Test Suite][istqb test suite]_ for
+this.
The related term _Test_, as it is used in googletest, corresponds to
-the term _[Test Case](http://glossary.istqb.org/search/test%20case)_ of ISTQB
-and others.
+the term _[Test Case][istqb test case]_ of ISTQB and others.
The term _Test_ is commonly broad enough to include ISTQB's definition of
_Test Case_, so it's not much of a problem here. But the term _Test Case_ as
it was used in Google Test has a contradictory sense and is thus confusing.
-googletest recently started replacing the term _Test Case_ by _Test Suite_ The
-preferred API is TestSuite*. The older TestCase* API is being slowly deprecated
-and refactored away
+googletest recently started replacing the term _Test Case_ with _Test Suite_.
+The preferred API is *TestSuite*. The older TestCase API is being slowly
+deprecated and refactored away.
So please be aware of the different definitions of the terms:
-Meaning | googletest Term | [ISTQB](http://www.istqb.org/) Term
-:----------------------------------------------------------------------------------- | :---------------------- | :----------------------------------
-Exercise a particular program path with specific input values and verify the results | [TEST()](#simple-tests) | [Test Case](http://glossary.istqb.org/search/test%20case)
+| Meaning                                                                               | googletest Term         | [ISTQB](http://www.istqb.org/) Term |
+| :------------------------------------------------------------------------------------ | :---------------------- | :----------------------------------- |
+| Exercise a particular program path with specific input values and verify the results  | [TEST()](#simple-tests) | [Test Case][istqb test case]          |
+
+[istqb test case]: http://glossary.istqb.org/en/search/test%20case
+[istqb test suite]: http://glossary.istqb.org/en/search/test%20suite
## Basic Concepts
@@ -164,7 +172,7 @@ you'll get a compiler error. We used to require the arguments to support the
`<<` is supported, it will be called to print the arguments when the assertion
fails; otherwise googletest will attempt to print them in the best way it can.
For more details and how to customize the printing of the arguments, see
-[documentation](https://github.com/google/googletest/blob/master/googlemock/docs/cook_book.md#teaching-gmock-how-to-print-your-values)
+[documentation](../../googlemock/docs/cook_book.md#teaching-gmock-how-to-print-your-values)
These assertions can work with a user-defined type, but only if you define the
corresponding comparison operator (e.g. `==`, `<`, etc). Since this is
@@ -193,7 +201,7 @@ objects, you should use `ASSERT_EQ`.
When doing pointer comparisons use `*_EQ(ptr, nullptr)` and `*_NE(ptr, nullptr)`
instead of `*_EQ(ptr, NULL)` and `*_NE(ptr, NULL)`. This is because `nullptr` is
-typed while `NULL` is not. See [FAQ](faq.md)for more details.
+typed while `NULL` is not. See [FAQ](faq.md) for more details.
If you're working with floating point numbers, you may want to use the floating
point variations of some of these macros in order to avoid problems caused by
@@ -213,18 +221,16 @@ as `ASSERT_EQ(expected, actual)`, so lots of existing code uses this order. Now
The assertions in this group compare two **C strings**. If you want to compare
two `string` objects, use `EXPECT_EQ`, `EXPECT_NE`, etc. instead.
-| Fatal assertion | Nonfatal assertion | Verifies |
-| ----------------------- | ----------------------- | ---------------------- |
-| `ASSERT_STREQ(str1, | `EXPECT_STREQ(str1, | the two C strings have |
-: str2);` : str2);` : the same content :
-| `ASSERT_STRNE(str1, | `EXPECT_STRNE(str1, | the two C strings have |
-: str2);` : str2);` : different contents :
-| `ASSERT_STRCASEEQ(str1, | `EXPECT_STRCASEEQ(str1, | the two C strings have |
-: str2);` : str2);` : the same content, :
-: : : ignoring case :
-| `ASSERT_STRCASENE(str1, | `EXPECT_STRCASENE(str1, | the two C strings have |
-: str2);` : str2);` : different contents, :
-: : : ignoring case :
+<!-- mdformat off(github rendering does not support multiline tables) -->
+
+| Fatal assertion | Nonfatal assertion | Verifies |
+| -------------------------- | ------------------------------ | -------------------------------------------------------- |
+| `ASSERT_STREQ(str1,str2);` | `EXPECT_STREQ(str1,str2);` | the two C strings have the same content |
+| `ASSERT_STRNE(str1,str2);` | `EXPECT_STRNE(str1,str2);` | the two C strings have different contents |
+| `ASSERT_STRCASEEQ(str1,str2);` | `EXPECT_STRCASEEQ(str1,str2);` | the two C strings have the same content, ignoring case |
+| `ASSERT_STRCASENE(str1,str2);` | `EXPECT_STRCASENE(str1,str2);` | the two C strings have different contents, ignoring case |
+
+<!-- mdformat on-->
Note that "CASE" in an assertion name means that case is ignored. A `NULL`
pointer and an empty string are considered *different*.
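For example (using string literals, so the expected values are unambiguous):

```cpp
#include "gtest/gtest.h"

TEST(CStringAssertionTest, ComparesContentNotPointers) {
  const char actual[] = "hello";
  // EXPECT_STREQ compares the characters; EXPECT_EQ on two char
  // pointers would compare the addresses instead.
  EXPECT_STREQ("hello", actual);
  EXPECT_STRCASEEQ("HELLO", actual);  // "CASE": case differences are ignored.
}
```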
@@ -235,9 +241,8 @@ of two wide strings fails, their values will be printed as UTF-8 narrow strings.
**Availability**: Linux, Windows, Mac.
**See also**: For more string comparison tricks (substring, prefix, suffix, and
-regular expression matching, for example), see
-[this](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md)
-in the Advanced googletest Guide.
+regular expression matching, for example), see [this](advanced.md) in the
+Advanced googletest Guide.
## Simple Tests
@@ -245,9 +250,9 @@ To create a test:
1. Use the `TEST()` macro to define and name a test function. These are
ordinary C++ functions that don't return a value.
-1. In this function, along with any valid C++ statements you want to include,
+2. In this function, along with any valid C++ statements you want to include,
use the various googletest assertions to check values.
-1. The test's result is determined by the assertions; if any assertion in the
+3. The test's result is determined by the assertions; if any assertion in the
test fails (either fatally or non-fatally), or if the test crashes, the
entire test fails. Otherwise, it succeeds.
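Putting the three steps together, the canonical example (a `Factorial`
function is assumed to be declared elsewhere):

```cpp
#include "gtest/gtest.h"

int Factorial(int n);  // Assumed: the function under test.

// Step 1: TEST() names the test suite (FactorialTest) and the test.
TEST(FactorialTest, HandlesZeroInput) {
  // Steps 2 and 3: the assertion checks a value and decides the result.
  EXPECT_EQ(Factorial(0), 1);
}
```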
@@ -309,16 +314,16 @@ To create a fixture:
1. Derive a class from `::testing::Test`. Start its body with `protected:` as
we'll want to access fixture members from sub-classes.
-1. Inside the class, declare any objects you plan to use.
-1. If necessary, write a default constructor or `SetUp()` function to prepare
+2. Inside the class, declare any objects you plan to use.
+3. If necessary, write a default constructor or `SetUp()` function to prepare
the objects for each test. A common mistake is to spell `SetUp()` as
   **`Setup()`** with a small `u`. Use `override` in C++11 to make sure you
   spelled it correctly.
-1. If necessary, write a destructor or `TearDown()` function to release any
+4. If necessary, write a destructor or `TearDown()` function to release any
   resources you allocated in `SetUp()`. To learn when you should use the
constructor/destructor and when you should use `SetUp()/TearDown()`, read
the [FAQ](faq.md).
-1. If needed, define subroutines for your tests to share.
+5. If needed, define subroutines for your tests to share.
When using a fixture, use `TEST_F()` instead of `TEST()` as it allows you to
access objects and subroutines in the test fixture:
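A minimal sketch of the pattern (the fixture member here is invented for
illustration; the full example that follows uses a `Queue` class):

```cpp
#include "gtest/gtest.h"

class CounterTest : public ::testing::Test {
 protected:
  void SetUp() override { count_ = 1; }
  int count_ = 0;
};

// The first argument of TEST_F() must be the name of the fixture class.
TEST_F(CounterTest, StartsAtOne) {
  EXPECT_EQ(1, count_);  // Fixture members are accessed directly.
}
```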
@@ -422,11 +427,11 @@ would lead to a segfault when `n` is `NULL`.
When these tests run, the following happens:
1. googletest constructs a `QueueTest` object (let's call it `t1`).
-1. `t1.SetUp()` initializes `t1` .
-1. The first test ( `IsEmptyInitially` ) runs on `t1` .
-1. `t1.TearDown()` cleans up after the test finishes.
-1. `t1` is destructed.
-1. The above steps are repeated on another `QueueTest` object, this time
+2. `t1.SetUp()` initializes `t1`.
+3. The first test (`IsEmptyInitially`) runs on `t1`.
+4. `t1.TearDown()` cleans up after the test finishes.
+5. `t1` is destructed.
+6. The above steps are repeated on another `QueueTest` object, this time
running the `DequeueWorks` test.
**Availability**: Linux, Windows, Mac.
@@ -456,7 +461,7 @@ When invoked, the `RUN_ALL_TESTS()` macro:
* Deletes the fixture.
-* Restores the state of all all googletest flags
+* Restores the state of all googletest flags.
* Repeats the above steps for the next test, until all tests have run.
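In practice that means a `main` like the standard boilerplate below (linking
against `gtest_main` provides an equivalent one for you):

```cpp
#include "gtest/gtest.h"

int main(int argc, char **argv) {
  // Parses googletest flags and removes them from argv.
  ::testing::InitGoogleTest(&argc, argv);
  // Runs every registered test and returns 0 only if all of them pass.
  return RUN_ALL_TESTS();
}
```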
diff --git a/googletest/docs/pump_manual.md b/googletest/docs/pump_manual.md
index 3ec428e..10b3c5f 100644
--- a/googletest/docs/pump_manual.md
+++ b/googletest/docs/pump_manual.md
@@ -1,52 +1,51 @@
-
-
<b>P</b>ump is <b>U</b>seful for <b>M</b>eta <b>P</b>rogramming.
-# The Problem #
+# The Problem
-Template and macro libraries often need to define many classes,
-functions, or macros that vary only (or almost only) in the number of
-arguments they take. It's a lot of repetitive, mechanical, and
-error-prone work.
+Template and macro libraries often need to define many classes, functions, or
+macros that vary only (or almost only) in the number of arguments they take.
+It's a lot of repetitive, mechanical, and error-prone work.
-Variadic templates and variadic macros can alleviate the problem.
-However, while both are being considered by the C++ committee, neither
-is in the standard yet or widely supported by compilers. Thus they
-are often not a good choice, especially when your code needs to be
-portable. And their capabilities are still limited.
+Variadic templates and variadic macros can alleviate the problem. However, while
+both are being considered by the C++ committee, neither is in the standard yet
+or widely supported by compilers. Thus they are often not a good choice,
+especially when your code needs to be portable. And their capabilities are still
+limited.
-As a result, authors of such libraries often have to write scripts to
-generate their implementation. However, our experience is that it's
-tedious to write such scripts, which tend to reflect the structure of
-the generated code poorly and are often hard to read and edit. For
-example, a small change needed in the generated code may require some
-non-intuitive, non-trivial changes in the script. This is especially
-painful when experimenting with the code.
+As a result, authors of such libraries often have to write scripts to generate
+their implementation. However, our experience is that it's tedious to write such
+scripts, which tend to reflect the structure of the generated code poorly and
+are often hard to read and edit. For example, a small change needed in the
+generated code may require some non-intuitive, non-trivial changes in the
+script. This is especially painful when experimenting with the code.
-# Our Solution #
+# Our Solution
Pump (for Pump is Useful for Meta Programming, Pretty Useful for Meta
-Programming, or Practical Utility for Meta Programming, whichever you
-prefer) is a simple meta-programming tool for C++. The idea is that a
-programmer writes a `foo.pump` file which contains C++ code plus meta
-code that manipulates the C++ code. The meta code can handle
-iterations over a range, nested iterations, local meta variable
-definitions, simple arithmetic, and conditional expressions. You can
-view it as a small Domain-Specific Language. The meta language is
-designed to be non-intrusive (s.t. it won't confuse Emacs' C++ mode,
-for example) and concise, making Pump code intuitive and easy to
-maintain.
-
-## Highlights ##
-
- * The implementation is in a single Python script and thus ultra portable: no build or installation is needed and it works cross platforms.
- * Pump tries to be smart with respect to [Google's style guide](https://github.com/google/styleguide): it breaks long lines (easy to have when they are generated) at acceptable places to fit within 80 columns and indent the continuation lines correctly.
- * The format is human-readable and more concise than XML.
- * The format works relatively well with Emacs' C++ mode.
-
-## Examples ##
-
-The following Pump code (where meta keywords start with `$`, `[[` and `]]` are meta brackets, and `$$` starts a meta comment that ends with the line):
+Programming, or Practical Utility for Meta Programming, whichever you prefer) is
+a simple meta-programming tool for C++. The idea is that a programmer writes a
+`foo.pump` file which contains C++ code plus meta code that manipulates the C++
+code. The meta code can handle iterations over a range, nested iterations, local
+meta variable definitions, simple arithmetic, and conditional expressions. You
+can view it as a small Domain-Specific Language. The meta language is designed
+to be non-intrusive (so that it won't confuse Emacs' C++ mode, for example) and
+concise, making Pump code intuitive and easy to maintain.
+
+## Highlights
+
+* The implementation is in a single Python script and thus ultra portable: no
+  build or installation is needed and it works across platforms.
+* Pump tries to be smart with respect to
+ [Google's style guide](https://github.com/google/styleguide): it breaks long
+ lines (easy to have when they are generated) at acceptable places to fit
+  within 80 columns and indents the continuation lines correctly.
+* The format is human-readable and more concise than XML.
+* The format works relatively well with Emacs' C++ mode.
+
+## Examples
+
+The following Pump code (where meta keywords start with `$`, `[[` and `]]` are
+meta brackets, and `$$` starts a meta comment that ends with the line):
```
$var n = 3 $$ Defines a meta variable n.
@@ -71,7 +70,7 @@ $if i == 0 [[
will be translated by the Pump compiler to:
-``` cpp
+```cpp
// Foo0 does blah for 0-ary predicates.
template <size_t N>
class Foo0 {
@@ -105,9 +104,10 @@ Func($for i + [[a$i]]);
$$ The text between i and [[ is the separator between iterations.
```
-will generate one of the following lines (without the comments), depending on the value of `n`:
+will generate one of the following lines (without the comments), depending on
+the value of `n`:
-``` cpp
+```cpp
Func(); // If n is 0.
Func(a1); // If n is 1.
Func(a1 + a2); // If n is 2.
@@ -115,32 +115,38 @@ Func(a1 + a2 + a3); // If n is 3.
// And so on...
```
-## Constructs ##
+## Constructs
We support the following meta programming constructs:
-| `$var id = exp` | Defines a named constant value. `$id` is valid util the end of the current meta lexical block. |
-|:----------------|:-----------------------------------------------------------------------------------------------|
-| `$range id exp..exp` | Sets the range of an iteration variable, which can be reused in multiple loops later. |
-| `$for id sep [[ code ]]` | Iteration. The range of `id` must have been defined earlier. `$id` is valid in `code`. |
-| `$($)` | Generates a single `$` character. |
-| `$id` | Value of the named constant or iteration variable. |
-| `$(exp)` | Value of the expression. |
-| `$if exp [[ code ]] else_branch` | Conditional. |
-| `[[ code ]]` | Meta lexical block. |
-| `cpp_code` | Raw C++ code. |
-| `$$ comment` | Meta comment. |
-
-**Note:** To give the user some freedom in formatting the Pump source
-code, Pump ignores a new-line character if it's right after `$for foo`
-or next to `[[` or `]]`. Without this rule you'll often be forced to write
-very long lines to get the desired output. Therefore sometimes you may
-need to insert an extra new-line in such places for a new-line to show
-up in your output.
-
-## Grammar ##
-
-``` ebnf
+| `$var id = exp`                  | Defines a named constant value. `$id` is valid until the end of the current meta lexical block. |
+| :------------------------------- | :----------------------------------------------------------------------------------------------- |
+| `$range id exp..exp`             | Sets the range of an iteration variable, which can be reused in multiple loops later.             |
+| `$for id sep [[ code ]]`         | Iteration. The range of `id` must have been defined earlier. `$id` is valid in `code`.            |
+| `$($)`                           | Generates a single `$` character.                                                                  |
+| `$id`                            | Value of the named constant or iteration variable.                                                 |
+| `$(exp)`                         | Value of the expression.                                                                           |
+| `$if exp [[ code ]] else_branch` | Conditional.                                                                                       |
+| `[[ code ]]`                     | Meta lexical block.                                                                                |
+| `cpp_code`                       | Raw C++ code.                                                                                      |
+| `$$ comment`                     | Meta comment.                                                                                      |
+
+**Note:** To give the user some freedom in formatting the Pump source code, Pump
+ignores a new-line character if it's right after `$for foo` or next to `[[` or
+`]]`. Without this rule you'll often be forced to write very long lines to get
+the desired output. Therefore sometimes you may need to insert an extra new-line
+in such places for a new-line to show up in your output.
+
+## Grammar
+
+```ebnf
code ::= atomic_code*
atomic_code ::= $var id = exp
| $var id = [[ code ]]
@@ -159,19 +165,26 @@ else_branch ::= $else [[ code ]]
exp ::= simple_expression_in_Python_syntax
```
-## Code ##
+## Code
-You can find the source code of Pump in [scripts/pump.py](../scripts/pump.py). It is still
-very unpolished and lacks automated tests, although it has been
-successfully used many times. If you find a chance to use it in your
-project, please let us know what you think! We also welcome help on
-improving Pump.
+You can find the source code of Pump in [scripts/pump.py](../scripts/pump.py).
+It is still very unpolished and lacks automated tests, although it has been
+successfully used many times. If you find a chance to use it in your project,
+please let us know what you think! We also welcome help on improving Pump.
-## Real Examples ##
+## Real Examples
-You can find real-world applications of Pump in [Google Test](https://github.com/google/googletest/tree/master/googletest) and [Google Mock](https://github.com/google/googletest/tree/master/googlemock). The source file `foo.h.pump` generates `foo.h`.
+You can find real-world applications of Pump in
+[Google Test](https://github.com/google/googletest/tree/master/googletest) and
+[Google Mock](https://github.com/google/googletest/tree/master/googlemock). The
+source file `foo.h.pump` generates `foo.h`.
-## Tips ##
+## Tips
- * If a meta variable is followed by a letter or digit, you can separate them using `[[]]`, which inserts an empty string. For example `Foo$j[[]]Helper` generate `Foo1Helper` when `j` is 1.
- * To avoid extra-long Pump source lines, you can break a line anywhere you want by inserting `[[]]` followed by a new line. Since any new-line character next to `[[` or `]]` is ignored, the generated code won't contain this new line.
+* If a meta variable is followed by a letter or digit, you can separate them
+ using `[[]]`, which inserts an empty string. For example `Foo$j[[]]Helper`
+  generates `Foo1Helper` when `j` is 1.
+* To avoid extra-long Pump source lines, you can break a line anywhere you
+ want by inserting `[[]]` followed by a new line. Since any new-line
+ character next to `[[` or `]]` is ignored, the generated code won't contain
+ this new line.
diff --git a/googletest/docs/samples.md b/googletest/docs/samples.md
index eebdf37..aaa5883 100644
--- a/googletest/docs/samples.md
+++ b/googletest/docs/samples.md
@@ -1,7 +1,7 @@
-# Googletest Samples
+# Googletest Samples {#samples}
-If you're like us, you'd like to look at [googletest
-samples.](https://github.com/google/googletest/tree/master/googletest/samples)
+If you're like us, you'd like to look at
+[googletest samples](https://github.com/google/googletest/tree/master/googletest/samples).
The sample directory has a number of well-commented samples showing how to use a
variety of googletest features.
diff --git a/googletest/include/gtest/gtest-death-test.h b/googletest/include/gtest/gtest-death-test.h
index 0eb5b27..cec9629 100644
--- a/googletest/include/gtest/gtest-death-test.h
+++ b/googletest/include/gtest/gtest-death-test.h
@@ -276,7 +276,7 @@ class GTEST_API_ KilledBySignal {
// This macro is used for implementing macros such as
// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
// death tests are not supported. Those macros must compile on such systems
-// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// if EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
// systems that support death tests. This allows one to write such a macro
// on a system that does not support death tests and be sure that it will
// compile on a death-test supporting system. It is exposed publicly so that
@@ -289,7 +289,7 @@ class GTEST_API_ KilledBySignal {
// for program termination. This macro has to make sure this
// statement is compiled but not executed, to ensure that
// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
-// parameter iff EXPECT_DEATH compiles with it.
+// parameter if EXPECT_DEATH compiles with it.
// regex - A regex that a macro such as EXPECT_DEATH would use to test
// the output of statement. This parameter has to be
// compiled but not evaluated by this macro, to ensure that
diff --git a/googletest/include/gtest/gtest-matchers.h b/googletest/include/gtest/gtest-matchers.h
index 6e73ba1..7711178 100644
--- a/googletest/include/gtest/gtest-matchers.h
+++ b/googletest/include/gtest/gtest-matchers.h
@@ -95,7 +95,7 @@ class MatchResultListener {
// Returns the underlying ostream.
::std::ostream* stream() { return stream_; }
- // Returns true iff the listener is interested in an explanation of
+ // Returns true if the listener is interested in an explanation of
// the match result. A matcher's MatchAndExplain() method can use
// this information to avoid generating the explanation when no one
// intends to hear it.
@@ -140,7 +140,7 @@ class MatcherDescriberInterface {
template <typename T>
class MatcherInterface : public MatcherDescriberInterface {
public:
- // Returns true iff the matcher matches x; also explains the match
+ // Returns true if the matcher matches x; also explains the match
// result to 'listener' if necessary (see the next paragraph), in
// the form of a non-restrictive relative clause ("which ...",
// "whose ...", etc) that describes x. For example, the
@@ -257,13 +257,13 @@ class StreamMatchResultListener : public MatchResultListener {
template <typename T>
class MatcherBase {
public:
- // Returns true iff the matcher matches x; also explains the match
+ // Returns true if the matcher matches x; also explains the match
// result to 'listener'.
bool MatchAndExplain(const T& x, MatchResultListener* listener) const {
return impl_->MatchAndExplain(x, listener);
}
- // Returns true iff this matcher matches x.
+ // Returns true if this matcher matches x.
bool Matches(const T& x) const {
DummyMatchResultListener dummy;
return MatchAndExplain(x, &dummy);
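To see how these pieces fit together, here is a sketch of a hand-written
matcher (adapted from the divisibility example in the gMock docs;
`EXPECT_THAT` comes from gMock):

```cpp
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::MakeMatcher;
using ::testing::Matcher;
using ::testing::MatcherInterface;
using ::testing::MatchResultListener;

class DivisibleBy7Matcher : public MatcherInterface<int> {
 public:
  bool MatchAndExplain(int n, MatchResultListener* listener) const override {
    // Build the explanation only when someone intends to hear it.
    if (listener->IsInterested()) *listener << "the remainder is " << (n % 7);
    return (n % 7) == 0;
  }
  void DescribeTo(std::ostream* os) const override {
    *os << "is divisible by 7";
  }
  void DescribeNegationTo(std::ostream* os) const override {
    *os << "isn't divisible by 7";
  }
};

Matcher<int> DivisibleBy7() { return MakeMatcher(new DivisibleBy7Matcher); }

TEST(MatcherTest, Divisibility) { EXPECT_THAT(21, DivisibleBy7()); }
```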
diff --git a/googletest/include/gtest/gtest-test-part.h b/googletest/include/gtest/gtest-test-part.h
index 1e1cb09..4f189b6 100644
--- a/googletest/include/gtest/gtest-test-part.h
+++ b/googletest/include/gtest/gtest-test-part.h
@@ -87,19 +87,19 @@ class GTEST_API_ TestPartResult {
// Gets the message associated with the test part.
const char* message() const { return message_.c_str(); }
- // Returns true iff the test part was skipped.
+ // Returns true if the test part was skipped.
bool skipped() const { return type_ == kSkip; }
- // Returns true iff the test part passed.
+ // Returns true if the test part passed.
bool passed() const { return type_ == kSuccess; }
- // Returns true iff the test part non-fatally failed.
+ // Returns true if the test part non-fatally failed.
bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
- // Returns true iff the test part fatally failed.
+ // Returns true if the test part fatally failed.
bool fatally_failed() const { return type_ == kFatalFailure; }
- // Returns true iff the test part failed.
+ // Returns true if the test part failed.
bool failed() const { return fatally_failed() || nonfatally_failed(); }
private:
diff --git a/googletest/include/gtest/gtest.h b/googletest/include/gtest/gtest.h
index 0cb5a2e..fbff73e 100644
--- a/googletest/include/gtest/gtest.h
+++ b/googletest/include/gtest/gtest.h
@@ -308,7 +308,7 @@ class GTEST_API_ AssertionResult {
return *this;
}
- // Returns true iff the assertion succeeded.
+ // Returns true if the assertion succeeded.
operator bool() const { return success_; } // NOLINT
// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
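This is what lets a custom predicate carry its own failure message; the
documented `IsEven` pattern is a good illustration:

```cpp
#include "gtest/gtest.h"

::testing::AssertionResult IsEven(int n) {
  if ((n % 2) == 0) return ::testing::AssertionSuccess();
  return ::testing::AssertionFailure() << n << " is odd";
}

TEST(NumberTest, Evenness) {
  EXPECT_TRUE(IsEven(4));
  // A failing call such as IsEven(3) would report "3 is odd"
  // rather than just "false".
  EXPECT_TRUE(IsEven(2 + 2));
}
```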
@@ -412,6 +412,8 @@ class GTEST_API_ Test {
// test in test case Foo. Hence a sub-class can define its own
// SetUpTestSuite() method to shadow the one defined in the super
// class.
+ // Failures that happen during SetUpTestSuite are logged but otherwise
+ // ignored.
static void SetUpTestSuite() {}
// Tears down the stuff shared by all tests in this test suite.
@@ -420,6 +422,8 @@ class GTEST_API_ Test {
// test in test case Foo. Hence a sub-class can define its own
// TearDownTestSuite() method to shadow the one defined in the super
// class.
+ // Failures that happen during TearDownTestSuite are logged but otherwise
+ // ignored.
static void TearDownTestSuite() {}
// Legacy API is deprecated but still available
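A sketch of these per-suite hooks in use (the `Database` type is an invented
stand-in for any expensive shared resource):

```cpp
#include "gtest/gtest.h"

struct Database {};  // Invented stand-in for an expensive resource.

class DatabaseTest : public ::testing::Test {
 protected:
  // Runs once before the first test in the suite...
  static void SetUpTestSuite() { db_ = new Database; }
  // ...and once after the last one.
  static void TearDownTestSuite() {
    delete db_;
    db_ = nullptr;
  }
  static Database* db_;
};

Database* DatabaseTest::db_ = nullptr;

TEST_F(DatabaseTest, SharesOneInstance) { EXPECT_NE(db_, nullptr); }
```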
@@ -428,16 +432,16 @@ class GTEST_API_ Test {
static void SetUpTestCase() {}
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
- // Returns true iff the current test has a fatal failure.
+ // Returns true if the current test has a fatal failure.
static bool HasFatalFailure();
- // Returns true iff the current test has a non-fatal failure.
+ // Returns true if the current test has a non-fatal failure.
static bool HasNonfatalFailure();
- // Returns true iff the current test was skipped.
+ // Returns true if the current test was skipped.
static bool IsSkipped();
- // Returns true iff the current test has a (either fatal or
+ // Returns true if the current test has a (either fatal or
// non-fatal) failure.
static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
@@ -468,7 +472,7 @@ class GTEST_API_ Test {
virtual void TearDown();
private:
- // Returns true iff the current test has the same fixture class as
+ // Returns true if the current test has the same fixture class as
// the first test in the current test suite.
static bool HasSameFixtureClass();
@@ -570,24 +574,28 @@ class GTEST_API_ TestResult {
// Returns the number of the test properties.
int test_property_count() const;
- // Returns true iff the test passed (i.e. no test part failed).
+ // Returns true if the test passed (i.e. no test part failed).
bool Passed() const { return !Skipped() && !Failed(); }
- // Returns true iff the test was skipped.
+ // Returns true if the test was skipped.
bool Skipped() const;
- // Returns true iff the test failed.
+ // Returns true if the test failed.
bool Failed() const;
- // Returns true iff the test fatally failed.
+ // Returns true if the test fatally failed.
bool HasFatalFailure() const;
- // Returns true iff the test has a non-fatal failure.
+ // Returns true if the test has a non-fatal failure.
bool HasNonfatalFailure() const;
// Returns the elapsed time, in milliseconds.
TimeInMillis elapsed_time() const { return elapsed_time_; }
+ // Gets the time of the test case start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const { return start_timestamp_; }
+
// Returns the i-th test part result among all the results. i can range from 0
// to total_part_count() - 1. If i is not in that range, aborts the program.
const TestPartResult& GetTestPartResult(int i) const;
@@ -618,6 +626,9 @@ class GTEST_API_ TestResult {
return test_properties_;
}
+ // Sets the start time.
+ void set_start_timestamp(TimeInMillis start) { start_timestamp_ = start; }
+
// Sets the elapsed time.
void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
@@ -661,6 +672,8 @@ class GTEST_API_ TestResult {
std::vector<TestProperty> test_properties_;
// Running count of death tests.
int death_test_count_;
+ // The start time, in milliseconds since UNIX Epoch.
+ TimeInMillis start_timestamp_;
// The elapsed time, in milliseconds.
TimeInMillis elapsed_time_;
@@ -737,7 +750,7 @@ class GTEST_API_ TestInfo {
// contains the character 'A' or starts with "Foo.".
bool should_run() const { return should_run_; }
- // Returns true iff this test will appear in the XML report.
+ // Returns true if this test will appear in the XML report.
bool is_reportable() const {
// The XML report includes tests matching the filter, excluding those
// run in other shards.
@@ -796,8 +809,8 @@ class GTEST_API_ TestInfo {
const std::unique_ptr<const ::std::string> value_param_;
internal::CodeLocation location_;
const internal::TypeId fixture_class_id_; // ID of the test fixture class
- bool should_run_; // True iff this test should run
- bool is_disabled_; // True iff this test is disabled
+ bool should_run_; // True if this test should run
+ bool is_disabled_; // True if this test is disabled
bool matches_filter_; // True if this test matches the
// user-specified filter.
bool is_in_another_shard_; // Will be run in another shard.
@@ -872,15 +885,19 @@ class GTEST_API_ TestSuite {
// Gets the number of all tests in this test suite.
int total_test_count() const;
- // Returns true iff the test suite passed.
+ // Returns true if the test suite passed.
bool Passed() const { return !Failed(); }
- // Returns true iff the test suite failed.
+ // Returns true if the test suite failed.
bool Failed() const { return failed_test_count() > 0; }
// Returns the elapsed time, in milliseconds.
TimeInMillis elapsed_time() const { return elapsed_time_; }
+ // Gets the time of the test suite start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const { return start_timestamp_; }
+
// Returns the i-th test among all the tests. i can range from 0 to
// total_test_count() - 1. If i is not in that range, returns NULL.
const TestInfo* GetTestInfo(int i) const;
@@ -939,33 +956,33 @@ class GTEST_API_ TestSuite {
}
}
- // Returns true iff test passed.
+ // Returns true if test passed.
static bool TestPassed(const TestInfo* test_info) {
return test_info->should_run() && test_info->result()->Passed();
}
- // Returns true iff test skipped.
+ // Returns true if test skipped.
static bool TestSkipped(const TestInfo* test_info) {
return test_info->should_run() && test_info->result()->Skipped();
}
- // Returns true iff test failed.
+ // Returns true if test failed.
static bool TestFailed(const TestInfo* test_info) {
return test_info->should_run() && test_info->result()->Failed();
}
- // Returns true iff the test is disabled and will be reported in the XML
+ // Returns true if the test is disabled and will be reported in the XML
// report.
static bool TestReportableDisabled(const TestInfo* test_info) {
return test_info->is_reportable() && test_info->is_disabled_;
}
- // Returns true iff test is disabled.
+ // Returns true if test is disabled.
static bool TestDisabled(const TestInfo* test_info) {
return test_info->is_disabled_;
}
- // Returns true iff this test will appear in the XML report.
+ // Returns true if this test will appear in the XML report.
static bool TestReportable(const TestInfo* test_info) {
return test_info->is_reportable();
}
@@ -997,8 +1014,10 @@ class GTEST_API_ TestSuite {
internal::SetUpTestSuiteFunc set_up_tc_;
// Pointer to the function that tears down the test suite.
internal::TearDownTestSuiteFunc tear_down_tc_;
- // True iff any test in this test suite should run.
+ // True if any test in this test suite should run.
bool should_run_;
+ // The start time, in milliseconds since UNIX Epoch.
+ TimeInMillis start_timestamp_;
// Elapsed time, in milliseconds.
TimeInMillis elapsed_time_;
// Holds test properties recorded during execution of SetUpTestSuite and
@@ -1330,10 +1349,10 @@ class GTEST_API_ UnitTest {
// Gets the elapsed time, in milliseconds.
TimeInMillis elapsed_time() const;
- // Returns true iff the unit test passed (i.e. all test suites passed).
+ // Returns true if the unit test passed (i.e. all test suites passed).
bool Passed() const;
- // Returns true iff the unit test failed (i.e. some test suite failed
+ // Returns true if the unit test failed (i.e. some test suite failed
// or something outside of all tests failed).
bool Failed() const;
@@ -2248,7 +2267,7 @@ class GTEST_API_ ScopedTrace {
// Compile-time assertion for type equality.
-// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// StaticAssertTypeEq<type1, type2>() compiles if type1 and type2 are
// the same type. The value it returns is not interesting.
//
// Instead of making StaticAssertTypeEq a class template, we make it a
diff --git a/googletest/include/gtest/internal/gtest-filepath.h b/googletest/include/gtest/internal/gtest-filepath.h
index ae38d95..5d21bbc 100644
--- a/googletest/include/gtest/internal/gtest-filepath.h
+++ b/googletest/include/gtest/internal/gtest-filepath.h
@@ -110,7 +110,7 @@ class GTEST_API_ FilePath {
const FilePath& base_name,
const char* extension);
- // Returns true iff the path is "".
+ // Returns true if the path is "".
bool IsEmpty() const { return pathname_.empty(); }
// If input name has a trailing separator character, removes it and returns
diff --git a/googletest/include/gtest/internal/gtest-internal.h b/googletest/include/gtest/internal/gtest-internal.h
index 2732660..08531d8 100644
--- a/googletest/include/gtest/internal/gtest-internal.h
+++ b/googletest/include/gtest/internal/gtest-internal.h
@@ -189,7 +189,7 @@ GTEST_API_ std::string DiffStrings(const std::string& left,
// expected_value: "5"
// actual_value: "6"
//
-// The ignoring_case parameter is true iff the assertion is a
+// The ignoring_case parameter is true if the assertion is a
// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
// be inserted into the message.
GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
@@ -318,14 +318,14 @@ class FloatingPoint {
// Returns the sign bit of this number.
Bits sign_bit() const { return kSignBitMask & u_.bits_; }
- // Returns true iff this is NAN (not a number).
+ // Returns true if this is NAN (not a number).
bool is_nan() const {
// It's a NAN if the exponent bits are all ones and the fraction
// bits are not entirely zeros.
return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
}
- // Returns true iff this number is at most kMaxUlps ULP's away from
+ // Returns true if this number is at most kMaxUlps ULP's away from
// rhs. In particular, this function:
//
// - returns false if either number is (or both are) NAN.
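This ULP-based comparison is what backs the user-facing floating-point
assertions; for example:

```cpp
#include "gtest/gtest.h"

TEST(FloatCompareTest, UlpAndAbsoluteError) {
  double sum = 0.1 + 0.2;  // 0.30000000000000004...
  // EXPECT_EQ(0.3, sum) would fail; EXPECT_DOUBLE_EQ passes because
  // the two values are within 4 ULPs of each other.
  EXPECT_DOUBLE_EQ(0.3, sum);
  // EXPECT_NEAR bounds the absolute error instead of the ULP distance.
  EXPECT_NEAR(0.3, sum, 1e-9);
}
```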
@@ -848,7 +848,7 @@ class GTEST_API_ Random {
};
// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
-// compiler error iff T1 and T2 are different types.
+// compiler error if T1 and T2 are different types.
template <typename T1, typename T2>
struct CompileAssertTypesEqual;
@@ -895,7 +895,7 @@ struct RemoveConst<const T[N]> {
GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))
// IsAProtocolMessage<T>::value is a compile-time bool constant that's
-// true iff T is type proto2::Message or a subclass of it.
+// true if T is type proto2::Message or a subclass of it.
template <typename T>
struct IsAProtocolMessage
: public bool_constant<
diff --git a/googletest/include/gtest/internal/gtest-port.h b/googletest/include/gtest/internal/gtest-port.h
index 0a9a331..4f887c5 100644
--- a/googletest/include/gtest/internal/gtest-port.h
+++ b/googletest/include/gtest/internal/gtest-port.h
@@ -362,7 +362,7 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
# include <android/api-level.h> // NOLINT
#endif
-// Defines this to true iff Google Test can use POSIX regular expressions.
+// Defines this to true if Google Test can use POSIX regular expressions.
#ifndef GTEST_HAS_POSIX_RE
# if GTEST_OS_LINUX_ANDROID
// On Android, <regex.h> is only available starting with Gingerbread.
@@ -403,7 +403,7 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
// The user didn't tell us whether exceptions are enabled, so we need
// to figure it out.
# if defined(_MSC_VER) && defined(_CPPUNWIND)
-// MSVC defines _CPPUNWIND to 1 iff exceptions are enabled.
+// MSVC defines _CPPUNWIND to 1 if exceptions are enabled.
# define GTEST_HAS_EXCEPTIONS 1
# elif defined(__BORLANDC__)
// C++Builder's implementation of the STL uses the _HAS_EXCEPTIONS
@@ -414,8 +414,8 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
# endif // _HAS_EXCEPTIONS
# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
# elif defined(__clang__)
-// clang defines __EXCEPTIONS iff exceptions are enabled before clang 220714,
-// but iff cleanups are enabled after that. In Obj-C++ files, there can be
+// clang defines __EXCEPTIONS if exceptions are enabled before clang 220714,
+// but if cleanups are enabled after that. In Obj-C++ files, there can be
// cleanups for ObjC exceptions which also need cleanups, even if C++ exceptions
// are disabled. clang has __has_feature(cxx_exceptions) which checks for C++
// exceptions starting at clang r206352, but which checked for cleanups prior to
@@ -423,7 +423,7 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
// __EXCEPTIONS && __has_feature(cxx_exceptions).
# define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions))
# elif defined(__GNUC__) && __EXCEPTIONS
-// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+// gcc defines __EXCEPTIONS to 1 if exceptions are enabled.
# define GTEST_HAS_EXCEPTIONS 1
# elif defined(__SUNPRO_CC)
// Sun Pro CC supports exceptions. However, there is no compile-time way of
@@ -431,7 +431,7 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
// they are enabled unless the user tells us otherwise.
# define GTEST_HAS_EXCEPTIONS 1
# elif defined(__IBMCPP__) && __EXCEPTIONS
-// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+// xlC defines __EXCEPTIONS to 1 if exceptions are enabled.
# define GTEST_HAS_EXCEPTIONS 1
# elif defined(__HP_aCC)
// Exception handling is in effect by default in HP aCC compiler. It has to
@@ -472,13 +472,13 @@ typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
# ifdef _MSC_VER
-# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled.
+# ifdef _CPPRTTI // MSVC defines this macro if RTTI is enabled.
# define GTEST_HAS_RTTI 1
# else
# define GTEST_HAS_RTTI 0
# endif
-// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+// Starting with version 4.3.2, gcc defines __GXX_RTTI if RTTI is enabled.
# elif defined(__GNUC__)
# ifdef __GXX_RTTI
@@ -909,9 +909,9 @@ class GTEST_API_ RE {
// Returns the string representation of the regex.
const char* pattern() const { return pattern_; }
- // FullMatch(str, re) returns true iff regular expression re matches
+ // FullMatch(str, re) returns true if regular expression re matches
// the entire str.
- // PartialMatch(str, re) returns true iff regular expression re
+ // PartialMatch(str, re) returns true if regular expression re
// matches a substring of str (including str itself).
static bool FullMatch(const ::std::string& str, const RE& re) {
return FullMatch(str.c_str(), re);
@@ -1266,7 +1266,7 @@ class GTEST_API_ AutoHandle {
void Reset(Handle handle);
private:
- // Returns true iff the handle is a valid handle object that can be closed.
+ // Returns true if the handle is a valid handle object that can be closed.
bool IsCloseable() const;
Handle handle_;
@@ -1368,7 +1368,7 @@ class ThreadWithParam : public ThreadWithParamBase {
// When non-NULL, used to block execution until the controller thread
// notifies.
Notification* const thread_can_start_;
- bool finished_; // true iff we know that the thread function has finished.
+ bool finished_; // true if we know that the thread function has finished.
pthread_t thread_; // The native thread object.
GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
diff --git a/googletest/include/gtest/internal/gtest-string.h b/googletest/include/gtest/internal/gtest-string.h
index 884b1e1..26d8407 100644
--- a/googletest/include/gtest/internal/gtest-string.h
+++ b/googletest/include/gtest/internal/gtest-string.h
@@ -94,7 +94,7 @@ class GTEST_API_ String {
static const char* Utf16ToAnsi(LPCWSTR utf16_str);
#endif
- // Compares two C strings. Returns true iff they have the same content.
+ // Compares two C strings. Returns true if they have the same content.
//
// Unlike strcmp(), this function can handle NULL argument(s). A
// NULL C string is considered different to any non-NULL C string,
@@ -107,7 +107,7 @@ class GTEST_API_ String {
// returned.
static std::string ShowWideCString(const wchar_t* wide_c_str);
- // Compares two wide C strings. Returns true iff they have the same
+ // Compares two wide C strings. Returns true if they have the same
// content.
//
// Unlike wcscmp(), this function can handle NULL argument(s). A
@@ -115,7 +115,7 @@ class GTEST_API_ String {
// including the empty string.
static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
- // Compares two C strings, ignoring case. Returns true iff they
+ // Compares two C strings, ignoring case. Returns true if they
// have the same content.
//
// Unlike strcasecmp(), this function can handle NULL argument(s).
@@ -124,7 +124,7 @@ class GTEST_API_ String {
static bool CaseInsensitiveCStringEquals(const char* lhs,
const char* rhs);
- // Compares two wide C strings, ignoring case. Returns true iff they
+ // Compares two wide C strings, ignoring case. Returns true if they
// have the same content.
//
// Unlike wcscasecmp(), this function can handle NULL argument(s).
@@ -139,7 +139,7 @@ class GTEST_API_ String {
static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
const wchar_t* rhs);
- // Returns true iff the given string ends with the given suffix, ignoring
+ // Returns true if the given string ends with the given suffix, ignoring
// case. Any string is considered to end with an empty suffix.
static bool EndsWithCaseInsensitive(
const std::string& str, const std::string& suffix);
diff --git a/googletest/include/gtest/internal/gtest-type-util.h b/googletest/include/gtest/internal/gtest-type-util.h
index 4cd1cf3..5f9a056 100644
--- a/googletest/include/gtest/internal/gtest-type-util.h
+++ b/googletest/include/gtest/internal/gtest-type-util.h
@@ -105,7 +105,7 @@ std::string GetTypeName() {
#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-// AssertyTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// AssertTypeEq<T1, T2>::type is defined if T1 and T2 are the same
// type. This can be used as a compile-time assertion to ensure that
// two types are equal.
diff --git a/googletest/include/gtest/internal/gtest-type-util.h.pump b/googletest/include/gtest/internal/gtest-type-util.h.pump
index eb014ee..3a3896b 100644
--- a/googletest/include/gtest/internal/gtest-type-util.h.pump
+++ b/googletest/include/gtest/internal/gtest-type-util.h.pump
@@ -104,7 +104,7 @@ std::string GetTypeName() {
#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-// AssertyTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// AssertTypeEq<T1, T2>::type is defined if T1 and T2 are the same
// type. This can be used as a compile-time assertion to ensure that
// two types are equal.
diff --git a/googletest/samples/prime_tables.h b/googletest/samples/prime_tables.h
index 119545a..4178e70 100644
--- a/googletest/samples/prime_tables.h
+++ b/googletest/samples/prime_tables.h
@@ -43,7 +43,7 @@ class PrimeTable {
public:
virtual ~PrimeTable() {}
- // Returns true iff n is a prime number.
+ // Returns true if n is a prime number.
virtual bool IsPrime(int n) const = 0;
// Returns the smallest prime number greater than p; or returns -1
diff --git a/googletest/samples/sample1.cc b/googletest/samples/sample1.cc
index 13cec1d..58dbf17 100644
--- a/googletest/samples/sample1.cc
+++ b/googletest/samples/sample1.cc
@@ -41,7 +41,7 @@ int Factorial(int n) {
return result;
}
-// Returns true iff n is a prime number.
+// Returns true if n is a prime number.
bool IsPrime(int n) {
// Trivial case 1: small numbers
if (n <= 1) return false;
diff --git a/googletest/samples/sample1.h b/googletest/samples/sample1.h
index 2c3e9f0..a90eae4 100644
--- a/googletest/samples/sample1.h
+++ b/googletest/samples/sample1.h
@@ -35,7 +35,7 @@
// Returns n! (the factorial of n). For negative n, n! is defined to be 1.
int Factorial(int n);
-// Returns true iff n is a prime number.
+// Returns true if n is a prime number.
bool IsPrime(int n);
#endif // GTEST_SAMPLES_SAMPLE1_H_
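In the spirit of `sample1_unittest.cc`, a test for these declarations might
look like:

```cpp
#include "gtest/gtest.h"
#include "sample1.h"

TEST(IsPrimeTest, HandlesSmallInputs) {
  EXPECT_FALSE(IsPrime(1));  // By convention, 1 is not prime.
  EXPECT_TRUE(IsPrime(2));
  EXPECT_TRUE(IsPrime(3));
  EXPECT_FALSE(IsPrime(4));
}
```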
diff --git a/googletest/samples/sample9_unittest.cc b/googletest/samples/sample9_unittest.cc
index c0d8ff2..e502d08 100644
--- a/googletest/samples/sample9_unittest.cc
+++ b/googletest/samples/sample9_unittest.cc
@@ -135,10 +135,10 @@ int main(int argc, char **argv) {
// This is an example of using the UnitTest reflection API to inspect test
// results. Here we discount failures from the tests we expected to fail.
int unexpectedly_failed_tests = 0;
- for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
- const TestCase& test_case = *unit_test.GetTestCase(i);
- for (int j = 0; j < test_case.total_test_count(); ++j) {
- const TestInfo& test_info = *test_case.GetTestInfo(j);
+ for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+ const testing::TestSuite& test_suite = *unit_test.GetTestSuite(i);
+ for (int j = 0; j < test_suite.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_suite.GetTestInfo(j);
// Counts failed tests that were not meant to fail (those without
// 'Fails' in the name).
if (test_info.result()->Failed() &&
diff --git a/googletest/src/gtest-death-test.cc b/googletest/src/gtest-death-test.cc
index 350e610..e5ec287 100644
--- a/googletest/src/gtest-death-test.cc
+++ b/googletest/src/gtest-death-test.cc
@@ -68,6 +68,7 @@
# include <lib/fdio/fd.h>
# include <lib/fdio/io.h>
# include <lib/fdio/spawn.h>
+# include <lib/zx/channel.h>
# include <lib/zx/port.h>
# include <lib/zx/process.h>
# include <lib/zx/socket.h>
@@ -562,7 +563,7 @@ static ::std::string FormatDeathTestOutput(const ::std::string& output) {
// status_ok: true if exit_status is acceptable in the context of
// this particular death test, which fails if it is false
//
-// Returns true iff all of the above conditions are met. Otherwise, the
+// Returns true if all of the above conditions are met. Otherwise, the
// first failing condition, in the order given above, is the one that is
// reported. Also sets the last death test message string.
bool DeathTestImpl::Passed(bool status_ok) {
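From the user's side, those conditions are what a death test asserts; a
sketch (the crashing helper is invented for illustration):

```cpp
#include <cstdio>
#include <cstdlib>

#include "gtest/gtest.h"

void CheckNotNull(const int* p) {
  if (p == nullptr) {
    std::fprintf(stderr, "p must not be null\n");
    std::abort();  // Abnormal exit: the exit-status condition.
  }
}

TEST(CheckNotNullDeathTest, DiesOnNull) {
  // Passes only if the statement exits abnormally AND the regular
  // expression matches what it printed to stderr.
  EXPECT_DEATH(CheckNotNull(nullptr), "must not be null");
}
```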
@@ -831,7 +832,7 @@ class FuchsiaDeathTest : public DeathTestImpl {
std::string captured_stderr_;
zx::process child_process_;
- zx::port port_;
+ zx::channel exception_channel_;
zx::socket stderr_socket_;
};
@@ -876,41 +877,51 @@ class Arguments {
int FuchsiaDeathTest::Wait() {
const int kProcessKey = 0;
const int kSocketKey = 1;
+ const int kExceptionKey = 2;
if (!spawned())
return 0;
- // Register to wait for the child process to terminate.
+ // Create a port to wait for socket/task/exception events.
zx_status_t status_zx;
+ zx::port port;
+ status_zx = zx::port::create(0, &port);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ // Register to wait for the child process to terminate.
status_zx = child_process_.wait_async(
- port_, kProcessKey, ZX_PROCESS_TERMINATED, ZX_WAIT_ASYNC_ONCE);
+ port, kProcessKey, ZX_PROCESS_TERMINATED, ZX_WAIT_ASYNC_ONCE);
GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
// Register to wait for the socket to be readable or closed.
status_zx = stderr_socket_.wait_async(
- port_, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED,
+ port, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED,
ZX_WAIT_ASYNC_ONCE);
GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+ // Register to wait for an exception.
+ status_zx = exception_channel_.wait_async(
+ port, kExceptionKey, ZX_CHANNEL_READABLE, ZX_WAIT_ASYNC_ONCE);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
bool process_terminated = false;
bool socket_closed = false;
do {
zx_port_packet_t packet = {};
- status_zx = port_.wait(zx::time::infinite(), &packet);
+ status_zx = port.wait(zx::time::infinite(), &packet);
GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
- if (packet.key == kProcessKey) {
- if (ZX_PKT_IS_EXCEPTION(packet.type)) {
- // Process encountered an exception. Kill it directly rather than
- // letting other handlers process the event. We will get a second
- // kProcessKey event when the process actually terminates.
- status_zx = child_process_.kill();
- GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
- } else {
- // Process terminated.
- GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type));
- GTEST_DEATH_TEST_CHECK_(packet.signal.observed & ZX_PROCESS_TERMINATED);
- process_terminated = true;
- }
+ if (packet.key == kExceptionKey) {
+ // Process encountered an exception. Kill it directly rather than
+ // letting other handlers process the event. We will get a kProcessKey
+ // event when the process actually terminates.
+ status_zx = child_process_.kill();
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+ } else if (packet.key == kProcessKey) {
+ // Process terminated.
+ GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type));
+ GTEST_DEATH_TEST_CHECK_(packet.signal.observed & ZX_PROCESS_TERMINATED);
+ process_terminated = true;
} else if (packet.key == kSocketKey) {
GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type));
if (packet.signal.observed & ZX_SOCKET_READABLE) {
@@ -930,7 +941,7 @@ int FuchsiaDeathTest::Wait() {
} else {
GTEST_DEATH_TEST_CHECK_(status_zx == ZX_ERR_SHOULD_WAIT);
status_zx = stderr_socket_.wait_async(
- port_, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED,
+ port, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED,
ZX_WAIT_ASYNC_ONCE);
GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
}
@@ -1033,12 +1044,11 @@ DeathTest::TestRole FuchsiaDeathTest::AssumeRole() {
child_job, ZX_JOB_POL_RELATIVE, ZX_JOB_POL_BASIC, &policy, 1);
GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
- // Create an exception port and attach it to the |child_job|, to allow
+ // Create an exception channel attached to the |child_job|, to allow
// us to suppress the system default exception handler from firing.
- status = zx::port::create(0, &port_);
- GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
- status = zx_task_bind_exception_port(
- child_job, port_.get(), 0 /* key */, 0 /*options */);
+  status = zx_task_create_exception_channel(
+      child_job, 0, exception_channel_.reset_and_get_address());
GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
// Spawn the child process.
diff --git a/googletest/src/gtest-internal-inl.h b/googletest/src/gtest-internal-inl.h
index 53cd22b..e29d992 100644
--- a/googletest/src/gtest-internal-inl.h
+++ b/googletest/src/gtest-internal-inl.h
@@ -99,14 +99,14 @@ const char kFlagfileFlag[] = "flagfile";
// A valid random seed must be in [1, kMaxRandomSeed].
const int kMaxRandomSeed = 99999;
-// g_help_flag is true iff the --help flag or an equivalent form is
+// g_help_flag is true if the --help flag or an equivalent form is
// specified on the command line.
GTEST_API_ extern bool g_help_flag;
// Returns the current time in milliseconds.
GTEST_API_ TimeInMillis GetTimeInMillis();
-// Returns true iff Google Test should use colors in the output.
+// Returns true if Google Test should use colors in the output.
GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
// Formats the given time in milliseconds as seconds.
@@ -266,7 +266,7 @@ GTEST_API_ bool ShouldShard(const char* total_shards_str,
GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
// Given the total number of shards, the shard index, and the test id,
-// returns true iff the test should be run on this shard. The test id is
+// returns true if the test should be run on this shard. The test id is
// some arbitrary but unique non-negative integer assigned to each test
// method. Assumes that 0 <= shard_index < total_shards.
GTEST_API_ bool ShouldRunTestOnShard(
@@ -352,7 +352,7 @@ class TestPropertyKeyIs {
// TestPropertyKeyIs has NO default constructor.
explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
- // Returns true iff the test name of test property matches on key_.
+ // Returns true if the test name of test property matches on key_.
bool operator()(const TestProperty& test_property) const {
return test_property.key() == key_;
}
@@ -385,14 +385,14 @@ class GTEST_API_ UnitTestOptions {
// Functions for processing the gtest_filter flag.
- // Returns true iff the wildcard pattern matches the string. The
+ // Returns true if the wildcard pattern matches the string. The
// first ':' or '\0' character in pattern marks the end of it.
//
// This recursive algorithm isn't very efficient, but is clear and
// works well enough for matching test names, which are short.
static bool PatternMatchesString(const char *pattern, const char *str);
- // Returns true iff the user-specified filter matches the test suite
+ // Returns true if the user-specified filter matches the test suite
// name and the test name.
static bool FilterMatchesTest(const std::string& test_suite_name,
const std::string& test_name);
@@ -577,10 +577,10 @@ class GTEST_API_ UnitTestImpl {
// Gets the elapsed time, in milliseconds.
TimeInMillis elapsed_time() const { return elapsed_time_; }
- // Returns true iff the unit test passed (i.e. all test suites passed).
+ // Returns true if the unit test passed (i.e. all test suites passed).
bool Passed() const { return !Failed(); }
- // Returns true iff the unit test failed (i.e. some test suite failed
+ // Returns true if the unit test failed (i.e. some test suite failed
// or something outside of all tests failed).
bool Failed() const {
return failed_test_suite_count() > 0 || ad_hoc_test_result()->Failed();
@@ -911,7 +911,7 @@ class GTEST_API_ UnitTestImpl {
// desired.
OsStackTraceGetterInterface* os_stack_trace_getter_;
- // True iff PostFlagParsingInit() has been called.
+ // True if PostFlagParsingInit() has been called.
bool post_flag_parse_init_performed_;
// The random number seed used at the beginning of the test run.
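
The sharding contract restated in this header (each test runs on exactly one of total_shards shards, given 0 <= shard_index < total_shards) comes down to a round-robin assignment. A minimal sketch of a predicate meeting that contract; the body is an illustration, not necessarily gtest's exact implementation:

    // Assigns test `test_id` to the shard whose index is test_id modulo
    // total_shards, so every test runs on exactly one shard.
    bool ShouldRunTestOnShardSketch(int total_shards, int shard_index,
                                    int test_id) {
      return (test_id % total_shards) == shard_index;
    }
    // With total_shards == 3, test ids 0, 3, 6, ... land on shard 0.
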
diff --git a/googletest/src/gtest-port.cc b/googletest/src/gtest-port.cc
index 74daaaa..9024f03 100644
--- a/googletest/src/gtest-port.cc
+++ b/googletest/src/gtest-port.cc
@@ -715,7 +715,7 @@ RE::~RE() {
free(const_cast<char*>(pattern_));
}
-// Returns true iff regular expression re matches the entire str.
+// Returns true if regular expression re matches the entire str.
bool RE::FullMatch(const char* str, const RE& re) {
if (!re.is_valid_) return false;
@@ -723,7 +723,7 @@ bool RE::FullMatch(const char* str, const RE& re) {
return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
}
-// Returns true iff regular expression re matches a substring of str
+// Returns true if regular expression re matches a substring of str
// (including str itself).
bool RE::PartialMatch(const char* str, const RE& re) {
if (!re.is_valid_) return false;
@@ -764,13 +764,13 @@ void RE::Init(const char* regex) {
#elif GTEST_USES_SIMPLE_RE
-// Returns true iff ch appears anywhere in str (excluding the
+// Returns true if ch appears anywhere in str (excluding the
// terminating '\0' character).
bool IsInSet(char ch, const char* str) {
return ch != '\0' && strchr(str, ch) != nullptr;
}
-// Returns true iff ch belongs to the given classification. Unlike
+// Returns true if ch belongs to the given classification. Unlike
// similar functions in <ctype.h>, these aren't affected by the
// current locale.
bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
@@ -784,12 +784,12 @@ bool IsAsciiWordChar(char ch) {
('0' <= ch && ch <= '9') || ch == '_';
}
-// Returns true iff "\\c" is a supported escape sequence.
+// Returns true if "\\c" is a supported escape sequence.
bool IsValidEscape(char c) {
return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
}
-// Returns true iff the given atom (specified by escaped and pattern)
+// Returns true if the given atom (specified by escaped and pattern)
// matches ch. The result is undefined if the atom is invalid.
bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
if (escaped) { // "\\p" where p is pattern_char.
@@ -828,7 +828,7 @@ bool ValidateRegex(const char* regex) {
bool is_valid = true;
- // True iff ?, *, or + can follow the previous atom.
+ // True if ?, *, or + can follow the previous atom.
bool prev_repeatable = false;
for (int i = 0; regex[i]; i++) {
if (regex[i] == '\\') { // An escape sequence
@@ -904,7 +904,7 @@ bool MatchRepetitionAndRegexAtHead(
return false;
}
-// Returns true iff regex matches a prefix of str. regex must be a
+// Returns true if regex matches a prefix of str. regex must be a
// valid simple regular expression and not start with "^", or the
// result is undefined.
bool MatchRegexAtHead(const char* regex, const char* str) {
@@ -935,7 +935,7 @@ bool MatchRegexAtHead(const char* regex, const char* str) {
}
}
-// Returns true iff regex matches any substring of str. regex must be
+// Returns true if regex matches any substring of str. regex must be
// a valid simple regular expression, or the result is undefined.
//
// The algorithm is recursive, but the recursion depth doesn't exceed
@@ -964,12 +964,12 @@ RE::~RE() {
free(const_cast<char*>(full_pattern_));
}
-// Returns true iff regular expression re matches the entire str.
+// Returns true if regular expression re matches the entire str.
bool RE::FullMatch(const char* str, const RE& re) {
return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
}
-// Returns true iff regular expression re matches a substring of str
+// Returns true if regular expression re matches a substring of str
// (including str itself).
bool RE::PartialMatch(const char* str, const RE& re) {
return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
@@ -1330,7 +1330,7 @@ bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
// Reads and returns the Boolean environment variable corresponding to
// the given flag; if it's not set, returns default_value.
//
-// The value is considered true iff it's not "0".
+// The value is considered true if it's not "0".
bool BoolFromGTestEnv(const char* flag, bool default_value) {
#if defined(GTEST_GET_BOOL_FROM_ENV_)
return GTEST_GET_BOOL_FROM_ENV_(flag, default_value);
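
The Boolean environment-variable rule documented above (unset means the default; any value other than "0" means true) is small enough to state on its own. A self-contained sketch under that contract; the helper name and the bare getenv() call are illustrative, not gtest internals:

    #include <cstdlib>
    #include <cstring>

    // Returns default_value when env_var is unset; otherwise any value
    // other than the literal "0" counts as true.
    bool BoolFromEnvSketch(const char* env_var, bool default_value) {
      const char* const value = std::getenv(env_var);  // e.g. "GTEST_SHUFFLE"
      return value == nullptr ? default_value : std::strcmp(value, "0") != 0;
    }
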
diff --git a/googletest/src/gtest.cc b/googletest/src/gtest.cc
index 0da3cc2..a74041e 100644
--- a/googletest/src/gtest.cc
+++ b/googletest/src/gtest.cc
@@ -177,7 +177,7 @@ namespace internal {
// stack trace.
const char kStackTraceMarker[] = "\nStack trace:\n";
-// g_help_flag is true iff the --help flag or an equivalent form is
+// g_help_flag is true if the --help flag or an equivalent form is
// specified on the command line.
bool g_help_flag = false;
@@ -217,12 +217,12 @@ GTEST_DEFINE_bool_(
GTEST_DEFINE_bool_(
break_on_failure,
internal::BoolFromGTestEnv("break_on_failure", false),
- "True iff a failed assertion should be a debugger break-point.");
+ "True if a failed assertion should be a debugger break-point.");
GTEST_DEFINE_bool_(
catch_exceptions,
internal::BoolFromGTestEnv("catch_exceptions", true),
- "True iff " GTEST_NAME_
+ "True if " GTEST_NAME_
" should catch exceptions and treat them as test failures.");
GTEST_DEFINE_string_(
@@ -273,13 +273,13 @@ GTEST_DEFINE_string_(
GTEST_DEFINE_bool_(
print_time,
internal::BoolFromGTestEnv("print_time", true),
- "True iff " GTEST_NAME_
+ "True if " GTEST_NAME_
" should display elapsed time in text output.");
GTEST_DEFINE_bool_(
print_utf8,
internal::BoolFromGTestEnv("print_utf8", true),
- "True iff " GTEST_NAME_
+ "True if " GTEST_NAME_
" prints UTF8 characters as text.");
GTEST_DEFINE_int32_(
@@ -296,13 +296,13 @@ GTEST_DEFINE_int32_(
GTEST_DEFINE_bool_(
show_internal_stack_frames, false,
- "True iff " GTEST_NAME_ " should include internal stack frames when "
+ "True if " GTEST_NAME_ " should include internal stack frames when "
"printing test failure stack traces.");
GTEST_DEFINE_bool_(
shuffle,
internal::BoolFromGTestEnv("shuffle", false),
- "True iff " GTEST_NAME_
+ "True if " GTEST_NAME_
" should randomize tests' order on every run.");
GTEST_DEFINE_int32_(
@@ -354,7 +354,7 @@ UInt32 Random::Generate(UInt32 range) {
return state_ % range;
}
-// GTestIsInitialized() returns true iff the user has initialized
+// GTestIsInitialized() returns true if the user has initialized
// Google Test. Useful for catching the user mistake of not initializing
// Google Test before calling RUN_ALL_TESTS().
static bool GTestIsInitialized() { return GetArgvs().size() > 0; }
@@ -371,17 +371,17 @@ static int SumOverTestSuiteList(const std::vector<TestSuite*>& case_list,
return sum;
}
-// Returns true iff the test suite passed.
+// Returns true if the test suite passed.
static bool TestSuitePassed(const TestSuite* test_suite) {
return test_suite->should_run() && test_suite->Passed();
}
-// Returns true iff the test suite failed.
+// Returns true if the test suite failed.
static bool TestSuiteFailed(const TestSuite* test_suite) {
return test_suite->should_run() && test_suite->Failed();
}
-// Returns true iff test_suite contains at least one test that should
+// Returns true if test_suite contains at least one test that should
// run.
static bool ShouldRunTestSuite(const TestSuite* test_suite) {
return test_suite->should_run();
@@ -482,7 +482,7 @@ std::string UnitTestOptions::GetAbsolutePathToOutputFile() {
return result.string();
}
-// Returns true iff the wildcard pattern matches the string. The
+// Returns true if the wildcard pattern matches the string. The
// first ':' or '\0' character in pattern marks the end of it.
//
// This recursive algorithm isn't very efficient, but is clear and
@@ -525,7 +525,7 @@ bool UnitTestOptions::MatchesFilter(
}
}
-// Returns true iff the user-specified filter matches the test suite
+// Returns true if the user-specified filter matches the test suite
// name and the test name.
bool UnitTestOptions::FilterMatchesTest(const std::string& test_suite_name,
const std::string& test_name) {
@@ -910,7 +910,7 @@ const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
#endif // GTEST_OS_WINDOWS_MOBILE
-// Compares two C strings. Returns true iff they have the same content.
+// Compares two C strings. Returns true if they have the same content.
//
// Unlike strcmp(), this function can handle NULL argument(s). A NULL
// C string is considered different to any non-NULL C string,
@@ -1320,7 +1320,7 @@ std::vector<std::string> SplitEscapedString(const std::string& str) {
// lhs_value: "5"
// rhs_value: "6"
//
-// The ignoring_case parameter is true iff the assertion is a
+// The ignoring_case parameter is true if the assertion is a
// *_STRCASEEQ*. When it's true, the string "Ignoring case" will
// be inserted into the message.
AssertionResult EqFailure(const char* lhs_expression,
@@ -1563,7 +1563,7 @@ namespace {
// Helper functions for implementing IsSubString() and IsNotSubstring().
-// This group of overloaded functions return true iff needle is a
+// This group of overloaded functions return true if needle is a
// substring of haystack. NULL is considered a substring of itself
// only.
@@ -1865,7 +1865,7 @@ std::string String::ShowWideCString(const wchar_t * wide_c_str) {
return internal::WideStringToUtf8(wide_c_str, -1);
}
-// Compares two wide C strings. Returns true iff they have the same
+// Compares two wide C strings. Returns true if they have the same
// content.
//
// Unlike wcscmp(), this function can handle NULL argument(s). A NULL
@@ -1910,7 +1910,7 @@ AssertionResult CmpHelperSTRNE(const char* s1_expression,
<< " vs " << PrintToString(s2);
}
-// Compares two C strings, ignoring case. Returns true iff they have
+// Compares two C strings, ignoring case. Returns true if they have
// the same content.
//
// Unlike strcasecmp(), this function can handle NULL argument(s). A
@@ -1922,7 +1922,7 @@ bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
return posix::StrCaseCmp(lhs, rhs) == 0;
}
- // Compares two wide C strings, ignoring case. Returns true iff they
+ // Compares two wide C strings, ignoring case. Returns true if they
// have the same content.
//
// Unlike wcscasecmp(), this function can handle NULL argument(s).
@@ -1949,14 +1949,14 @@ bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
// Other unknown OSes may not define it either.
wint_t left, right;
do {
- left = towlower(*lhs++);
- right = towlower(*rhs++);
+ left = towlower(static_cast<wint_t>(*lhs++));
+ right = towlower(static_cast<wint_t>(*rhs++));
} while (left && left == right);
return left == right;
#endif // OS selector
}
-// Returns true iff str ends with the given suffix, ignoring case.
+// Returns true if str ends with the given suffix, ignoring case.
// Any string is considered to end with an empty suffix.
bool String::EndsWithCaseInsensitive(
const std::string& str, const std::string& suffix) {
@@ -2032,9 +2032,7 @@ std::string AppendUserMessage(const std::string& gtest_msg,
// Creates an empty TestResult.
TestResult::TestResult()
- : death_test_count_(0),
- elapsed_time_(0) {
-}
+ : death_test_count_(0), start_timestamp_(0), elapsed_time_(0) {}
// D'tor.
TestResult::~TestResult() {
@@ -2103,13 +2101,7 @@ static const char* const kReservedTestSuitesAttributes[] = {
// The list of reserved attributes used in the <testsuite> element of XML
// output.
static const char* const kReservedTestSuiteAttributes[] = {
- "disabled",
- "errors",
- "failures",
- "name",
- "tests",
- "time"
-};
+ "disabled", "errors", "failures", "name", "tests", "time", "timestamp"};
// The list of reserved attributes used in the <testcase> element of XML output.
static const char* const kReservedTestCaseAttributes[] = {
@@ -2117,10 +2109,10 @@ static const char* const kReservedTestCaseAttributes[] = {
"value_param", "file", "line"};
// Use a slightly different set for allowed output to ensure existing tests can
-// still RecordProperty("result")
+// still RecordProperty("result") or RecordProperty("timestamp")
static const char* const kReservedOutputTestCaseAttributes[] = {
- "classname", "name", "status", "time", "type_param",
- "value_param", "file", "line", "result"};
+ "classname", "name", "status", "time", "type_param",
+ "value_param", "file", "line", "result", "timestamp"};
template <int kSize>
std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
@@ -2206,12 +2198,12 @@ static bool TestPartSkipped(const TestPartResult& result) {
return result.skipped();
}
-// Returns true iff the test was skipped.
+// Returns true if the test was skipped.
bool TestResult::Skipped() const {
return !Failed() && CountIf(test_part_results_, TestPartSkipped) > 0;
}
-// Returns true iff the test failed.
+// Returns true if the test failed.
bool TestResult::Failed() const {
for (int i = 0; i < total_part_count(); ++i) {
if (GetTestPartResult(i).failed())
@@ -2220,22 +2212,22 @@ bool TestResult::Failed() const {
return false;
}
-// Returns true iff the test part fatally failed.
+// Returns true if the test part fatally failed.
static bool TestPartFatallyFailed(const TestPartResult& result) {
return result.fatally_failed();
}
-// Returns true iff the test fatally failed.
+// Returns true if the test fatally failed.
bool TestResult::HasFatalFailure() const {
return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
}
-// Returns true iff the test part non-fatally failed.
+// Returns true if the test part non-fatally failed.
static bool TestPartNonfatallyFailed(const TestPartResult& result) {
return result.nonfatally_failed();
}
-// Returns true iff the test has a non-fatal failure.
+// Returns true if the test has a non-fatal failure.
bool TestResult::HasNonfatalFailure() const {
return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
}
@@ -2531,18 +2523,18 @@ void Test::Run() {
this, &Test::TearDown, "TearDown()");
}
-// Returns true iff the current test has a fatal failure.
+// Returns true if the current test has a fatal failure.
bool Test::HasFatalFailure() {
return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
}
-// Returns true iff the current test has a non-fatal failure.
+// Returns true if the current test has a non-fatal failure.
bool Test::HasNonfatalFailure() {
return internal::GetUnitTestImpl()->current_test_result()->
HasNonfatalFailure();
}
-// Returns true iff the current test was skipped.
+// Returns true if the current test was skipped.
bool Test::IsSkipped() {
return internal::GetUnitTestImpl()->current_test_result()->Skipped();
}
@@ -2641,7 +2633,7 @@ class TestNameIs {
explicit TestNameIs(const char* name)
: name_(name) {}
- // Returns true iff the test name of test_info matches name_.
+ // Returns true if the test name of test_info matches name_.
bool operator()(const TestInfo * test_info) const {
return test_info && test_info->name() == name_;
}
@@ -2705,6 +2697,7 @@ void TestInfo::Run() {
test, &Test::DeleteSelf_, "the test fixture's destructor");
}
+ result_.set_start_timestamp(start);
result_.set_elapsed_time(internal::GetTimeInMillis() - start);
// Notifies the unit test event listener that a test has just finished.
@@ -2774,6 +2767,7 @@ TestSuite::TestSuite(const char* a_name, const char* a_type_param,
set_up_tc_(set_up_tc),
tear_down_tc_(tear_down_tc),
should_run_(false),
+ start_timestamp_(0),
elapsed_time_(0) {}
// Destructor of TestSuite.
@@ -2823,11 +2817,11 @@ void TestSuite::Run() {
internal::HandleExceptionsInMethodIfSupported(
this, &TestSuite::RunSetUpTestSuite, "SetUpTestSuite()");
- const internal::TimeInMillis start = internal::GetTimeInMillis();
+ start_timestamp_ = internal::GetTimeInMillis();
for (int i = 0; i < total_test_count(); i++) {
GetMutableTestInfo(i)->Run();
}
- elapsed_time_ = internal::GetTimeInMillis() - start;
+ elapsed_time_ = internal::GetTimeInMillis() - start_timestamp_;
impl->os_stack_trace_getter()->UponLeavingGTest();
internal::HandleExceptionsInMethodIfSupported(
@@ -2998,7 +2992,7 @@ static const char* GetAnsiColorCode(GTestColor color) {
#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
-// Returns true iff Google Test should use colors in the output.
+// Returns true if Google Test should use colors in the output.
bool ShouldUseColor(bool stdout_is_tty) {
const char* const gtest_color = GTEST_FLAG(color).c_str();
@@ -3123,11 +3117,22 @@ class PrettyUnitTestResultPrinter : public TestEventListener {
void OnTestIterationStart(const UnitTest& unit_test, int iteration) override;
void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override;
void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {}
- void OnTestCaseStart(const TestSuite& test_suite) override;
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseStart(const TestCase& test_case) override;
+#else
+ void OnTestSuiteStart(const TestSuite& test_suite) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
void OnTestStart(const TestInfo& test_info) override;
+
void OnTestPartResult(const TestPartResult& result) override;
void OnTestEnd(const TestInfo& test_info) override;
- void OnTestCaseEnd(const TestSuite& test_suite) override;
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseEnd(const TestCase& test_case) override;
+#else
+ void OnTestSuiteEnd(const TestSuite& test_suite) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override;
void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {}
void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override;
@@ -3181,7 +3186,22 @@ void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestSuite& test_suite) {
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+ const std::string counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s", counts.c_str(), test_case.name());
+ if (test_case.type_param() == nullptr) {
+ printf("\n");
+ } else {
+ printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param());
+ }
+ fflush(stdout);
+}
+#else
+void PrettyUnitTestResultPrinter::OnTestSuiteStart(
+ const TestSuite& test_suite) {
const std::string counts =
FormatCountableNoun(test_suite.test_to_run_count(), "test", "tests");
ColoredPrintf(COLOR_GREEN, "[----------] ");
@@ -3193,6 +3213,7 @@ void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestSuite& test_suite) {
}
fflush(stdout);
}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
ColoredPrintf(COLOR_GREEN, "[ RUN ] ");
@@ -3239,7 +3260,19 @@ void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestSuite& test_suite) {
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+ if (!GTEST_FLAG(print_time)) return;
+
+ const std::string counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n", counts.c_str(), test_case.name(),
+ internal::StreamableToString(test_case.elapsed_time()).c_str());
+ fflush(stdout);
+}
+#else
+void PrettyUnitTestResultPrinter::OnTestSuiteEnd(const TestSuite& test_suite) {
if (!GTEST_FLAG(print_time)) return;
const std::string counts =
@@ -3249,6 +3282,7 @@ void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestSuite& test_suite) {
internal::StreamableToString(test_suite.elapsed_time()).c_str());
fflush(stdout);
}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
const UnitTest& /*unit_test*/) {
@@ -3373,17 +3407,17 @@ class TestEventRepeater : public TestEventListener {
void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override;
void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) override;
// Legacy API is deprecated but still available
-#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnTestCaseStart(const TestSuite& parameter) override;
-#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnTestSuiteStart(const TestSuite& parameter) override;
void OnTestStart(const TestInfo& test_info) override;
void OnTestPartResult(const TestPartResult& result) override;
void OnTestEnd(const TestInfo& test_info) override;
// Legacy API is deprecated but still available
-#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI
- void OnTestCaseEnd(const TestSuite& parameter) override;
-#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseEnd(const TestCase& parameter) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnTestSuiteEnd(const TestSuite& parameter) override;
void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override;
void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) override;
@@ -3788,6 +3822,9 @@ void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
: "suppressed");
OutputXmlAttribute(stream, kTestsuite, "time",
FormatTimeInMillisAsSeconds(result.elapsed_time()));
+ OutputXmlAttribute(
+ stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsIso8601(result.start_timestamp()));
OutputXmlAttribute(stream, kTestsuite, "classname", test_suite_name);
int failures = 0;
@@ -3838,6 +3875,9 @@ void XmlUnitTestResultPrinter::PrintXmlTestSuite(std::ostream* stream,
OutputXmlAttribute(stream, kTestsuite, "errors", "0");
OutputXmlAttribute(stream, kTestsuite, "time",
FormatTimeInMillisAsSeconds(test_suite.elapsed_time()));
+ OutputXmlAttribute(
+ stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsIso8601(test_suite.start_timestamp()));
*stream << TestPropertiesAsXmlAttributes(test_suite.ad_hoc_test_result());
}
*stream << ">\n";
@@ -3864,11 +3904,11 @@ void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
stream, kTestsuites, "disabled",
StreamableToString(unit_test.reportable_disabled_test_count()));
OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+ OutputXmlAttribute(stream, kTestsuites, "time",
+ FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
OutputXmlAttribute(
stream, kTestsuites, "timestamp",
FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
- OutputXmlAttribute(stream, kTestsuites, "time",
- FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
if (GTEST_FLAG(shuffle)) {
OutputXmlAttribute(stream, kTestsuites, "random_seed",
@@ -4156,6 +4196,9 @@ void JsonUnitTestResultPrinter::OutputJsonTestInfo(::std::ostream* stream,
? (result.Skipped() ? "SKIPPED" : "COMPLETED")
: "SUPPRESSED",
kIndent);
+ OutputJsonKey(stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(result.start_timestamp()),
+ kIndent);
OutputJsonKey(stream, kTestsuite, "time",
FormatTimeInMillisAsDuration(result.elapsed_time()), kIndent);
OutputJsonKey(stream, kTestsuite, "classname", test_suite_name, kIndent,
@@ -4202,6 +4245,10 @@ void JsonUnitTestResultPrinter::PrintJsonTestSuite(
OutputJsonKey(stream, kTestsuite, "disabled",
test_suite.reportable_disabled_test_count(), kIndent);
OutputJsonKey(stream, kTestsuite, "errors", 0, kIndent);
+ OutputJsonKey(
+ stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(test_suite.start_timestamp()),
+ kIndent);
OutputJsonKey(stream, kTestsuite, "time",
FormatTimeInMillisAsDuration(test_suite.elapsed_time()),
kIndent, false);
@@ -4662,10 +4709,10 @@ internal::TimeInMillis UnitTest::elapsed_time() const {
return impl()->elapsed_time();
}
-// Returns true iff the unit test passed (i.e. all test suites passed).
+// Returns true if the unit test passed (i.e. all test suites passed).
bool UnitTest::Passed() const { return impl()->Passed(); }
-// Returns true iff the unit test failed (i.e. some test suite failed
+// Returns true if the unit test failed (i.e. some test suite failed
// or something outside of all tests failed).
bool UnitTest::Failed() const { return impl()->Failed(); }
@@ -5104,7 +5151,7 @@ class TestSuiteNameIs {
// Constructor.
explicit TestSuiteNameIs(const std::string& name) : name_(name) {}
- // Returns true iff the name of test_suite matches name_.
+ // Returns true if the name of test_suite matches name_.
bool operator()(const TestSuite* test_suite) const {
return test_suite != nullptr &&
strcmp(test_suite->name(), name_.c_str()) == 0;
@@ -5175,7 +5222,7 @@ static void TearDownEnvironment(Environment* env) { env->TearDown(); }
// All other functions called from RunAllTests() may safely assume that
// parameterized tests are ready to be counted and run.
bool UnitTestImpl::RunAllTests() {
- // True iff Google Test is initialized before RUN_ALL_TESTS() is called.
+ // True if Google Test is initialized before RUN_ALL_TESTS() is called.
const bool gtest_is_initialized_before_run_all_tests = GTestIsInitialized();
// Do not run any test if the --help flag was specified.
@@ -5191,7 +5238,7 @@ bool UnitTestImpl::RunAllTests() {
// protocol.
internal::WriteToShardStatusFileIfNeeded();
- // True iff we are in a subprocess for running a thread-safe-style
+ // True if we are in a subprocess for running a thread-safe-style
// death test.
bool in_subprocess_for_death_test = false;
@@ -5224,7 +5271,7 @@ bool UnitTestImpl::RunAllTests() {
random_seed_ = GTEST_FLAG(shuffle) ?
GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
- // True iff at least one test has failed.
+ // True if at least one test has failed.
bool failed = false;
TestEventListener* repeater = listeners()->repeater();
@@ -5422,7 +5469,7 @@ Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) {
}
// Given the total number of shards, the shard index, and the test id,
-// returns true iff the test should be run on this shard. The test id is
+// returns true if the test should be run on this shard. The test id is
// some arbitrary but unique non-negative integer assigned to each test
// method. Assumes that 0 <= shard_index < total_shards.
bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
@@ -5996,7 +6043,7 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
ParseGoogleTestFlagsOnlyImpl(argc, argv);
- // Fix the value of *_NSGetArgc() on macOS, but iff
+  // Fix the value of *_NSGetArgc() on macOS, but only if
// *_NSGetArgv() == argv
// Only applicable to char** version of argv
#if GTEST_OS_MAC
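
Taken together, the gtest.cc hunks above stamp a start timestamp on every test, test suite, and test run, and emit it next to the existing time attribute. A representative <testsuite> element after this change (values hypothetical; the ISO-8601 string comes from FormatEpochTimeInMillisAsIso8601()):

    <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
               errors="0" time="0.005" timestamp="2011-10-31T18:52:42">
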
diff --git a/googletest/test/googletest-death-test-test.cc b/googletest/test/googletest-death-test-test.cc
index 272b9c3..6c71fd8 100644
--- a/googletest/test/googletest-death-test-test.cc
+++ b/googletest/test/googletest-death-test-test.cc
@@ -139,7 +139,7 @@ class TestForDeathTest : public testing::Test {
DieInside("MemberFunction");
}
- // True iff MemberFunction() should die.
+ // True if MemberFunction() should die.
bool should_die_;
const FilePath original_dir_;
};
@@ -156,7 +156,7 @@ class MayDie {
}
private:
- // True iff MemberFunction() should die.
+ // True if MemberFunction() should die.
bool should_die_;
};
@@ -551,7 +551,7 @@ TEST_F(TestForDeathTest, ErrorMessageMismatch) {
}, "died but not with expected error");
}
-// On exit, *aborted will be true iff the EXPECT_DEATH() statement
+// On exit, *aborted will be true if the EXPECT_DEATH() statement
// aborted the function.
void ExpectDeathTestHelper(bool* aborted) {
*aborted = true;
diff --git a/googletest/test/googletest-json-outfiles-test.py b/googletest/test/googletest-json-outfiles-test.py
index b81a03d..8ef47b8 100644
--- a/googletest/test/googletest-json-outfiles-test.py
+++ b/googletest/test/googletest-json-outfiles-test.py
@@ -67,11 +67,14 @@ EXPECTED_1 = {
0,
u'time':
u'*',
+ u'timestamp':
+ u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
+ u'timestamp': u'*',
u'classname': u'PropertyOne',
u'SetUpProp': u'1',
u'TestSomeProperty': u'1',
@@ -108,10 +111,13 @@ EXPECTED_2 = {
0,
u'time':
u'*',
+ u'timestamp':
+ u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
+ u'timestamp': u'*',
u'time': u'*',
u'classname': u'PropertyTwo',
u'SetUpProp': u'2',
diff --git a/googletest/test/googletest-json-output-unittest.py b/googletest/test/googletest-json-output-unittest.py
index 64d7534..15861f7 100644
--- a/googletest/test/googletest-json-output-unittest.py
+++ b/googletest/test/googletest-json-output-unittest.py
@@ -73,457 +73,474 @@ EXPECTED_NON_EMPTY = {
u'42',
u'name':
u'AllTests',
- u'testsuites': [
- {
+ u'testsuites': [{
+ u'name':
+ u'SuccessfulTest',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'Succeeds',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'SuccessfulTest'
+ }]
+ }, {
+ u'name':
+ u'FailedTest',
+ u'tests':
+ 1,
+ u'failures':
+ 1,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
u'name':
- u'SuccessfulTest',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
+ u'Fails',
+ u'status':
+ u'RUN',
+ u'result':
+ u'COMPLETED',
u'time':
u'*',
- u'testsuite': [{
- u'name': u'Succeeds',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'SuccessfulTest'
- }]
- },
- {
- u'name':
- u'FailedTest',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
+ u'timestamp':
u'*',
- u'testsuite': [{
- u'name':
- u'Fails',
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'classname':
- u'FailedTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 1\n 2' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
+ u'classname':
+ u'FailedTest',
+ u'failures': [{
+ u'failure': u'gtest_xml_output_unittest_.cc:*\n'
+ u'Expected equality of these values:\n'
+ u' 1\n 2' + STACK_TRACE_TEMPLATE,
+ u'type': u''
}]
- },
- {
+ }]
+ }, {
+ u'name':
+ u'DisabledTest',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 1,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'DISABLED_test_not_run',
+ u'status': u'NOTRUN',
+ u'result': u'SUPPRESSED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'DisabledTest'
+ }]
+ }, {
+ u'name':
+ u'SkippedTest',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'Skipped',
+ u'status': u'RUN',
+ u'result': u'SKIPPED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'SkippedTest'
+ }]
+ }, {
+ u'name':
+ u'MixedResultTest',
+ u'tests':
+ 3,
+ u'failures':
+ 1,
+ u'disabled':
+ 1,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'Succeeds',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'MixedResultTest'
+ }, {
u'name':
- u'DisabledTest',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 1,
- u'errors':
- 0,
+ u'Fails',
+ u'status':
+ u'RUN',
+ u'result':
+ u'COMPLETED',
u'time':
u'*',
- u'testsuite': [{
- u'name': u'DISABLED_test_not_run',
- u'status': u'NOTRUN',
- u'result': u'SUPPRESSED',
- u'time': u'*',
- u'classname': u'DisabledTest'
- }]
- },
- {
- u'name':
- u'SkippedTest',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
+ u'timestamp':
u'*',
- u'testsuite': [{
- u'name': u'Skipped',
- u'status': u'RUN',
- u'result': u'SKIPPED',
- u'time': u'*',
- u'classname': u'SkippedTest'
- }]
- },
- {
- u'name':
+ u'classname':
u'MixedResultTest',
- u'tests':
- 3,
- u'failures':
- 1,
- u'disabled':
- 1,
- u'errors':
- 0,
- u'time':
- u'*',
- u'testsuite': [
- {
- u'name': u'Succeeds',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'MixedResultTest'
- },
- {
- u'name':
- u'Fails',
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'classname':
- u'MixedResultTest',
- u'failures':
- [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 1\n 2' + STACK_TRACE_TEMPLATE,
- u'type': u''
- },
- {
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Expected equality of these values:\n'
- u' 2\n 3' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- },
- {
- u'name': u'DISABLED_test',
- u'status': u'NOTRUN',
- u'result': u'SUPPRESSED',
- u'time': u'*',
- u'classname': u'MixedResultTest'
- }
- ]
- },
- {
- u'name':
- u'XmlQuotingTest',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'testsuite': [{
- u'name':
- u'OutputsCData',
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'classname':
- u'XmlQuotingTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Failed\nXML output: <?xml encoding="utf-8">'
- u'<top><![CDATA[cdata text]]></top>' +
- STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
- }]
- },
- {
- u'name':
- u'InvalidCharactersTest',
- u'tests':
- 1,
- u'failures':
- 1,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'testsuite': [{
- u'name':
- u'InvalidCharactersInMessage',
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'classname':
- u'InvalidCharactersTest',
- u'failures': [{
- u'failure': u'gtest_xml_output_unittest_.cc:*\n'
- u'Failed\nInvalid characters in brackets'
- u' [\x01\x02]' + STACK_TRACE_TEMPLATE,
- u'type': u''
- }]
+ u'failures': [{
+ u'failure': u'gtest_xml_output_unittest_.cc:*\n'
+ u'Expected equality of these values:\n'
+ u' 1\n 2' + STACK_TRACE_TEMPLATE,
+ u'type': u''
+ }, {
+ u'failure': u'gtest_xml_output_unittest_.cc:*\n'
+ u'Expected equality of these values:\n'
+ u' 2\n 3' + STACK_TRACE_TEMPLATE,
+ u'type': u''
}]
- },
- {
- u'name':
- u'PropertyRecordingTest',
- u'tests':
- 4,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'SetUpTestSuite':
- u'yes',
- u'TearDownTestSuite':
- u'aye',
- u'testsuite': [{
- u'name': u'OneProperty',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_1': u'1'
- },
- {
- u'name': u'IntValuedProperty',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_int': u'1'
- },
- {
- u'name': u'ThreeProperties',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_1': u'1',
- u'key_2': u'2',
- u'key_3': u'3'
- },
- {
- u'name': u'TwoValuesForOneKeyUsesLastValue',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'PropertyRecordingTest',
- u'key_1': u'2'
- }]
- },
- {
- u'name':
- u'NoFixtureTest',
- u'tests':
- 3,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'testsuite': [
- {
- u'name': u'RecordProperty',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'NoFixtureTest',
- u'key': u'1'
- },
- {
- u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'NoFixtureTest',
- u'key_for_utility_int': u'1'
- },
- {
- u'name':
- u'ExternalUtilityThatCallsRecordStringValuedProperty',
- u'status':
- u'RUN',
- u'result':
- u'COMPLETED',
- u'time':
- u'*',
- u'classname':
- u'NoFixtureTest',
- u'key_for_utility_string':
- u'1'
- }
- ]
- },
- {
+ }, {
+ u'name': u'DISABLED_test',
+ u'status': u'NOTRUN',
+ u'result': u'SUPPRESSED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'MixedResultTest'
+ }]
+ }, {
+ u'name':
+ u'XmlQuotingTest',
+ u'tests':
+ 1,
+ u'failures':
+ 1,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
u'name':
- u'TypedTest/0',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
+ u'OutputsCData',
+ u'status':
+ u'RUN',
+ u'result':
+ u'COMPLETED',
u'time':
u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'int',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'TypedTest/0'
- }]
- },
- {
- u'name':
- u'TypedTest/1',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
+ u'timestamp':
u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'long',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'TypedTest/1'
+ u'classname':
+ u'XmlQuotingTest',
+ u'failures': [{
+ u'failure': u'gtest_xml_output_unittest_.cc:*\n'
+ u'Failed\nXML output: <?xml encoding="utf-8">'
+ u'<top><![CDATA[cdata text]]></top>' +
+ STACK_TRACE_TEMPLATE,
+ u'type': u''
}]
- },
- {
+ }]
+ }, {
+ u'name':
+ u'InvalidCharactersTest',
+ u'tests':
+ 1,
+ u'failures':
+ 1,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
u'name':
- u'Single/TypeParameterizedTestSuite/0',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
+ u'InvalidCharactersInMessage',
+ u'status':
+ u'RUN',
+ u'result':
+ u'COMPLETED',
u'time':
u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'int',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'Single/TypeParameterizedTestSuite/0'
- }]
- },
- {
- u'name':
- u'Single/TypeParameterizedTestSuite/1',
- u'tests':
- 1,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
+ u'timestamp':
u'*',
- u'testsuite': [{
- u'name': u'HasTypeParamAttribute',
- u'type_param': u'long',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'Single/TypeParameterizedTestSuite/1'
+ u'classname':
+ u'InvalidCharactersTest',
+ u'failures': [{
+ u'failure': u'gtest_xml_output_unittest_.cc:*\n'
+ u'Failed\nInvalid characters in brackets'
+ u' [\x01\x02]' + STACK_TRACE_TEMPLATE,
+ u'type': u''
}]
- },
- {
- u'name':
- u'Single/ValueParamTest',
- u'tests':
- 4,
- u'failures':
- 0,
- u'disabled':
- 0,
- u'errors':
- 0,
- u'time':
- u'*',
- u'testsuite': [
- {
- u'name': u'HasValueParamAttribute/0',
- u'value_param': u'33',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'Single/ValueParamTest'
- },
- {
- u'name': u'HasValueParamAttribute/1',
- u'value_param': u'42',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'Single/ValueParamTest'
- },
- {
- u'name': u'AnotherTestThatHasValueParamAttribute/0',
- u'value_param': u'33',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'Single/ValueParamTest'
- },
- {
- u'name': u'AnotherTestThatHasValueParamAttribute/1',
- u'value_param': u'42',
- u'status': u'RUN',
- u'result': u'COMPLETED',
- u'time': u'*',
- u'classname': u'Single/ValueParamTest'
- }
- ]
- }
- ]
+ }]
+ }, {
+ u'name':
+ u'PropertyRecordingTest',
+ u'tests':
+ 4,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'SetUpTestSuite':
+ u'yes',
+ u'TearDownTestSuite':
+ u'aye',
+ u'testsuite': [{
+ u'name': u'OneProperty',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'PropertyRecordingTest',
+ u'key_1': u'1'
+ }, {
+ u'name': u'IntValuedProperty',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'PropertyRecordingTest',
+ u'key_int': u'1'
+ }, {
+ u'name': u'ThreeProperties',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'PropertyRecordingTest',
+ u'key_1': u'1',
+ u'key_2': u'2',
+ u'key_3': u'3'
+ }, {
+ u'name': u'TwoValuesForOneKeyUsesLastValue',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'PropertyRecordingTest',
+ u'key_1': u'2'
+ }]
+ }, {
+ u'name':
+ u'NoFixtureTest',
+ u'tests':
+ 3,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'RecordProperty',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'NoFixtureTest',
+ u'key': u'1'
+ }, {
+ u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'NoFixtureTest',
+ u'key_for_utility_int': u'1'
+ }, {
+ u'name': u'ExternalUtilityThatCallsRecordStringValuedProperty',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'NoFixtureTest',
+ u'key_for_utility_string': u'1'
+ }]
+ }, {
+ u'name':
+ u'TypedTest/0',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'HasTypeParamAttribute',
+ u'type_param': u'int',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'TypedTest/0'
+ }]
+ }, {
+ u'name':
+ u'TypedTest/1',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'HasTypeParamAttribute',
+ u'type_param': u'long',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'TypedTest/1'
+ }]
+ }, {
+ u'name':
+ u'Single/TypeParameterizedTestSuite/0',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'HasTypeParamAttribute',
+ u'type_param': u'int',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'Single/TypeParameterizedTestSuite/0'
+ }]
+ }, {
+ u'name':
+ u'Single/TypeParameterizedTestSuite/1',
+ u'tests':
+ 1,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'HasTypeParamAttribute',
+ u'type_param': u'long',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'Single/TypeParameterizedTestSuite/1'
+ }]
+ }, {
+ u'name':
+ u'Single/ValueParamTest',
+ u'tests':
+ 4,
+ u'failures':
+ 0,
+ u'disabled':
+ 0,
+ u'errors':
+ 0,
+ u'time':
+ u'*',
+ u'timestamp':
+ u'*',
+ u'testsuite': [{
+ u'name': u'HasValueParamAttribute/0',
+ u'value_param': u'33',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'Single/ValueParamTest'
+ }, {
+ u'name': u'HasValueParamAttribute/1',
+ u'value_param': u'42',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'Single/ValueParamTest'
+ }, {
+ u'name': u'AnotherTestThatHasValueParamAttribute/0',
+ u'value_param': u'33',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'Single/ValueParamTest'
+ }, {
+ u'name': u'AnotherTestThatHasValueParamAttribute/1',
+ u'value_param': u'42',
+ u'status': u'RUN',
+ u'result': u'COMPLETED',
+ u'time': u'*',
+ u'timestamp': u'*',
+ u'classname': u'Single/ValueParamTest'
+ }]
+ }]
}
EXPECTED_FILTERED = {
@@ -556,11 +573,14 @@ EXPECTED_FILTERED = {
0,
u'time':
u'*',
+ u'timestamp':
+ u'*',
u'testsuite': [{
u'name': u'Succeeds',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
+ u'timestamp': u'*',
u'classname': u'SuccessfulTest',
}]
}],
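
The u'*' wildcards above accept any concrete value, since times and timestamps vary from run to run. A representative test-case object as the JSON printer now emits it (values hypothetical; the duration comes from FormatTimeInMillisAsDuration() and the timestamp from FormatEpochTimeInMillisAsRFC3339()):

    {
      "name": "Succeeds",
      "status": "RUN",
      "result": "COMPLETED",
      "timestamp": "2011-10-31T18:52:42Z",
      "time": "0.005s",
      "classname": "SuccessfulTest"
    }
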
diff --git a/googletest/test/googletest-listener-test.cc b/googletest/test/googletest-listener-test.cc
index f50faaf..10457af 100644
--- a/googletest/test/googletest-listener-test.cc
+++ b/googletest/test/googletest-listener-test.cc
@@ -35,6 +35,7 @@
#include <vector>
#include "gtest/gtest.h"
+#include "gtest/internal/custom/gtest.h"
using ::testing::AddGlobalTestEnvironment;
using ::testing::Environment;
@@ -76,10 +77,11 @@ class EventRecordingListener : public TestEventListener {
void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {
g_events->push_back(GetFullMethodName("OnEnvironmentsSetUpEnd"));
}
-
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnTestCaseStart(const TestCase& /*test_case*/) override {
g_events->push_back(GetFullMethodName("OnTestCaseStart"));
}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnTestStart(const TestInfo& /*test_info*/) override {
g_events->push_back(GetFullMethodName("OnTestStart"));
@@ -93,9 +95,11 @@ class EventRecordingListener : public TestEventListener {
g_events->push_back(GetFullMethodName("OnTestEnd"));
}
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnTestCaseEnd(const TestCase& /*test_case*/) override {
g_events->push_back(GetFullMethodName("OnTestCaseEnd"));
}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) override {
g_events->push_back(GetFullMethodName("OnEnvironmentsTearDownStart"));
@@ -283,6 +287,9 @@ int main(int argc, char **argv) {
::testing::GTEST_FLAG(repeat) = 2;
int ret_val = RUN_ALL_TESTS();
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+  // Both the OnTestSuiteStart and the deprecated OnTestCaseStart events
+  // are included in the expected sequence when the legacy API is available.
const char* const expected_events[] = {"1st.OnTestProgramStart",
"2nd.OnTestProgramStart",
"3rd.OnTestProgramStart",
@@ -393,6 +400,110 @@ int main(int argc, char **argv) {
"3rd.OnTestProgramEnd",
"2nd.OnTestProgramEnd",
"1st.OnTestProgramEnd"};
+#else
+ const char* const expected_events[] = {"1st.OnTestProgramStart",
+ "2nd.OnTestProgramStart",
+ "3rd.OnTestProgramStart",
+ "1st.OnTestIterationStart(0)",
+ "2nd.OnTestIterationStart(0)",
+ "3rd.OnTestIterationStart(0)",
+ "1st.OnEnvironmentsSetUpStart",
+ "2nd.OnEnvironmentsSetUpStart",
+ "3rd.OnEnvironmentsSetUpStart",
+ "Environment::SetUp",
+ "3rd.OnEnvironmentsSetUpEnd",
+ "2nd.OnEnvironmentsSetUpEnd",
+ "1st.OnEnvironmentsSetUpEnd",
+ "3rd.OnTestSuiteStart",
+ "ListenerTest::SetUpTestSuite",
+ "1st.OnTestStart",
+ "2nd.OnTestStart",
+ "3rd.OnTestStart",
+ "ListenerTest::SetUp",
+ "ListenerTest::* Test Body",
+ "1st.OnTestPartResult",
+ "2nd.OnTestPartResult",
+ "3rd.OnTestPartResult",
+ "ListenerTest::TearDown",
+ "3rd.OnTestEnd",
+ "2nd.OnTestEnd",
+ "1st.OnTestEnd",
+ "1st.OnTestStart",
+ "2nd.OnTestStart",
+ "3rd.OnTestStart",
+ "ListenerTest::SetUp",
+ "ListenerTest::* Test Body",
+ "1st.OnTestPartResult",
+ "2nd.OnTestPartResult",
+ "3rd.OnTestPartResult",
+ "ListenerTest::TearDown",
+ "3rd.OnTestEnd",
+ "2nd.OnTestEnd",
+ "1st.OnTestEnd",
+ "ListenerTest::TearDownTestSuite",
+ "3rd.OnTestSuiteEnd",
+ "1st.OnEnvironmentsTearDownStart",
+ "2nd.OnEnvironmentsTearDownStart",
+ "3rd.OnEnvironmentsTearDownStart",
+ "Environment::TearDown",
+ "3rd.OnEnvironmentsTearDownEnd",
+ "2nd.OnEnvironmentsTearDownEnd",
+ "1st.OnEnvironmentsTearDownEnd",
+ "3rd.OnTestIterationEnd(0)",
+ "2nd.OnTestIterationEnd(0)",
+ "1st.OnTestIterationEnd(0)",
+ "1st.OnTestIterationStart(1)",
+ "2nd.OnTestIterationStart(1)",
+ "3rd.OnTestIterationStart(1)",
+ "1st.OnEnvironmentsSetUpStart",
+ "2nd.OnEnvironmentsSetUpStart",
+ "3rd.OnEnvironmentsSetUpStart",
+ "Environment::SetUp",
+ "3rd.OnEnvironmentsSetUpEnd",
+ "2nd.OnEnvironmentsSetUpEnd",
+ "1st.OnEnvironmentsSetUpEnd",
+ "3rd.OnTestSuiteStart",
+ "ListenerTest::SetUpTestSuite",
+ "1st.OnTestStart",
+ "2nd.OnTestStart",
+ "3rd.OnTestStart",
+ "ListenerTest::SetUp",
+ "ListenerTest::* Test Body",
+ "1st.OnTestPartResult",
+ "2nd.OnTestPartResult",
+ "3rd.OnTestPartResult",
+ "ListenerTest::TearDown",
+ "3rd.OnTestEnd",
+ "2nd.OnTestEnd",
+ "1st.OnTestEnd",
+ "1st.OnTestStart",
+ "2nd.OnTestStart",
+ "3rd.OnTestStart",
+ "ListenerTest::SetUp",
+ "ListenerTest::* Test Body",
+ "1st.OnTestPartResult",
+ "2nd.OnTestPartResult",
+ "3rd.OnTestPartResult",
+ "ListenerTest::TearDown",
+ "3rd.OnTestEnd",
+ "2nd.OnTestEnd",
+ "1st.OnTestEnd",
+ "ListenerTest::TearDownTestSuite",
+ "3rd.OnTestSuiteEnd",
+ "1st.OnEnvironmentsTearDownStart",
+ "2nd.OnEnvironmentsTearDownStart",
+ "3rd.OnEnvironmentsTearDownStart",
+ "Environment::TearDown",
+ "3rd.OnEnvironmentsTearDownEnd",
+ "2nd.OnEnvironmentsTearDownEnd",
+ "1st.OnEnvironmentsTearDownEnd",
+ "3rd.OnTestIterationEnd(1)",
+ "2nd.OnTestIterationEnd(1)",
+ "1st.OnTestIterationEnd(1)",
+ "3rd.OnTestProgramEnd",
+ "2nd.OnTestProgramEnd",
+ "1st.OnTestProgramEnd"};
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
VerifyResults(events,
expected_events,
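
The listener test above records every callback from three stacked listeners; which callback names appear in the expected sequence depends on GTEST_REMOVE_LEGACY_TEST_CASEAPI_. A minimal listener that sticks to the suite-based callbacks, so it compiles either way (class name is illustrative):

    #include <cstdio>
    #include "gtest/gtest.h"

    class SuiteLogger : public testing::EmptyTestEventListener {
     public:
      void OnTestSuiteStart(const testing::TestSuite& suite) override {
        std::printf("[ suite ] %s starting\n", suite.name());
      }
      void OnTestSuiteEnd(const testing::TestSuite& suite) override {
        std::printf("[ suite ] %s finished\n", suite.name());
      }
    };

    // Typical registration in main(), after InitGoogleTest(); the listener
    // list takes ownership of the pointer:
    //   testing::UnitTest::GetInstance()->listeners().Append(new SuiteLogger);
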
diff --git a/googletest/test/googletest-throw-on-failure-test.py b/googletest/test/googletest-throw-on-failure-test.py
index 7e4b158..a38cd33 100755
--- a/googletest/test/googletest-throw-on-failure-test.py
+++ b/googletest/test/googletest-throw-on-failure-test.py
@@ -86,7 +86,7 @@ class ThrowOnFailureTest(gtest_test_utils.TestCase):
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
- should_fail: True iff the program is expected to fail.
+ should_fail: True if the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
diff --git a/googletest/test/gtest_environment_test.cc b/googletest/test/gtest_environment_test.cc
index fea542a..58908e6 100644
--- a/googletest/test/gtest_environment_test.cc
+++ b/googletest/test/gtest_environment_test.cc
@@ -116,7 +116,7 @@ void Check(bool condition, const char* msg) {
}
}
-// Runs the tests. Return true iff successful.
+// Runs the tests. Returns true if successful.
//
// The 'failure' parameter specifies the type of failure that should
// be generated by the global set-up.
diff --git a/googletest/test/gtest_pred_impl_unittest.cc b/googletest/test/gtest_pred_impl_unittest.cc
index 049ef98..4d77896 100644
--- a/googletest/test/gtest_pred_impl_unittest.cc
+++ b/googletest/test/gtest_pred_impl_unittest.cc
@@ -144,10 +144,10 @@ class Predicate1Test : public testing::Test {
}
}
- // true iff the test function is expected to run to finish.
+ // true if the test function is expected to run to finish.
static bool expected_to_finish_;
- // true iff the test function did run to finish.
+ // true if the test function did run to finish.
static bool finished_;
static int n1_;
@@ -539,10 +539,10 @@ class Predicate2Test : public testing::Test {
}
}
- // true iff the test function is expected to run to finish.
+ // true if the test function is expected to run to finish.
static bool expected_to_finish_;
- // true iff the test function did run to finish.
+ // true if the test function did run to finish.
static bool finished_;
static int n1_;
@@ -976,10 +976,10 @@ class Predicate3Test : public testing::Test {
}
}
- // true iff the test function is expected to run to finish.
+ // true if the test function is expected to run to finish.
static bool expected_to_finish_;
- // true iff the test function did run to finish.
+ // true if the test function did run to finish.
static bool finished_;
static int n1_;
@@ -1455,10 +1455,10 @@ class Predicate4Test : public testing::Test {
}
}
- // true iff the test function is expected to run to finish.
+ // true if the test function is expected to run to finish.
static bool expected_to_finish_;
- // true iff the test function did run to finish.
+ // true if the test function did run to finish.
static bool finished_;
static int n1_;
@@ -1976,10 +1976,10 @@ class Predicate5Test : public testing::Test {
}
}
- // true iff the test function is expected to run to finish.
+ // true if the test function is expected to run to finish.
static bool expected_to_finish_;
- // true iff the test function did run to finish.
+ // true if the test function did run to finish.
static bool finished_;
static int n1_;
diff --git a/googletest/test/gtest_premature_exit_test.cc b/googletest/test/gtest_premature_exit_test.cc
index 0920a97..777a8bf 100644
--- a/googletest/test/gtest_premature_exit_test.cc
+++ b/googletest/test/gtest_premature_exit_test.cc
@@ -45,7 +45,7 @@ namespace {
class PrematureExitTest : public Test {
public:
- // Returns true iff the given file exists.
+ // Returns true if the given file exists.
static bool FileExists(const char* filepath) {
StatStruct stat;
return Stat(filepath, &stat) == 0;
@@ -61,7 +61,7 @@ class PrematureExitTest : public Test {
}
}
- // Returns true iff the premature-exit file exists.
+ // Returns true if the premature-exit file exists.
bool PrematureExitFileExists() const {
return FileExists(premature_exit_file_path_);
}
diff --git a/googletest/test/gtest_test_utils.py b/googletest/test/gtest_test_utils.py
index 9a4dcb8..abd56ec 100755
--- a/googletest/test/gtest_test_utils.py
+++ b/googletest/test/gtest_test_utils.py
@@ -215,10 +215,10 @@ class Subprocess:
Returns:
An object that represents the outcome of the executed process. It has the
following attributes:
- terminated_by_signal True iff the child process has been terminated
+ terminated_by_signal True if the child process has been terminated
by a signal.
signal Signal that terminated the child process.
- exited True iff the child process exited normally.
+ exited True if the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
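A minimal usage sketch of the Subprocess outcome attributes documented above; the binary path is a placeholder, and only attributes named in the docstring are touched:

import gtest_test_utils

p = gtest_test_utils.Subprocess(['./sample_gtest_binary'])  # hypothetical path
if p.terminated_by_signal:
    print('terminated by signal %d' % p.signal)
elif p.exited:
    print('exited with code %d' % p.exit_code)
print(p.output)  # child's stdout and stderr, combined in one string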
diff --git a/googletest/test/gtest_unittest.cc b/googletest/test/gtest_unittest.cc
index d481056..2b00b70 100644
--- a/googletest/test/gtest_unittest.cc
+++ b/googletest/test/gtest_unittest.cc
@@ -2016,10 +2016,11 @@ void ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest(
void ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestSuite(
const char* key) {
- const TestCase* test_case = UnitTest::GetInstance()->current_test_case();
- ASSERT_TRUE(test_case != nullptr);
+ const testing::TestSuite* test_suite =
+ UnitTest::GetInstance()->current_test_suite();
+ ASSERT_TRUE(test_suite != nullptr);
ExpectNonFatalFailureRecordingPropertyWithReservedKey(
- test_case->ad_hoc_test_result(), key);
+ test_suite->ad_hoc_test_result(), key);
}
void ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestSuite(
@@ -2049,8 +2050,10 @@ class UnitTestRecordPropertyTest :
"time");
Test::RecordProperty("test_case_key_1", "1");
+
const testing::TestSuite* test_suite =
- UnitTest::GetInstance()->current_test_case();
+ UnitTest::GetInstance()->current_test_suite();
+
ASSERT_TRUE(test_suite != nullptr);
ASSERT_EQ(1, test_suite->ad_hoc_test_result().test_property_count());
@@ -2167,12 +2170,12 @@ static Environment* record_property_env GTEST_ATTRIBUTE_UNUSED_ =
// First, some predicates and predicate-formatters needed by the tests.
-// Returns true iff the argument is an even number.
+// Returns true if the argument is an even number.
bool IsEven(int n) {
return (n % 2) == 0;
}
-// A functor that returns true iff the argument is an even number.
+// A functor that returns true if the argument is an even number.
struct IsEvenFunctor {
bool operator()(int n) { return IsEven(n); }
};
@@ -2216,12 +2219,12 @@ struct AssertIsEvenFunctor {
}
};
-// Returns true iff the sum of the arguments is an even number.
+// Returns true if the sum of the arguments is an even number.
bool SumIsEven2(int n1, int n2) {
return IsEven(n1 + n2);
}
-// A functor that returns true iff the sum of the arguments is an even
+// A functor that returns true if the sum of the arguments is an even
// number.
struct SumIsEven3Functor {
bool operator()(int n1, int n2, int n3) {
diff --git a/googletest/test/gtest_xml_outfiles_test.py b/googletest/test/gtest_xml_outfiles_test.py
index eaca1a7..e093f6f 100755
--- a/googletest/test/gtest_xml_outfiles_test.py
+++ b/googletest/test/gtest_xml_outfiles_test.py
@@ -42,8 +42,8 @@ GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
- <testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="TestSomeProperties" status="run" result="completed" time="*" classname="PropertyOne">
+ <testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="TestSomeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne">
<properties>
<property name="SetUpProp" value="1"/>
<property name="TestSomeProperty" value="1"/>
@@ -56,8 +56,8 @@ EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
- <testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="TestSomeProperties" status="run" result="completed" time="*" classname="PropertyTwo">
+ <testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="TestSomeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo">
<properties>
<property name="SetUpProp" value="2"/>
<property name="TestSomeProperty" value="2"/>
diff --git a/googletest/test/gtest_xml_output_unittest.py b/googletest/test/gtest_xml_output_unittest.py
index 745a134..63b1af0 100755
--- a/googletest/test/gtest_xml_output_unittest.py
+++ b/googletest/test/gtest_xml_output_unittest.py
@@ -66,20 +66,20 @@ else:
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="24" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
- <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="Succeeds" status="run" result="completed" time="*" classname="SuccessfulTest"/>
+ <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
- <testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
- <testcase name="Fails" status="run" result="completed" time="*" classname="FailedTest">
+ <testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*&#x0A;Expected equality of these values:&#x0A; 1&#x0A; 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
</testcase>
</testsuite>
- <testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
- <testcase name="Succeeds" status="run" result="completed" time="*" classname="MixedResultTest"/>
- <testcase name="Fails" status="run" result="completed" time="*" classname="MixedResultTest">
+ <testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*" timestamp="*">
+ <testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest"/>
+ <testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*&#x0A;Expected equality of these values:&#x0A; 1&#x0A; 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
@@ -89,86 +89,86 @@ Expected equality of these values:
2
3%(stack)s]]></failure>
</testcase>
- <testcase name="DISABLED_test" status="notrun" result="suppressed" time="*" classname="MixedResultTest"/>
+ <testcase name="DISABLED_test" status="notrun" result="suppressed" time="*" timestamp="*" classname="MixedResultTest"/>
</testsuite>
- <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
- <testcase name="OutputsCData" status="run" result="completed" time="*" classname="XmlQuotingTest">
+ <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="OutputsCData" status="run" result="completed" time="*" timestamp="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*&#x0A;Failed&#x0A;XML output: &lt;?xml encoding=&quot;utf-8&quot;&gt;&lt;top&gt;&lt;![CDATA[cdata text]]&gt;&lt;/top&gt;" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]&gt;<![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
- <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
- <testcase name="InvalidCharactersInMessage" status="run" result="completed" time="*" classname="InvalidCharactersTest">
+ <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="InvalidCharactersInMessage" status="run" result="completed" time="*" timestamp="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*&#x0A;Failed&#x0A;Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
- <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
- <testcase name="DISABLED_test_not_run" status="notrun" result="suppressed" time="*" classname="DisabledTest"/>
+ <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*" timestamp="*">
+ <testcase name="DISABLED_test_not_run" status="notrun" result="suppressed" time="*" timestamp="*" classname="DisabledTest"/>
</testsuite>
- <testsuite name="SkippedTest" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="Skipped" status="run" result="skipped" time="*" classname="SkippedTest"/>
+ <testsuite name="SkippedTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="Skipped" status="run" result="skipped" time="*" timestamp="*" classname="SkippedTest"/>
</testsuite>
- <testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
- <testcase name="OneProperty" status="run" result="completed" time="*" classname="PropertyRecordingTest">
+ <testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
+ <testcase name="OneProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
</properties>
</testcase>
- <testcase name="IntValuedProperty" status="run" result="completed" time="*" classname="PropertyRecordingTest">
+ <testcase name="IntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_int" value="1"/>
</properties>
</testcase>
- <testcase name="ThreeProperties" status="run" result="completed" time="*" classname="PropertyRecordingTest">
+ <testcase name="ThreeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
<property name="key_2" value="2"/>
<property name="key_3" value="3"/>
</properties>
</testcase>
- <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" classname="PropertyRecordingTest">
+ <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
- <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
- <testcase name="RecordProperty" status="run" result="completed" time="*" classname="NoFixtureTest">
+ <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="RecordProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key" value="1"/>
</properties>
</testcase>
- <testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" result="completed" time="*" classname="NoFixtureTest">
+ <testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_int" value="1"/>
</properties>
</testcase>
- <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" result="completed" time="*" classname="NoFixtureTest">
+ <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_string" value="1"/>
</properties>
</testcase>
</testsuite>
- <testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
- <testcase name="HasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" classname="Single/ValueParamTest" />
- <testcase name="HasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" classname="Single/ValueParamTest" />
- <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" classname="Single/ValueParamTest" />
- <testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" classname="Single/ValueParamTest" />
+ <testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="HasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
+ <testcase name="HasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
+ <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
+ <testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
- <testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" classname="TypedTest/0" />
+ <testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/0" />
</testsuite>
- <testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" classname="TypedTest/1" />
+ <testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/1" />
</testsuite>
- <testsuite name="Single/TypeParameterizedTestSuite/0" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" classname="Single/TypeParameterizedTestSuite/0" />
+ <testsuite name="Single/TypeParameterizedTestSuite/0" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/0" />
</testsuite>
- <testsuite name="Single/TypeParameterizedTestSuite/1" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" classname="Single/TypeParameterizedTestSuite/1" />
+ <testsuite name="Single/TypeParameterizedTestSuite/1" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/1" />
</testsuite>
</testsuites>""" % {
'stack': STACK_TRACE_TEMPLATE
@@ -178,25 +178,25 @@ EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
- errors="0" time="*">
- <testcase name="Succeeds" status="run" result="completed" time="*" classname="SuccessfulTest"/>
+ errors="0" time="*" timestamp="*">
+ <testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_SHARDED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
- <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="Succeeds" status="run" result="completed" time="*" classname="SuccessfulTest"/>
+ <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
- <testsuite name="PropertyRecordingTest" tests="1" failures="0" disabled="0" errors="0" time="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
- <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" classname="PropertyRecordingTest">
+ <testsuite name="PropertyRecordingTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
+ <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
- <testsuite name="Single/ValueParamTest" tests="1" failures="0" disabled="0" errors="0" time="*">
- <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" classname="Single/ValueParamTest" />
+ <testsuite name="Single/ValueParamTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
+ <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
</testsuites>"""
diff --git a/googletest/test/gtest_xml_test_utils.py b/googletest/test/gtest_xml_test_utils.py
index afcf55e..9914a49 100755
--- a/googletest/test/gtest_xml_test_utils.py
+++ b/googletest/test/gtest_xml_test_utils.py
@@ -169,7 +169,7 @@ class GTestXMLTestCase(gtest_test_utils.TestCase):
* The stack traces are removed.
"""
- if element.tagName == 'testsuites':
+ if element.tagName in ('testsuites', 'testsuite', 'testcase'):
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
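The hunk above widens timestamp normalization from <testsuites> alone to <testsuite> and <testcase> as well, matching the timestamp="*" attributes added to the expected XML earlier in this change. A self-contained sketch of the substitution it applies (the function name is illustrative):

import re

def normalize_timestamp(value):
    # An ISO-8601 timestamp such as 2019-03-28T14:02:09 collapses to '*',
    # so a run-dependent actual value compares equal to the literal '*'
    # in the expected XML; anything else is left untouched.
    return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$', '*', value)

assert normalize_timestamp('2019-03-28T14:02:09') == '*'
assert normalize_timestamp('*') == '*'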