Mirror of https://github.com/google/googletest.git
Rotten Green Tests are tests containing assertions that did not execute even though the test method reports a Pass result. This patch detects and reports such assertions in a TEST or TEST_F method when the method's result is Pass; skipped, disabled, or otherwise not-run tests are not analyzed for rotten assertions. By default, rotten assertions do not count as failures; pass `--gtest_treat_rotten_as_pass=0` to turn rotten tests into failures, and the built-in default can be customized (see GTEST_DEFAULT_RGT_PASS in the port header below). Rotten assertions in helper functions are also reported, provided the overall result of the test program was Pass. Currently, TEST_P, TYPED_TEST, and TYPED_TEST_P are treated as if they were helpers for rotten-reporting purposes, which can lead to false positives for these kinds of test methods, especially if they are skipped, disabled, or otherwise not run.
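To make the behavior concrete, here is a minimal sketch (not part of this commit; the test name is illustrative) of the kind of test the feature flags: nothing fails, so the test is green, yet the assertion in the untaken loop body never executes and is reported rotten rather than failed.

```cpp
#include <vector>

#include "gtest/gtest.h"

// This test passes (nothing fails), but the EXPECT_EQ never runs
// because the vector is empty. With RGT enabled, that assertion is
// reported as [  ROTTEN  ] instead of silently counting as green.
TEST(RgtExample, LooksGreenButIsRotten) {
  std::vector<int> values;  // Oops: never populated.
  for (int v : values) {
    EXPECT_EQ(v % 2, 0);  // Never executed => rotten, not failed.
  }
}
```

Running the test binary with `--gtest_treat_rotten_as_pass=0` turns the rotten report into a test failure; with the default (`1`), it is reported but the suite still passes.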
parent 9b4993ca7d
commit 48eed2d258
@@ -275,15 +275,10 @@ TEST(LosslessArithmeticConvertibleTest, FloatingPointToFloatingPoint) {
  // Larger size => smaller size is not fine.
  EXPECT_FALSE((LosslessArithmeticConvertible<double, float>::value));
  GTEST_INTENTIONAL_CONST_COND_PUSH_()
  if (sizeof(double) == sizeof(long double)) {  // NOLINT
    GTEST_INTENTIONAL_CONST_COND_POP_()
    // In some implementations (e.g. MSVC), double and long double
    // have the same size.
    EXPECT_TRUE((LosslessArithmeticConvertible<long double, double>::value));
  } else {
    EXPECT_FALSE((LosslessArithmeticConvertible<long double, double>::value));
  }
  // In some implementations (e.g. MSVC), double and long double
  // have the same size.
  EXPECT_EQ(sizeof(double) == sizeof(long double),
            (LosslessArithmeticConvertible<long double, double>::value));
}

// Tests the TupleMatches() template function.

@@ -319,6 +319,9 @@ if (gtest_build_tests)
  cxx_executable(gtest_list_output_unittest_ test gtest)
  py_test(gtest_list_output_unittest)

  cxx_executable(gtest_rgt_output_test_ test gtest_main)
  py_test(gtest_rgt_output_test)

  cxx_executable(gtest_xml_outfile1_test_ test gtest_main)
  cxx_executable(gtest_xml_outfile2_test_ test gtest_main)
  py_test(gtest_xml_outfiles_test)

@@ -152,6 +152,7 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
   public: \
    static void Execute() { statement; } \
  }; \
  GTEST_RGT_USES_EXPECT_FAILURE_; \
  ::testing::TestPartResultArray gtest_failures; \
  ::testing::internal::SingleFailureChecker gtest_checker( \
      &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr)); \
@@ -170,6 +171,7 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
   public: \
    static void Execute() { statement; } \
  }; \
  GTEST_RGT_USES_EXPECT_FAILURE_; \
  ::testing::TestPartResultArray gtest_failures; \
  ::testing::internal::SingleFailureChecker gtest_checker( \
      &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr)); \
@@ -216,6 +218,7 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
  do { \
    GTEST_RGT_USES_EXPECT_FAILURE_; \
    ::testing::TestPartResultArray gtest_failures; \
    ::testing::internal::SingleFailureChecker gtest_checker( \
        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
@@ -233,6 +236,7 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251

#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
  do { \
    GTEST_RGT_USES_EXPECT_FAILURE_; \
    ::testing::TestPartResultArray gtest_failures; \
    ::testing::internal::SingleFailureChecker gtest_checker( \
        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \

@@ -59,7 +59,8 @@ class GTEST_API_ TestPartResult {
    kSuccess,          // Succeeded.
    kNonFatalFailure,  // Failed but the test can continue.
    kFatalFailure,     // Failed and the test should be terminated.
    kSkip              // Skipped.
    kSkip,             // Skipped.
    kRotten            // Not executed but should have.
  };

  // C'tor. TestPartResult does NOT have a default constructor.
@@ -95,6 +96,9 @@ class GTEST_API_ TestPartResult {
  // Returns true if and only if the test part was skipped.
  bool skipped() const { return type_ == kSkip; }

  // Returns true if and only if the test part didn't execute but should have.
  bool rotten() const { return type_ == kRotten; }

  // Returns true if and only if the test part passed.
  bool passed() const { return type_ == kSuccess; }

@@ -152,6 +152,15 @@ GTEST_DECLARE_int32_(stack_trace_depth);
// non-zero code otherwise. For use with an external test framework.
GTEST_DECLARE_bool_(throw_on_failure);

// This flag controls whether an un-executed assertion within an otherwise
// passing test is treated as pass or fail.
GTEST_DECLARE_bool_(treat_rotten_as_pass);

#if GTEST_DEBUG_RGT
// Optionally dump all assertions locations, for debugging RGT.
GTEST_DECLARE_string_(dump_assertions_to);
#endif  // GTEST_DEBUG_RGT

// When this flag is set with a "host:port" string, on supported
// platforms test results are streamed to the specified port on
// the specified host machine.
@@ -417,6 +426,9 @@ class GTEST_API_ TestResult {
  // Returns true if and only if the test was skipped.
  bool Skipped() const;

  // Returns true if and only if the test passed but had a rotten assertion.
  bool Rotten() const;

  // Returns true if and only if the test failed.
  bool Failed() const;

@@ -595,6 +607,15 @@ class GTEST_API_ TestInfo {
    return matches_filter_ && !is_in_another_shard_;
  }

#if GTEST_HAS_RGT
  // Readonly access to the test assertion info.
  const internal::RgtAssertInfo& asserts() const { return asserts_; }

  // Track whether the test uses an EXPECT_*_FAILURE macro.
  bool uses_expect_failure() const { return uses_expect_failure_; }
  void set_uses_expect_failure() { uses_expect_failure_ = true; }
#endif  // GTEST_HAS_RGT

  // Returns the result of the test.
  const TestResult* result() const { return &result_; }

@@ -612,6 +633,9 @@ class GTEST_API_ TestInfo {
      internal::TypeId fixture_class_id, internal::SetUpTestSuiteFunc set_up_tc,
      internal::TearDownTestSuiteFunc tear_down_tc,
      internal::TestFactoryBase* factory);
#if GTEST_HAS_RGT
  friend size_t internal::RgtInit();
#endif  // GTEST_HAS_RGT

  // Constructs a TestInfo object. The newly constructed instance assumes
  // ownership of the factory object.
@@ -655,8 +679,13 @@ class GTEST_API_ TestInfo {
  bool matches_filter_;       // True if this test matches the
                              // user-specified filter.
  bool is_in_another_shard_;  // Will be run in another shard.
  bool uses_expect_failure_;  // Uses an EXPECT_*_FAILURE macro.
  internal::TestFactoryBase* const factory_;  // The factory that creates
                                              // the test object
#if GTEST_HAS_RGT
  internal::RgtAssertInfo asserts_;  // Info on each assertion macro contained
                                     // within this test.
#endif  // GTEST_HAS_RGT

  // This field is mutable and needs to be reset before running the
  // test for the second time.
@@ -709,6 +738,9 @@ class GTEST_API_ TestSuite {
  // Gets the number of skipped tests in this test suite.
  int skipped_test_count() const;

  // Gets the number of rotten tests in this test suite.
  int rotten_test_count() const;

  // Gets the number of failed tests in this test suite.
  int failed_test_count() const;

@@ -730,6 +762,10 @@ class GTEST_API_ TestSuite {
  // Returns true if and only if the test suite passed.
  bool Passed() const { return !Failed(); }

  // Returns true if and only if the test suite passed but there is at least
  // one rotten assertion.
  bool Rotten() const { return Passed() && rotten_test_count() > 0; }

  // Returns true if and only if the test suite failed.
  bool Failed() const {
    return failed_test_count() > 0 || ad_hoc_test_result().Failed();
@@ -813,6 +849,11 @@ class GTEST_API_ TestSuite {
    return test_info->should_run() && test_info->result()->Skipped();
  }

  // Returns true if and only if test was rotten.
  static bool TestRotten(const TestInfo* test_info) {
    return test_info->should_run() && test_info->result()->Rotten();
  }

  // Returns true if and only if test failed.
  static bool TestFailed(const TestInfo* test_info) {
    return test_info->should_run() && test_info->result()->Failed();
@@ -1155,6 +1196,9 @@ class GTEST_API_ UnitTest {
  // Gets the number of successful test suites.
  int successful_test_suite_count() const;

  // Gets the number of test suites with rotten assertions.
  int rotten_test_suite_count() const;

  // Gets the number of failed test suites.
  int failed_test_suite_count() const;

@@ -1179,6 +1223,9 @@ class GTEST_API_ UnitTest {
  // Gets the number of skipped tests.
  int skipped_test_count() const;

  // Gets the number of rotten tests.
  int rotten_test_count() const;

  // Gets the number of failed tests.
  int failed_test_count() const;

@@ -1208,6 +1255,10 @@ class GTEST_API_ UnitTest {
  // passed).
  bool Passed() const;

  // Returns true if and only if the unit test passed but had at least one
  // rotten assertion.
  bool Rotten() const;

  // Returns true if and only if the unit test failed (i.e. some test suite
  // failed or something outside of all tests failed).
  bool Failed() const;

@@ -77,7 +77,7 @@ namespace testing {
#define GTEST_ASSERT_(expression, on_failure) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (const ::testing::AssertionResult gtest_ar = (expression)) \
    ; \
    { GTEST_RGT_DECLARE } \
  else \
    on_failure(gtest_ar.failure_message())

@@ -221,6 +221,7 @@ GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
#define GTEST_DEATH_TEST_(statement, predicate, regex_or_matcher, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::AlwaysTrue()) { \
    { GTEST_RGT_DECLARE } \
    ::testing::internal::DeathTest* gtest_dt; \
    if (!::testing::internal::DeathTest::Create( \
            #statement, \
@@ -259,6 +260,7 @@ GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
#define GTEST_EXECUTE_STATEMENT_(statement, regex_or_matcher) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::AlwaysTrue()) { \
    { GTEST_RGT_DECLARE } \
    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
  } else if (!::testing::internal::AlwaysTrue()) { \
    ::testing::internal::MakeDeathTestMatcher(regex_or_matcher); \

@@ -68,6 +68,7 @@

#include "gtest/gtest-message.h"
#include "gtest/internal/gtest-filepath.h"
#include "gtest/internal/gtest-rgt.h"
#include "gtest/internal/gtest-string.h"
#include "gtest/internal/gtest-type-util.h"

@@ -1368,6 +1369,7 @@ class NeverThrown {
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::TrueWithString gtest_msg{}) { \
    bool gtest_caught_expected = false; \
    { GTEST_RGT_DECLARE } \
    try { \
      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } catch (expected_exception const&) { \
@@ -1411,6 +1413,7 @@ class NeverThrown {
#define GTEST_TEST_NO_THROW_(statement, fail) \
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::TrueWithString gtest_msg{}) { \
    { GTEST_RGT_DECLARE } \
    try { \
      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
@@ -1430,6 +1433,7 @@ class NeverThrown {
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (::testing::internal::AlwaysTrue()) { \
    bool gtest_caught_any = false; \
    { GTEST_RGT_DECLARE } \
    try { \
      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } catch (...) { \
@@ -1451,7 +1455,7 @@ class NeverThrown {
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
  if (const ::testing::AssertionResult gtest_ar_ = \
          ::testing::AssertionResult(expression)) \
    ; \
    { GTEST_RGT_DECLARE } \
  else \
    fail(::testing::internal::GetBoolAssertionFailureMessage( \
        gtest_ar_, text, #actual, #expected) \
@@ -1462,6 +1466,7 @@ class NeverThrown {
  if (::testing::internal::AlwaysTrue()) { \
    const ::testing::internal::HasNewFatalFailureHelper \
        gtest_fatal_failure_checker; \
    { GTEST_RGT_DECLARE } \
    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
      goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
@@ -1502,11 +1507,11 @@ class NeverThrown {
   private: \
    void TestBody() override; \
    GTEST_INTERNAL_ATTRIBUTE_MAYBE_UNUSED static ::testing::TestInfo* const \
        test_info_; \
        gtest_test_info_; \
  }; \
  \
  ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_suite_name, \
                                                    test_name)::test_info_ = \
  ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_( \
      test_suite_name, test_name)::gtest_test_info_ = \
      ::testing::internal::MakeAndRegisterTestInfo( \
          #test_suite_name, #test_name, nullptr, nullptr, \
          ::testing::internal::CodeLocation(__FILE__, __LINE__), (parent_id), \

@@ -78,6 +78,12 @@
//                              expressions are/aren't available.
// GTEST_HAS_PTHREAD          - Define it to 1/0 to indicate that <pthread.h>
//                              is/isn't available.
// GTEST_HAS_RGT              - Define it to 1/0 to indicate that Rotten Green
//                              Test detection is/isn't enabled. On by default.
// GTEST_DEFAULT_RGT_PASS     - The default for --gtest_treat_rotten_as_pass.
//                              Set true here so existing test suites don't fail
//                              en masse. The recommended value is false and can
//                              be set in custom/gtest-port.h.
// GTEST_HAS_RTTI             - Define it to 1/0 to indicate that RTTI is/isn't
//                              enabled.
// GTEST_HAS_STD_WSTRING      - Define it to 1/0 to indicate that
@@ -1951,6 +1957,22 @@ class GTEST_API_ ThreadLocal {
// we cannot detect it.
GTEST_API_ size_t GetThreadCount();

// Determine whether the compiler can support Rotten Green Test detection.
// If it does, set the default for whether rotten tests imply pass or fail.
// The definitions below are guarded by #ifndef to give embedders a chance to
// define them in gtest/internal/custom/gtest-port.h
#ifndef GTEST_HAS_RGT
#define GTEST_HAS_RGT 1
#endif  // GTEST_HAS_RGT

#ifndef GTEST_DEFAULT_RGT_PASS
#define GTEST_DEFAULT_RGT_PASS true
#endif  // GTEST_DEFAULT_RGT_PASS

#ifndef GTEST_DEBUG_RGT
#define GTEST_DEBUG_RGT 0
#endif  // GTEST_DEBUG_RGT

#ifdef GTEST_OS_WINDOWS
#define GTEST_PATH_SEP_ "\\"
#define GTEST_HAS_ALT_PATH_SEP_ 1

googletest/include/gtest/internal/gtest-rgt.h (new file, 319 lines)
@@ -0,0 +1,319 @@
// Copyright (C) 2024 Sony Interactive Entertainment Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Sony Interactive Entertainment Inc. nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file defines macros and classes for detecting Rotten Green Tests
// in the LLVM unittests.

#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_RGT_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_RGT_H_

#include "gtest/internal/gtest-port.h"

#if GTEST_HAS_RGT

// A Rotten Green Test is a test assertion that _looks_ like it is verifying
// something useful about code behavior, but in fact the assertion is never
// executed. Because the test didn't explicitly fail, it's assumed to have
// passed. This file supports instrumenting Google Tests to detect EXPECT_*
// and ASSERT_* calls that are not executed, indicating that they are
// Rotten Green Tests.
//
// Inspired by "Rotten Green Tests", Delplanque et al., ICSE 2019
// DOI 10.1109/ICSE.2019.00062

#include <map>
#include <string>
#include <utility>

// Overview:
//
// Rotten Green Test checking involves four phases. First, statically identify
// all test assertions. Second, record which test assertions are executed as
// the test runs. Third, any necessary data cleanup (for example: different
// template instantiations might not all use the same test assertions, so we
// merge results for multiple instances of the same assertion). Fourth, report
// un-executed (rotten) assertions.
//
// In order to avoid false positives, we do not report any test that was
// filtered out, skipped, disabled, or otherwise not run. We also don't report
// any failed tests, because failures (especially with ASSERT* macros) might
// well skip other assertions, and so reporting those as rotten isn't really
// useful. (This is Rotten *Green* Test detection, after all.)

// Implementation:
//
// We instrument the assertion macros to statically record the source location
// of each assertion. We then attach this information to the associated
// TestInfo (if possible). During Test execution, each assertion records that
// it was executed. Finally, for each Test that was executed, we look for and
// report un-executed assertions.
//
// The implementation depends on having local static data allocated and
// constant-initialized at compile/link time, in order to identify each test
// assertion. Because it's _local_ static data, we can't rely on ordered
// initialization; that works only for non-local data. We also can't rely on
// dynamic initialization, because dynamic initialization requires that
// control pass over the definition, and the whole point is to detect when
// that *doesn't* happen. Getting this to work depends on a variety of
// environment-dependent tricks, described later.

// FIXME: During development of this feature, sometimes there were cases
// where the RGT mechanism failed to compile, or otherwise didn't work.
// You can disable checking for a problematic test by doing
// #undef GTEST_RGT_DECLARE
// #define GTEST_RGT_DECLARE
// before the test, and re-enabling it with
// #undef GTEST_RGT_DECLARE
// #define GTEST_RGT_DECLARE GTEST_RGT_DECLARE_
// afterward.
#define GTEST_RGT_DECLARE GTEST_RGT_DECLARE_

namespace testing {

class TestInfo;

namespace internal {

// The data to record per assertion site. All members of RgtStaticItem must
// use static initialization, which is why we don't record TestInfo* directly;
// those are created dynamically.
struct RgtStaticItem {
  testing::TestInfo *const *test_info;
  const char *file;
  int line;
  bool executed;
  // constexpr to guarantee compile-time initialization.
  constexpr RgtStaticItem(testing::TestInfo *const *a_test_info,
                          const char *a_file, int a_line) :
      test_info(a_test_info), file(a_file), line(a_line), executed(false) {}
};

// Each RgtStaticItem that describes an assertion within a Test function is
// recorded in a vector attached to the appropriate TestInfo instance. Other
// assertions (e.g., from a helper function) are remembered on the side. We
// use vectors because we don't want to deduplicate items (yet).
using RgtAssertInfo = std::vector<RgtStaticItem *>;

// RgtReportRotten takes an RgtAssertInfo vector, deduplicates the info, and
// calls GTEST_MESSAGE_AT_ on each un-executed assertion. If the parameter is
// nullptr, it uses the helper method assertion vector. Returns the number of
// unique items in assert_info.
size_t RgtReportRotten(const RgtAssertInfo *assert_info);

// We need to be able to find all those static items, so they (or their
// addresses) need to be somewhere that has a name we can use at runtime to
// find them. Unfortunately, there doesn't seem to be one consistent way to
// do this across toolchains.
//
// For Clang (non-Windows), we allocate the static data into a custom section
// that has a name that is a legal C identifier. Then we can use symbols that
// the linker defines for us to find the start and end of the section. This
// allows us to allocate static data piecemeal in the source, and still have
// effectively a single array of all such data at runtime. This tactic works
// using GNU linkers or LLD.
//
// With gcc, due to how it handles data allocation for items in inline
// functions, we can't put local static data in a custom section. See:
// https://stackoverflow.com/questions/35091862/inline-static-data-causes-a-section-type-conflict
// and also https://gcc.gnu.org/bugzilla/show_bug.cgi?id=41091 for details.
// (It seems that GCC 14 fixes most cases, but still not inline functions.)
// In order to address this, we apply the solution described here:
// https://stackoverflow.com/questions/29903391/find-unexecuted-lines-of-c-code
// We use the .init_array hack to collect the addresses of all the data items,
// which are allocated without any special section or attributes on them. This
// lets us have each item be registered automatically, and because .init_array
// is a predefined section, gcc doesn't give us any trouble with section
// attributes.
//
// For MSVC, allocating arbitrary data to a custom section tends to have
// padding issues, and we need to be able to treat the custom section as an
// array. So, we do a gcc-like thing, allocating the data normally, and
// capturing a pointer to an initializer function in the custom section;
// capturing pointers doesn't seem to have the padding issues, or not as
// badly, as we do end up with some null pointers in there. We use an
// initializer function instead of just capturing a pointer to the data, in
// order to be more consistent with the gcc scheme. We use documented
// section-ordering rules to define our own symbols for the array start and
// end, and call all the initializer functions manually.

// Define a toggle to see which kind of initialization we're doing.
#if defined(__GNUC__) || defined(_WIN32)
#define GTEST_RGT_STARTUP_INIT_ 1
#else
#define GTEST_RGT_STARTUP_INIT_ 0
#endif  // __GNUC__ || _WIN32

#if GTEST_RGT_STARTUP_INIT_
// Record the existence of a test point, when we can't arrange for that using
// a custom section.
GTEST_API_ void RgtRecord(RgtStaticItem *item);
#endif  // GTEST_RGT_STARTUP_INIT_

// Finish off the RGT initialization; attach each RGT_item to its TestInfo
// instance, and remember the ones that aren't lexically in a Test. Called
// from OnTestProgramStart(). Returns the number of items it found.
size_t RgtInit();

#if GTEST_DEBUG_RGT
// For debugging: Dump the locations of all test assertions to the
// specified file.
void RgtDumpAllAssertionsTo(std::string &filename);
#endif  // GTEST_DEBUG_RGT

}  // end namespace internal
}  // end namespace testing

// When a test assertion is lexically contained within a Test, an RgtStaticItem
// captures a pointer to the Test's static gtest_test_info_ member, allowing
// us to associate assertions with tests. However, assertions may also be in
// helper functions outside of a test; for those cases, we use a data item with
// the same name in the anonymous namespace, so that an unqualified use of the
// name will always be satisfied. Normally global variables in the anonymous
// namespace are not cool, but in this case it's exactly what we need.
namespace {
testing::TestInfo* const gtest_test_info_ = nullptr;
}  // namespace

// Define the section name (on Windows, the prefix) to use for RGT data.
// Note, the non-Windows case requires this be a legal C identifier.
#define GTEST_RGT_SECTION_NAME_BASE_ GTEST_RGT

#if GTEST_RGT_STARTUP_INIT_

// Conjure up a function to do startup-time initialization, given that
// we can't arrange for static initialization.

#ifdef __GNUC__

#define GTEST_RGT_RECORD_ITEM_(ITEM) \
  struct RgtRecorderHelper_##ITEM { \
    static void Record() { ::testing::internal::RgtRecord(&ITEM); } \
  }; \
  static auto RgtHelper2_##ITEM __attribute__((section(".init_array"))) \
      = RgtRecorderHelper_##ITEM::Record; \
  (void)RgtHelper2_##ITEM;

#else  // __GNUC__

// Windows doesn't automatically provide start/end symbol names for sections,
// so we roll our own start/end entries. Sections are sorted lexically, so we
// paste suffixes onto the base name to get correct sorting. Extra fun macro
// indirection required.

#define GTEST_RGT_SECTION_NAME_WITH_SUFFIX_(SUFFIX) \
  GTEST_RGT_SECTION_NAME_2(GTEST_RGT_SECTION_NAME_BASE_, SUFFIX)
#define GTEST_RGT_SECTION_NAME_2(NAME, SUFFIX) \
  GTEST_RGT_SECTION_NAME_3(NAME, SUFFIX)
#define GTEST_RGT_SECTION_NAME_3(NAME, SUFFIX) \
  GTEST_STRINGIFY_(NAME ## SUFFIX)

#define GTEST_RGT_SECTION_NAME_ GTEST_RGT_SECTION_NAME_WITH_SUFFIX_($d)

namespace testing {
namespace internal {

// The type of the functions we're going to track in the section that we're
// using instead of .init_array.
typedef void(*RgtRecorder)(void);

// Because MSVC has no equivalent of attribute(used) we need to fake up a
// pointer escaping so the function pointers will look used.
GTEST_API_ extern void *rgt_fake_use;

}  // namespace internal
}  // namespace testing

// MSVC requires the section to be declared before being used, but simply
// using a global #pragma seems not to work (despite the documentation).
// So, do a __pragma every time.

#define GTEST_RGT_RECORD_ITEM_(ITEM) \
  struct RgtRecorderHelper { \
    static void record() { ::testing::internal::RgtRecord(&ITEM); } \
  }; \
  __pragma(section(GTEST_RGT_SECTION_NAME_,read,write)) \
  __declspec(allocate(GTEST_RGT_SECTION_NAME_)) \
  static ::testing::internal::RgtRecorder rgt_record_item = \
      RgtRecorderHelper::record; \
  ::testing::internal::rgt_fake_use = (void*)&rgt_record_item;

#endif  // __GNUC__

// In the runtime-init case, the actual assertion tracking data doesn't go
// anywhere special.
#define GTEST_RGT_SECTION_ATTR

#else  // GTEST_RGT_STARTUP_INIT_

// In the "normal" case, allocate local static data to a custom section which
// we can then iterate over when reporting.
#define GTEST_RGT_SECTION_NAME_ GTEST_RGT_SECTION_NAME_BASE_

// Define how to decorate the data declarations.
#define GTEST_RGT_SECTION_ATTR GTEST_RGT_SECTION_ATTR_2(GTEST_RGT_SECTION_NAME_)
#define GTEST_RGT_SECTION_ATTR_2(NAME) GTEST_RGT_SECTION_ATTR_3(NAME)
#define GTEST_RGT_SECTION_ATTR_3(NAME) __attribute__((section(#NAME),used))

// No special "record" action needed.
#define GTEST_RGT_RECORD_ITEM_(ITEM)

#endif  // GTEST_RGT_STARTUP_INIT_

// Define the bookkeeping macro to use in the various assertion macros.
// Statically initialize 'executed' to false, dynamically set it true.

#define GTEST_RGT_DECLARE_ \
  GTEST_RGT_SECTION_ATTR static ::testing::internal::RgtStaticItem \
      gtest_rgt_item(&gtest_test_info_, __FILE__, __LINE__); \
  gtest_rgt_item.executed = true; \
  GTEST_RGT_RECORD_ITEM_(gtest_rgt_item)

// If the test uses an EXPECT_[NON]FATAL_FAILURE macro, the rotten-test
// tracking becomes unreliable, because those macros exercise assertions that
// are intended to fail, and therefore will appear rotten. Remember tests
// where these macros are used, to avoid false positives.
namespace testing {
namespace internal {
GTEST_API_ void RgtUsesExpectFailure(::testing::TestInfo* test_info);
}  // namespace internal
}  // namespace testing

#define GTEST_RGT_USES_EXPECT_FAILURE_ \
  ::testing::internal::RgtUsesExpectFailure(gtest_test_info_)

#else  // GTEST_HAS_RGT

// With RGT disabled, don't instrument anything.

#define GTEST_RGT_DECLARE
#define GTEST_RGT_DECLARE_

#endif  // GTEST_HAS_RGT

#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_RGT_H_

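The core trick in the header above, a function-local static whose record exists at link time while its `executed` flag flips only when control actually reaches the statement, can be illustrated standalone. Below is a minimal sketch of the custom-section variant (names like `demo_rgt`, `Site`, and `TRACK` are illustrative, not from the patch; it assumes GCC/Clang with a GNU linker or LLD, which synthesize `__start_`/`__stop_` symbols for sections whose names are legal C identifiers):

```cpp
#include <cstdio>

// One record per tracked site; must be constant-initializable so the
// record exists even if the statement never executes.
struct Site {
  const char* file;
  int line;
  bool executed;
};

// Plant a static record in the "demo_rgt" section; flip its flag only
// when control reaches this statement at runtime.
#define TRACK()                                                       \
  do {                                                                \
    __attribute__((section("demo_rgt"), used)) static Site site = {  \
        __FILE__, __LINE__, false};                                   \
    site.executed = true;                                             \
  } while (0)

// Linker-provided bounds of the demo_rgt section (single-TU sketch).
extern "C" Site __start_demo_rgt;
extern "C" Site __stop_demo_rgt;

static void ReportRotten() {
  for (Site* s = &__start_demo_rgt; s != &__stop_demo_rgt; ++s)
    if (!s->executed) std::printf("rotten: %s:%d\n", s->file, s->line);
}

static void MaybeCheck(bool run) {
  if (run) TRACK();  // Never reached when run == false.
}

int main() {
  MaybeCheck(false);
  ReportRotten();  // Prints the location of the unexecuted TRACK().
  return 0;
}
```

This is why the patch needs no code to run at the skipped site: the record is emitted at compile/link time, so walking the section at shutdown reveals every site whose flag was never set.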
@@ -133,6 +133,10 @@ int main(int argc, char** argv) {
    // We don't need to worry about deleting the new listener later, as
    // Google Test will do it.
    listeners.Append(new LeakChecker);
  } else {
    // If the leak checker isn't installed, it will be reported rotten.
    // Keep that from causing the test to fail.
    GTEST_FLAG_SET(treat_rotten_as_pass, true);
  }
  return RUN_ALL_TESTS();
}

@@ -44,6 +44,7 @@
#include "src/gtest-matchers.cc"
#include "src/gtest-port.cc"
#include "src/gtest-printers.cc"
#include "src/gtest-rgt.cc"
#include "src/gtest-test-part.cc"
#include "src/gtest-typed-test.cc"
#include "src/gtest.cc"

@@ -164,6 +164,10 @@ class GTestFlagSaver {
    stack_trace_depth_ = GTEST_FLAG_GET(stack_trace_depth);
    stream_result_to_ = GTEST_FLAG_GET(stream_result_to);
    throw_on_failure_ = GTEST_FLAG_GET(throw_on_failure);
    treat_rotten_as_pass_ = GTEST_FLAG_GET(treat_rotten_as_pass);
#if GTEST_DEBUG_RGT
    dump_assertions_to_ = GTEST_FLAG_GET(dump_assertions_to);
#endif  // GTEST_DEBUG_RGT
  }

  // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
@@ -190,6 +194,10 @@ class GTestFlagSaver {
    GTEST_FLAG_SET(stack_trace_depth, stack_trace_depth_);
    GTEST_FLAG_SET(stream_result_to, stream_result_to_);
    GTEST_FLAG_SET(throw_on_failure, throw_on_failure_);
    GTEST_FLAG_SET(treat_rotten_as_pass, treat_rotten_as_pass_);
#if GTEST_DEBUG_RGT
    GTEST_FLAG_SET(dump_assertions_to, dump_assertions_to_);
#endif  // GTEST_DEBUG_RGT
  }

 private:
@@ -215,6 +223,10 @@ class GTestFlagSaver {
  int32_t stack_trace_depth_;
  std::string stream_result_to_;
  bool throw_on_failure_;
  bool treat_rotten_as_pass_;
#if GTEST_DEBUG_RGT
  std::string dump_assertions_to_;
#endif  // GTEST_DEBUG_RGT
};

// Converts a Unicode code point to a narrow string in UTF-8 encoding.
@@ -535,6 +547,9 @@ class GTEST_API_ UnitTestImpl {
  // Gets the number of successful test suites.
  int successful_test_suite_count() const;

  // Gets the number of rotten test suites.
  int rotten_test_suite_count() const;

  // Gets the number of failed test suites.
  int failed_test_suite_count() const;

@@ -551,6 +566,9 @@ class GTEST_API_ UnitTestImpl {
  // Gets the number of skipped tests.
  int skipped_test_count() const;

  // Gets the number of rotten tests.
  int rotten_test_count() const;

  // Gets the number of failed tests.
  int failed_test_count() const;

@@ -580,6 +598,12 @@ class GTEST_API_ UnitTestImpl {
  // passed).
  bool Passed() const { return !Failed(); }

  // Returns true if and only if the unit test had at least one test suite
  // with a rotten assertion.
  bool Rotten() const {
    return rotten_test_suite_count() > 0 || ad_hoc_test_result()->Rotten();
  }

  // Returns true if and only if the unit test failed (i.e. some test suite
  // failed or something outside of all tests failed).
  bool Failed() const {

googletest/src/gtest-rgt.cc (new file, 264 lines)
@@ -0,0 +1,264 @@
// Copyright (C) 2024 Sony Interactive Entertainment Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Sony Interactive Entertainment Inc. nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Support for Rotten Green Test detection within Google Test.

#include "gtest/internal/gtest-rgt.h"

#if GTEST_HAS_RGT

#include <algorithm>
#include <cassert>
#include <map>
#include <vector>

#include "gtest/gtest.h"
#include "src/gtest-internal-inl.h"

namespace {

// Define the vector where we will keep RgtStaticItem-s that describe
// assertions from outside any Test (e.g., in a helper function).
testing::internal::RgtAssertInfo rgt_helper_asserts;

}  // end anonymous namespace

// Report rotten assertions using GTEST_MESSAGE_AT_.
size_t testing::internal::RgtReportRotten(
    const testing::internal::RgtAssertInfo *assert_info) {
  if (!assert_info)
    assert_info = &rgt_helper_asserts;

  // Collect raw data into a map with filename as the key; the value is
  // another map with line number as the key; the value of the second key
  // is the logical-or of all "executed" flags with the same source
  // location. This de-duplicates things like template instantiations.
  struct RgtFilenameCompare {
    bool operator()(const char *lhs, const char *rhs) const {
      return strcmp(lhs, rhs) < 0;
    }
  };
  using RgtFileInfo = std::map<int, bool>;
  using RgtTestInfo = std::map<const char*, RgtFileInfo, RgtFilenameCompare>;
  RgtTestInfo executed_map;
  for (testing::internal::RgtStaticItem *item : *assert_info) {
    RgtFileInfo &fileinfo = executed_map[item->file];
    bool &executed = fileinfo[item->line];
    executed = executed || item->executed;
  }

  // For any assertion that wasn't executed, report it as kRotten.
  for (auto &M : executed_map) {
    // There are two helper methods within googletest itself;
    // don't report those, they are often unused.
    if (String::EndsWithCaseInsensitive(M.first, "gtest.cc") ||
        String::EndsWithCaseInsensitive(M.first, "gtest-port.cc"))
      continue;
    for (auto &E : M.second) {
      if (!E.second)
        GTEST_MESSAGE_AT_(M.first, E.first, "",
                          testing::TestPartResult::kRotten);
    }
  }
  return assert_info->size();
}

// When we can't statically allocate the array of items in a custom section,
// they get registered on startup and we keep pointers to the data in this
// vector. And for consistency, we build this vector at startup even when we
// do allocate into a custom section.

namespace {

using ItemVector = std::vector<testing::internal::RgtStaticItem *>;
class RegisteredItems {
  static ItemVector *items_;

 public:
  RegisteredItems() = default;
  ItemVector *getItems() {
    if (!items_)
      items_ = new ItemVector;
    return items_;
  }
  size_t size() { return getItems()->size(); }
  bool empty() { return getItems()->empty(); }
  auto begin() { return getItems()->begin(); }
  auto end() { return getItems()->end(); }
  void push_back(testing::internal::RgtStaticItem *item) {
    getItems()->push_back(item);
  }
};

RegisteredItems registered_items;
ItemVector *RegisteredItems::items_ = nullptr;

}  // end anonymous namespace

#if GTEST_RGT_STARTUP_INIT_

GTEST_API_
void testing::internal::RgtRecord(testing::internal::RgtStaticItem *item) {
  registered_items.push_back(item);
}

#ifdef _WIN32

// On Windows we have to allocate our own placeholder start/stop data items.
// The linker will sort these into the right order relative to real data.
#define START_SECTION_NAME GTEST_RGT_SECTION_NAME_WITH_SUFFIX_($a)
#define STOP_SECTION_NAME GTEST_RGT_SECTION_NAME_WITH_SUFFIX_($z)

using ::testing::internal::RgtRecorder;

#pragma section (START_SECTION_NAME,read,write)
__declspec(allocate(START_SECTION_NAME))
static RgtRecorder rgt_manual_init_start = nullptr;

#pragma section (STOP_SECTION_NAME,read,write)
__declspec(allocate(STOP_SECTION_NAME))
static RgtRecorder rgt_manual_init_stop = nullptr;

static void RgtInitManual() {
  const RgtRecorder *F = &rgt_manual_init_start;
  // Because the concatenated sections might be padded, we have to skip over
  // any null pointers. Also skip the (known null) start and end markers.
  for (++F; F < &rgt_manual_init_stop; ++F) {
    if (*F)
      (*F)();
  }
}

// Define the fake-use global that we use because MSVC has no "used" attribute.
GTEST_API_ void *::testing::internal::rgt_fake_use = nullptr;

#else  // _WIN32

// In this case, .init_array has called RgtRecord for us.

static void RgtInitManual() {}

#endif  // _WIN32

#else  // GTEST_RGT_STARTUP_INIT_

// Non-Windows linkers provide __start_<section> and __stop_<section> symbols.
// These are unqualified global references.
#define START_NAME GTEST_CONCAT_TOKEN_(__start_, GTEST_RGT_SECTION_NAME_)
#define STOP_NAME GTEST_CONCAT_TOKEN_(__stop_, GTEST_RGT_SECTION_NAME_)

// extern "C" vars can't have qualified type names; we only care about their
// addresses, so we'll do appropriate casts later.
extern "C" int START_NAME;
extern "C" int STOP_NAME;

// Build the registered_items vector by taking the address of each item in the
// custom section.

static void RgtInitManual() {
  using testing::internal::RgtStaticItem;
  RgtStaticItem *I = (RgtStaticItem *)&START_NAME;
  RgtStaticItem *E = (RgtStaticItem *)&STOP_NAME;

  unsigned count = 0;
  for (; I != E; ++I) {
    registered_items.push_back(I);
    ++count;
  }
}

#endif  // GTEST_RGT_STARTUP_INIT_

size_t testing::internal::RgtInit() {
  // Collect all the RgtStaticItem addresses into registered_items.
  RgtInitManual();

  // For each RgtStaticItem, if it has an associated TestInfo, attach it there;
  // if it doesn't, keep it on the side so we can check them at the end.
  for (RgtStaticItem *item : registered_items) {
    if (testing::TestInfo *TI = *item->test_info)
      TI->asserts_.push_back(item);
    else
      rgt_helper_asserts.push_back(item);
  }
  return registered_items.size();
}

// Remember if a test uses an EXPECT_[NON]FATAL_FAILURE macro.
GTEST_API_
void testing::internal::RgtUsesExpectFailure(::testing::TestInfo* test_info) {
  if (test_info)
    test_info->set_uses_expect_failure();
}

#if GTEST_DEBUG_RGT
// Dump source location of all identified assertions. For debugging.
// We sort and emit one per line, without deduplicating, for better diffing.

void testing::internal::RgtDumpAllAssertionsTo(std::string &filename) {
  if (filename.empty())
    return;

  // Following is based on UnitTestOptions::GetAbsolutePathToOutputFile()
  // which has a note regarding certain Windows paths not working.
  internal::FilePath log_path(filename);
  if (!log_path.IsAbsolutePath()) {
    log_path = internal::FilePath::ConcatPaths(
        internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
        internal::FilePath(filename));
  }

  if (log_path.IsDirectory()) {
    fprintf(stderr, "Specified log file is a directory \"%s\"\n",
            filename.c_str());
    return;
  }
  FILE* logfile = posix::FOpen(log_path.c_str(), "w");
  if (!logfile) {
    fprintf(stderr, "Unable to open log file \"%s\"\n", log_path.c_str());
    return;
  }

  // registered_items is a vector of RgtStaticItem; sort it by filename and
  // then by line.
  struct RgtItemCompare {
    bool operator()(const RgtStaticItem *lhs, const RgtStaticItem *rhs) const {
      int Cmp = strcmp(lhs->file, rhs->file);
      return Cmp == 0 ? lhs->line < rhs->line : Cmp < 0;
    }
  };
  std::sort(registered_items.getItems()->begin(),
            registered_items.getItems()->end(), RgtItemCompare());
  for (auto *item : *registered_items.getItems())
    fprintf(logfile, "%s::%d\n", item->file, item->line);
  posix::FClose(logfile);
}

#endif  // GTEST_DEBUG_RGT

#endif  // GTEST_HAS_RGT

@@ -54,6 +54,7 @@ std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
     << " "
     << (result.type() == TestPartResult::kSuccess  ? "Success"
         : result.type() == TestPartResult::kSkip   ? "Skipped"
         : result.type() == TestPartResult::kRotten ? "Rotten"
         : result.type() == TestPartResult::kFatalFailure
             ? "Fatal failure"
             : "Non-fatal failure")

@@ -388,6 +388,21 @@ GTEST_DEFINE_bool_(
    "if exceptions are enabled or exit the program with a non-zero code "
    "otherwise. For use with an external test framework.");

GTEST_DEFINE_bool_(
    treat_rotten_as_pass,
    testing::internal::BoolFromGTestEnv("treat_rotten_as_pass",
                                        GTEST_DEFAULT_RGT_PASS),
    "This flag controls whether un-executed assertions within an executed "
    "test are treated as if they had passed.");

#if GTEST_DEBUG_RGT
GTEST_DEFINE_string_(
    dump_assertions_to,
    testing::internal::StringFromGTestEnv("assertionlog", ""),
    "This flag specifies the log file to dump a list of all known "
    "assertion locations to.");
#endif  // GTEST_DEBUG_RGT

#if GTEST_USE_OWN_FLAGFILE_FLAG_
GTEST_DEFINE_string_(
    flagfile, testing::internal::StringFromGTestEnv("flagfile", ""),
@@ -440,6 +455,11 @@ static bool TestSuitePassed(const TestSuite* test_suite) {
  return test_suite->should_run() && test_suite->Passed();
}

// Returns true if and only if the test suite had rotten assertions.
static bool TestSuiteRotten(const TestSuite* test_suite) {
  return test_suite->should_run() && test_suite->Rotten();
}

// Returns true if and only if the test suite failed.
static bool TestSuiteFailed(const TestSuite* test_suite) {
  return test_suite->should_run() && test_suite->Failed();
@@ -1093,6 +1113,11 @@ int UnitTestImpl::successful_test_suite_count() const {
  return CountIf(test_suites_, TestSuitePassed);
}

// Gets the number of rotten test suites.
int UnitTestImpl::rotten_test_suite_count() const {
  return CountIf(test_suites_, TestSuiteRotten);
}

// Gets the number of failed test suites.
int UnitTestImpl::failed_test_suite_count() const {
  return CountIf(test_suites_, TestSuiteFailed);
@@ -1119,6 +1144,11 @@ int UnitTestImpl::skipped_test_count() const {
  return SumOverTestSuiteList(test_suites_, &TestSuite::skipped_test_count);
}

// Gets the number of rotten tests.
int UnitTestImpl::rotten_test_count() const {
  return SumOverTestSuiteList(test_suites_, &TestSuite::rotten_test_count);
}

// Gets the number of failed tests.
int UnitTestImpl::failed_test_count() const {
  return SumOverTestSuiteList(test_suites_, &TestSuite::failed_test_count);
@@ -2437,6 +2467,17 @@ bool TestResult::Skipped() const {
  return !Failed() && CountIf(test_part_results_, TestPartSkipped) > 0;
}

// Returns true if and only if the test part is rotten.
static bool TestPartRotten(const TestPartResult& result) {
  return result.rotten();
}

// Returns true if and only if the test was passed but had a rotten assertion.
bool TestResult::Rotten() const {
  return !Skipped() && !Failed() &&
         CountIf(test_part_results_, TestPartRotten) > 0;
}

// Returns true if and only if the test failed.
bool TestResult::Failed() const {
  for (int i = 0; i < total_part_count(); ++i) {
@@ -2755,6 +2796,7 @@ TestInfo::TestInfo(std::string a_test_suite_name, std::string a_name,
      is_disabled_(false),
      matches_filter_(false),
      is_in_another_shard_(false),
      uses_expect_failure_(false),
      factory_(factory),
      result_() {}

@@ -2907,6 +2949,11 @@ int TestSuite::skipped_test_count() const {
  return CountIf(test_info_list_, TestSkipped);
}

// Gets the number of rotten tests in this test suite.
int TestSuite::rotten_test_count() const {
  return CountIf(test_info_list_, TestRotten);
}

// Gets the number of failed tests in this test suite.
int TestSuite::failed_test_count() const {
  return CountIf(test_info_list_, TestFailed);
@@ -3135,6 +3182,8 @@ static const char* TestPartResultTypeToString(TestPartResult::Type type) {
      return "Skipped\n";
    case TestPartResult::kSuccess:
      return "Success";
    case TestPartResult::kRotten:
      return "Rotten";

    case TestPartResult::kNonFatalFailure:
    case TestPartResult::kFatalFailure:
@@ -3405,6 +3454,7 @@ class PrettyUnitTestResultPrinter : public TestEventListener {
  static void PrintFailedTests(const UnitTest& unit_test);
  static void PrintFailedTestSuites(const UnitTest& unit_test);
  static void PrintSkippedTests(const UnitTest& unit_test);
  static void PrintRottenTests(const UnitTest& unit_test);
};

// Fired before each iteration of tests starts.
@@ -3509,7 +3559,10 @@ void PrettyUnitTestResultPrinter::OnTestPartResult(
}

void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
  if (test_info.result()->Passed()) {
  // Rotten is a subset of Passed so check it first.
  if (test_info.result()->Rotten()) {
    ColoredPrintf(GTestColor::kYellow, "[  ROTTEN  ] ");
  } else if (test_info.result()->Passed()) {
    ColoredPrintf(GTestColor::kGreen, "[       OK ] ");
  } else if (test_info.result()->Skipped()) {
    ColoredPrintf(GTestColor::kGreen, "[  SKIPPED ] ");

@@ -3632,6 +3685,29 @@ void PrettyUnitTestResultPrinter::PrintSkippedTests(const UnitTest& unit_test) {
  }
}

// Internal helper for printing the list of tests with rotten assertion.
void PrettyUnitTestResultPrinter::PrintRottenTests(const UnitTest& unit_test) {
  const int rotten_test_count = unit_test.rotten_test_count();
  if (rotten_test_count == 0) {
    return;
  }

  for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
    const TestSuite& test_suite = *unit_test.GetTestSuite(i);
    if (!test_suite.should_run() || (test_suite.rotten_test_count() == 0)) {
      continue;
    }
    for (int j = 0; j < test_suite.total_test_count(); ++j) {
      const TestInfo& test_info = *test_suite.GetTestInfo(j);
      if (!test_info.should_run() || !test_info.result()->Rotten()) {
        continue;
      }
      ColoredPrintf(GTestColor::kYellow, "[  ROTTEN  ] ");
      printf("%s.%s\n", test_suite.name(), test_info.name());
    }
  }
}

void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
                                                     int /*iteration*/) {
  ColoredPrintf(GTestColor::kGreen, "[==========] ");
@@ -3658,10 +3734,19 @@ void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
    PrintFailedTestSuites(unit_test);
  }

  int rotten_test_count = unit_test.rotten_test_count();
  if (unit_test.Passed() && rotten_test_count) {
    ColoredPrintf(GTestColor::kYellow, "[  ROTTEN  ] ");
    printf("%s, listed below:\n", FormatTestCount(rotten_test_count).c_str());
    PrintRottenTests(unit_test);
    ColoredPrintf(GTestColor::kYellow, "  YOU HAVE %d ROTTEN %s\n\n",
                  rotten_test_count, rotten_test_count == 1 ? "TEST" : "TESTS");
  }

  int num_disabled = unit_test.reportable_disabled_test_count();
  if (num_disabled && !GTEST_FLAG_GET(also_run_disabled_tests)) {
    if (unit_test.Passed()) {
      printf("\n");  // Add a spacer if no FAILURE banner is displayed.
    if (unit_test.Passed() && !rotten_test_count) {
      printf("\n");  // Add a spacer if no FAILURE or ROTTEN banner is displayed.
    }
    ColoredPrintf(GTestColor::kYellow, "  YOU HAVE %d DISABLED %s\n\n",
                  num_disabled, num_disabled == 1 ? "TEST" : "TESTS");
@@ -3777,6 +3862,57 @@ void BriefUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,

// End BriefUnitTestResultPrinter

#if GTEST_HAS_RGT

// This class implements the TestEventListener interface.
//
// Class RgtListener is not copyable.
class RgtListener : public EmptyTestEventListener {
 public:
  RgtListener() {}
  void OnTestProgramStart(const UnitTest& unit_test) override;
  void OnTestEnd(const TestInfo& test_info) override;
  void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override;

 private:
  size_t item_count_ = 0;
};

void RgtListener::OnTestProgramStart(const UnitTest& /*unit_test*/) {
  this->item_count_ = internal::RgtInit();
}

void RgtListener::OnTestEnd(const TestInfo& test_info) {
  // Skip this unless the test would be reported as passed.
  if (!test_info.result()->Passed())
    return;
  // Also skip if this test uses an EXPECT_[NON]FATAL_FAILURE macro,
  // which makes reporting rotten green tests unreliable.
  if (test_info.uses_expect_failure())
    return;

  // Report rotten assertions for this Test.
  RgtReportRotten(&test_info.asserts());
}

void RgtListener::OnEnvironmentsTearDownStart(const UnitTest& unit_test) {
  // Report rotten assertions for helper methods, unless we are in a
  // situation that is very likely to report false positives:
  // sharding, filtering, or the overall result is not Pass.
  // (We'll also get false positives if helpers are used by
  // disabled or skipped tests, but we can probably live with that.)
  if (testing::internal::ShouldShard(kTestTotalShards, kTestShardIndex, false))
    return;
  const char* const filter = GTEST_FLAG(filter).c_str();
  if (!String::CStringEquals(filter, kUniversalFilter))
    return;
  if (unit_test.Passed())
    (void)RgtReportRotten(nullptr);
}

// End RgtListener

#endif  // GTEST_HAS_RGT

// class TestEventRepeater
//
// This class forwards events to other event listeners.
@ -5257,6 +5393,9 @@ int UnitTest::skipped_test_count() const {
|
||||
return impl()->skipped_test_count();
|
||||
}
|
||||
|
||||
// Gets the number of rotten tests.
|
||||
int UnitTest::rotten_test_count() const { return impl()->rotten_test_count(); }
|
||||
|
||||
// Gets the number of failed tests.
|
||||
int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
|
||||
|
||||
@ -5296,6 +5435,10 @@ internal::TimeInMillis UnitTest::elapsed_time() const {
|
||||
// passed).
|
||||
bool UnitTest::Passed() const { return impl()->Passed(); }
|
||||
|
||||
// Returns true if and only if the unit test passed but had at least one
|
||||
// rotten test.
|
||||
bool UnitTest::Rotten() const { return impl()->Rotten(); }
|
||||
|
||||
// Returns true if and only if the unit test failed (i.e. some test suite
|
||||
// failed or something outside of all tests failed).
|
||||
bool UnitTest::Failed() const { return impl()->Failed(); }
|
||||
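These accessors make a green-but-rotten run distinguishable from a clean pass.
A minimal sketch of a custom main() that queries them, assuming a googletest
build with this patch applied:

  #include <cstdio>
  #include "gtest/gtest.h"

  int main(int argc, char** argv) {
    testing::InitGoogleTest(&argc, argv);
    const int rc = RUN_ALL_TESTS();
    const testing::UnitTest& ut = *testing::UnitTest::GetInstance();
    if (ut.Passed() && ut.Rotten()) {
      std::printf("%d rotten test(s) in a passing run\n",
                  ut.rotten_test_count());
    }
    return rc;
  }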
@ -5761,6 +5904,10 @@ void UnitTestImpl::PostFlagParsingInit() {
  ConfigureStreamingOutput();
#endif // GTEST_CAN_STREAM_RESULTS_

#if GTEST_HAS_RGT
  listeners()->Append(new RgtListener);
#endif // GTEST_HAS_RGT

#ifdef GTEST_HAS_ABSL
  if (GTEST_FLAG_GET(install_failure_signal_handler)) {
    absl::FailureSignalHandlerOptions options;
@ -5997,7 +6144,7 @@ bool UnitTestImpl::RunAllTests() {
    repeater->OnTestIterationEnd(*parent_, i);

    // Gets the result and clears it.
    if (!Passed()) {
    if (!Passed() || (!GTEST_FLAG_GET(treat_rotten_as_pass) && Rotten())) {
      failed = true;
    }

@ -6033,6 +6180,12 @@ bool UnitTestImpl::RunAllTests() {
        "Please fix it ASAP, or IT WILL START TO FAIL.\n"); // NOLINT
  }

#if GTEST_DEBUG_RGT
  if (!in_subprocess_for_death_test &&
      !GTEST_FLAG_GET(dump_assertions_to).empty())
    RgtDumpAllAssertionsTo(GTEST_FLAG_GET(dump_assertions_to));
#endif // GTEST_DEBUG_RGT

  return !failed;
}

@ -6578,6 +6731,9 @@ static const char kColorEncodedHelpMessage[] =
    " Generate a JSON or XML report in the given directory or with the "
    "given\n"
    " file name. @YFILE_PATH@D defaults to @Gtest_detail.xml@D.\n"
    " @G--" GTEST_FLAG_PREFIX_
    "treat_rotten_as_pass@D\n"
    " Treat un-executed assertions within a passing test as passing.\n"
#if GTEST_CAN_STREAM_RESULTS_
    " @G--" GTEST_FLAG_PREFIX_
    "stream_result_to=@YHOST@G:@YPORT@D\n"
@ -6651,6 +6807,10 @@ static bool ParseGoogleTestFlag(const char* const arg) {
  GTEST_INTERNAL_PARSE_FLAG(stack_trace_depth);
  GTEST_INTERNAL_PARSE_FLAG(stream_result_to);
  GTEST_INTERNAL_PARSE_FLAG(throw_on_failure);
  GTEST_INTERNAL_PARSE_FLAG(treat_rotten_as_pass);
#if GTEST_DEBUG_RGT
  GTEST_INTERNAL_PARSE_FLAG(dump_assertions_to);
#endif // GTEST_DEBUG_RGT
  return false;
}
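With the flag plumbed through the help text and the parser, rotten tests can be
promoted to failures either on the command line (--gtest_treat_rotten_as_pass=0)
or programmatically. A minimal sketch, assuming this patch is applied:

  #include "gtest/gtest.h"

  int main(int argc, char** argv) {
    testing::InitGoogleTest(&argc, argv);
    // With the flag cleared, RunAllTests() reports failure for a run that
    // is green but rotten (see the RunAllTests() change above).
    GTEST_FLAG_SET(treat_rotten_as_pass, false);
    return RUN_ALL_TESTS();
  }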

@ -53,6 +53,7 @@ cc_test(
        "gtest_all_test.cc",
        "gtest-death-test_ex_test.cc",
        "gtest-listener_test.cc",
        "gtest_rgt_output_test_.cc",
        "gtest-unittest-api_test.cc",
        "googletest-param-test-test.cc",
        "googletest-param-test2-test.cc",
@ -205,6 +206,24 @@ py_test(
    deps = [":gtest_test_utils"],
)

cc_binary(
    name = "gtest_rgt_output_test_",
    testonly = 1,
    srcs = ["gtest_rgt_output_test_.cc"],
    deps = ["//:gtest_main"],
)

py_test(
    name = "gtest_rgt_output_test",
    size = "small",
    srcs = ["gtest_rgt_output_test.py"],
    data = [
        "gtest_rgt_output_test_golden_lin.txt",
        ":gtest_rgt_output_test_",
    ],
    deps = [":gtest_test_utils"],
)

cc_binary(
    name = "googletest-color-test_",
    testonly = 1,

@ -1508,3 +1508,12 @@ TEST(ConditionalDeathMacrosSyntaxDeathTest, SwitchStatement) {
TEST(NotADeathTest, Test) { SUCCEED(); }

} // namespace

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);

  // This test has deliberately un-executed assertions in it.
  GTEST_FLAG_SET(treat_rotten_as_pass, true);

  return RUN_ALL_TESTS();
}

@ -278,6 +278,7 @@ int main(int argc, char** argv) {

  GTEST_FLAG_SET(repeat, 2);
  GTEST_FLAG_SET(recreate_environments_when_repeating, true);
  GTEST_FLAG_SET(treat_rotten_as_pass, true);
  int ret_val = RUN_ALL_TESTS();

#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
@ -328,6 +329,12 @@ int main(int argc, char** argv) {
      "3rd.OnTestSuiteEnd",
      "2nd.OnTestCaseEnd",
      "1st.OnTestCaseEnd",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnEnvironmentsTearDownStart",
      "2nd.OnEnvironmentsTearDownStart",
      "3rd.OnEnvironmentsTearDownStart",
@ -380,6 +387,12 @@ int main(int argc, char** argv) {
      "3rd.OnTestSuiteEnd",
      "2nd.OnTestCaseEnd",
      "1st.OnTestCaseEnd",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnEnvironmentsTearDownStart",
      "2nd.OnEnvironmentsTearDownStart",
      "3rd.OnEnvironmentsTearDownStart",
@ -435,6 +448,12 @@ int main(int argc, char** argv) {
      "1st.OnTestEnd",
      "ListenerTest::TearDownTestSuite",
      "3rd.OnTestSuiteEnd",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnEnvironmentsTearDownStart",
      "2nd.OnEnvironmentsTearDownStart",
      "3rd.OnEnvironmentsTearDownStart",
@ -483,6 +502,12 @@ int main(int argc, char** argv) {
      "1st.OnTestEnd",
      "ListenerTest::TearDownTestSuite",
      "3rd.OnTestSuiteEnd",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnTestPartResult",
      "2nd.OnTestPartResult",
      "3rd.OnTestPartResult",
      "1st.OnEnvironmentsTearDownStart",
      "2nd.OnEnvironmentsTearDownStart",
      "3rd.OnEnvironmentsTearDownStart",

@ -4,15 +4,13 @@ googletest-output-test_.cc:#: Failure
Value of: false
Actual: false
Expected: true
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Expected equality of these values:
2
3
Stack trace: (omitted)

[0;32m[==========] [mRunning 90 tests from 43 test suites.
[0;32m[==========] [mRunning 91 tests from 44 test suites.
[0;32m[----------] [mGlobal test environment set-up.
FooEnvironment::SetUp() called.
BarEnvironment::SetUp() called.
@ -44,7 +42,6 @@ Expected equality of these values:
Which is: "\"Line"
actual
Which is: "actual \"string\""
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Expected equality of these values:
@ -52,7 +49,6 @@ Expected equality of these values:
Which is: "\"Line"
actual
Which is: "actual \"string\""
Stack trace: (omitted)

[0;31m[ FAILED ] [mNonfatalFailureTest.EscapesStringOperands
[0;32m[ RUN ] [mNonfatalFailureTest.DiffForLongStrings
@ -66,7 +62,6 @@ With diff:
-\"Line\0 1\"
Line 2

Stack trace: (omitted)

[0;31m[ FAILED ] [mNonfatalFailureTest.DiffForLongStrings
[0;32m[----------] [m3 tests from FatalFailureTest
@ -77,7 +72,6 @@ Expected equality of these values:
1
x
Which is: 2
Stack trace: (omitted)

[0;31m[ FAILED ] [mFatalFailureTest.FatalFailureInSubroutine
[0;32m[ RUN ] [mFatalFailureTest.FatalFailureInNestedSubroutine
@ -87,7 +81,6 @@ Expected equality of these values:
1
x
Which is: 2
Stack trace: (omitted)

[0;31m[ FAILED ] [mFatalFailureTest.FatalFailureInNestedSubroutine
[0;32m[ RUN ] [mFatalFailureTest.NonfatalFailureInSubroutine
@ -96,7 +89,6 @@ googletest-output-test_.cc:#: Failure
Value of: false
Actual: false
Expected: true
Stack trace: (omitted)

[0;31m[ FAILED ] [mFatalFailureTest.NonfatalFailureInSubroutine
[0;32m[----------] [m1 test from LoggingTest
@ -106,15 +98,21 @@ i == 0
i == 1
googletest-output-test_.cc:#: Failure
Expected: (3) >= (a[i]), actual: 3 vs 9
Stack trace: (omitted)

i == 2
i == 3
googletest-output-test_.cc:#: Failure
Expected: (3) >= (a[i]), actual: 3 vs 6
Stack trace: (omitted)

[0;31m[ FAILED ] [mLoggingTest.InterleavingLoggingAndAssertions
[0;32m[----------] [m1 test from NotRotten
[0;32m[ RUN ] [mNotRotten.ExpectFailingWithUnexecutedIsntRotten
googletest-output-test_.cc:#: Failure
Expected equality of these values:
1
2

[0;31m[ FAILED ] [mNotRotten.ExpectFailingWithUnexecutedIsntRotten
[0;32m[----------] [m7 tests from SCOPED_TRACETest
[0;32m[ RUN ] [mSCOPED_TRACETest.AcceptedValues
googletest-output-test_.cc:#: Failure
@ -125,7 +123,6 @@ googletest-output-test_.cc:#: (null)
googletest-output-test_.cc:#: 1337
googletest-output-test_.cc:#: std::string
googletest-output-test_.cc:#: literal string
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.AcceptedValues
[0;32m[ RUN ] [mSCOPED_TRACETest.ObeysScopes
@ -133,19 +130,16 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
This failure is expected, and shouldn't have a trace.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
This failure is expected, and should have a trace.
Google Test trace:
googletest-output-test_.cc:#: Expected trace
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
This failure is expected, and shouldn't have a trace.
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.ObeysScopes
[0;32m[ RUN ] [mSCOPED_TRACETest.WorksInLoop
@ -157,7 +151,6 @@ Expected equality of these values:
Which is: 1
Google Test trace:
googletest-output-test_.cc:#: i = 1
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Expected equality of these values:
@ -166,7 +159,6 @@ Expected equality of these values:
Which is: 2
Google Test trace:
googletest-output-test_.cc:#: i = 2
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.WorksInLoop
[0;32m[ RUN ] [mSCOPED_TRACETest.WorksInSubroutine
@ -178,7 +170,6 @@ Expected equality of these values:
Which is: 1
Google Test trace:
googletest-output-test_.cc:#: n = 1
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Expected equality of these values:
@ -187,7 +178,6 @@ Expected equality of these values:
Which is: 2
Google Test trace:
googletest-output-test_.cc:#: n = 2
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.WorksInSubroutine
[0;32m[ RUN ] [mSCOPED_TRACETest.CanBeNested
@ -200,7 +190,6 @@ Expected equality of these values:
Google Test trace:
googletest-output-test_.cc:#: n = 2
googletest-output-test_.cc:#:
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.CanBeNested
[0;32m[ RUN ] [mSCOPED_TRACETest.CanBeRepeated
@ -210,7 +199,6 @@ Failed
This failure is expected, and should contain trace point A.
Google Test trace:
googletest-output-test_.cc:#: A
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
@ -218,7 +206,6 @@ This failure is expected, and should contain trace point A and B.
Google Test trace:
googletest-output-test_.cc:#: B
googletest-output-test_.cc:#: A
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
@ -227,7 +214,6 @@ Google Test trace:
googletest-output-test_.cc:#: C
googletest-output-test_.cc:#: B
googletest-output-test_.cc:#: A
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
@ -236,7 +222,6 @@ Google Test trace:
googletest-output-test_.cc:#: D
googletest-output-test_.cc:#: B
googletest-output-test_.cc:#: A
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.CanBeRepeated
[0;32m[ RUN ] [mSCOPED_TRACETest.WorksConcurrently
@ -246,38 +231,32 @@ Failed
Expected failure #1 (in thread B, only trace B alive).
Google Test trace:
googletest-output-test_.cc:#: Trace B
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #2 (in thread A, trace A & B both alive).
Google Test trace:
googletest-output-test_.cc:#: Trace A
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #3 (in thread B, trace A & B both alive).
Google Test trace:
googletest-output-test_.cc:#: Trace B
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #4 (in thread B, only trace A alive).
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #5 (in thread A, only trace A alive).
Google Test trace:
googletest-output-test_.cc:#: Trace A
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #6 (in thread A, no trace alive).
Stack trace: (omitted)

[0;31m[ FAILED ] [mSCOPED_TRACETest.WorksConcurrently
[0;32m[----------] [m1 test from ScopedTraceTest
@ -287,7 +266,6 @@ Failed
Check that the trace is attached to a particular location.
Google Test trace:
explicit_file.cc:123: expected trace message
Stack trace: (omitted)

[0;31m[ FAILED ] [mScopedTraceTest.WithExplicitFileAndLine
[0;32m[----------] [m1 test from NonFatalFailureInFixtureConstructorTest
@ -296,27 +274,22 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected failure #1, in the test fixture c'tor.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #2, in SetUp().
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #3, in the test body.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #4, in TearDown.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #5, in the test fixture d'tor.
Stack trace: (omitted)

[0;31m[ FAILED ] [mNonFatalFailureInFixtureConstructorTest.FailureInConstructor
[0;32m[----------] [m1 test from FatalFailureInFixtureConstructorTest
@ -325,12 +298,10 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected failure #1, in the test fixture c'tor.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #2, in the test fixture d'tor.
Stack trace: (omitted)

[0;31m[ FAILED ] [mFatalFailureInFixtureConstructorTest.FailureInConstructor
[0;32m[----------] [m1 test from NonFatalFailureInSetUpTest
@ -339,22 +310,18 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected failure #1, in SetUp().
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #2, in the test function.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #3, in TearDown().
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #4, in the test fixture d'tor.
Stack trace: (omitted)

[0;31m[ FAILED ] [mNonFatalFailureInSetUpTest.FailureInSetUp
[0;32m[----------] [m1 test from FatalFailureInSetUpTest
@ -363,17 +330,14 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected failure #1, in SetUp().
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #2, in TearDown().
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected failure #3, in the test fixture d'tor.
Stack trace: (omitted)

[0;31m[ FAILED ] [mFatalFailureInSetUpTest.FailureInSetUp
[0;32m[----------] [m1 test from AddFailureAtTest
@ -381,7 +345,6 @@ Stack trace: (omitted)
foo.cc:42: Failure
Failed
Expected nonfatal failure in foo.cc
Stack trace: (omitted)

[0;31m[ FAILED ] [mAddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber
[0;32m[----------] [m1 test from GtestFailAtTest
@ -389,7 +352,6 @@ Stack trace: (omitted)
foo.cc:42: Failure
Failed
Expected fatal failure in foo.cc
Stack trace: (omitted)

[0;31m[ FAILED ] [mGtestFailAtTest.MessageContainsSpecifiedFileAndLineNumber
[0;32m[----------] [m4 tests from MixedUpTestSuiteTest
@ -407,7 +369,6 @@ using two different test fixture classes. This can happen if
the two classes are from different namespaces or translation
units and have the same name. You should probably rename one
of the classes to put the tests into different test suites.
Stack trace: (omitted)

[0;31m[ FAILED ] [mMixedUpTestSuiteTest.ThisShouldFail
[0;32m[ RUN ] [mMixedUpTestSuiteTest.ThisShouldFailToo
@ -420,7 +381,6 @@ using two different test fixture classes. This can happen if
the two classes are from different namespaces or translation
units and have the same name. You should probably rename one
of the classes to put the tests into different test suites.
Stack trace: (omitted)

[0;31m[ FAILED ] [mMixedUpTestSuiteTest.ThisShouldFailToo
[0;32m[----------] [m2 tests from MixedUpTestSuiteWithSameTestNameTest
@ -436,7 +396,6 @@ using two different test fixture classes. This can happen if
the two classes are from different namespaces or translation
units and have the same name. You should probably rename one
of the classes to put the tests into different test suites.
Stack trace: (omitted)

[0;31m[ FAILED ] [mMixedUpTestSuiteWithSameTestNameTest.TheSecondTestWithThisNameShouldFail
[0;32m[----------] [m2 tests from TEST_F_before_TEST_in_same_test_case
@ -452,7 +411,6 @@ test DefinedUsingTEST_F is defined using TEST_F but
test DefinedUsingTESTAndShouldFail is defined using TEST. You probably
want to change the TEST to TEST_F or move it to another test
case.
Stack trace: (omitted)

[0;31m[ FAILED ] [mTEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail
[0;32m[----------] [m2 tests from TEST_before_TEST_F_in_same_test_case
@ -468,7 +426,6 @@ test DefinedUsingTEST_FAndShouldFail is defined using TEST_F but
test DefinedUsingTEST is defined using TEST. You probably
want to change the TEST to TEST_F or move it to another test
case.
Stack trace: (omitted)

[0;31m[ FAILED ] [mTEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail
[0;32m[----------] [m8 tests from ExpectNonfatalFailureTest
@ -483,7 +440,6 @@ Stack trace: (omitted)
gtest.cc:#: Failure
Expected: 1 non-fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure
[0;32m[ RUN ] [mExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures
@ -494,16 +450,13 @@ Expected: 1 non-fatal failure
googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure 1.
Stack trace: (omitted)


googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure 2.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures
[0;32m[ RUN ] [mExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure
@ -514,10 +467,8 @@ Expected: 1 non-fatal failure
googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure
[0;32m[ RUN ] [mExpectNonfatalFailureTest.FailsWhenStatementReturns
@ -525,7 +476,6 @@ Stack trace: (omitted)
gtest.cc:#: Failure
Expected: 1 non-fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectNonfatalFailureTest.FailsWhenStatementReturns
[0;32m[ RUN ] [mExpectNonfatalFailureTest.FailsWhenStatementThrows
@ -533,7 +483,6 @@ Stack trace: (omitted)
gtest.cc:#: Failure
Expected: 1 non-fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectNonfatalFailureTest.FailsWhenStatementThrows
[0;32m[----------] [m8 tests from ExpectFatalFailureTest
@ -548,7 +497,6 @@ Stack trace: (omitted)
gtest.cc:#: Failure
Expected: 1 fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure
[0;32m[ RUN ] [mExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures
@ -559,16 +507,13 @@ Expected: 1 fatal failure
googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures
[0;32m[ RUN ] [mExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure
@ -579,10 +524,8 @@ Expected: 1 fatal failure
googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure
[0;32m[ RUN ] [mExpectFatalFailureTest.FailsWhenStatementReturns
@ -590,7 +533,6 @@ Stack trace: (omitted)
gtest.cc:#: Failure
Expected: 1 fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFatalFailureTest.FailsWhenStatementReturns
[0;32m[ RUN ] [mExpectFatalFailureTest.FailsWhenStatementThrows
@ -598,7 +540,6 @@ Stack trace: (omitted)
gtest.cc:#: Failure
Expected: 1 fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFatalFailureTest.FailsWhenStatementThrows
[0;32m[----------] [m2 tests from TypedTest/0, where TypeParam = int
@ -611,7 +552,6 @@ Expected equality of these values:
TypeParam()
Which is: 0
Expected failure
Stack trace: (omitted)

[0;31m[ FAILED ] [mTypedTest/0.Failure, where TypeParam = int
[0;32m[----------] [m2 tests from TypedTestWithNames/char0, where TypeParam = char
@ -620,7 +560,6 @@ Stack trace: (omitted)
[0;32m[ RUN ] [mTypedTestWithNames/char0.Failure
googletest-output-test_.cc:#: Failure
Failed
Stack trace: (omitted)

[0;31m[ FAILED ] [mTypedTestWithNames/char0.Failure, where TypeParam = char
[0;32m[----------] [m2 tests from TypedTestWithNames/int1, where TypeParam = int
@ -629,7 +568,6 @@ Stack trace: (omitted)
[0;32m[ RUN ] [mTypedTestWithNames/int1.Failure
googletest-output-test_.cc:#: Failure
Failed
Stack trace: (omitted)

[0;31m[ FAILED ] [mTypedTestWithNames/int1.Failure, where TypeParam = int
[0;32m[----------] [m2 tests from Unsigned/TypedTestP/0, where TypeParam = unsigned char
@ -643,7 +581,6 @@ Expected equality of these values:
TypeParam()
Which is: '\0'
Expected failure
Stack trace: (omitted)

[0;31m[ FAILED ] [mUnsigned/TypedTestP/0.Failure, where TypeParam = unsigned char
[0;32m[----------] [m2 tests from Unsigned/TypedTestP/1, where TypeParam = unsigned int
@ -657,7 +594,6 @@ Expected equality of these values:
TypeParam()
Which is: 0
Expected failure
Stack trace: (omitted)

[0;31m[ FAILED ] [mUnsigned/TypedTestP/1.Failure, where TypeParam = unsigned int
[0;32m[----------] [m2 tests from UnsignedCustomName/TypedTestP/unsignedChar0, where TypeParam = unsigned char
@ -671,7 +607,6 @@ Expected equality of these values:
TypeParam()
Which is: '\0'
Expected failure
Stack trace: (omitted)

[0;31m[ FAILED ] [mUnsignedCustomName/TypedTestP/unsignedChar0.Failure, where TypeParam = unsigned char
[0;32m[----------] [m2 tests from UnsignedCustomName/TypedTestP/unsignedInt1, where TypeParam = unsigned int
@ -685,7 +620,6 @@ Expected equality of these values:
TypeParam()
Which is: 0
Expected failure
Stack trace: (omitted)

[0;31m[ FAILED ] [mUnsignedCustomName/TypedTestP/unsignedInt1.Failure, where TypeParam = unsigned int
[0;32m[----------] [m4 tests from ExpectFailureTest
@ -698,7 +632,6 @@ googletest-output-test_.cc:#: Success:
Succeeded


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -707,10 +640,8 @@ Expected: 1 fatal failure
googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -719,10 +650,8 @@ Expected: 1 fatal failure containing "Some other fatal failure expected."
googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFailureTest.ExpectFatalFailure
[0;32m[ RUN ] [mExpectFailureTest.ExpectNonFatalFailure
@ -734,7 +663,6 @@ googletest-output-test_.cc:#: Success:
Succeeded


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -743,10 +671,8 @@ Expected: 1 non-fatal failure
googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -755,10 +681,8 @@ Expected: 1 non-fatal failure containing "Some other non-fatal failure."
googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFailureTest.ExpectNonFatalFailure
[0;32m[ RUN ] [mExpectFailureTest.ExpectFatalFailureOnAllThreads
@ -770,7 +694,6 @@ googletest-output-test_.cc:#: Success:
Succeeded


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -779,10 +702,8 @@ Expected: 1 fatal failure
googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -791,10 +712,8 @@ Expected: 1 fatal failure containing "Some other fatal failure expected."
googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFailureTest.ExpectFatalFailureOnAllThreads
[0;32m[ RUN ] [mExpectFailureTest.ExpectNonFatalFailureOnAllThreads
@ -806,7 +725,6 @@ googletest-output-test_.cc:#: Success:
Succeeded


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -815,10 +733,8 @@ Expected: 1 non-fatal failure
googletest-output-test_.cc:#: Fatal failure:
Failed
Expected fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

(expecting 1 failure)
gtest.cc:#: Failure
@ -827,10 +743,8 @@ Expected: 1 non-fatal failure containing "Some other non-fatal failure."
googletest-output-test_.cc:#: Non-fatal failure:
Failed
Expected non-fatal failure.
Stack trace: (omitted)


Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFailureTest.ExpectNonFatalFailureOnAllThreads
[0;32m[----------] [m2 tests from ExpectFailureWithThreadsTest
@ -839,12 +753,10 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected fatal failure.
Stack trace: (omitted)

gtest.cc:#: Failure
Expected: 1 fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFailureWithThreadsTest.ExpectFatalFailure
[0;32m[ RUN ] [mExpectFailureWithThreadsTest.ExpectNonFatalFailure
@ -852,12 +764,10 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected non-fatal failure.
Stack trace: (omitted)

gtest.cc:#: Failure
Expected: 1 non-fatal failure
Actual: 0 failures
Stack trace: (omitted)

[0;31m[ FAILED ] [mExpectFailureWithThreadsTest.ExpectNonFatalFailure
[0;32m[----------] [m1 test from ScopedFakeTestPartResultReporterTest
@ -866,12 +776,10 @@ Stack trace: (omitted)
googletest-output-test_.cc:#: Failure
Failed
Expected fatal failure.
Stack trace: (omitted)

googletest-output-test_.cc:#: Failure
Failed
Expected non-fatal failure.
Stack trace: (omitted)

[0;31m[ FAILED ] [mScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread
[0;32m[----------] [m2 tests from DynamicFixture
@ -889,7 +797,6 @@ googletest-output-test_.cc:#: Failure
Value of: Pass
Actual: false
Expected: true
Stack trace: (omitted)

DynamicFixture::TearDown
~DynamicFixture()
@ -923,7 +830,6 @@ test FixtureBase is defined using TEST_F but
test TestBase is defined using TEST. You probably
want to change the TEST to TEST_F or move it to another test
case.
Stack trace: (omitted)

~DynamicFixture()
[0;31m[ FAILED ] [mBadDynamicFixture1.TestBase
@ -947,7 +853,6 @@ using two different test fixture classes. This can happen if
the two classes are from different namespaces or translation
units and have the same name. You should probably rename one
of the classes to put the tests into different test suites.
Stack trace: (omitted)

~DynamicFixture()
[0;31m[ FAILED ] [mBadDynamicFixture2.Derived
@ -957,7 +862,6 @@ googletest-output-test_.cc:#: Failure
Value of: false
Actual: false
Expected: true
Stack trace: (omitted)

[0;32m[ RUN ] [mTestSuiteThatFailsToSetUp.ShouldNotRun
googletest-output-test_.cc:#: Skipped
@ -978,7 +882,6 @@ Expected equality of these values:
1
GetParam()
Which is: 2
Stack trace: (omitted)

[0;31m[ FAILED ] [mPrintingFailingParams/FailingParamTest.Fails/0, where GetParam() = 2
[0;32m[----------] [m1 test from EmptyBasenameParamInst
@ -994,7 +897,6 @@ Expected equality of these values:
GetParam()
Which is: "a"
Expected failure
Stack trace: (omitted)

[0;31m[ FAILED ] [mPrintingStrings/ParamTest.Failure/a, where GetParam() = "a"
[0;32m[----------] [m3 tests from GoogleTestVerification
@ -1007,7 +909,6 @@ Ideally, INSTANTIATE_TEST_SUITE_P should only ever be invoked from code that alw
To suppress this error for this test suite, insert the following line (in a non-header) in the namespace it is defined in:

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(NoTests);
Stack trace: (omitted)

[0;31m[ FAILED ] [mGoogleTestVerification.UninstantiatedParameterizedTestSuite<NoTests>
[0;32m[ RUN ] [mGoogleTestVerification.UninstantiatedParameterizedTestSuite<DetectNotInstantiatedTest>
@ -1019,7 +920,6 @@ Ideally, TEST_P definitions should only ever be included as part of binaries tha
To suppress this error for this test suite, insert the following line (in a non-header) in the namespace it is defined in:

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DetectNotInstantiatedTest);
Stack trace: (omitted)

[0;31m[ FAILED ] [mGoogleTestVerification.UninstantiatedParameterizedTestSuite<DetectNotInstantiatedTest>
[0;32m[ RUN ] [mGoogleTestVerification.UninstantiatedTypeParameterizedTestSuite<DetectNotInstantiatedTypesTest>
@ -1031,7 +931,6 @@ Ideally, TYPED_TEST_P definitions should only ever be included as part of binari
To suppress this error for this test suite, insert the following line (in a non-header) in the namespace it is defined in:

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DetectNotInstantiatedTypesTest);
Stack trace: (omitted)

[0;31m[ FAILED ] [mGoogleTestVerification.UninstantiatedTypeParameterizedTestSuite<DetectNotInstantiatedTypesTest>
[0;32m[----------] [mGlobal test environment tear-down
@ -1039,26 +938,25 @@ BarEnvironment::TearDown() called.
googletest-output-test_.cc:#: Failure
Failed
Expected non-fatal failure.
Stack trace: (omitted)

FooEnvironment::TearDown() called.
googletest-output-test_.cc:#: Failure
Failed
Expected fatal failure.
Stack trace: (omitted)

[0;32m[==========] [m90 tests from 43 test suites ran.
[0;32m[==========] [m91 tests from 44 test suites ran.
[0;32m[ PASSED ] [m31 tests.
[0;32m[ SKIPPED ] [m2 tests, listed below:
[0;32m[ SKIPPED ] [mTestSuiteThatFailsToSetUp.ShouldNotRun
[0;32m[ SKIPPED ] [mTestSuiteThatSkipsInSetUp.ShouldNotRun
[0;31m[ FAILED ] [m57 tests, listed below:
[0;31m[ FAILED ] [m58 tests, listed below:
[0;31m[ FAILED ] [mNonfatalFailureTest.EscapesStringOperands
[0;31m[ FAILED ] [mNonfatalFailureTest.DiffForLongStrings
[0;31m[ FAILED ] [mFatalFailureTest.FatalFailureInSubroutine
[0;31m[ FAILED ] [mFatalFailureTest.FatalFailureInNestedSubroutine
[0;31m[ FAILED ] [mFatalFailureTest.NonfatalFailureInSubroutine
[0;31m[ FAILED ] [mLoggingTest.InterleavingLoggingAndAssertions
[0;31m[ FAILED ] [mNotRotten.ExpectFailingWithUnexecutedIsntRotten
[0;31m[ FAILED ] [mSCOPED_TRACETest.AcceptedValues
[0;31m[ FAILED ] [mSCOPED_TRACETest.ObeysScopes
[0;31m[ FAILED ] [mSCOPED_TRACETest.WorksInLoop
@ -1111,7 +1009,7 @@ Stack trace: (omitted)
[0;31m[ FAILED ] [mGoogleTestVerification.UninstantiatedParameterizedTestSuite<DetectNotInstantiatedTest>
[0;31m[ FAILED ] [mGoogleTestVerification.UninstantiatedTypeParameterizedTestSuite<DetectNotInstantiatedTypesTest>

57 FAILED TESTS
58 FAILED TESTS
[0;31m[ FAILED ] [mTestSuiteThatFailsToSetUp: SetUpTestSuite or TearDownTestSuite

1 FAILED TEST SUITE
@ -1128,7 +1026,6 @@ Expected equality of these values:
1
x
Which is: 2
Stack trace: (omitted)

[ FAILED ] FatalFailureTest.FatalFailureInSubroutine (? ms)
[ RUN ] FatalFailureTest.FatalFailureInNestedSubroutine
@ -1138,7 +1035,6 @@ Expected equality of these values:
1
x
Which is: 2
Stack trace: (omitted)

[ FAILED ] FatalFailureTest.FatalFailureInNestedSubroutine (? ms)
[ RUN ] FatalFailureTest.NonfatalFailureInSubroutine
@ -1147,7 +1043,6 @@ googletest-output-test_.cc:#: Failure
Value of: false
Actual: false
Expected: true
Stack trace: (omitted)

[ FAILED ] FatalFailureTest.NonfatalFailureInSubroutine (? ms)
[----------] 3 tests from FatalFailureTest (? ms total)
@ -1159,13 +1054,11 @@ i == 0
i == 1
googletest-output-test_.cc:#: Failure
Expected: (3) >= (a[i]), actual: 3 vs 9
Stack trace: (omitted)

i == 2
i == 3
googletest-output-test_.cc:#: Failure
Expected: (3) >= (a[i]), actual: 3 vs 6
Stack trace: (omitted)

[ FAILED ] LoggingTest.InterleavingLoggingAndAssertions (? ms)
[----------] 1 test from LoggingTest (? ms total)

@ -350,14 +350,14 @@ class GTestOutputTest(gtest_test_utils.TestCase):
            '_googletest-output-test_normalized_actual.txt',
        ),
        'wb',
    ).write(normalized_actual)
    ).write(normalized_actual.encode())
    open(
        os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_googletest-output-test_normalized_golden.txt',
        ),
        'wb',
    ).write(normalized_golden)
    ).write(normalized_golden.encode())

    self.assertEqual(normalized_golden, normalized_actual)


@ -155,6 +155,15 @@ TEST(LoggingTest, InterleavingLoggingAndAssertions) {
  }
}

// A test that fails isn't reported as rotten, even if it actually
// contains rotten assertions. Only passing (green) tests can be rotten.
TEST(NotRotten, ExpectFailingWithUnexecutedIsntRotten) {
  EXPECT_EQ(1, 2);
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(3, 3);
  }
}

// Tests the SCOPED_TRACE macro.

// A helper function for testing SCOPED_TRACE.

@ -1164,6 +1164,10 @@ int main(int argc, char** argv) {

  ::testing::InitGoogleTest(&argc, argv);

  // This test has assertions in the environment TearDown(), which
  // doesn't play nicely with rotten-test detection.
  GTEST_FLAG_SET(treat_rotten_as_pass, true);

  // Used in GeneratorEvaluationTest test suite. Tests that value updated
  // here will NOT be used for instantiating tests in
  // GeneratorEvaluationTest.

@ -324,5 +324,9 @@ int main(int argc, char** argv) {

  AddGlobalTestEnvironment(new testing::internal::FinalSuccessChecker());

  // This test has assertions in the environment TearDown(), which
  // doesn't play nicely with rotten-test detection.
  GTEST_FLAG_SET(treat_rotten_as_pass, true);

  return RUN_ALL_TESTS();
}

googletest/test/gtest_rgt_output_test.py (new executable file, 284 lines)
@ -0,0 +1,284 @@
#!/usr/bin/env python
#
# Copyright (C) 2024 Sony Interactive Entertainment Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Sony Interactive Entertainment Inc. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

r"""Tests output of RGT detection for Google C++ Testing and Mocking Framework.

To update the golden file:
gtest_rgt_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gtest_rgt_output_test_ file.
"""

import difflib
import os
import re
import sys
from googletest.test import gtest_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

GOLDEN_NAME = 'gtest_rgt_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_rgt_output_test_')

COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes',
                           '--gtest_treat_rotten_as_pass=0'])
COMMAND_WITH_RGT_PASSING = ({}, [PROGRAM_PATH,
                                 '--gtest_treat_rotten_as_pass=1'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  return s.replace('\r\n', '\n').replace('\r', '\n')


def RemoveFileLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:LINE_NUMBER: '.
  """

  # Bazel uses gmock_main, CMake uses gtest_main, obscure both.
  test_output = re.sub(r'.*[/\\]((gtest|gmock)_main.cc)', r'\1', test_output)
  test_output = re.sub(r'(gtest|gmock)_main.cc', 'main.cc', test_output)
  test_output = re.sub(r'Running main\(\) from ', '', test_output)
  return re.sub(r'.*[/\\]((gtest_rgt_output_test_|gtest).cc)[:(](\d+)\)?\: ',
                r'\1:\3: ', test_output)


def ObscureLineNumbers(output):
  """Removes line numbers for error, Skipped and Failure notices.
  (But not Rotten notices. For helper methods the line number is
  all we get for identifying where they came from.)

  Args:
    output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'FILE_NAME:LINE_NUMBER: (error|Failure|Skipped)' replaced by
    'FILE_NAME:#: (error|Failure|Skipped)'.
  """

  return re.sub(r'((gtest_rgt_output_test_|gtest).cc):\d+: (error|Failure|Skipped)',
                r'\1:#: \3', output)


def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""

  return re.sub(r'\(\d+ ms', '(? ms', output)


def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""

  if IS_WINDOWS:
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)

  return test_output


def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""

  output = re.sub(r'\d+ tests?, listed below', '? tests, listed below', output)
  output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output)
  output = re.sub(
      r'\d+ tests? from \d+ test cases?', '? tests from ? test cases', output
  )
  output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1', output)
  return re.sub(r'\d+ tests?\.', '? tests.', output)


def NormalizeOutput(output):
  """Normalizes output (the output of gtest_rgt_output_test_.exe)."""

  output = ToUnixLineEnding(output)
  output = RemoveFileLocations(output)
  output = ObscureLineNumbers(output)
  output = RemoveTime(output)
  return output


def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
      environment variables to set, and element 1 is a string with
      the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """

  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)

  return p.output + ("Pass\n" if p.exited and p.exit_code == 0 else "Fail\n")


def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
      environment variables to set, and element 1 is a string with
      the command and any flags.
  """

  # Disables exception pop-ups on Windows.
  environ, cmdline = env_cmd
  environ = dict(environ) # Ensures we are modifying a copy.
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))


def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""

  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_RGT_PASSING))

# The golden file should not use a test with GTEST_DEBUG_RGT enabled.
# We can't directly check that, so instead look for Typed tests.
# These tests don't behave consistently across environments due to
# GCC issues, so they are enabled only when GTEST_DEBUG_RGT is set.

test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_RGT = 'IsRottenFixture' in test_list
SUPPORTS_TYPED = 'IsRottenTyped' in test_list

CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_RGT and not SUPPORTS_TYPED)

class GTestOutputTest(gtest_test_utils.TestCase):
  def testOutput(self):
    output = GetOutputOfAllCommands()

    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read().decode())
    golden_file.close()

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = NormalizeToCurrentPlatform(output)
    normalized_golden = NormalizeToCurrentPlatform(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(
          normalized_golden,
          normalized_actual,
          '\n'.join(
              difflib.unified_diff(
                  normalized_golden.split('\n'),
                  normalized_actual.split('\n'),
                  'golden',
                  'actual',
              )
          )
      )
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual)
      )
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_golden)
      )

    # This code is very handy when debugging golden file differences:
    if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
      with open(
          os.path.join(
              gtest_test_utils.GetSourceDir(),
              '_gtest_rgt_output_test_normalized_actual.txt'
          ),
          'wb') as f:
        f.write(normalized_actual.encode())
      with open(
          os.path.join(
              gtest_test_utils.GetSourceDir(),
              '_gtest_rgt_output_test_normalized_golden.txt'
          ),
          'wb') as f:
        f.write(normalized_golden.encode())

    self.assertEqual(normalized_golden, normalized_actual)


if __name__ == '__main__':
  if GENGOLDEN_FLAG in sys.argv:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output.encode())
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (rotten green tests).
Please build this test and generate the golden file using Blaze on Linux.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
googletest/test/gtest_rgt_output_test_.cc (new file, 382 lines)
@ -0,0 +1,382 @@
|
||||
// Copyright (C) 2024 Sony Interactive Entertainment Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Sony Interactive Entertainment Inc. nor the
|
||||
// names of its contributors may be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// The purpose of this file is to generate Google Test output for various
|
||||
// "rotten green test" conditions. In a "rotten" test, the test passes, but
|
||||
// at least one test assertion was not executed. The output will then be
|
||||
// verified by gtest_rgt_output_test.py to ensure that Google Test generates
|
||||
// the desired messages. Therefore, many tests in this file are MEANT TO BE
|
||||
// ROTTEN.
|
||||
//
|
||||
// However, NONE ARE MEANT TO FAIL. If the test has an overall failing result,
|
||||
// the helper methods won't have their rotten assertions reported, and that is
|
||||
// something we do want to verify. Instead, googletest-ouput-test shows that
|
||||
// (a) an overall Fail result means that rotten helper methods aren't reported;
|
||||
// (b) failing assertions are not reported as rotten.
|
||||
//
|
||||
// This test shows (c) rotten assertions (and rotten helpers) in a passing test
|
||||
// are reported properly.
|
||||
//
|
||||
// Note: "assertions" here can mean either EXPECT_* or ASSERT_*.
|
||||
//
|
||||
// Modeled on googletest-output-test.
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "src/gtest-internal-inl.h"
|
||||
|
||||
#if GTEST_HAS_RGT
|
||||
|
||||
// Tests that will not be reported as rotten for various reasons.
|
||||
|
||||
TEST(NotRotten, EmptyIsntRotten) { }
|
||||
|
||||
TEST(NotRotten, DISABLED_DisabledIsntRotten) {
|
||||
EXPECT_EQ(0, 1);
|
||||
}
|
||||
|
||||
TEST(NotRotten, SkippedIsntRotten) {
|
||||
if (testing::internal::AlwaysTrue()) {
|
||||
GTEST_SKIP();
|
||||
}
|
||||
EXPECT_EQ(1, 2);
|
||||
}
|
||||
|
||||
TEST(NotRotten, AllPass) {
|
||||
EXPECT_EQ(0, 0);
|
||||
EXPECT_EQ(1, 1);
|
||||
}
|
||||
|
||||
// EXPECT_[NON]FATAL_FAILURE executes an assertion that is supposed to fail.
|
||||
// The bookkeeping for assertions will show the failing one as not executed,
|
||||
// but because it's containined in a Test that passes, we shouldn't report it.
|
||||
|
||||
TEST(NotRotten, ExpectNonfatalFailureIsntRotten) {
|
||||
EXPECT_NONFATAL_FAILURE({ EXPECT_EQ(1, 0) << "Non-fatal"; }, "Non-fatal");
|
||||
}
|
||||
|
||||
TEST(NotRotten, ExpectFatalFailureIsntRotten) {
|
||||
EXPECT_FATAL_FAILURE( { ASSERT_EQ(1, 0) << "Fatal"; }, "Fatal");
|
||||
}
|
||||
|
||||
// ASSERT/EXPECT_NO_FATAL_FAILURE that is executed (pass or fail) isn't rotten.
|
||||
|
||||
void MustPass() { }
|
||||
|
||||
TEST(NotRotten, ExpectNoFatalFailurePasses) {
|
||||
EXPECT_NO_FATAL_FAILURE(MustPass());
|
||||
ASSERT_NO_FATAL_FAILURE(MustPass());
|
||||
}
|
||||
|
||||
// As an unfortunate consequence of how EXPECT_[NON]FATAL_FAILURE is handled,
|
||||
// if one is executed, it disables rotten detection entirely within the
|
||||
// containing Test. It would be nice to fix that someday.
|
||||
TEST(NotRotten, ExpectNonfatalFailureSadlyDisablesRotten) {
  EXPECT_NONFATAL_FAILURE({ EXPECT_EQ(1, 0) << "Non-fatal"; }, "Non-fatal");
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(1, 0);
  }
}

TEST(NotRotten, ExpectFatalFailureSadlyDisablesRotten) {
  EXPECT_FATAL_FAILURE({ ASSERT_EQ(1, 0) << "Fatal"; }, "Fatal");
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(1, 0);
  }
}

// Tests that will be reported as rotten.

TEST(IsRotten, SimpleExpect) {
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(0, 0);
  }
}

TEST(IsRotten, SimpleAssert) {
  if (testing::internal::AlwaysFalse()) {
    ASSERT_EQ(0, 0);
  }
}

TEST(IsRotten, OneIsEnough) {
  EXPECT_EQ(0, 0);
  if (testing::internal::AlwaysFalse()) {
    ASSERT_EQ(1, 1);
  }
}

// If EXPECT_[NON]FATAL_FAILURE exists, but isn't executed, that is detected.
TEST(IsRotten, MissedExpectFailure) {
  if (testing::internal::AlwaysFalse()) {
    EXPECT_NONFATAL_FAILURE({ EXPECT_EQ(1, 0) << "Non-fatal"; }, "Non-fatal");
    EXPECT_FATAL_FAILURE({ ASSERT_EQ(1, 0) << "Fatal"; }, "Fatal");
  }
}

TEST(IsRotten, ExpectNoFatalFailure) {
  if (testing::internal::AlwaysFalse()) {
    EXPECT_NO_FATAL_FAILURE(MustPass());
    ASSERT_NO_FATAL_FAILURE(MustPass());
  }
}

void RottenHelperNeverCalled() {
  EXPECT_EQ(0, 0);
}

// Test that RGT detection works correctly with fixtures.
// Just a few sample repeats, not everything.

class NotRottenFixture : public testing::Test {};
class IsRottenFixture : public testing::Test {};

TEST_F(NotRottenFixture, AllPass) {
  EXPECT_EQ(0, 0);
  ASSERT_EQ(1, 1);
}

TEST_F(NotRottenFixture, SkippedIsntRotten) {
  if (testing::internal::AlwaysTrue()) {
    GTEST_SKIP();
  }
  ASSERT_EQ(0, 1);
}

TEST_F(IsRottenFixture, SingleExpect) {
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(0, 0);
  }
}

// RGT and Parameterized Tests.

// TEST_P has one broad issue with respect to RGT.
//
// Rotten test detection relies on having compile-time access to the TestInfo*
// for each test. TEST_P doesn't have that. As a consequence, in RGT these
// tests behave like helper methods. Lacking any better information, RGT
// reporting assumes that all helper assertions should be executed.
// Unfortunately this means the usual things that disable rotten detection
// (disabled, skipped, EXPECT_[NON]FATAL_FAILURE) are ineffective.
// These tests will therefore be prone to false positives.
//
// However, that behavior is consistent, so we include it in the output test.
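//
// For illustration only (hypothetical expansion, not part of this patch):
// each TEST expands to a unique class carrying a static TestInfo pointer,
// so an assertion site can be tied to its test at compile time, roughly:
//
//   class Suite_Name_Test : public ::testing::Test {
//     static ::testing::TestInfo* const test_info_;  // compile-time handle
//   };
//
// A TEST_P body is only bound to its TestInfo objects when the suite is
// instantiated at run time, so no such static is visible where the
// assertion macros expand, and RGT falls back to helper-style bookkeeping.
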
class NotRottenParamTest : public testing::TestWithParam<int> {};

TEST_P(NotRottenParamTest, Passes) { EXPECT_GE(2, GetParam()); }

// If the assertions are conditional on the parameter, they aren't all executed
// on each iteration. Because we treat these like helpers, we report them at
// the end of the test program, and if every path was eventually taken, we're
// fine. If we support parameterized tests properly in the future, we should
// continue not reporting until all iterations have executed, to avoid false
// positives.

TEST_P(NotRottenParamTest, ConditionalOnParam) {
  if (GetParam() == 1) {
    EXPECT_EQ(1, 1);
  } else {
    EXPECT_EQ(2, 2);
  }
}

INSTANTIATE_TEST_SUITE_P(, NotRottenParamTest, testing::Values(1, 2));

class IsRottenParamTest : public testing::TestWithParam<int> {};

TEST_P(IsRottenParamTest, SkippedIsSadlyRotten) {
  if (testing::internal::AlwaysTrue()) {
    GTEST_SKIP();
  }
  EXPECT_EQ(0, GetParam());
}

TEST_P(IsRottenParamTest, DISABLED_DisabledIsSadlyRotten) {
  EXPECT_EQ(0, GetParam());
}

TEST_P(IsRottenParamTest, ActuallyRotten) {
  switch (GetParam()) {
    case 0:
      EXPECT_EQ(0, 0);
      break;
    case 1:
      EXPECT_EQ(1, 1);
      break;
    default:
      EXPECT_EQ(2, 2);
      break;
  }
}

INSTANTIATE_TEST_SUITE_P(, IsRottenParamTest, testing::Values(1, 2));

// RGT and Typed Tests.

// TYPED_TEST and TYPED_TEST_P have two broad issues with respect to RGT.
//
// 1) Rotten test detection relies on having compile-time access to the
//    TestInfo* for each test. TYPED_TEST[_P] don't have that. As a
//    consequence, in RGT they behave like helper methods. Lacking any
//    better information, RGT reporting assumes that all helper assertions
//    should be executed. Unfortunately this means the usual things that
//    disable rotten detection (disabled, skipped, EXPECT_[NON]FATAL_FAILURE)
//    are ineffective.
//    These tests will therefore be prone to false positives.
//
// 2) Assertion bookkeeping relies on certain static data being allocated to
//    particular object-file sections. GCC prior to GCC 14 (and sometimes,
//    even in GCC 14) does not obey section attributes for static data in a
//    template (or 'inline') function.
//    See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94342.
//    TYPED_TEST[_P] are implemented as templates, so the assertion info is
//    effectively lost with older GCC.
//    These tests will therefore be prone to false negatives.
//
// The first issue is a problem regardless of the test environment. If that
// were the only problem it would be fine to verify the behavior of these
// tests. (We do this with TEST_P, which has the same issue.)
//
// The second issue is specific to GCC prior to GCC 14. Being an environmental
// issue (e.g., results with GCC and Clang will differ), we need to handle it
// in a way that allows this test to pass for everyone.
//
// Therefore, we disable RGT output testing for TYPED_TEST and TYPED_TEST_P,
// unless GTEST_DEBUG_RGT is enabled. This allows users to optionally exercise
// these tests and make sure no Bad Stuff happens, but by default users aren't
// affected.
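//
// A minimal illustration of the GCC issue (hypothetical section name, not
// necessarily the one the implementation uses):
//
//   template <typename T>
//   void RecordAssertion() {
//     // With GCC < 14, `marker` may be placed in an ordinary data section
//     // rather than "rgt_data", so a later scan of that section misses it.
//     static int marker __attribute__((section("rgt_data"))) = 0;
//     (void)marker;
//   }
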
#if GTEST_DEBUG_RGT

using TypedTestTypes = testing::Types<char, int>;

template <typename T>
class NotRottenTypedTest : public testing::Test {};
TYPED_TEST_SUITE(NotRottenTypedTest, TypedTestTypes);

TYPED_TEST(NotRottenTypedTest, Passes) { EXPECT_EQ(0, TypeParam()); }

// If the assertions are conditional on the parameter, they aren't all
// executed in each instantiation. Because we treat these like helpers,
// and all the instantiations are deduplicated for reporting purposes,
// we report at the end of the test program, and it's clean. If we support
// typed tests properly, we should continue not reporting until all
// instantiations have executed, to avoid false positives.

TYPED_TEST(NotRottenTypedTest, ConditionalOnType) {
  if (std::is_same<TypeParam, char>::value) {
    EXPECT_EQ(0, 0);
  } else {
    EXPECT_EQ(1, 1);
  }
}

template <typename T>
class IsRottenTypedTest : public testing::Test {};
TYPED_TEST_SUITE(IsRottenTypedTest, TypedTestTypes);

TYPED_TEST(IsRottenTypedTest, SkippedIsSadlyRotten) {
  assert(gtest_test_info_ == nullptr);
  if (testing::internal::AlwaysTrue()) {
    GTEST_SKIP();
  }
  EXPECT_EQ(0, 1);
}

TYPED_TEST(IsRottenTypedTest, DISABLED_DisabledIsSadlyRotten) {
  EXPECT_EQ(0, 1);
}

TYPED_TEST(IsRottenTypedTest, ActuallyRotten) {
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(0, 0);
  }
}

// RGT and Typed Parameterized Tests.

// See the note above about general issues with TYPED_TEST_P.

template <typename T>
class NotRottenTypeParamTest : public testing::Test {};
TYPED_TEST_SUITE_P(NotRottenTypeParamTest);

TYPED_TEST_P(NotRottenTypeParamTest, Passes) { EXPECT_EQ(0, TypeParam()); }

// If the assertions are conditional on the parameter, they aren't all
// executed in each instantiation. Because we treat these like helpers,
// and all the instantiations are deduplicated for reporting purposes,
// we report at the end of the test program, and it's clean. If we support
// typed tests properly, we should continue not reporting until all
// instantiations have executed, to avoid false positives.

TYPED_TEST_P(NotRottenTypeParamTest, ConditionalOnType) {
  if (std::is_same<TypeParam, char>::value) {
    EXPECT_EQ(0, 0);
  } else {
    EXPECT_EQ(1, 1);
  }
}

REGISTER_TYPED_TEST_SUITE_P(NotRottenTypeParamTest, Passes, ConditionalOnType);
INSTANTIATE_TYPED_TEST_SUITE_P(My, NotRottenTypeParamTest, TypedTestTypes);

template <typename T>
class IsRottenTypeParamTest : public testing::Test {};
TYPED_TEST_SUITE_P(IsRottenTypeParamTest);

TYPED_TEST_P(IsRottenTypeParamTest, SkippedIsSadlyRotten) {
  if (testing::internal::AlwaysTrue()) {
    GTEST_SKIP();
  }
  EXPECT_EQ(0, 1);
}

TYPED_TEST_P(IsRottenTypeParamTest, DISABLED_DisabledIsSadlyRotten) {
  EXPECT_EQ(0, 1);
}

TYPED_TEST_P(IsRottenTypeParamTest, ActuallyRotten) {
  if (testing::internal::AlwaysFalse()) {
    EXPECT_EQ(0, 0);
  }
}

REGISTER_TYPED_TEST_SUITE_P(IsRottenTypeParamTest, SkippedIsSadlyRotten,
                            DISABLED_DisabledIsSadlyRotten, ActuallyRotten);
INSTANTIATE_TYPED_TEST_SUITE_P(My, IsRottenTypeParamTest, TypedTestTypes);

#endif  // GTEST_DEBUG_RGT

#endif  // GTEST_HAS_RGT
googletest/test/gtest_rgt_output_test_golden_lin.txt
Normal file, 256 lines
@@ -0,0 +1,256 @@
main.cc
[0;32m[==========] [mRunning 24 tests from 6 test suites.
[0;32m[----------] [mGlobal test environment set-up.
[0;32m[----------] [m8 tests from NotRotten
[0;32m[ RUN      ] [mNotRotten.EmptyIsntRotten
[0;32m[       OK ] [mNotRotten.EmptyIsntRotten (? ms)
[0;33m[ DISABLED ] [mNotRotten.DISABLED_DisabledIsntRotten
[0;32m[ RUN      ] [mNotRotten.SkippedIsntRotten
gtest_rgt_output_test_.cc:#: Skipped


[0;32m[  SKIPPED ] [mNotRotten.SkippedIsntRotten (? ms)
[0;32m[ RUN      ] [mNotRotten.AllPass
[0;32m[       OK ] [mNotRotten.AllPass (? ms)
[0;32m[ RUN      ] [mNotRotten.ExpectNonfatalFailureIsntRotten
[0;32m[       OK ] [mNotRotten.ExpectNonfatalFailureIsntRotten (? ms)
[0;32m[ RUN      ] [mNotRotten.ExpectFatalFailureIsntRotten
[0;32m[       OK ] [mNotRotten.ExpectFatalFailureIsntRotten (? ms)
[0;32m[ RUN      ] [mNotRotten.ExpectNoFatalFailurePasses
[0;32m[       OK ] [mNotRotten.ExpectNoFatalFailurePasses (? ms)
[0;32m[ RUN      ] [mNotRotten.ExpectNonfatalFailureSadlyDisablesRotten
[0;32m[       OK ] [mNotRotten.ExpectNonfatalFailureSadlyDisablesRotten (? ms)
[0;32m[ RUN      ] [mNotRotten.ExpectFatalFailureSadlyDisablesRotten
[0;32m[       OK ] [mNotRotten.ExpectFatalFailureSadlyDisablesRotten (? ms)
[0;32m[----------] [m8 tests from NotRotten (? ms total)

[0;32m[----------] [m5 tests from IsRotten
[0;32m[ RUN      ] [mIsRotten.SimpleExpect
gtest_rgt_output_test_.cc:117: Rotten

[0;33m[  ROTTEN  ] [mIsRotten.SimpleExpect (? ms)
[0;32m[ RUN      ] [mIsRotten.SimpleAssert
gtest_rgt_output_test_.cc:123: Rotten

[0;33m[  ROTTEN  ] [mIsRotten.SimpleAssert (? ms)
[0;32m[ RUN      ] [mIsRotten.OneIsEnough
gtest_rgt_output_test_.cc:130: Rotten

[0;33m[  ROTTEN  ] [mIsRotten.OneIsEnough (? ms)
[0;32m[ RUN      ] [mIsRotten.MissedExpectFailure
gtest_rgt_output_test_.cc:137: Rotten

gtest_rgt_output_test_.cc:138: Rotten

[0;33m[  ROTTEN  ] [mIsRotten.MissedExpectFailure (? ms)
[0;32m[ RUN      ] [mIsRotten.ExpectNoFatalFailure
gtest_rgt_output_test_.cc:144: Rotten

gtest_rgt_output_test_.cc:145: Rotten

[0;33m[  ROTTEN  ] [mIsRotten.ExpectNoFatalFailure (? ms)
[0;32m[----------] [m5 tests from IsRotten (? ms total)

[0;32m[----------] [m2 tests from NotRottenFixture
[0;32m[ RUN      ] [mNotRottenFixture.AllPass
[0;32m[       OK ] [mNotRottenFixture.AllPass (? ms)
[0;32m[ RUN      ] [mNotRottenFixture.SkippedIsntRotten
gtest_rgt_output_test_.cc:#: Skipped


[0;32m[  SKIPPED ] [mNotRottenFixture.SkippedIsntRotten (? ms)
[0;32m[----------] [m2 tests from NotRottenFixture (? ms total)

[0;32m[----------] [m1 test from IsRottenFixture
[0;32m[ RUN      ] [mIsRottenFixture.SingleExpect
gtest_rgt_output_test_.cc:173: Rotten

[0;33m[  ROTTEN  ] [mIsRottenFixture.SingleExpect (? ms)
[0;32m[----------] [m1 test from IsRottenFixture (? ms total)

[0;32m[----------] [m4 tests from NotRottenParamTest
[0;32m[ RUN      ] [mNotRottenParamTest.Passes/0
[0;32m[       OK ] [mNotRottenParamTest.Passes/0 (? ms)
[0;32m[ RUN      ] [mNotRottenParamTest.Passes/1
[0;32m[       OK ] [mNotRottenParamTest.Passes/1 (? ms)
[0;32m[ RUN      ] [mNotRottenParamTest.ConditionalOnParam/0
[0;32m[       OK ] [mNotRottenParamTest.ConditionalOnParam/0 (? ms)
[0;32m[ RUN      ] [mNotRottenParamTest.ConditionalOnParam/1
[0;32m[       OK ] [mNotRottenParamTest.ConditionalOnParam/1 (? ms)
[0;32m[----------] [m4 tests from NotRottenParamTest (? ms total)

[0;32m[----------] [m4 tests from IsRottenParamTest
[0;32m[ RUN      ] [mIsRottenParamTest.SkippedIsSadlyRotten/0
gtest_rgt_output_test_.cc:#: Skipped


[0;32m[  SKIPPED ] [mIsRottenParamTest.SkippedIsSadlyRotten/0 (? ms)
[0;32m[ RUN      ] [mIsRottenParamTest.SkippedIsSadlyRotten/1
gtest_rgt_output_test_.cc:#: Skipped


[0;32m[  SKIPPED ] [mIsRottenParamTest.SkippedIsSadlyRotten/1 (? ms)
[0;33m[ DISABLED ] [mIsRottenParamTest.DISABLED_DisabledIsSadlyRotten/0
[0;33m[ DISABLED ] [mIsRottenParamTest.DISABLED_DisabledIsSadlyRotten/1
[0;32m[ RUN      ] [mIsRottenParamTest.ActuallyRotten/0
[0;32m[       OK ] [mIsRottenParamTest.ActuallyRotten/0 (? ms)
[0;32m[ RUN      ] [mIsRottenParamTest.ActuallyRotten/1
[0;32m[       OK ] [mIsRottenParamTest.ActuallyRotten/1 (? ms)
[0;32m[----------] [m4 tests from IsRottenParamTest (? ms total)

[0;32m[----------] [mGlobal test environment tear-down
gtest_rgt_output_test_.cc:150: Rotten

gtest_rgt_output_test_.cc:217: Rotten

gtest_rgt_output_test_.cc:221: Rotten

gtest_rgt_output_test_.cc:227: Rotten

[0;32m[==========] [m24 tests from 6 test suites ran. (? ms total)
[0;32m[  PASSED  ] [m20 tests.
[0;32m[  SKIPPED ] [m4 tests, listed below:
[0;32m[  SKIPPED ] [mNotRotten.SkippedIsntRotten
[0;32m[  SKIPPED ] [mNotRottenFixture.SkippedIsntRotten
[0;32m[  SKIPPED ] [mIsRottenParamTest.SkippedIsSadlyRotten/0
[0;32m[  SKIPPED ] [mIsRottenParamTest.SkippedIsSadlyRotten/1
[0;33m[  ROTTEN  ] [m6 tests, listed below:
[0;33m[  ROTTEN  ] [mIsRotten.SimpleExpect
[0;33m[  ROTTEN  ] [mIsRotten.SimpleAssert
[0;33m[  ROTTEN  ] [mIsRotten.OneIsEnough
[0;33m[  ROTTEN  ] [mIsRotten.MissedExpectFailure
[0;33m[  ROTTEN  ] [mIsRotten.ExpectNoFatalFailure
[0;33m[  ROTTEN  ] [mIsRottenFixture.SingleExpect
[0;33m  YOU HAVE 6 ROTTEN TESTS

[m[0;33m  YOU HAVE 3 DISABLED TESTS

[mFail
main.cc
[==========] Running 24 tests from 6 test suites.
[----------] Global test environment set-up.
[----------] 8 tests from NotRotten
[ RUN      ] NotRotten.EmptyIsntRotten
[       OK ] NotRotten.EmptyIsntRotten (? ms)
[ DISABLED ] NotRotten.DISABLED_DisabledIsntRotten
[ RUN      ] NotRotten.SkippedIsntRotten
gtest_rgt_output_test_.cc:#: Skipped


[  SKIPPED ] NotRotten.SkippedIsntRotten (? ms)
[ RUN      ] NotRotten.AllPass
[       OK ] NotRotten.AllPass (? ms)
[ RUN      ] NotRotten.ExpectNonfatalFailureIsntRotten
[       OK ] NotRotten.ExpectNonfatalFailureIsntRotten (? ms)
[ RUN      ] NotRotten.ExpectFatalFailureIsntRotten
[       OK ] NotRotten.ExpectFatalFailureIsntRotten (? ms)
[ RUN      ] NotRotten.ExpectNoFatalFailurePasses
[       OK ] NotRotten.ExpectNoFatalFailurePasses (? ms)
[ RUN      ] NotRotten.ExpectNonfatalFailureSadlyDisablesRotten
[       OK ] NotRotten.ExpectNonfatalFailureSadlyDisablesRotten (? ms)
[ RUN      ] NotRotten.ExpectFatalFailureSadlyDisablesRotten
[       OK ] NotRotten.ExpectFatalFailureSadlyDisablesRotten (? ms)
[----------] 8 tests from NotRotten (? ms total)

[----------] 5 tests from IsRotten
[ RUN      ] IsRotten.SimpleExpect
gtest_rgt_output_test_.cc:117: Rotten

[  ROTTEN  ] IsRotten.SimpleExpect (? ms)
[ RUN      ] IsRotten.SimpleAssert
gtest_rgt_output_test_.cc:123: Rotten

[  ROTTEN  ] IsRotten.SimpleAssert (? ms)
[ RUN      ] IsRotten.OneIsEnough
gtest_rgt_output_test_.cc:130: Rotten

[  ROTTEN  ] IsRotten.OneIsEnough (? ms)
[ RUN      ] IsRotten.MissedExpectFailure
gtest_rgt_output_test_.cc:137: Rotten

gtest_rgt_output_test_.cc:138: Rotten

[  ROTTEN  ] IsRotten.MissedExpectFailure (? ms)
[ RUN      ] IsRotten.ExpectNoFatalFailure
gtest_rgt_output_test_.cc:144: Rotten

gtest_rgt_output_test_.cc:145: Rotten

[  ROTTEN  ] IsRotten.ExpectNoFatalFailure (? ms)
[----------] 5 tests from IsRotten (? ms total)

[----------] 2 tests from NotRottenFixture
[ RUN      ] NotRottenFixture.AllPass
[       OK ] NotRottenFixture.AllPass (? ms)
[ RUN      ] NotRottenFixture.SkippedIsntRotten
gtest_rgt_output_test_.cc:#: Skipped


[  SKIPPED ] NotRottenFixture.SkippedIsntRotten (? ms)
[----------] 2 tests from NotRottenFixture (? ms total)

[----------] 1 test from IsRottenFixture
[ RUN      ] IsRottenFixture.SingleExpect
gtest_rgt_output_test_.cc:173: Rotten

[  ROTTEN  ] IsRottenFixture.SingleExpect (? ms)
[----------] 1 test from IsRottenFixture (? ms total)

[----------] 4 tests from NotRottenParamTest
[ RUN      ] NotRottenParamTest.Passes/0
[       OK ] NotRottenParamTest.Passes/0 (? ms)
[ RUN      ] NotRottenParamTest.Passes/1
[       OK ] NotRottenParamTest.Passes/1 (? ms)
[ RUN      ] NotRottenParamTest.ConditionalOnParam/0
[       OK ] NotRottenParamTest.ConditionalOnParam/0 (? ms)
[ RUN      ] NotRottenParamTest.ConditionalOnParam/1
[       OK ] NotRottenParamTest.ConditionalOnParam/1 (? ms)
[----------] 4 tests from NotRottenParamTest (? ms total)

[----------] 4 tests from IsRottenParamTest
[ RUN      ] IsRottenParamTest.SkippedIsSadlyRotten/0
gtest_rgt_output_test_.cc:#: Skipped


[  SKIPPED ] IsRottenParamTest.SkippedIsSadlyRotten/0 (? ms)
[ RUN      ] IsRottenParamTest.SkippedIsSadlyRotten/1
gtest_rgt_output_test_.cc:#: Skipped


[  SKIPPED ] IsRottenParamTest.SkippedIsSadlyRotten/1 (? ms)
[ DISABLED ] IsRottenParamTest.DISABLED_DisabledIsSadlyRotten/0
[ DISABLED ] IsRottenParamTest.DISABLED_DisabledIsSadlyRotten/1
[ RUN      ] IsRottenParamTest.ActuallyRotten/0
[       OK ] IsRottenParamTest.ActuallyRotten/0 (? ms)
[ RUN      ] IsRottenParamTest.ActuallyRotten/1
[       OK ] IsRottenParamTest.ActuallyRotten/1 (? ms)
[----------] 4 tests from IsRottenParamTest (? ms total)

[----------] Global test environment tear-down
gtest_rgt_output_test_.cc:150: Rotten

gtest_rgt_output_test_.cc:217: Rotten

gtest_rgt_output_test_.cc:221: Rotten

gtest_rgt_output_test_.cc:227: Rotten

[==========] 24 tests from 6 test suites ran. (? ms total)
[  PASSED  ] 20 tests.
[  SKIPPED ] 4 tests, listed below:
[  SKIPPED ] NotRotten.SkippedIsntRotten
[  SKIPPED ] NotRottenFixture.SkippedIsntRotten
[  SKIPPED ] IsRottenParamTest.SkippedIsSadlyRotten/0
[  SKIPPED ] IsRottenParamTest.SkippedIsSadlyRotten/1
[  ROTTEN  ] 6 tests, listed below:
[  ROTTEN  ] IsRotten.SimpleExpect
[  ROTTEN  ] IsRotten.SimpleAssert
[  ROTTEN  ] IsRotten.OneIsEnough
[  ROTTEN  ] IsRotten.MissedExpectFailure
[  ROTTEN  ] IsRotten.ExpectNoFatalFailure
[  ROTTEN  ] IsRottenFixture.SingleExpect
  YOU HAVE 6 ROTTEN TESTS

  YOU HAVE 3 DISABLED TESTS

Pass
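
The two runs recorded above, one ending "Fail" and one ending "Pass", differ
in their color settings and in how rotten results are counted. Assuming the
standard gtest flag spelling (the exact invocations are chosen by
gtest_rgt_output_test.py), the corresponding command lines would look like:

  ./gtest_rgt_output_test_ --gtest_treat_rotten_as_pass=0   (rotten tests fail the run)
  ./gtest_rgt_output_test_ --gtest_treat_rotten_as_pass=1   (rotten tests are reported but do not fail)
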
@@ -49,7 +49,7 @@ TEST(CommandLineFlagsTest, CanBeAccessedInCodeOnceGTestHIsIncluded) {
      GTEST_FLAG_GET(show_internal_stack_frames) || GTEST_FLAG_GET(shuffle) ||
      GTEST_FLAG_GET(stack_trace_depth) > 0 ||
      GTEST_FLAG_GET(stream_result_to) != "unknown" ||
      GTEST_FLAG_GET(throw_on_failure);
      GTEST_FLAG_GET(throw_on_failure) || GTEST_FLAG_GET(treat_rotten_as_pass);
  EXPECT_TRUE(dummy || !dummy);  // Suppresses warning that dummy is unused.
}

@@ -7773,3 +7773,12 @@ TEST(PatternGlobbingTest, MatchesFilterEdgeCases) {
  EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter("a", ""));
  EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter("", ""));
}

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);

  // This test has deliberately un-executed assertions in it.
  GTEST_FLAG_SET(treat_rotten_as_pass, true);

  return RUN_ALL_TESTS();
}