def test_missing(self):
  """Runs gtest_fake_missing.py and checks accounting of a never-reported test.

  Verifies the progress output, the Flaky/Failed/Summary sections and the
  result file: Foo.Bar1 ends up flaky (2 tries), Foo.Bar2 failed (3 tries),
  Foo.Bar3 succeeded.
  """
  out, err, return_code = RunTest(
      [
        '--clusters', '10',
        '--jobs', '1',
        '--result', self.filename,
        os.path.join(
            ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_missing.py'),
        self.tempdirpath,
      ])
  self.assertEqual(1, return_code)
  expected_out_re = [
    r'\[0\/3\] \d\.\d\ds ',
    r'\[1\/4\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\)',
    # Only the header is included since the test was not run alone.
    re.escape('Note: Google Test filter = Foo.Bar3:Foo.Bar2:Foo.Bar1'),
    '',
    re.escape('[==========] Running 1 test from 1 test case.'),
    re.escape('[----------] Global test environment set-up.'),
    re.escape('[ RUN ] Foo.Bar1'),
    re.escape('[ FAILED ] Foo.Bar1 (100 ms)'),
    '',
    r'\[2/4\] \d\.\d\ds Foo\.Bar3 \(\d+\.\d+s\) *',
    r'\[3/5\] \d\.\d\ds Foo\.Bar2 \<unknown\> *',
    r'\[4/5\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\) \- retry \#1',
    # Both the header and footer is included since the test was run alone.
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
    r'\[5/6\] \d\.\d\ds Foo\.Bar2 \<unknown\> \- retry \#1 *',
  ] + [
    re.escape(l) for l in run_test_cases.running_serial_warning()
  ] + [
    r'\[6/6\] \d\.\d\ds Foo\.Bar2 \<unknown\> \- retry \#2 *',
    re.escape('Flaky tests:'),
    re.escape(' Foo.Bar1 (tried 2 times)'),
    re.escape('Failed tests:'),
    re.escape(' Foo.Bar2'),
    re.escape('Summary:'),
    re.escape(' Success: 1 33.33% ') + r' +\d+\.\d\ds',
    re.escape(' Flaky: 1 33.33% ') + r' +\d+\.\d\ds',
    re.escape(' Fail: 1 33.33% ') + r' +\d+\.\d\ds',
    # Fix: the second fragment was a non-raw literal with invalid escape
    # sequences ('\d', '\.'); it is now a raw string. The resulting regex
    # is unchanged since unrecognized escapes are kept verbatim.
    r' \d+\.\d\ds Done running 3 tests with 6 executions. '
    r'\d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [
    ('Foo.Bar1', 2),
    ('Foo.Bar2', 3),
    ('Foo.Bar3', 1),
  ]
  self._check_results_file(
      fail=[u'Foo.Bar2'],
      flaky=[u'Foo.Bar1'],
      missing=[],
      success=[u'Foo.Bar3'],
      test_cases=test_cases,
      duration=True)
def test_missing(self):
  """Runs gtest_fake_missing.py and checks accounting of a never-reported test.

  Verifies the progress output, the Flaky/Failed/Summary sections and the
  result file: Foo.Bar1 ends up flaky (2 tries), Foo.Bar2 failed (3 tries),
  Foo.Bar3 succeeded.
  """
  out, err, return_code = RunTest([
      '--clusters', '10',
      '--jobs', '1',
      '--result', self.filename,
      os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_missing.py'),
      self.tempdirpath,
  ])
  self.assertEqual(1, return_code)
  expected_out_re = [
      r'\[0\/3\] \d\.\d\ds ',
      r'\[1\/4\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\)',
      # Only the header is included since the test was not run alone.
      re.escape('Note: Google Test filter = Foo.Bar3:Foo.Bar2:Foo.Bar1'),
      '',
      re.escape('[==========] Running 1 test from 1 test case.'),
      re.escape('[----------] Global test environment set-up.'),
      re.escape('[ RUN ] Foo.Bar1'),
      re.escape('[ FAILED ] Foo.Bar1 (100 ms)'),
      '',
      r'\[2/4\] \d\.\d\ds Foo\.Bar3 \(\d+\.\d+s\) *',
      r'\[3/5\] \d\.\d\ds Foo\.Bar2 \<unknown\> *',
      r'\[4/5\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d+s\) \- retry \#1',
      # Both the header and footer is included since the test was run alone.
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
      r'\[5/6\] \d\.\d\ds Foo\.Bar2 \<unknown\> \- retry \#1 *',
  ] + [re.escape(l) for l in run_test_cases.running_serial_warning()] + [
      r'\[6/6\] \d\.\d\ds Foo\.Bar2 \<unknown\> \- retry \#2 *',
      re.escape('Flaky tests:'),
      re.escape(' Foo.Bar1 (tried 2 times)'),
      re.escape('Failed tests:'),
      re.escape(' Foo.Bar2'),
      re.escape('Summary:'),
      re.escape(' Success: 1 33.33% ') + r' +\d+\.\d\ds',
      re.escape(' Flaky: 1 33.33% ') + r' +\d+\.\d\ds',
      re.escape(' Fail: 1 33.33% ') + r' +\d+\.\d\ds',
      # Fix: the second fragment was a non-raw literal with invalid escape
      # sequences ('\d', '\.'); it is now a raw string. The resulting regex
      # is unchanged since unrecognized escapes are kept verbatim.
      r' \d+\.\d\ds Done running 3 tests with 6 executions. '
      r'\d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [('Foo.Bar1', 2), ('Foo.Bar2', 3), ('Foo.Bar3', 1)]
  self._check_results_file(
      fail=[u'Foo.Bar2'],
      flaky=[u'Foo.Bar1'],
      missing=[],
      success=[u'Foo.Bar3'],
      test_cases=test_cases,
      duration=True)
def test_simple_fail(self):
  """Runs gtest_fake_fail.py serially and checks the failure accounting.

  Baz.Fail is retried twice (3 executions total) and reported as failed;
  the three Foo.Bar* cases succeed on the first attempt.
  """
  out, err, return_code = RunTest(
      [
        # Linearize execution.
        '--clusters', '1',
        '--jobs', '1',
        '--result', self.filename,
        os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
      ])
  self.assertEqual(1, return_code)
  # The full per-test output block for Baz.Fail; it shows up once per attempt.
  test_failure_output = get_whole_test_re('Baz.Fail', True, '100', 'Baz.Fail')
  expected_out_re = [
    r'\[0/\d\] \d\.\d\ds ',
    r'\[1/\d\] \d\.\d\ds .+',
    r'\[2/\d\] \d\.\d\ds .+',
    r'\[3/\d\] \d\.\d\ds .+',
    r'\[4/\d\] \d\.\d\ds .+',
  ] + test_failure_output + [
    # Retries
    r'\[5/\d\] \d\.\d\ds .+ retry \#1',
  ] + test_failure_output + [
    re.escape(l) for l in run_test_cases.running_serial_warning()
  ] + [
    r'\[6/\d\] \d\.\d\ds .+ retry \#2',
  ] + test_failure_output + [
    re.escape('Failed tests:'),
    re.escape(' Baz.Fail'),
    re.escape('Summary:'),
    re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
    re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
    re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
    r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [
    ('Foo.Bar1', 1),
    ('Foo.Bar2', 1),
    ('Foo.Bar3', 1),
    ('Baz.Fail', 3),
  ]
  self._check_results_file(
      fail=['Baz.Fail'],
      flaky=[],
      missing=[],
      success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
      test_cases=test_cases,
      duration=True)
def test_simple_fail(self):
  """Runs gtest_fake_fail.py serially and checks the failure accounting.

  Baz.Fail is retried twice (3 executions total) and reported as failed;
  the three Foo.Bar* cases succeed on the first attempt.
  """
  out, err, return_code = RunTest([
      # Linearize execution.
      '--clusters', '1',
      '--jobs', '1',
      '--result', self.filename,
      os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
  ])
  self.assertEqual(1, return_code)
  # The full per-test output block for Baz.Fail; it shows up once per attempt.
  test_failure_output = get_whole_test_re('Baz.Fail', True, '100', 'Baz.Fail')
  expected_out_re = [
      r'\[0/\d\] \d\.\d\ds ',
      r'\[1/\d\] \d\.\d\ds .+',
      r'\[2/\d\] \d\.\d\ds .+',
      r'\[3/\d\] \d\.\d\ds .+',
      r'\[4/\d\] \d\.\d\ds .+',
  ] + test_failure_output + [
      # Retries
      r'\[5/\d\] \d\.\d\ds .+ retry \#1',
  ] + test_failure_output + [
      re.escape(l) for l in run_test_cases.running_serial_warning()
  ] + [
      r'\[6/\d\] \d\.\d\ds .+ retry \#2',
  ] + test_failure_output + [
      re.escape('Failed tests:'),
      re.escape(' Baz.Fail'),
      re.escape('Summary:'),
      re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
      re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
      re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
      r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [('Foo.Bar1', 1), ('Foo.Bar2', 1), ('Foo.Bar3', 1),
                ('Baz.Fail', 3)]
  self._check_results_file(
      fail=['Baz.Fail'],
      flaky=[],
      missing=[],
      success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
      test_cases=test_cases,
      duration=True)
def test_confused_pass(self):
  """Runs gtest_fake_confused_pass.py and checks it is treated as a failure.

  The test case reports that it passed but the process exit code is 1, so
  the runner retries it twice and ultimately marks Foo.Bar1 as failed.
  """
  out, err, return_code = RunTest(
      [
        '--result', self.filename,
        os.path.join(
            ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_confused_pass.py'),
      ])
  self.assertEqual(1, return_code)
  expected_out_re = [
    r'\[0\/1\] \d\.\d\ds ',
    r'\[1\/1\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\)',
    # TODO(maruel): Why 2 empty lines are stripped off.
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1')[:-2] + [
    r'\[2\/2\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #1',
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
    re.escape(l) for l in run_test_cases.running_serial_warning()
  ] + [
    r'\[3\/3\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #2',
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
    re.escape('Failed tests:'),
    re.escape(' Foo.Bar1'),
    re.escape('Summary:'),
    re.escape(' Success: 0 0.00% 0.00s'),
    re.escape(' Flaky: 0 0.00% 0.00s'),
    re.escape(' Fail: 1 100.00% ') + r' +\d+\.\d\ds',
    r' \d+\.\d\ds Done running 1 tests with 3 executions. \d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [
    ('Foo.Bar1', 3),
  ]
  self._check_results_file(
      fail=[u'Foo.Bar1'],
      flaky=[],
      missing=[],
      success=[],
      test_cases=test_cases,
      duration=True)
def test_confused_pass(self):
  """Runs gtest_fake_confused_pass.py and checks it is treated as a failure.

  The test case reports that it passed but the process exit code is 1, so
  the runner retries it twice and ultimately marks Foo.Bar1 as failed.
  """
  out, err, return_code = RunTest([
      '--result', self.filename,
      os.path.join(ROOT_DIR, 'tests', 'gtest_fake',
                   'gtest_fake_confused_pass.py'),
  ])
  self.assertEqual(1, return_code)
  expected_out_re = [
      r'\[0\/1\] \d\.\d\ds ',
      r'\[1\/1\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\)',
      # TODO(maruel): Why 2 empty lines are stripped off.
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1')[:-2] + [
      r'\[2\/2\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #1',
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
      re.escape(l) for l in run_test_cases.running_serial_warning()
  ] + [
      r'\[3\/3\] \d\.\d\ds Foo\.Bar1 \(\d+\.\d\ds\) - retry #2',
  ] + get_whole_test_re('Foo.Bar1', False, '100', 'Foo.Bar1') + [
      re.escape('Failed tests:'),
      re.escape(' Foo.Bar1'),
      re.escape('Summary:'),
      re.escape(' Success: 0 0.00% 0.00s'),
      re.escape(' Flaky: 0 0.00% 0.00s'),
      re.escape(' Fail: 1 100.00% ') + r' +\d+\.\d\ds',
      r' \d+\.\d\ds Done running 1 tests with 3 executions. \d+\.\d\d test/s',
  ]
  self._check_results(expected_out_re, out, err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [
      ('Foo.Bar1', 3),
  ]
  self._check_results_file(
      fail=[u'Foo.Bar1'], flaky=[], missing=[], success=[],
      test_cases=test_cases, duration=True)
def test_simple_fail_verbose(self):
  """Runs gtest_fake_fail.py with --verbose and checks the verbose output.

  We take verbosity seriously so test it: every execution (including
  retries of Baz.Fail) must print its full per-test output block, and the
  logging line on stderr is checked manually.
  """
  out, err, return_code = RunTest(
      [
        # Linearize execution.
        '--clusters', '1',
        '--jobs', '1',
        '--verbose',
        '--result', self.filename,
        os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
      ])
  self.assertEqual(1, return_code)
  expected_out_re = [
    r'\[0/\d\] \d\.\d\ds ',
  ]
  # Execution order: each entry is one run; Baz.Fail appears three times
  # because it is retried twice.
  test_cases = (
      'Foo.Bar3',
      'Foo.Bar1',
      'Foo.Bar2',
      'Baz.Fail',
      'Baz.Fail',
      'Baz.Fail',
  )
  for index, name in enumerate(test_cases):
    if index + 1 == len(test_cases):
      # We are about to retry the test serially, so check for the warning.
      expected_out_re.extend(
          re.escape(l) for l in run_test_cases.running_serial_warning())
    expected_out_re.append(
        r'\[%d/\d\] \d\.\d\ds ' % (index + 1) + re.escape(name) + ' .+')
    expected_out_re.extend(
        get_whole_test_re(name, 'Fail' in name, '100', name))
  expected_out_re.extend([
    re.escape('Failed tests:'),
    re.escape(' Baz.Fail'),
    re.escape('Summary:'),
    re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
    re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
    re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
    r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
  ])
  self._check_results(expected_out_re, out, '')
  # Test 'err' manually.
  self.assertTrue(
      re.match(
          r'INFO run_test_cases\(\d+\)\: Found 4 test cases in \S+ '
          r'\S+gtest_fake_fail.py',
          err.strip()),
      err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [
    ('Foo.Bar1', 1),
    ('Foo.Bar2', 1),
    ('Foo.Bar3', 1),
    ('Baz.Fail', 3),
  ]
  self._check_results_file(
      fail=['Baz.Fail'],
      flaky=[],
      missing=[],
      success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
      test_cases=test_cases,
      duration=True)
def test_simple_fail_verbose(self):
  """Runs gtest_fake_fail.py with --verbose and checks the verbose output.

  We take verbosity seriously so test it: every execution (including
  retries of Baz.Fail) must print its full per-test output block, and the
  logging line on stderr is checked manually.
  """
  out, err, return_code = RunTest([
      # Linearize execution.
      '--clusters', '1',
      '--jobs', '1',
      '--verbose',
      '--result', self.filename,
      os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_fail.py'),
  ])
  self.assertEqual(1, return_code)
  expected_out_re = [r'\[0/\d\] \d\.\d\ds ']
  # Execution order: each entry is one run; Baz.Fail appears three times
  # because it is retried twice.
  test_cases = (
      'Foo.Bar3',
      'Foo.Bar1',
      'Foo.Bar2',
      'Baz.Fail',
      'Baz.Fail',
      'Baz.Fail',
  )
  for index, name in enumerate(test_cases):
    if index + 1 == len(test_cases):
      # We are about to retry the test serially, so check for the warning.
      expected_out_re.extend(
          re.escape(l) for l in run_test_cases.running_serial_warning())
    expected_out_re.append(r'\[%d/\d\] \d\.\d\ds ' % (index + 1) +
                           re.escape(name) + ' .+')
    expected_out_re.extend(
        get_whole_test_re(name, 'Fail' in name, '100', name))
  expected_out_re.extend([
      re.escape('Failed tests:'),
      re.escape(' Baz.Fail'),
      re.escape('Summary:'),
      re.escape(' Success: 3 75.00%') + r' +\d+\.\d\ds',
      re.escape(' Flaky: 0 0.00%') + r' +\d+\.\d\ds',
      re.escape(' Fail: 1 25.00%') + r' +\d+\.\d\ds',
      r' \d+\.\d\ds Done running 4 tests with 6 executions. \d+\.\d\d test/s',
  ])
  self._check_results(expected_out_re, out, '')
  # Test 'err' manually.
  self.assertTrue(
      re.match(
          r'INFO run_test_cases\(\d+\)\: Found 4 test cases in \S+ '
          r'\S+gtest_fake_fail.py', err.strip()), err)
  # (test case name, number of executions) pairs expected in the result file.
  test_cases = [('Foo.Bar1', 1), ('Foo.Bar2', 1), ('Foo.Bar3', 1),
                ('Baz.Fail', 3)]
  self._check_results_file(
      fail=['Baz.Fail'],
      flaky=[],
      missing=[],
      success=[u'Foo.Bar1', u'Foo.Bar2', u'Foo.Bar3'],
      test_cases=test_cases,
      duration=True)