Example #1
 def setUp(self):
     super(ResultCollectionTest, self).setUp()
     self.full_collection = ResultCollection(test_results=[
         PASSED_RESULT, FAILED_RESULT, FAILED_RESULT_DUPLICATE,
         DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT, CRASHED_RESULT,
         FLAKY_PASS_RESULT, FLAKY_FAIL_RESULT, ABORTED_RESULT
     ])
Example #2
 def test_add_name_prefix_to_tests(self):
     """Tests add_name_prefix_to_tests."""
     passed = copy.copy(PASSED_RESULT)
     disabled = copy.copy(DISABLED_RESULT)
     collection = ResultCollection(test_results=[passed, disabled])
     some_prefix = 'Some/prefix'
     collection.add_name_prefix_to_tests(some_prefix)
     for test_result in collection.test_results:
         self.assertTrue(test_result.name.startswith(some_prefix))
Example #3
    def __init__(self):
        # Test results from the parser.
        self._result_collection = ResultCollection()

        # State tracking for log parsing
        self.completed = False
        self._current_test = ''
        self._failure_description = []
        self._current_report_hash = ''
        self._current_report = []
        self._parsing_failures = False

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self._internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout',
        # 'warning'). Warning indicates that a test did not pass when run in
        # parallel with other tests but passed when run alone. The description is
        # a list of lines detailing the test's error, as reported in the log.
        self._test_status = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self._disabled_tests = 0
        self._flaky_tests = 0

        test_name_regexp = r'\-\[(\w+)\s(\w+)\]'
        self._test_name = re.compile(test_name_regexp)
        self._test_start = re.compile(r'Test Case \'' + test_name_regexp +
                                      r'\' started\.')
        self._test_ok = re.compile(r'Test Case \'' + test_name_regexp +
                                   r'\' passed\s+\(\d+\.\d+\s+seconds\)?.')
        self._test_fail = re.compile(r'Test Case \'' + test_name_regexp +
                                     r'\' failed\s+\(\d+\.\d+\s+seconds\)?.')
        self._test_execute_succeeded = re.compile(
            r'\*\*\s+TEST\s+EXECUTE\s+SUCCEEDED\s+\*\*')
        self._test_execute_failed = re.compile(
            r'\*\*\s+TEST\s+EXECUTE\s+FAILED\s+\*\*')
        self._retry_message = re.compile('RETRYING FAILED TESTS:')
        self.retrying_failed = False

        self._system_alert_present_message = re.compile(
            r'\bSystem alert view is present, so skipping all tests\b')
        self.system_alert_present = False

        self.TEST_STATUS_MAP = {
            'OK': TEST_SUCCESS_LABEL,
            'failed': TEST_FAILURE_LABEL,
            'timeout': TEST_TIMEOUT_LABEL,
            'warning': TEST_WARNING_LABEL
        }
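
For reference, a minimal sketch (not part of the original source; the test
class and method names are made up) of what the compiled patterns above match
against typical xcodebuild log lines:

import re

test_name_regexp = r'\-\[(\w+)\s(\w+)\]'
test_start = re.compile(r'Test Case \'' + test_name_regexp + r'\' started\.')

line = "Test Case '-[LoginTest testValid]' started."  # made-up log line
match = test_start.search(line)
assert match and match.group(1) == 'LoginTest' and match.group(2) == 'testValid'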
Example #4
 def test_add_and_report_test_names_status(self, mock_sink_init,
                                           mock_sink_close, mock_report):
     """Tests add_test_names_status."""
     test_names = ['test1', 'test2', 'test3']
     collection = ResultCollection(test_results=[PASSED_RESULT])
     collection.add_and_report_test_names_status(test_names,
                                                 TestStatus.SKIP)
     self.assertEqual(collection.test_results[0], PASSED_RESULT)
     unexpected_skipped = collection.tests_by_expression(
         lambda t: not t.expected() and t.status == TestStatus.SKIP)
     self.assertEqual(unexpected_skipped, set(['test1', 'test2', 'test3']))
     self.assertEqual(1, len(mock_sink_init.mock_calls))
     self.assertEqual(3, len(mock_report.mock_calls))
     self.assertEqual(1, len(mock_sink_close.mock_calls))
Example #5
 def test_unexpected_skipped_not_reported(self, mock_result):
     """Unexpected skip not reported for these selecting tests at runtime."""
     crashed_collection = ResultCollection(
         test_results=[TestResult('Class1/passedTest1', TestStatus.PASS)])
     crashed_collection.crashed = True
     mock_result.return_value = crashed_collection
     tr = xcodebuild_runner.DeviceXcodeTestRunner(_FLAKY_EGTEST_APP_PATH,
                                                  "fake-host-app-path",
                                                  "fake-out-dir")
     self.assertFalse(tr.launch())
     self.assertEqual(len(tr.test_results['tests']), 2)
     tests = tr.test_results['tests']
     self.assertEqual(tests['BUILD_INTERRUPTED']['actual'], 'CRASH')
     self.assertEqual(tests['Class1/passedTest1']['actual'], 'PASS')
Example #6
    def test_add_result_collection_default(self):
        """Tests add_result_collection default (merge crash info)."""
        collection = ResultCollection(test_results=[FAILED_RESULT])
        self.assertFalse(collection.crashed)
        collection.append_crash_message('Crash1')

        crashed_collection = ResultCollection(test_results=[PASSED_RESULT],
                                              crashed=True)
        crashed_collection.append_crash_message('Crash2')

        collection.add_result_collection(crashed_collection)
        self.assertTrue(collection.crashed)
        self.assertEqual(collection.crash_message, 'Crash1\nCrash2')
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, PASSED_RESULT])
Example #7
 def test_launch_command_not_restart_crashed_attempt(
         self, mock_collect_results):
     """Crashed first attempt of runtime select test suite won't be retried."""
     egtests = test_apps.EgtestsApp(_FLAKY_EGTEST_APP_PATH)
     crashed_collection = ResultCollection()
     crashed_collection.crashed = True
     mock_collect_results.return_value = crashed_collection
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3)
     overall_result = launch_command.launch()
     self.assertEqual(len(overall_result.all_test_names()), 0)
     self.assertEqual(overall_result.expected_tests(), set([]))
     self.assertTrue(overall_result.crashed)
Example #8
 def test_unexpected_skipped_crash_reported(self, mock_result):
     """Tests launch method in DeviceXcodeTestRunner"""
     tr = xcodebuild_runner.DeviceXcodeTestRunner("fake-app-path",
                                                  "fake-host-app-path",
                                                  "fake-out-dir")
     crashed_collection = ResultCollection(
         test_results=[TestResult('Class1/passedTest1', TestStatus.PASS)])
     crashed_collection.crashed = True
     mock_result.return_value = crashed_collection
     self.assertFalse(tr.launch())
     self.assertEqual(len(tr.test_results['tests']), 3)
     tests = tr.test_results['tests']
     self.assertEqual(tests['BUILD_INTERRUPTED']['actual'], 'CRASH')
     self.assertEqual(tests['Class1/passedTest1']['actual'], 'PASS')
     self.assertEqual(tests['Class1/passedTest2']['actual'], 'SKIP')
     self.assertEqual(tests['Class1/passedTest2']['expected'], 'PASS')
Example #9
 def test_launch(self, mock_result):
     """Tests launch method in DeviceXcodeTestRunner"""
     tr = xcodebuild_runner.DeviceXcodeTestRunner("fake-app-path",
                                                  "fake-host-app-path",
                                                  "fake-out-dir")
     mock_result.return_value = ResultCollection(test_results=[
         TestResult('Class1/passedTest1', TestStatus.PASS),
         TestResult('Class1/passedTest2', TestStatus.PASS)
     ])
     self.assertTrue(tr.launch())
     self.assertEqual(len(tr.test_results['tests']), 2)
Example #10
 def testLaunchCommand_restartCrashed1stAttempt(self, mock_collect_results):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     crashed_collection = ResultCollection()
     crashed_collection.crashed = True
     mock_collect_results.side_effect = [
         crashed_collection,
         ResultCollection(test_results=[
             TestResult('Class1/passedTest1', TestStatus.PASS),
             TestResult('Class1/passedTest2', TestStatus.PASS)
         ])
     ]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3)
     overall_result = launch_command.launch()
     self.assertFalse(overall_result.crashed)
     self.assertEqual(len(overall_result.all_test_names()), 2)
     self.assertEqual(overall_result.expected_tests(),
                      set(['Class1/passedTest1', 'Class1/passedTest2']))
Example #11
 def test_init(self):
     """Tests class initialization."""
     collection = ResultCollection(test_results=[
         PASSED_RESULT, DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT
     ],
                                   crashed=True)
     self.assertTrue(collection.crashed)
     self.assertEqual(collection.crash_message, '')
     self.assertEqual(
         collection.test_results,
         [PASSED_RESULT, DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT])
Example #12
 def test_validate_kwargs(self):
     """Tests _validate_kwargs."""
     with self.assertRaises(AssertionError) as context:
         TestResult('name', TestStatus.PASS, unknown='foo')
     expected_message = (
         'Invalid keyword argument(s) in set([\'unknown\']) passed in!')
     self.assertTrue(expected_message in str(context.exception))
     with self.assertRaises(AssertionError) as context:
         ResultCollection(test_log='foo')
     expected_message = (
         'Invalid keyword argument(s) in set([\'test_log\']) passed in!')
     self.assertTrue(expected_message in str(context.exception))
Example #13
    def _test_status_summary(summary_plist):
        """Gets status summary from TestSummaries.plist.

    Args:
      summary_plist: (str) A path to the plist file.

    Returns:
      test_results.ResultCollection: Results of tests parsed.
    """
        result = ResultCollection()
        root_summary = plistlib.readPlist(summary_plist)
        for summary in root_summary['TestableSummaries']:
            if not summary['Tests']:
                continue
            for test_suite in summary['Tests'][0]['Subtests'][0]['Subtests']:
                for test in test_suite['Subtests']:
                    if test['TestStatus'] == 'Success':
                        result.add_test_result(
                            TestResult(test['TestIdentifier'],
                                       TestStatus.PASS))
                    else:
                        message = ''
                        for failure_summary in test['FailureSummaries']:
                            failure_message = failure_summary['FileName']
                            if failure_summary['LineNumber']:
                                failure_message = '%s: line %s' % (
                                    failure_message,
                                    failure_summary['LineNumber'])
                            message += failure_message + '\n'
                            message += failure_summary['Message'] + '\n'
                        result.add_test_result(
                            TestResult(test['TestIdentifier'],
                                       TestStatus.FAIL,
                                       test_log=message))
        return result
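
The nesting this walker expects is easier to see in data form. A hand-written
sketch (an illustrative assumption, not a real TestSummaries.plist) of the
minimal structure _test_status_summary traverses:

root_summary = {
    'TestableSummaries': [{
        'Tests': [{
            'Subtests': [{
                'Subtests': [{              # iterated as test_suite
                    'Subtests': [{          # iterated as test
                        'TestIdentifier': 'SomeSuite/someTest',  # made up
                        'TestStatus': 'Success',
                        # Failing tests carry 'FailureSummaries' with
                        # 'FileName', 'LineNumber' and 'Message' entries.
                    }],
                }],
            }],
        }],
    }],
}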
Example #14
    def test_add_result_collection_ignore(self):
        """Tests add_result_collection overwrite."""
        collection = ResultCollection(test_results=[FAILED_RESULT])
        self.assertFalse(collection.crashed)

        crashed_collection = ResultCollection(test_results=[PASSED_RESULT],
                                              crashed=True)
        crashed_collection.append_crash_message('Crash2')

        collection.add_result_collection(crashed_collection, ignore_crash=True)
        self.assertFalse(collection.crashed)
        self.assertEqual(collection.crash_message, '')
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, PASSED_RESULT])
Example #15
    def collect_test_results(output_folder, output):
        """Gets XCtest result data from Info.plist and copies artifacts.

    Args:
      output_folder: (str) A path to the output folder.
      output: [str] Output of the test run.
    Returns:
      test_result.ResultCollection representing all test results.
    """
        output_folder = _sanitize_str(output_folder)
        output = _sanitize_str_list(output)
        overall_collected_result = ResultCollection()
        plist_path = os.path.join(output_folder, 'Info.plist')
        if not os.path.exists(plist_path):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message += (
                '%s with test results does not exist.\n' % plist_path +
                '\n'.join(output))
            overall_collected_result.add_result_collection(
                parse_passed_failed_tests_for_interrupted_run(output))
            return overall_collected_result

        root = plistlib.readPlist(plist_path)

        for action in root['Actions']:
            action_result = action['ActionResult']
            if ((root['TestsCount'] == 0 and root['TestsFailedCount'] == 0)
                    or 'TestSummaryPath' not in action_result):
                overall_collected_result.crashed = True
                if ('ErrorSummaries' in action_result
                        and action_result['ErrorSummaries']):
                    overall_collected_result.crash_message = '\n'.join(
                        _sanitize_str_list([
                            error_summary['Message'] for error_summary in
                            action_result['ErrorSummaries']
                        ]))

            else:
                summary_plist = os.path.join(os.path.dirname(plist_path),
                                             action_result['TestSummaryPath'])
                overall_collected_result.add_result_collection(
                    XcodeLogParser._test_status_summary(summary_plist))

        XcodeLogParser._copy_screenshots(output_folder)
        return overall_collected_result
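
A hand-written sketch (an illustrative assumption, not real Xcode output) of
the Info.plist fields collect_test_results reads:

root = {
    'TestsCount': 2,
    'TestsFailedCount': 1,
    'Actions': [{
        'ActionResult': {
            # Present on a normal run; relative to the Info.plist directory.
            'TestSummaryPath': '1_Test/TestSummaries.plist',
            # On crashed runs 'TestSummaryPath' is missing and
            # 'ErrorSummaries' may hold [{'Message': ...}] entries instead.
        },
    }],
}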
Example #16
 def test_add_test_names_status(self):
     """Tests add_test_names_status."""
     test_names = ['test1', 'test2', 'test3']
     collection = ResultCollection(test_results=[PASSED_RESULT])
     collection.add_test_names_status(test_names, TestStatus.SKIP)
     disabled_test_names = ['test4', 'test5', 'test6']
     collection.add_test_names_status(disabled_test_names,
                                      TestStatus.SKIP,
                                      expected_status=TestStatus.SKIP)
     self.assertEqual(collection.test_results[0], PASSED_RESULT)
     unexpected_skipped = collection.tests_by_expression(
         lambda t: not t.expected() and t.status == TestStatus.SKIP)
     self.assertEqual(unexpected_skipped, set(['test1', 'test2', 'test3']))
     self.assertEqual(collection.disabled_tests(),
                      set(['test4', 'test5', 'test6']))
Example #17
 def test_append_crash_message(self):
     """Tests append_crash_message."""
     collection = ResultCollection(test_results=[PASSED_RESULT])
     collection.append_crash_message('Crash message 1.')
     self.assertEqual(collection.crash_message, 'Crash message 1.')
     collection.append_crash_message('Crash message 2.')
     self.assertEqual(collection.crash_message,
                      'Crash message 1.\nCrash message 2.')
Example #18
    def _list_of_failed_tests(actions_invocation_record, excluded=None):
        """Gets failed tests from xcresult root data.

    ActionsInvocationRecord is an object that contains the properties:
      + metadataRef: id of the record, which can be fetched with
        `xcresult get --path xcresult --id metadataRef`
      + metrics: numbers of run and failed tests.
      + issues: contains TestFailureIssueSummary in case of failure; otherwise
        it contains just the declaration of the `issues` node.
      + actions: a list of ActionRecord.

    Args:
      actions_invocation_record: The output of `xcresult get --path xcresult`.
      excluded: A set of tests that will be excluded.

    Returns:
      test_results.ResultCollection: Results of failed tests.
    """
        excluded = excluded or set()
        result = ResultCollection()
        if 'testFailureSummaries' not in actions_invocation_record['issues']:
            return result
        for failure_summary in actions_invocation_record['issues'][
                'testFailureSummaries']['_values']:
            test_case_id = format_test_case(
                failure_summary['testCaseName']['_value'])
            if test_case_id in excluded:
                continue
            error_line = _sanitize_str(
                failure_summary['documentLocationInCreatingWorkspace']['url']
                ['_value'])
            fail_message = error_line + '\n' + _sanitize_str(
                failure_summary['message']['_value'])
            result.add_test_result(
                TestResult(test_case_id,
                           TestStatus.FAIL,
                           test_log=fail_message))
        return result
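
A hand-written sketch (assumed shape, not real xcresult output) of the
fragment of actions_invocation_record that this method reads:

actions_invocation_record = {
    'issues': {
        'testFailureSummaries': {
            '_values': [{
                'testCaseName': {'_value': '-[SomeSuite someTest]'},  # made up
                'documentLocationInCreatingWorkspace': {
                    'url': {'_value': 'file:///some/file.mm#line=42'},
                },
                'message': {'_value': 'Assertion failed.'},
            }],
        },
    },
}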
Example #19
 def testLaunchCommand_notRestartPassedTest(self, mock_collect_results):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     collection = ResultCollection(test_results=[
         TestResult('Class1/passedTest1', TestStatus.PASS),
         TestResult('Class1/passedTest2', TestStatus.PASS)
     ])
     mock_collect_results.side_effect = [collection]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3)
     launch_command.launch()
     xcodebuild_runner.LaunchCommand(egtests,
                                     _DESTINATION,
                                     shards=1,
                                     retries=3)
     self.assertEqual(1, len(mock_collect_results.mock_calls))
Example #20
    def launch(self):
        """Entrance to launch tests in this runner."""
        success, log = self._launch_variations_smoke_test()

        test_status = TestStatus.PASS if success else TestStatus.FAIL
        # Report a single test named |VariationsSmokeTest| as part of runner output.
        overall_result = ResultCollection(test_results=[
            TestResult('VariationsSmokeTest', test_status, test_log=log)
        ])
        overall_result.report_to_result_sink()
        self.test_results = overall_result.standard_json_output(
            path_delimiter='/')
        self.logs.update(overall_result.test_runner_logs())
        self.tear_down()

        return success
Example #21
 def test_disabled_reported(self, mock_test_app, mock_result, _):
     """Tests launch method in DeviceXcodeTestRunner"""
     test_app = mock_test_app.return_value
     test_app.test_app_path = _EGTESTS_APP_PATH
     test_app.disabled_tests = ['Class2/disabled_test3']
     test_app.get_all_tests.return_value = [
         'Class1/passedTest1', 'Class1/passedTest2'
     ]
     mock_result.return_value = ResultCollection(test_results=[
         TestResult('Class1/passedTest1', TestStatus.PASS),
         TestResult('Class1/passedTest2', TestStatus.PASS)
     ])
     tr = xcodebuild_runner.DeviceXcodeTestRunner("fake-app-path",
                                                  "fake-host-app-path",
                                                  "fake-out-dir")
     self.assertTrue(tr.launch())
     self.assertEqual(len(tr.test_results['tests']), 3)
     tests = tr.test_results['tests']
     self.assertEqual(tests['Class1/passedTest1']['actual'], 'PASS')
     self.assertEqual(tests['Class1/passedTest2']['actual'], 'PASS')
     self.assertEqual(tests['Class2/disabled_test3']['actual'], 'SKIP')
     self.assertEqual(tests['Class2/disabled_test3']['expected'], 'SKIP')
Example #22
def parse_passed_failed_tests_for_interrupted_run(output):
    """Parses xcode runner output to get passed & failed tests.

  Args:
    output: [str] An output of test run.

  Returns:
    test_result_util.ResultCollection: Results of tests parsed.
  """
    result = ResultCollection()
    passed_tests = []
    failed_tests = []
    # Test lines have the format:
    # [09:04:42:INFO] Test case '-[Test_class test_method]' passed.
    # [09:04:42:INFO] Test Case '-[Test_class test_method]' failed.
    passed_test_regex = re.compile(
        r'Test [Cc]ase \'\-\[(.+?)\s(.+?)\]\' passed')
    failed_test_regex = re.compile(
        r'Test [Cc]ase \'\-\[(.+?)\s(.+?)\]\' failed')

    def _find_list_of_tests(tests, regex):
        """Adds test names matched by regex to result list."""
        for test_line in output:
            m_test = regex.search(test_line)
            if m_test:
                tests.append('%s/%s' % (m_test.group(1), m_test.group(2)))

    _find_list_of_tests(passed_tests, passed_test_regex)
    _find_list_of_tests(failed_tests, failed_test_regex)
    result.add_test_names_status(passed_tests, TestStatus.PASS)
    result.add_test_names_status(
        failed_tests,
        TestStatus.FAIL,
        test_log='Test failed in interrupted(timedout) run.')

    LOGGER.info('%d passed tests for interrupted build.' % len(passed_tests))
    LOGGER.info('%d failed tests for interrupted build.' % len(failed_tests))
    return result
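
A short usage sketch (the log lines and test names are made up; passed_tests()
and failed_tests() are the ResultCollection accessors exercised in the test
examples below):

log_lines = [
    "[09:04:42:INFO] Test case '-[LoginTest testPass]' passed.",
    "[09:04:42:INFO] Test Case '-[LoginTest testFail]' failed.",
]
collection = parse_passed_failed_tests_for_interrupted_run(log_lines)
assert collection.passed_tests() == {'LoginTest/testPass'}
assert collection.failed_tests() == {'LoginTest/testFail'}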
Example #23
    def __init__(self):
        # Test results from the parser.
        self._result_collection = ResultCollection()

        # State tracking for log parsing
        self.completed = False
        self._current_test = ''
        self._failure_description = []
        self._parsing_failures = False

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self._internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout',
        # 'warning'). Warning indicates that a test did not pass when run in
        # parallel with other tests but passed when run alone. The description is
        # a list of lines detailing the test's error, as reported in the log.
        self._test_status = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self._disabled_tests = 0

        # Disabled tests by parsing the compiled tests json file output from GTest.
        self._disabled_tests_from_compiled_tests_file = []
        self._flaky_tests = 0

        # Regular expressions for parsing GTest logs. Test names look like
        # "x.y", with 0 or more "w/" prefixes and 0 or more "/z" suffixes.
        # e.g.:
        #   SomeName/SomeTestCase.SomeTest/1
        #   SomeName/SomeTestCase/1.SomeTest
        #   SomeName/SomeTestCase/1.SomeTest/SomeModifier
        test_name_regexp = r'((\w+/)*\w+\.\w+(/\w+)*)'

        self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
        self.master_name = ''

        self._test_name = re.compile(test_name_regexp)
        self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
        self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
        self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
        self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
        self._test_skipped = re.compile(r'\[\s+SKIPPED\s+\] ' +
                                        test_name_regexp)
        self._run_test_cases_line = re.compile(
            r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
        self._test_timeout = re.compile(
            r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
        self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
        self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')

        self._retry_message = re.compile('RETRYING FAILED TESTS:')
        self.retrying_failed = False

        self._compiled_tests_file_path = re.compile(
            r'.*Wrote compiled tests to file: (\S+)')

        self.TEST_STATUS_MAP = {
            'OK': TEST_SUCCESS_LABEL,
            'failed': TEST_FAILURE_LABEL,
            'skipped': TEST_SKIPPED_LABEL,
            'timeout': TEST_TIMEOUT_LABEL,
            'warning': TEST_WARNING_LABEL
        }
Example #24
class GTestLogParser(object):
    """This helper class process GTest test output."""
    def __init__(self):
        # Test results from the parser.
        self._result_collection = ResultCollection()

        # State tracking for log parsing
        self.completed = False
        self._current_test = ''
        self._failure_description = []
        self._parsing_failures = False

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self._internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout',
        # 'warning'). Warning indicates that a test did not pass when run in
        # parallel with other tests but passed when run alone. The description is
        # a list of lines detailing the test's error, as reported in the log.
        self._test_status = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self._disabled_tests = 0

        # Disabled tests by parsing the compiled tests json file output from GTest.
        self._disabled_tests_from_compiled_tests_file = []
        self._flaky_tests = 0

        # Regular expressions for parsing GTest logs. Test names look like
        # "x.y", with 0 or more "w/" prefixes and 0 or more "/z" suffixes.
        # e.g.:
        #   SomeName/SomeTestCase.SomeTest/1
        #   SomeName/SomeTestCase/1.SomeTest
        #   SomeName/SomeTestCase/1.SomeTest/SomeModifier
        test_name_regexp = r'((\w+/)*\w+\.\w+(/\w+)*)'

        self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
        self.master_name = ''

        self._test_name = re.compile(test_name_regexp)
        self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
        self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
        self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
        self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
        self._test_skipped = re.compile(r'\[\s+SKIPPED\s+\] ' +
                                        test_name_regexp)
        self._run_test_cases_line = re.compile(
            r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
        self._test_timeout = re.compile(
            r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
        self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
        self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')

        self._retry_message = re.compile('RETRYING FAILED TESTS:')
        self.retrying_failed = False

        self._compiled_tests_file_path = re.compile(
            r'.*Wrote compiled tests to file: (\S+)')

        self.TEST_STATUS_MAP = {
            'OK': TEST_SUCCESS_LABEL,
            'failed': TEST_FAILURE_LABEL,
            'skipped': TEST_SKIPPED_LABEL,
            'timeout': TEST_TIMEOUT_LABEL,
            'warning': TEST_WARNING_LABEL
        }

    def GetCurrentTest(self):
        return self._current_test

    def GetResultCollection(self):
        return self._result_collection

    def _StatusOfTest(self, test):
        """Returns the status code for the given test, or 'not known'."""
        test_status = self._test_status.get(test, ('not known', []))
        return test_status[0]

    def _TestsByStatus(self, status, include_fails, include_flaky):
        """Returns list of tests with the given status.

    Args:
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
        test_list = [
            x[0] for x in self._test_status.items()
            if self._StatusOfTest(x[0]) == status
        ]

        if not include_fails:
            test_list = [x for x in test_list if x.find('FAILS_') == -1]
        if not include_flaky:
            test_list = [x for x in test_list if x.find('FLAKY_') == -1]

        return test_list

    def _RecordError(self, line, reason):
        """Record a log line that produced a parsing error.

    Args:
      line: text of the line at which the error occurred
      reason: a string describing the error
    """
        self._internal_error_lines.append(
            '%s: %s [%s]' % (self._line_number, line.strip(), reason))

    def RunningTests(self):
        """Returns list of tests that appear to be currently running."""
        return self._TestsByStatus('started', True, True)

    def ParsingErrors(self):
        """Returns a list of lines that have caused parsing errors."""
        return self._internal_error_lines

    def ClearParsingErrors(self):
        """Clears the currently stored parsing errors."""
        self._internal_error_lines = ['Cleared.']

    def PassedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that passed."""
        return self._TestsByStatus('OK', include_fails, include_flaky)

    def FailedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.

    """
        return (self._TestsByStatus('failed', include_fails, include_flaky) +
                self._TestsByStatus('timeout', True, True) +
                self._TestsByStatus('warning', include_fails, include_flaky) +
                self.RunningTests())

    def SkippedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that were skipped"""
        return self._TestsByStatus('skipped', include_fails, include_flaky)

    def TriesForTest(self, test):
        """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries, so a single result is returned."""
        return [
            self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)
        ]

    def DisabledTests(self):
        """Returns the name of the disabled test (if there is only 1) or the number
    of disabled tests.
    """
        return self._disabled_tests

    def DisabledTestsFromCompiledTestsFile(self):
        """Returns the list of disabled tests in format '{TestCaseName}/{TestName}'.

       Finds all test names starting with DISABLED_ in the compiled tests json
       file, if one exists. If it doesn't, or parsing fails, returns an empty
       list.
    """
        return self._disabled_tests_from_compiled_tests_file

    def FlakyTests(self):
        """Returns the name of the flaky test (if there is only 1) or the number
    of flaky tests.
    """
        return self._flaky_tests

    def FailureDescription(self, test):
        """Returns a list containing the failure description for the given test.

    If the test didn't fail or timeout, returns [].
    """
        test_status = self._test_status.get(test, ('', []))
        return ['%s: ' % test] + test_status[1]

    def CompletedWithoutFailure(self):
        """Returns True if all tests completed and no tests failed unexpectedly."""
        return self.completed and not self.FailedTests()

    def Finalize(self):
        """Finalize for |self._result_collection|.

    Called at the end to add unfinished tests and crash status for
        self._result_collection.
    """
        for test in self.RunningTests():
            self._result_collection.add_test_result(
                TestResult(test,
                           TestStatus.CRASH,
                           test_log='Did not complete.'))
            self._result_collection.crashed = True

        if not self.completed:
            self._result_collection.crashed = True

    def ProcessLine(self, line):
        """This is called once with each line of the test log."""
        # Track line number for error messages.
        self._line_number += 1

        # Some tests (net_unittests in particular) run subprocesses which can
        # write to the shared stdout buffer. Sometimes such output appears
        # between a newline and gtest directives ('[  RUN  ]', etc.), which
        # breaks the parser. The code below tries to detect such cases and
        # treats a mixed line as two separate lines.

        # List of regexps that the parser expects to find at the start of a
        # line but which can appear somewhere in the middle.
        gtest_regexps = [
            self._test_start,
            self._test_ok,
            self._test_fail,
            self._test_passed,
            self._test_skipped,
        ]

        for regexp in gtest_regexps:
            match = regexp.search(line)
            if match:
                break

        if not match or match.start() == 0:
            self._ProcessLine(line)
        else:
            self._ProcessLine(line[:match.start()])
            self._ProcessLine(line[match.start():])

    def _ProcessLine(self, line):
        """Parses the line and changes the state of parsed tests accordingly.

    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """
        # Note: When sharding, the number of disabled and flaky tests will be read
        # multiple times, so this will only show the most recent values (but they
        # should all be the same anyway).

        # Is it a line listing the master name?
        if not self.master_name:
            results = self._master_name_re.match(line)
            if results:
                self.master_name = results.group(1)

        results = self._run_test_cases_line.match(line)
        if results:
            # A run_test_cases.py output.
            if self._current_test:
                if self._test_status[self._current_test][0] == 'started':
                    self._test_status[self._current_test] = (
                        'timeout', self._failure_description)
                    self._result_collection.add_test_result(
                        TestResult(self._current_test,
                                   TestStatus.ABORT,
                                   test_log='\n'.join(
                                       self._failure_description)))
            self._current_test = ''
            self._failure_description = []
            return

        # Is it a line declaring all tests passed?
        results = self._test_passed.match(line)
        if results:
            self.completed = True
            self._current_test = ''
            return

        # Is it a line reporting disabled tests?
        results = self._disabled.match(line)
        if results:
            try:
                disabled = int(results.group(1))
            except ValueError:
                disabled = 0
            if disabled > 0 and isinstance(self._disabled_tests, int):
                self._disabled_tests = disabled
            else:
                # If we can't parse the line, at least give a heads-up. This is a
                # safety net for a case that shouldn't happen but isn't a fatal error.
                self._disabled_tests = 'some'
            return

        # Is it a line reporting flaky tests?
        results = self._flaky.match(line)
        if results:
            try:
                flaky = int(results.group(1))
            except ValueError:
                flaky = 0
            if flaky > 0 and isinstance(self._flaky_tests, int):
                self._flaky_tests = flaky
            else:
                # If we can't parse the line, at least give a heads-up. This is a
                # safety net for a case that shouldn't happen but isn't a fatal error.
                self._flaky_tests = 'some'
            return

        # Is it the start of a test?
        results = self._test_start.match(line)
        if results:
            if self._current_test:
                if self._test_status[self._current_test][0] == 'started':
                    self._test_status[self._current_test] = (
                        'timeout', self._failure_description)
                    self._result_collection.add_test_result(
                        TestResult(self._current_test,
                                   TestStatus.ABORT,
                                   test_log='\n'.join(
                                       self._failure_description)))
            test_name = results.group(1)
            self._test_status[test_name] = ('started', ['Did not complete.'])
            self._current_test = test_name
            if self.retrying_failed:
                self._failure_description = self._test_status[test_name][1]
                self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
            else:
                self._failure_description = []
            return

        # Is it a test success line?
        results = self._test_ok.match(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status != 'started':
                self._RecordError(line, 'success while in status %s' % status)
            if self.retrying_failed:
                self._test_status[test_name] = ('warning',
                                                self._failure_description)
                # This is a passed result. Previous failures were reported in separate
                # TestResult objects.
                self._result_collection.add_test_result(
                    TestResult(test_name,
                               TestStatus.PASS,
                               test_log='\n'.join(self._failure_description)))
            else:
                self._test_status[test_name] = ('OK', [])
                self._result_collection.add_test_result(
                    TestResult(test_name, TestStatus.PASS))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test skipped line?
        results = self._test_skipped.match(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            # Skipped tests are listed again in the summary.
            if status not in ('started', 'skipped'):
                self._RecordError(line, 'skipped while in status %s' % status)
            self._test_status[test_name] = ('skipped', [])
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.SKIP,
                           expected_status=TestStatus.SKIP,
                           test_log='Test skipped when running suite.'))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test failure line?
        results = self._test_fail.match(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed', 'timeout'):
                self._RecordError(line, 'failure while in status %s' % status)
            if self._current_test != test_name:
                if self._current_test:
                    self._RecordError(
                        line, '%s failure while in test %s' %
                        (test_name, self._current_test))
                return
            # Don't overwrite the failure description when a failing test is listed a
            # second time in the summary, or if it was already recorded as timing
            # out.
            if status not in ('failed', 'timeout'):
                self._test_status[test_name] = ('failed',
                                                self._failure_description)
            # Add to |test_results| regardless of whether the test ran before.
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.FAIL,
                           test_log='\n'.join(self._failure_description)))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test timeout line?
        results = self._test_timeout.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed'):
                self._RecordError(line, 'timeout while in status %s' % status)
            self._test_status[test_name] = ('timeout',
                                            self._failure_description +
                                            ['Killed (timed out).'])
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.ABORT,
                           test_log='\n'.join(self._failure_description)))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the start of the retry tests?
        results = self._retry_message.match(line)
        if results:
            self.retrying_failed = True
            return

        # Is it the line containing path to the compiled tests json file?
        results = self._compiled_tests_file_path.match(line)
        if results:
            path = results.group(1)
            LOGGER.info('Compiled tests json file path: %s' % path)
            try:
                # TODO(crbug.com/1091345): Read the file when running on device.
                with open(path) as f:
                    disabled_tests_from_json = []
                    compiled_tests = json.load(f)
                    for single_test in compiled_tests:
                        test_case_name = single_test.get('test_case_name')
                        test_name = single_test.get('test_name')
                        if test_case_name and test_name and test_name.startswith(
                                'DISABLED_'):
                            full_test_name = str('%s/%s' %
                                                 (test_case_name, test_name))
                            disabled_tests_from_json.append(full_test_name)
                            self._result_collection.add_test_result(
                                TestResult(test_name,
                                           TestStatus.SKIP,
                                           expected_status=TestStatus.SKIP,
                                           test_log='Test disabled.'))
                    self._disabled_tests_from_compiled_tests_file = (
                        disabled_tests_from_json)
            except Exception as e:
                LOGGER.warning(
                    'Error when finding disabled tests in compiled tests json file: %s'
                    % e)
            return

        # Random line: if we're in a test, collect it for the failure description.
        # Tests may run simultaneously, so this might be off, but it's worth a try.
        # This also won't work if a test times out before it begins running.
        if self._current_test:
            self._failure_description.append(line)

        # Parse the "Failing tests:" list at the end of the output, and add any
        # additional failed tests to the list. For example, this includes tests
        # that crash after the OK line.
        if self._parsing_failures:
            results = self._test_name.match(line)
            if results:
                test_name = results.group(1)
                status = self._StatusOfTest(test_name)
                if status in ('not known', 'OK'):
                    unknown_error_log = 'Unknown error, see stdio log.'
                    self._test_status[test_name] = ('failed',
                                                    [unknown_error_log])
                    self._result_collection.add_test_result(
                        TestResult(test_name,
                                   TestStatus.FAIL,
                                   test_log=unknown_error_log))
            else:
                self._parsing_failures = False
        elif line.startswith('Failing tests:'):
            self._parsing_failures = True
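
A usage sketch (assumed driver code with a made-up test name) showing the
intended flow: feed the log line by line, finalize, then query the parser:

parser = GTestLogParser()
for line in [
    '[ RUN      ] SomeSuite.someTest',
    '[       OK ] SomeSuite.someTest (5 ms)',
    '[  PASSED  ] 1 test.',
]:
    parser.ProcessLine(line)
parser.Finalize()
assert parser.PassedTests() == ['SomeSuite.someTest']
assert parser.CompletedWithoutFailure()
result_collection = parser.GetResultCollection()  # a ResultCollection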
Example #25
class ResultCollectionTest(test_runner_test.TestCase):
    """Tests ResultCollection class APIs."""
    def setUp(self):
        super(ResultCollectionTest, self).setUp()
        self.full_collection = ResultCollection(test_results=[
            PASSED_RESULT, FAILED_RESULT, FAILED_RESULT_DUPLICATE,
            DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT, CRASHED_RESULT,
            FLAKY_PASS_RESULT, FLAKY_FAIL_RESULT, ABORTED_RESULT
        ])

    def test_init(self):
        """Tests class initialization."""
        collection = ResultCollection(test_results=[
            PASSED_RESULT, DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT
        ],
                                      crashed=True)
        self.assertTrue(collection.crashed)
        self.assertEqual(collection.crash_message, '')
        self.assertEqual(
            collection.test_results,
            [PASSED_RESULT, DISABLED_RESULT, UNEXPECTED_SKIPPED_RESULT])

    def test_add_result(self):
        """Tests add_test_result."""
        collection = ResultCollection(test_results=[FAILED_RESULT])
        collection.add_test_result(DISABLED_RESULT)
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, DISABLED_RESULT])

    def test_add_result_collection_default(self):
        """Tests add_result_collection default (merge crash info)."""
        collection = ResultCollection(test_results=[FAILED_RESULT])
        self.assertFalse(collection.crashed)
        collection.append_crash_message('Crash1')

        crashed_collection = ResultCollection(test_results=[PASSED_RESULT],
                                              crashed=True)
        crashed_collection.append_crash_message('Crash2')

        collection.add_result_collection(crashed_collection)
        self.assertTrue(collection.crashed)
        self.assertEqual(collection.crash_message, 'Crash1\nCrash2')
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, PASSED_RESULT])

    def test_add_result_collection_overwrite(self):
        """Tests add_result_collection overwrite."""
        collection = ResultCollection(test_results=[FAILED_RESULT],
                                      crashed=True)
        self.assertTrue(collection.crashed)
        collection.append_crash_message('Crash1')

        crashed_collection = ResultCollection(test_results=[PASSED_RESULT])

        collection.add_result_collection(crashed_collection,
                                         overwrite_crash=True)
        self.assertFalse(collection.crashed)
        self.assertEqual(collection.crash_message, '')
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, PASSED_RESULT])

    def test_add_result_collection_ignore(self):
        """Tests add_result_collection overwrite."""
        collection = ResultCollection(test_results=[FAILED_RESULT])
        self.assertFalse(collection.crashed)

        crashed_collection = ResultCollection(test_results=[PASSED_RESULT],
                                              crashed=True)
        crashed_collection.append_crash_message('Crash2')

        collection.add_result_collection(crashed_collection, ignore_crash=True)
        self.assertFalse(collection.crashed)
        self.assertEqual(collection.crash_message, '')
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, PASSED_RESULT])

    def test_add_results(self):
        """Tests add_results."""
        collection = ResultCollection(test_results=[PASSED_RESULT])
        collection.add_results([FAILED_RESULT, DISABLED_RESULT])
        self.assertEqual(collection.test_results,
                         [PASSED_RESULT, FAILED_RESULT, DISABLED_RESULT])

    def test_add_name_prefix_to_tests(self):
        """Tests add_name_prefix_to_tests."""
        passed = copy.copy(PASSED_RESULT)
        disabled = copy.copy(DISABLED_RESULT)
        collection = ResultCollection(test_results=[passed, disabled])
        some_prefix = 'Some/prefix'
        collection.add_name_prefix_to_tests(some_prefix)
        for test_result in collection.test_results:
            self.assertTrue(test_result.name.startswith(some_prefix))

    def test_add_test_names_status(self):
        """Tests add_test_names_status."""
        test_names = ['test1', 'test2', 'test3']
        collection = ResultCollection(test_results=[PASSED_RESULT])
        collection.add_test_names_status(test_names, TestStatus.SKIP)
        disabled_test_names = ['test4', 'test5', 'test6']
        collection.add_test_names_status(disabled_test_names,
                                         TestStatus.SKIP,
                                         expected_status=TestStatus.SKIP)
        self.assertEqual(collection.test_results[0], PASSED_RESULT)
        unexpected_skipped = collection.tests_by_expression(
            lambda t: not t.expected() and t.status == TestStatus.SKIP)
        self.assertEqual(unexpected_skipped, set(['test1', 'test2', 'test3']))
        self.assertEqual(collection.disabled_tests(),
                         set(['test4', 'test5', 'test6']))

    @mock.patch('test_result_util.TestResult.report_to_result_sink')
    @mock.patch('result_sink_util.ResultSinkClient.close')
    @mock.patch('result_sink_util.ResultSinkClient.__init__',
                return_value=None)
    def test_add_and_report_test_names_status(self, mock_sink_init,
                                              mock_sink_close, mock_report):
        """Tests add_test_names_status."""
        test_names = ['test1', 'test2', 'test3']
        collection = ResultCollection(test_results=[PASSED_RESULT])
        collection.add_and_report_test_names_status(test_names,
                                                    TestStatus.SKIP)
        self.assertEqual(collection.test_results[0], PASSED_RESULT)
        unexpected_skipped = collection.tests_by_expression(
            lambda t: not t.expected() and t.status == TestStatus.SKIP)
        self.assertEqual(unexpected_skipped, set(['test1', 'test2', 'test3']))
        self.assertEqual(1, len(mock_sink_init.mock_calls))
        self.assertEqual(3, len(mock_report.mock_calls))
        self.assertEqual(1, len(mock_sink_close.mock_calls))

    def test_append_crash_message(self):
        """Tests append_crash_message."""
        collection = ResultCollection(test_results=[PASSED_RESULT])
        collection.append_crash_message('Crash message 1.')
        self.assertEqual(collection.crash_message, 'Crash message 1.')
        collection.append_crash_message('Crash message 2.')
        self.assertEqual(collection.crash_message,
                         'Crash message 1.\nCrash message 2.')

    def test_tests_by_expression(self):
        """Tests tests_by_expression."""
        collection = self.full_collection
        exp = lambda result: result.status == TestStatus.SKIP
        skipped_tests = collection.tests_by_expression(exp)
        self.assertEqual(skipped_tests,
                         set(['unexpected/skipped_test', 'disabled/test']))

    def test_get_specific_tests(self):
        """Tests getting sets of tests of specific status."""
        collection = self.full_collection
        self.assertEqual(
            collection.all_test_names(),
            set([
                'passed/test', 'disabled/test', 'failed/test',
                'unexpected/skipped_test', 'crashed/test', 'flaky/test',
                'aborted/test'
            ]))
        self.assertEqual(collection.crashed_tests(), set(['crashed/test']))
        self.assertEqual(collection.disabled_tests(), set(['disabled/test']))
        self.assertEqual(collection.expected_tests(),
                         set(['passed/test', 'disabled/test', 'flaky/test']))
        self.assertEqual(
            collection.unexpected_tests(),
            set([
                'failed/test', 'unexpected/skipped_test', 'crashed/test',
                'flaky/test', 'aborted/test'
            ]))
        self.assertEqual(collection.passed_tests(),
                         set(['passed/test', 'flaky/test']))
        self.assertEqual(collection.failed_tests(),
                         set(['failed/test', 'flaky/test']))
        self.assertEqual(collection.flaky_tests(), set(['flaky/test']))
        self.assertEqual(
            collection.never_expected_tests(),
            set([
                'failed/test', 'unexpected/skipped_test', 'crashed/test',
                'aborted/test'
            ]))
        self.assertEqual(collection.pure_expected_tests(),
                         set(['passed/test', 'disabled/test']))

    @mock.patch('test_result_util.TestResult.report_to_result_sink')
    @mock.patch('result_sink_util.ResultSinkClient.close')
    @mock.patch('result_sink_util.ResultSinkClient.__init__',
                return_value=None)
    def test_add_and_report_crash(self, mock_sink_init, mock_sink_close,
                                  mock_report):
        """Tests add_and_report_crash."""
        collection = copy.copy(self.full_collection)
        self.assertFalse('BUILD_INTERRUPTED' in collection.crashed_tests())

        collection.add_and_report_crash('Prefix Line')
        self.assertEqual(collection.crash_message, 'Prefix Line\n')
        self.assertTrue('BUILD_INTERRUPTED' in collection.crashed_tests())

        mock_sink_init.assert_called_once()
        mock_report.assert_called_once()
        mock_sink_close.assert_called_once()

    @mock.patch('test_result_util.TestResult.report_to_result_sink')
    @mock.patch('result_sink_util.ResultSinkClient.close')
    @mock.patch('result_sink_util.ResultSinkClient.__init__',
                return_value=None)
    def test_report_to_result_sink(self, mock_sink_init, mock_sink_close,
                                   mock_report):
        """Tests report_to_result_sink."""
        collection = copy.copy(self.full_collection)
        collection.report_to_result_sink()

        mock_sink_init.assert_called_once()
        self.assertEqual(len(collection.test_results),
                         len(mock_report.mock_calls))
        mock_sink_close.assert_called()

    @mock.patch('shard_util.shard_index', return_value=0)
    @mock.patch('time.time', return_value=10000)
    def test_standard_json_output(self, *args):
        """Tests standard_json_output."""
        passed_test_value = {
            'expected': 'PASS',
            'actual': 'PASS',
            'shard': 0,
            'is_unexpected': False
        }
        failed_test_value = {
            'expected': 'PASS',
            'actual': 'FAIL FAIL',
            'shard': 0,
            'is_unexpected': True
        }
        disabled_test_value = {
            'expected': 'SKIP',
            'actual': 'SKIP',
            'shard': 0,
            'is_unexpected': False
        }
        unexpected_skip_test_value = {
            'expected': 'PASS',
            'actual': 'SKIP',
            'shard': 0,
            'is_unexpected': True
        }
        crashed_test_value = {
            'expected': 'PASS',
            'actual': 'CRASH',
            'shard': 0,
            'is_unexpected': True
        }
        flaky_test_value = {
            'expected': 'PASS',
            'actual': 'PASS FAIL',
            'shard': 0,
            'is_unexpected': False,
            'is_flaky': True
        }
        aborted_test_value = {
            'expected': 'PASS',
            'actual': 'TIMEOUT',
            'shard': 0,
            'is_unexpected': True
        }
        expected_tests = collections.OrderedDict()
        expected_tests['passed/test'] = passed_test_value
        expected_tests['failed/test'] = failed_test_value
        expected_tests['disabled/test'] = disabled_test_value
        expected_tests['unexpected/skipped_test'] = unexpected_skip_test_value
        expected_tests['crashed/test'] = crashed_test_value
        expected_tests['flaky/test'] = flaky_test_value
        expected_tests['aborted/test'] = aborted_test_value
        expected_num_failures_by_type = {
            'PASS': 2,
            'FAIL': 1,
            'CRASH': 1,
            'SKIP': 2,
            'TIMEOUT': 1
        }
        expected_json = {
            'version': 3,
            'path_delimiter': '/',
            'seconds_since_epoch': 10000,
            'interrupted': False,
            'num_failures_by_type': expected_num_failures_by_type,
            'tests': expected_tests
        }
        self.assertEqual(
            self.full_collection.standard_json_output(path_delimiter='/'),
            expected_json)
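
The dict above follows the Chromium "full results" JSON (version 3) layout, with tests keyed by name. A minimal sketch of consuming such a dict to pull out unexpectedly-failing tests (hypothetical helper; field names are taken from expected_json above, and the 'tests' keys are assumed flat rather than nested by path_delimiter):

def unexpected_test_names(results_json):
    # Yields names of tests whose actual result was not expected.
    for name, fields in results_json['tests'].items():
        if fields.get('is_unexpected'):
            yield name

# Against expected_json above this yields, in insertion order:
# ['failed/test', 'unexpected/skipped_test', 'crashed/test', 'aborted/test']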

    def test_test_runner_logs(self):
        """Test test_runner_logs."""
        expected_logs = collections.OrderedDict()
        expected_logs['passed tests'] = ['passed/test']
        expected_logs['disabled tests'] = ['disabled/test']
        flaky_logs = ['Failure log of attempt 1:', 'line1', 'line2']
        failed_logs = [
            'Failure log of attempt 1:', 'line1', 'line2',
            'Failure log of attempt 2:', 'line3', 'line4'
        ]
        no_logs = ['Failure log of attempt 1:', '']
        expected_logs['flaked tests'] = {'flaky/test': flaky_logs}
        expected_logs['failed tests'] = {
            'failed/test': failed_logs,
            'crashed/test': no_logs,
            'unexpected/skipped_test': no_logs,
            'aborted/test': no_logs
        }
        expected_logs['failed/test'] = failed_logs
        expected_logs['unexpected/skipped_test'] = no_logs
        expected_logs['flaky/test'] = flaky_logs
        expected_logs['crashed/test'] = no_logs
        expected_logs['aborted/test'] = no_logs
        generated_logs = self.full_collection.test_runner_logs()
        keys = [
            'passed tests', 'disabled tests', 'flaked tests', 'failed tests',
            'failed/test', 'unexpected/skipped_test', 'flaky/test',
            'crashed/test', 'aborted/test'
        ]
        for key in keys:
            self.assertEqual(generated_logs[key], expected_logs[key])
Beispiel #26
0
 def test_add_result(self):
     """Tests add_test_result."""
     collection = ResultCollection(test_results=[FAILED_RESULT])
     collection.add_test_result(DISABLED_RESULT)
     self.assertEqual(collection.test_results,
                      [FAILED_RESULT, DISABLED_RESULT])
Beispiel #27
0
 def test_add_results(self):
     """Tests add_results."""
     collection = ResultCollection(test_results=[PASSED_RESULT])
     collection.add_results([FAILED_RESULT, DISABLED_RESULT])
     self.assertEqual(collection.test_results,
                      [PASSED_RESULT, FAILED_RESULT, DISABLED_RESULT])
Beispiel #28
0
    def launch(self):
        """Launches tests using xcodebuild."""
        overall_launch_command_result = ResultCollection()
        shards = self.shards
        running_tests = set(self.egtests_app.get_all_tests())
        # The total number of attempts is self.retries + 1.
        for attempt in range(self.retries + 1):
            # Erase all simulators before each attempt.
            if iossim_util.is_device_with_udid_simulator(self.udid):
                # Kill all running simulators to prevent possible memory leaks.
                test_runner.SimulatorTestRunner.kill_simulators()
                shutdown_all_simulators()
                shutdown_all_simulators(XTDEVICE_FOLDER)
                erase_all_simulators()
                erase_all_simulators(XTDEVICE_FOLDER)
            outdir_attempt = os.path.join(self.out_dir, 'attempt_%d' % attempt)
            cmd_list = self.egtests_app.command(outdir_attempt,
                                                'id=%s' % self.udid, shards)
            # TODO(crbug.com/914878): add heartbeat logging to xcodebuild_runner.
            LOGGER.info('Start test attempt #%d for command [%s]' %
                        (attempt, ' '.join(cmd_list)))
            output = self.launch_attempt(cmd_list)

            if hasattr(self, 'use_clang_coverage') and self.use_clang_coverage:
                # out_dir of LaunchCommand object is the TestRunner out_dir joined with
                # UDID. Use os.path.dirname to retrieve the TestRunner out_dir.
                file_util.move_raw_coverage_data(self.udid,
                                                 os.path.dirname(self.out_dir))

            result = self._log_parser.collect_test_results(
                outdir_attempt, output)

            tests_selected_at_runtime = _tests_decided_at_runtime(
                self.egtests_app.test_app_path)
            # For most suites, only keep the crash status from the last attempt,
            # since retries will cover any missing tests. For suites whose tests
            # are decided at runtime, retain crashes from all attempts; a dummy
            # "crashed" result will be reported to indicate that some tests may
            # never have run.
            # TODO(crbug.com/1235871): Switch back to excluded tests and set
            # |overall_crash| to always True.
            overall_launch_command_result.add_result_collection(
                result, overwrite_crash=not tests_selected_at_runtime)
            result.report_to_result_sink()

            tests_to_include = set()
            # |running_tests| is the set of compiled tests in the target,
            # intersected with the swarming shard. For some suites, it contains
            # more tests than actually need to run.
            if not tests_selected_at_runtime:
                tests_to_include = tests_to_include | (
                    running_tests -
                    overall_launch_command_result.expected_tests())
            # Add failed tests from previous rounds for runtime-decided suites
            # and device suites.
            tests_to_include = (
                tests_to_include
                | overall_launch_command_result.never_expected_tests())
            self.egtests_app.included_tests = list(tests_to_include)

            # Nothing left to run in a retry.
            if not self.egtests_app.included_tests:
                break

            # If tests did not complete (interrupted or never started) and there
            # are >= 20 remaining tests, re-run them with the same number of
            # shards; otherwise re-run with shards=1.
            if (not result.crashed
                    # If fewer than 20 tests need a re-run, 1 shard is enough.
                    or (len(running_tests) -
                        len(overall_launch_command_result.expected_tests()) <=
                        MAXIMUM_TESTS_PER_SHARD_FOR_RERUN)):
                shards = 1

        return overall_launch_command_result
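
The loop above is a retry-with-narrowing pattern: each attempt re-runs only tests that never reached an expected result, and it collapses to a single shard once the attempt didn't crash or few tests remain. A stripped-down sketch of that control flow, where run_attempt is a hypothetical stand-in for one xcodebuild invocation (not the actual API of this class):

def run_with_retries(all_tests, retries, shards, run_attempt,
                     max_rerun_per_shard=20):
    # run_attempt(tests, shards) -> (expected, never_expected, crashed);
    # hypothetical callable standing in for one xcodebuild attempt.
    expected_so_far = set()
    never_expected = set()
    for _ in range(retries + 1):
        remaining = (all_tests - expected_so_far) | never_expected
        if not remaining:
            break  # Nothing left to retry.
        expected, never_expected, crashed = run_attempt(remaining, shards)
        expected_so_far |= expected
        # Keep the shard count only when the attempt crashed with many
        # tests still outstanding; otherwise one shard is enough.
        if (not crashed
                or len(all_tests - expected_so_far) <= max_rerun_per_shard):
            shards = 1
    return expected_so_far, never_expected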
Beispiel #29
0
    def _get_test_statuses(xcresult):
        """Returns test results from xcresult.

        Also extracts and stores attachments for failed tests.

        Args:
          xcresult: (str) A path to xcresult.

        Returns:
          test_result.ResultCollection: Test results.
        """
        result = ResultCollection()
        # See TESTS_REF in xcode_log_parser_test.py for an example of |root|.
        root = json.loads(
            Xcode11LogParser._xcresulttool_get(xcresult, 'testsRef'))
        for summary in root['summaries']['_values'][0]['testableSummaries'][
                '_values']:
            if not summary['tests']:
                continue
            for test_suite in summary['tests']['_values'][0]['subtests'][
                    '_values'][0]['subtests']['_values']:
                if 'subtests' not in test_suite:
                    # Sometimes (if a crash occurs) the `subtests` node is
                    # missing. This happens only for failed tests, and a list
                    # of failures can be parsed from the root node.
                    continue
                for test in test_suite['subtests']['_values']:
                    test_name = _sanitize_str(test['identifier']['_value'])
                    if any(
                            test_name.endswith(suffix)
                            for suffix in SYSTEM_ERROR_TEST_NAME_SUFFIXES):
                        result.crashed = True
                        result.crash_message += 'System error in %s: %s\n' % (
                            xcresult, test_name)
                        continue
                    # If a test case was executed multiple times, there will be
                    # multiple |test| objects for it. Each |test| corresponds to
                    # one execution of the test case.
                    if test['testStatus']['_value'] == 'Success':
                        result.add_test_result(
                            TestResult(test_name, TestStatus.PASS))
                    else:
                        # Parse data for failed test by its id. See SINGLE_TEST_SUMMARY_REF
                        # in xcode_log_parser_test.py for an example of |summary_ref|.
                        summary_ref = json.loads(
                            Xcode11LogParser._xcresulttool_get(
                                xcresult, test['summaryRef']['id']['_value']))

                        failure_message = 'Logs from "failureSummaries" in .xcresult:\n'
                        # On rare occasions rootFailure doesn't have 'failureSummaries'.
                        for failure in summary_ref.get('failureSummaries',
                                                       {}).get('_values', []):
                            file_name = _sanitize_str(
                                failure.get('fileName', {}).get('_value', ''))
                            line_number = _sanitize_str(
                                failure.get('lineNumber',
                                            {}).get('_value', ''))
                            failure_location = 'file: %s, line: %s' % (
                                file_name, line_number)
                            failure_message += failure_location + '\n'
                            failure_message += _sanitize_str(
                                failure['message']['_value']) + '\n'

                        attachments = Xcode11LogParser._extract_artifacts_for_test(
                            test_name, summary_ref, xcresult)

                        result.add_test_result(
                            TestResult(test_name,
                                       TestStatus.FAIL,
                                       test_log=failure_message,
                                       attachments=attachments))
        return result
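
The snippet relies on Xcode11LogParser._xcresulttool_get, which is not shown here. A minimal sketch of what such a helper might look like, assuming the `xcrun xcresulttool get --format json` interface shipped with Xcode 11+ (the real helper may add error handling or retries):

import subprocess

def _xcresulttool_get(xcresult_path, ref_id=None):
    # Returns the JSON dump of an .xcresult bundle, or of one nested
    # object inside it when ref_id is given, as a string.
    cmd = ['xcrun', 'xcresulttool', 'get', '--format', 'json',
           '--path', xcresult_path]
    if ref_id is not None:
        # e.g. test['summaryRef']['id']['_value'] from the loop above.
        cmd += ['--id', ref_id]
    return subprocess.check_output(cmd).decode('utf-8')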
Beispiel #30
0
    def collect_test_results(output_path, output):
        """Gets XCTest results, diagnostic data & artifacts from xcresult.

        Args:
          output_path: (str) An output path passed in --resultBundlePath when
              running xcodebuild.
          output: [str] An output of the test run.

        Returns:
          test_result.ResultCollection: Test results.
        """
        output_path = _sanitize_str(output_path)
        output = _sanitize_str_list(output)
        LOGGER.info('Reading %s' % output_path)
        overall_collected_result = ResultCollection()

        # Xcodebuild writes staging data to the |output_path| folder during
        # test execution. If |output_path| doesn't exist, it means the tests
        # didn't start at all.
        if not os.path.exists(output_path):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message = (
                '%s with staging data does not exist.\n' % output_path +
                '\n'.join(output))
            return overall_collected_result

        # A run of `xcodebuild .. -resultBundlePath %output_path%` generates
        # the output_path folder, but Xcode 11+ generates both
        # `output_path.xcresult` and `output_path`, where
        # `output_path.xcresult` is a folder with results and `output_path` is
        # a symlink to the `output_path.xcresult` folder.
        # `xcresulttool` behaves differently with the folder vs. the symlink
        # on a laptop and on bots, so this code uses the .xcresult folder.
        xcresult = output_path + _XCRESULT_SUFFIX

        # The |output_path|.xcresult folder is created at the end of tests. If
        # the |output_path| folder exists but |output_path|.xcresult doesn't,
        # it means xcodebuild exited or was killed halfway through the tests.
        if not os.path.exists(xcresult):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message = (
                '%s with test results does not exist.\n' % xcresult +
                '\n'.join(output))
            overall_collected_result.add_result_collection(
                parse_passed_failed_tests_for_interrupted_run(output))
            return overall_collected_result

        # See XCRESULT_ROOT in xcode_log_parser_test.py for an example of |root|.
        root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult))
        metrics = root['metrics']
        # In case of a test crash, both the number of run tests and the number
        # of failed tests are 0.
        if (metrics.get('testsCount', {}).get('_value', 0) == 0
                and metrics.get('testsFailedCount', {}).get('_value', 0) == 0):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message = '0 tests executed!'
        else:
            overall_collected_result.add_result_collection(
                Xcode11LogParser._get_test_statuses(xcresult))
            # For some crashed tests, error info is contained only in the root
            # node.
            overall_collected_result.add_result_collection(
                Xcode11LogParser._list_of_failed_tests(
                    root, excluded=overall_collected_result.all_test_names()))
        Xcode11LogParser.export_diagnostic_data(output_path)
        # Remove the symlink file.
        if os.path.islink(output_path):
            os.unlink(output_path)
        file_util.zip_and_remove_folder(xcresult)
        return overall_collected_result
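
A hypothetical call site, tying this back to the launch() loop in the earlier snippet (the path and output lines are illustrative):

# Illustrative usage; in launch() above, `output` comes from launch_attempt().
output_lines = ['Test Case started.', '** TEST EXECUTE SUCCEEDED **']
result = Xcode11LogParser.collect_test_results('/tmp/out/attempt_0',
                                               output_lines)
if result.crashed:
    LOGGER.warning('Run crashed: %s' % result.crash_message)
result.report_to_result_sink()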