コード例 #1
0
    def test_add_result_collection_ignore(self):
        """Tests add_result_collection with ignore_crash=True.

        The incoming collection's crash status and crash message must be
        discarded, while its test results are still merged in.
        """
        collection = ResultCollection(test_results=[FAILED_RESULT])
        self.assertFalse(collection.crashed)

        crashed_collection = ResultCollection(test_results=[PASSED_RESULT],
                                              crashed=True)
        crashed_collection.append_crash_message('Crash2')

        # ignore_crash=True: receiver keeps its own (non-crashed) status and
        # empty crash message; only the test results are merged.
        collection.add_result_collection(crashed_collection, ignore_crash=True)
        self.assertFalse(collection.crashed)
        self.assertEqual(collection.crash_message, '')
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, PASSED_RESULT])
コード例 #2
0
    def test_add_result_collection_default(self):
        """Tests that add_result_collection merges crash info by default."""
        base = ResultCollection(test_results=[FAILED_RESULT])
        self.assertFalse(base.crashed)
        base.append_crash_message('Crash1')

        incoming = ResultCollection(
            test_results=[PASSED_RESULT], crashed=True)
        incoming.append_crash_message('Crash2')

        base.add_result_collection(incoming)

        # Default merge behavior: crash status is propagated and the crash
        # messages of both collections are joined with a newline.
        self.assertTrue(base.crashed)
        self.assertEqual(base.crash_message, 'Crash1\nCrash2')
        self.assertEqual(base.test_results, [FAILED_RESULT, PASSED_RESULT])
コード例 #3
0
    def collect_test_results(output_folder, output):
        """Gets XCtest result data from Info.plist and copies artifacts.

        Args:
          output_folder: (str) A path to output folder.
          output: [str] An output of test run.

        Returns:
          test_result.ResultCollection representing all test results.
        """
        output_folder = _sanitize_str(output_folder)
        output = _sanitize_str_list(output)
        overall_collected_result = ResultCollection()
        plist_path = os.path.join(output_folder, 'Info.plist')
        # A missing Info.plist means xcodebuild never wrote results, i.e. the
        # run was interrupted. Record a crash and salvage whatever pass/fail
        # information can be parsed out of the raw stdout.
        if not os.path.exists(plist_path):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message += (
                '%s with test results does not exist.\n' % plist_path +
                '\n'.join(output))
            overall_collected_result.add_result_collection(
                parse_passed_failed_tests_for_interrupted_run(output))
            return overall_collected_result

        # NOTE(review): plistlib.readPlist was deprecated in Python 3.4 and
        # removed in 3.9 — presumably this file still targets an older
        # interpreter; confirm before upgrading (plistlib.load is the
        # replacement).
        root = plistlib.readPlist(plist_path)

        for action in root['Actions']:
            action_result = action['ActionResult']
            # Zero run AND zero failed tests, or a missing TestSummaryPath,
            # indicates the suite crashed rather than producing results.
            if ((root['TestsCount'] == 0 and root['TestsFailedCount'] == 0)
                    or 'TestSummaryPath' not in action_result):
                overall_collected_result.crashed = True
                if ('ErrorSummaries' in action_result
                        and action_result['ErrorSummaries']):
                    overall_collected_result.crash_message = '\n'.join(
                        _sanitize_str_list([
                            error_summary['Message'] for error_summary in
                            action_result['ErrorSummaries']
                        ]))

            else:
                # TestSummaryPath is relative to Info.plist's directory.
                summary_plist = os.path.join(os.path.dirname(plist_path),
                                             action_result['TestSummaryPath'])
                overall_collected_result.add_result_collection(
                    XcodeLogParser._test_status_summary(summary_plist))

        XcodeLogParser._copy_screenshots(output_folder)
        return overall_collected_result
コード例 #4
0
    def collect_test_results(output_path, output):
        """Gets XCTest results, diagnostic data & artifacts from xcresult.

        Args:
          output_path: (str) An output path passed in --resultBundlePath when
              running xcodebuild.
          output: [str] An output of test run.

        Returns:
          test_result.ResultCollection: Test results.
        """
        output_path = _sanitize_str(output_path)
        output = _sanitize_str_list(output)
        LOGGER.info('Reading %s' % output_path)
        overall_collected_result = ResultCollection()

        # Xcodebuild writes staging data to |output_path| folder during test
        # execution. If |output_path| doesn't exist, it means tests didn't start at
        # all.
        if not os.path.exists(output_path):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message = (
                '%s with staging data does not exist.\n' % output_path +
                '\n'.join(output))
            return overall_collected_result

        # During a run `xcodebuild .. -resultBundlePath %output_path%`
        # that generates output_path folder,
        # but Xcode 11+ generates `output_path.xcresult` and `output_path`
        # where output_path.xcresult is a folder with results and `output_path`
        # is symlink to the `output_path.xcresult` folder.
        # `xcresulttool` with folder/symlink behaves in different way on laptop and
        # on bots. This piece of code uses .xcresult folder.
        xcresult = output_path + _XCRESULT_SUFFIX

        # |output_path|.xcresult folder is created at the end of tests. If
        # |output_path| folder exists but |output_path|.xcresult folder doesn't
        # exist, it means xcodebuild exited or was killed half way during tests.
        if not os.path.exists(xcresult):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message = (
                '%s with test results does not exist.\n' % xcresult +
                '\n'.join(output))
            # Recover pass/fail info for tests that did run from raw stdout.
            overall_collected_result.add_result_collection(
                parse_passed_failed_tests_for_interrupted_run(output))
            return overall_collected_result

        # See XCRESULT_ROOT in xcode_log_parser_test.py for an example of |root|.
        root = json.loads(Xcode11LogParser._xcresulttool_get(xcresult))
        metrics = root['metrics']
        # In case of test crash both numbers of run and failed tests are equal to 0.
        if (metrics.get('testsCount', {}).get('_value', 0) == 0
                and metrics.get('testsFailedCount', {}).get('_value', 0) == 0):
            overall_collected_result.crashed = True
            overall_collected_result.crash_message = '0 tests executed!'
        else:
            overall_collected_result.add_result_collection(
                Xcode11LogParser._get_test_statuses(xcresult))
            # For some crashed tests info about error contained only in root node.
            overall_collected_result.add_result_collection(
                Xcode11LogParser._list_of_failed_tests(
                    root, excluded=overall_collected_result.all_test_names()))
        Xcode11LogParser.export_diagnostic_data(output_path)
        # Remove the symbol link file.
        if os.path.islink(output_path):
            os.unlink(output_path)
        file_util.zip_and_remove_folder(xcresult)
        return overall_collected_result
コード例 #5
0
    def launch(self):
        """Launches tests using xcodebuild.

        Runs up to self.retries + 1 attempts, narrowing the set of tests to
        re-run between attempts based on accumulated results.

        Returns:
          ResultCollection aggregating results across all attempts.
        """
        overall_launch_command_result = ResultCollection()
        shards = self.shards
        running_tests = set(self.egtests_app.get_all_tests())
        # total number of attempts is self.retries+1
        for attempt in range(self.retries + 1):
            # Erase all simulators per each attempt
            if iossim_util.is_device_with_udid_simulator(self.udid):
                # kill all running simulators to prevent possible memory leaks
                test_runner.SimulatorTestRunner.kill_simulators()
                shutdown_all_simulators()
                shutdown_all_simulators(XTDEVICE_FOLDER)
                erase_all_simulators()
                erase_all_simulators(XTDEVICE_FOLDER)
            outdir_attempt = os.path.join(self.out_dir, 'attempt_%d' % attempt)
            cmd_list = self.egtests_app.command(outdir_attempt,
                                                'id=%s' % self.udid, shards)
            # TODO(crbug.com/914878): add heartbeat logging to xcodebuild_runner.
            LOGGER.info('Start test attempt #%d for command [%s]' %
                        (attempt, ' '.join(cmd_list)))
            output = self.launch_attempt(cmd_list)

            if hasattr(self, 'use_clang_coverage') and self.use_clang_coverage:
                # out_dir of LaunchCommand object is the TestRunner out_dir joined with
                # UDID. Use os.path.dirname to retrieve the TestRunner out_dir.
                file_util.move_raw_coverage_data(self.udid,
                                                 os.path.dirname(self.out_dir))

            result = self._log_parser.collect_test_results(
                outdir_attempt, output)

            tests_selected_at_runtime = _tests_decided_at_runtime(
                self.egtests_app.test_app_path)
            # For most suites, only keep crash status from last attempt since retries
            # will cover any missing tests. For these decided at runtime, retain
            # crashes from all attempts and a dummy "crashed" result will be reported
            # to indicate some tests might never ran.
            # TODO(crbug.com/1235871): Switch back to excluded tests and set
            # |overall_crash| to always True.
            overall_launch_command_result.add_result_collection(
                result, overwrite_crash=not tests_selected_at_runtime)
            result.report_to_result_sink()

            tests_to_include = set()
            # |running_tests| are compiled tests in target intersecting with swarming
            # sharding. For some suites, they are more than what's needed to run.
            if not tests_selected_at_runtime:
                tests_to_include = tests_to_include | (
                    running_tests -
                    overall_launch_command_result.expected_tests())
            # Add failed tests from last rounds for runtime decided suites and device
            # suites.
            tests_to_include = (
                tests_to_include
                | overall_launch_command_result.never_expected_tests())
            self.egtests_app.included_tests = list(tests_to_include)

            # Nothing to run in retry.
            if not self.egtests_app.included_tests:
                break

            # If tests are not completed(interrupted or did not start) and there are
            # >= 20 remaining tests, run them with the same number of shards.
            # otherwise re-run with shards=1.
            if (not result.crashed
                    # If need to re-run less than 20 tests, 1 shard should be enough.
                    or (len(running_tests) -
                        len(overall_launch_command_result.expected_tests()) <=
                        MAXIMUM_TESTS_PER_SHARD_FOR_RERUN)):
                shards = 1

        return overall_launch_command_result
コード例 #6
0
ファイル: test_runner.py プロジェクト: luojiguicai/chromium
    def launch(self):
        """Launches the test app.

        Runs the full suite once, retries an app-level startup crash once,
        resumes after mid-test crashes by excluding already-run tests, and
        finally retries individually each test that never produced an
        expected result.

        Returns:
          True if no test ended unexpectedly and the run was never left in a
          crashed state; False otherwise.

        Raises:
          AppLaunchError: If the app crashed on startup twice in a row.
        """
        self.set_up()
        # The overall ResultCorrection object holding all runs of all tests in the
        # runner run. It will be updated with each test application launch.
        overall_result = ResultCollection()
        destination = 'id=%s' % self.udid
        test_app = self.get_launch_test_app()
        out_dir = os.path.join(self.out_dir, 'TestResults')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        try:
            result = self._run(cmd=cmd, shards=self.shards or 1)
            if result.crashed and not result.crashed_tests():
                # If the app crashed but not during any particular test case, assume
                # it crashed on startup. Try one more time.
                self.shutdown_and_restart()
                LOGGER.warning('Crashed on startup, retrying...\n')
                out_dir = os.path.join(self.out_dir,
                                       'retry_after_crash_on_startup')
                cmd = self.get_launch_command(test_app, out_dir, destination,
                                              self.shards)
                result = self._run(cmd)

            result.report_to_result_sink()

            # A second startup crash is not recoverable.
            if result.crashed and not result.crashed_tests():
                raise AppLaunchError

            overall_result.add_result_collection(result)

            try:
                while result.crashed and result.crashed_tests():
                    # If the app crashes during a specific test case, then resume at the
                    # next test case. This is achieved by filtering out every test case
                    # which has already run.
                    LOGGER.warning('Crashed during %s, resuming...\n',
                                   list(result.crashed_tests()))
                    test_app.excluded_tests = list(
                        overall_result.all_test_names())
                    retry_out_dir = os.path.join(
                        self.out_dir,
                        'retry_after_crash_%d' % int(time.time()))
                    result = self._run(
                        self.get_launch_command(
                            test_app,
                            os.path.join(retry_out_dir, str(int(time.time()))),
                            destination))
                    result.report_to_result_sink()
                    # Only keep the last crash status in crash retries in overall crash
                    # status.
                    overall_result.add_result_collection(result,
                                                         overwrite_crash=True)

            except OSError as e:
                # E2BIG: the exclusion list grew past the OS argument-size
                # limit, so resuming is no longer possible.
                if e.errno == errno.E2BIG:
                    LOGGER.error('Too many test cases to resume.')
                else:
                    raise

            # Retry failed test cases.
            test_app.excluded_tests = []
            never_expected_tests = overall_result.never_expected_tests()
            if self.retries and never_expected_tests:
                LOGGER.warning('%s tests failed and will be retried.\n',
                               len(never_expected_tests))
                # Use range, not Python-2-only xrange, so this works on
                # Python 3 (xrange raises NameError there).
                for i in range(self.retries):
                    tests_to_retry = list(
                        overall_result.never_expected_tests())
                    for test in tests_to_retry:
                        LOGGER.info('Retry #%s for %s.\n', i + 1, test)
                        test_app.included_tests = [test]
                        retry_out_dir = os.path.join(self.out_dir,
                                                     test + '_failed',
                                                     'retry_%d' % i)
                        retry_result = self._run(
                            self.get_launch_command(test_app, retry_out_dir,
                                                    destination))

                        # A retry that produced no results at all is recorded
                        # as a SKIP so the test still appears in the output.
                        if not retry_result.all_test_names():
                            retry_result.add_test_result(
                                TestResult(
                                    test,
                                    TestStatus.SKIP,
                                    test_log=
                                    'In single test retry, result of this test '
                                    'didn\'t appear in log.'))
                        retry_result.report_to_result_sink()
                        # No unknown tests might be skipped so do not change
                        # |overall_result|'s crash status.
                        overall_result.add_result_collection(retry_result,
                                                             ignore_crash=True)

            interrupted = overall_result.crashed

            if interrupted:
                overall_result.add_and_report_crash(
                    crash_message_prefix_line=
                    'Test application crashed when running '
                    'tests which might have caused some tests never ran or finished.'
                )

            self.test_results = overall_result.standard_json_output()
            self.logs.update(overall_result.test_runner_logs())

            return not overall_result.never_expected_tests(
            ) and not interrupted
        finally:
            self.tear_down()
コード例 #7
0
ファイル: wpr_runner.py プロジェクト: luojiguicai/chromium
    def _run(self, cmd, shards=1):
        """Runs the specified command, parsing GTest output.

        Args:
          cmd: List of strings forming the command to run.
          NOTE: in the case of WprProxySimulatorTestRunner, cmd
            is a dict forming the configuration for the test (including
            filter rules), and not indicative of the actual command
            we build and execute in _run.

        Returns:
          TestResult.ResultCollection() object.
        Raises:
          ShardingDisabledError: If shards > 1 as currently sharding is not
            supported.
          SystemAlertPresentError: If system alert is shown on the device.
        """
        overall_result = ResultCollection()
        if shards > 1:
            # TODO(crbug.com/881096): reimplement sharding in the future
            raise test_runner.ShardingDisabledError()

        # TODO(crbug.com/812705): Implement test sharding for unit tests.
        # TODO(crbug.com/812712): Use thread pool for DeviceTestRunner as well.

        # Create a simulator for these tests, and prepare it with the
        # certificate needed for HTTPS proxying.
        udid = self.getSimulator()

        self.copy_trusted_certificate()

        # Each *.test file under |replay_path| is one WPR recipe; run the
        # suite once per recipe that matches the filter in |cmd|.
        for recipe_path in glob.glob('{}/*.test'.format(self.replay_path)):
            base_name = os.path.basename(recipe_path)
            test_name = os.path.splitext(base_name)[0]
            replay_path = '{}/{}'.format(self.replay_path, test_name)

            if self.should_run_wpr_test(test_name, cmd['test_filter'],
                                        cmd['invert']):

                parser, returncode = self.run_wpr_test(udid, test_name,
                                                       recipe_path,
                                                       replay_path)
                recipe_result = parser.GetResultCollection()

                # If this test fails, immediately rerun it to see if it deflakes.
                # We simply overwrite the first result with the second.
                if recipe_result.never_expected_tests():
                    parser, returncode = self.run_wpr_test(
                        udid, test_name, recipe_path, replay_path)
                    recipe_result = parser.GetResultCollection()

                # All test names will be the same since we re-run the same suite;
                # therefore, to differentiate the results, we append the recipe
                # name to the test suite.
                recipe_result.add_name_prefix_to_tests(base_name + '.')
                overall_result.add_result_collection(recipe_result)

                # Check for runtime errors.
                if self.xctest_path and parser.SystemAlertPresent():
                    raise test_runner.SystemAlertPresentError()
                LOGGER.info('%s test returned %s\n', recipe_path, returncode)

        # Clean up the simulator created at the top of this method.
        self.deleteSimulator(udid)

        return overall_result