Example #1
    def execute(self, options, args, tool):
        self._tool = tool

        # TODO(qyearsley): Consider calling ensure_manifest in WebKitPatch.
        # See: crbug.com/698294
        WPTManifest.ensure_manifest(tool)

        if not self.check_ok_to_run():
            return 1

        jobs = self.git_cl().latest_try_jobs(self._try_bots())
        self._log_jobs(jobs)
        builders_with_no_jobs = self._try_bots() - {
            b.builder_name
            for b in jobs
        }

        if not options.trigger_jobs and not jobs:
            _log.info('Aborted: no try jobs and --no-trigger-jobs passed.')
            return 1

        if options.trigger_jobs and builders_with_no_jobs:
            self.trigger_try_jobs(builders_with_no_jobs)
            return 1

        jobs_to_results = self._fetch_results(jobs)

        builders_with_results = {b.builder_name for b in jobs_to_results}
        builders_without_results = set(
            self._try_bots()) - builders_with_results
        if builders_without_results:
            _log.info('There are some builders with no results:')
            self._log_builder_list(builders_without_results)

        if not options.fill_missing and builders_without_results:
            options.fill_missing = self._tool.user.confirm(
                'Would you like to try to fill in missing results with\n'
                'available results? This assumes that layout test results\n'
                'for the platforms with missing results are the same as\n'
                'results on other platforms.',
                default=self._tool.user.DEFAULT_NO)
            if not options.fill_missing:
                _log.info('Aborting.')
                return 1

        if args:
            test_baseline_set = self._make_test_baseline_set_for_tests(
                args, jobs_to_results)
        else:
            test_baseline_set = self._make_test_baseline_set(
                jobs_to_results, options.only_changed_tests)

        if options.fill_missing:
            self.fill_in_missing_results(test_baseline_set)

        _log.debug('Rebaselining: %s', test_baseline_set)

        if not options.dry_run:
            self.rebaseline(options, test_baseline_set)
        return 0
Example #2
    def _delete_orphaned_baselines(self):
        _log.info('Deleting any orphaned baselines.')

        is_baseline_filter = lambda fs, dirname, basename: is_testharness_baseline(
            basename)

        baselines = self.fs.files_under(self.dest_path,
                                        file_filter=is_baseline_filter)

        # TODO(qyearsley): Factor out the manifest path to a common location.
        # TODO(qyearsley): Factor out the manifest reading from here and Port
        # to WPTManifest.
        manifest_path = self.finder.path_from_layout_tests(
            'external', 'wpt', 'MANIFEST.json')
        manifest = WPTManifest(self.fs.read_text_file(manifest_path))
        wpt_urls = manifest.all_urls()

        # Currently baselines for tests with query strings are merged,
        # so that the tests foo.html?r=1 and foo.html?r=2 both have the same
        # baseline, foo-expected.txt.
        # TODO(qyearsley): Remove this when this behavior is fixed.
        wpt_urls = [url.split('?')[0] for url in wpt_urls]

        wpt_dir = self.finder.path_from_layout_tests('external', 'wpt')
        for full_path in baselines:
            rel_path = self.fs.relpath(full_path, wpt_dir)
            if not self._has_corresponding_test(rel_path, wpt_urls):
                self.fs.remove(full_path)
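Example #2 calls a _has_corresponding_test helper that is not shown in the snippet. A minimal, hypothetical sketch of what such a helper would need to do, assuming baselines follow the <test>-expected.txt naming convention and that wpt_urls holds manifest URLs rooted at the WPT directory (query strings already stripped), might look like this:

def has_corresponding_test(rel_path, wpt_urls):
    """Hypothetical sketch, not the real helper used in the snippet above.

    Maps a baseline such as 'foo/bar-expected.txt' back to candidate test
    URLs ('/foo/bar.html', '/foo/bar.htm', ...) and reports whether any of
    them appears in the manifest URL list.
    """
    if not rel_path.endswith('-expected.txt'):
        return True  # Not a testharness baseline; leave it alone.
    base = '/' + rel_path[:-len('-expected.txt')].replace('\\', '/')
    candidate_extensions = ('.html', '.htm', '.svg', '.xhtml')
    return any(base + ext in wpt_urls for ext in candidate_extensions)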
Example #3
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from webkitpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    # Need to generate MANIFEST.json since some expectations correspond to WPT
    # tests that aren't files and only exist in the manifest.
    _log.info('Generating MANIFEST.json for web-platform-tests ...')
    WPTManifest.ensure_manifest(host)

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #4
    def execute(self, options, args, tool):
        self._tool = tool

        # TODO(qyearsley): Move this call to somewhere else.
        WPTManifest.ensure_manifest(tool)

        unstaged_baselines = self.unstaged_baselines()
        if unstaged_baselines:
            _log.error('Aborting: there are unstaged baselines:')
            for path in unstaged_baselines:
                _log.error('  %s', path)
            return 1

        issue_number = self._get_issue_number()
        if issue_number is None:
            _log.error('No issue number for current branch.')
            return 1
        _log.debug('Issue number for current branch: %s', issue_number)

        builds = self.git_cl().latest_try_jobs(self._try_bots())

        builders_with_pending_builds = self.builders_with_pending_builds(builds)
        if builders_with_pending_builds:
            _log.info('There are existing pending builds for:')
            for builder in sorted(builders_with_pending_builds):
                _log.info('  %s', builder)
        builders_with_no_results = self.builders_with_no_results(builds)

        if options.trigger_jobs and builders_with_no_results:
            self.trigger_builds(builders_with_no_results)
            _log.info('Please re-run webkit-patch rebaseline-cl once all pending try jobs have finished.')
            return 1

        if builders_with_no_results:
            # TODO(qyearsley): Support trying to continue as long as there are
            # some results from some builder; see http://crbug.com/673966.
            _log.error('The following builders have no results:')
            for builder in builders_with_no_results:
                _log.error('  %s', builder)
            return 1

        _log.debug('Getting results for issue %d.', issue_number)
        builds_to_results = self._fetch_results(builds)
        if builds_to_results is None:
            return 1

        test_prefix_list = {}
        if args:
            for test in args:
                test_prefix_list[test] = builds
        else:
            test_prefix_list = self._test_prefix_list(
                builds_to_results,
                only_changed_tests=options.only_changed_tests)

        self._log_test_prefix_list(test_prefix_list)

        if not options.dry_run:
            self.rebaseline(options, test_prefix_list)
        return 0
Example #5
    def _generate_manifest(self):
        """Generates MANIFEST.json for imported tests.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, self.dest_path)
        manifest_path = self.fs.join(self.dest_path, 'MANIFEST.json')
        assert self.fs.exists(manifest_path)
        manifest_base_path = self.fs.normpath(
            self.fs.join(self.dest_path, '..', 'WPT_BASE_MANIFEST.json'))
        self.copyfile(manifest_path, manifest_base_path)
        self.chromium_git.add_list([manifest_base_path])
Example #6
    def _generate_manifest(self, dest_path):
        """Generates MANIFEST.json for imported tests.

        Args:
            dest_path: Path to the destination WPT directory.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        if 'css' in dest_path:
            # Do nothing for csswg-test.
            return
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, dest_path)
        self.run(['git', 'add', self.fs.join(dest_path, 'MANIFEST.json')])
Example #7
    def _ensure_manifest(self):
        fs = self._filesystem
        external_path = self._webkit_finder.path_from_webkit_base(
            'LayoutTests', 'external')
        wpt_path = fs.join(external_path, 'wpt')
        manifest_path = fs.join(external_path, 'wpt', 'MANIFEST.json')
        base_manifest_path = fs.join(external_path, 'WPT_BASE_MANIFEST.json')

        if not self._filesystem.exists(manifest_path):
            fs.copyfile(base_manifest_path, manifest_path)

        self._printer.write_update(
            'Generating MANIFEST.json for web-platform-tests ...')

        # TODO(jeffcarp): handle errors
        WPTManifest.generate_manifest(self._port.host, wpt_path)
Example #8
    def update_expectations(self):
        """Downloads text new baselines and adds test expectations lines.

        Returns:
            A pair: A set of tests that are rebaselined, and a dictionary
            mapping tests that couldn't be rebaselined to lists of expectation
            lines written to TestExpectations.
        """
        issue_number = self.get_issue_number()
        if issue_number == 'None':
            raise ScriptError('No issue on current branch.')

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            raise ScriptError('No try job information was collected.')

        # The manifest may be used below to check which tests are reference tests.
        WPTManifest.ensure_manifest(self.host)

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if job_status.result == 'SUCCESS':
                self.ports_with_all_pass.add(self.port_name(build))
            port_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations,
                                                 port_results)

        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        # At this point, test_expectations looks like: {
        #     'test-with-failing-result': {
        #         ('port-name1', 'port-name2'): SimpleTestResult,
        #         'port-name3': SimpleTestResult
        #     }
        # }

        rebaselined_tests, test_expectations = self.download_text_baselines(
            test_expectations)
        test_expectation_lines = self.create_line_dict(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return rebaselined_tests, test_expectation_lines
Example #9
    def test_ensure_manifest_copies_new_manifest(self):
        host = MockHost()
        manifest_path = '/mock-checkout/third_party/WebKit/LayoutTests/external/wpt/MANIFEST.json'

        self.assertFalse(host.filesystem.exists(manifest_path))
        WPTManifest.ensure_manifest(host)
        self.assertTrue(host.filesystem.exists(manifest_path))

        webkit_base = '/mock-checkout/third_party/WebKit'
        self.assertEqual(host.executive.calls, [[
            'python',
            webkit_base + '/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/wpt',
            'manifest',
            '--work',
            '--tests-root',
            webkit_base + '/LayoutTests/external/wpt',
        ]])
Example #10
    def run(self, args=None):
        """Downloads text new baselines and adds test expectations lines."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        issue_number = self.get_issue_number()
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            _log.error('No try job information was collected.')
            return 1

        # The manifest may be used below to check which tests are reference tests.
        WPTManifest.ensure_manifest(self.host)

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if job_status.result == 'SUCCESS':
                self.ports_with_all_pass.add(self.port_name(build))

            port_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations,
                                                 port_results)

        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        test_expectations = self.download_text_baselines(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0
Example #11
    def _generate_manifest(self, dest_path):
        """Generates MANIFEST.json for imported tests.

        Args:
            dest_path: Path to the destination WPT directory.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, dest_path)
        manifest_path = self.fs.join(dest_path, 'MANIFEST.json')
        assert self.fs.exists(manifest_path)
        manifest_base_path = self.fs.normpath(
            self.fs.join(dest_path, '..', 'WPT_BASE_MANIFEST.json'))
        self.copyfile(manifest_path, manifest_base_path)
        self.run(['git', 'add', manifest_base_path])
Example #12
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option(
        '--verbose',
        action='store_true',
        default=False,
        help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from webkitpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO,
                          stream=stderr,
                          include_time=False)

    try:
        # Need to generate MANIFEST.json since some expectations correspond to WPT
        # tests that aren't files and only exist in the manifest.
        _log.debug('Generating MANIFEST.json for web-platform-tests ...')
        WPTManifest.ensure_manifest(host)
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #13
    def execute(self, options, args, tool):
        self._tool = tool
        # TODO(qyearsley): Consider calling ensure_manifest in Command or WebKitPatch.
        WPTManifest.ensure_manifest(tool)
        if not self.check_ok_to_run():
            return 1

        jobs = self.latest_try_jobs()
        self._log_scheduled_jobs(jobs)
        builders_with_no_jobs = self.builders_with_no_jobs(jobs)

        if options.trigger_jobs and builders_with_no_jobs:
            self.trigger_try_jobs(builders_with_no_jobs)
            return 1

        if not options.fill_missing and builders_with_no_jobs:
            _log.error('The following builders have no jobs:')
            for builder in builders_with_no_jobs:
                _log.error('  %s', builder)
            _log.error('Add --fill-missing to continue rebaselining anyway, '
                       'filling in results for missing platforms.')
            return 1

        jobs_to_results = self._fetch_results(jobs)
        if not options.fill_missing and len(jobs_to_results) < len(jobs):
            return 1

        if args:
            test_baseline_set = self._make_test_baseline_set_for_tests(
                args, jobs_to_results)
        else:
            test_baseline_set = self._make_test_baseline_set(
                jobs_to_results, options.only_changed_tests)

        if options.fill_missing:
            self.fill_in_missing_results(test_baseline_set)

        _log.debug('Rebaselining: %s', test_baseline_set)

        if not options.dry_run:
            self.rebaseline(options, test_baseline_set)
        return 0
Example #14
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update(
                'Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            all_test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            all_test_names.sort()
            random.Random(self._options.seed).shuffle(all_test_names)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            all_test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(
            tests_in_other_chunks)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory,
                                         'results.html')
            self._copy_results_html_file(self._results_directory,
                                         'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
Example #15
    def test_ensure_manifest_raises_exception(self):
        host = MockHost()
        host.executive = MockExecutive(should_throw=True)

        with self.assertRaises(ScriptError):
            WPTManifest.ensure_manifest(host)
Example #16
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False

        self._printer.write_update(
            'Generating MANIFEST.json for web-platform-tests ...')
        WPTManifest.ensure_manifest(self._port.host)

        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            all_test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            all_test_names.sort()
            random.Random(self._options.seed).shuffle(all_test_names)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            all_test_names)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests(tests_in_other_chunks)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(
                int(self._options.child_processes))

            initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                              self._options.repeat_each,
                                              self._options.iterations,
                                              num_workers)

            # Don't retry failures when interrupted by the user or when the failure limit is hit.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info(
                        'Retrying %s, attempt %d of %d...',
                        grammar.pluralize('unexpected failure',
                                          len(tests_to_retry)), retry_attempt,
                        self._options.num_retries)

                    retry_results = self._run_tests(
                        tests_to_retry,
                        tests_to_skip=set(),
                        repeat_each=1,
                        iterations=1,
                        num_workers=num_workers,
                        retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(
                    self._filesystem.join(self._results_directory,
                                          "full_results.json"),
                    self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html
                                      and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time,
                                            initial_results,
                                            summarized_failing_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
Example #17
    def execute(self, options, args, tool):
        self._tool = tool
        self.git_cl = self.git_cl or GitCL(tool)

        # The WPT manifest is required when iterating through tests in a
        # TestBaselineSet if there are any tests in web-platform-tests.
        # TODO(qyearsley): Consider calling ensure_manifest in WebKitPatch.
        # See: crbug.com/698294
        WPTManifest.ensure_manifest(tool)

        if not self.check_ok_to_run():
            return 1

        jobs = self.git_cl.latest_try_jobs(self._try_bots())
        self._log_jobs(jobs)
        builders_with_no_jobs = self._try_bots() - {b.builder_name for b in jobs}

        if not options.trigger_jobs and not jobs:
            _log.info('Aborted: no try jobs and --no-trigger-jobs passed.')
            return 1

        if options.trigger_jobs and builders_with_no_jobs:
            self.trigger_try_jobs(builders_with_no_jobs)
            return 1

        jobs_to_results = self._fetch_results(jobs)

        builders_with_results = {b.builder_name for b in jobs_to_results}
        builders_without_results = set(self._try_bots()) - builders_with_results
        if builders_without_results:
            _log.info('There are some builders with no results:')
            self._log_builder_list(builders_without_results)

        if options.fill_missing is None and builders_without_results:
            should_continue = self._tool.user.confirm(
                'Would you like to continue?',
                default=self._tool.user.DEFAULT_NO)
            if not should_continue:
                _log.info('Aborting.')
                return 1
            options.fill_missing = self._tool.user.confirm(
                'Would you like to try to fill in missing results with\n'
                'available results?\n'
                'Note: This will generally yield correct results\n'
                'as long as the results are not platform-specific.',
                default=self._tool.user.DEFAULT_NO)

        if args:
            test_baseline_set = self._make_test_baseline_set_for_tests(
                args, jobs_to_results)
        else:
            test_baseline_set = self._make_test_baseline_set(
                jobs_to_results, options.only_changed_tests)

        if options.fill_missing:
            self.fill_in_missing_results(test_baseline_set)

        _log.debug('Rebaselining: %s', test_baseline_set)

        if not options.dry_run:
            self.rebaseline(options, test_baseline_set)
        return 0
Example #18
    def execute(self, options, args, tool):
        self._tool = tool

        # TODO(qyearsley): Move this call to somewhere else.
        WPTManifest.ensure_manifest(tool)

        unstaged_baselines = self.unstaged_baselines()
        if unstaged_baselines:
            _log.error('Aborting: there are unstaged baselines:')
            for path in unstaged_baselines:
                _log.error('  %s', path)
            return 1

        issue_number = self._get_issue_number()
        if issue_number is None:
            _log.error('No issue number for current branch.')
            return 1
        _log.debug('Issue number for current branch: %s', issue_number)

        builds = self.git_cl().latest_try_jobs(self._try_bots())

        builders_with_pending_builds = self.builders_with_pending_builds(builds)
        if builders_with_pending_builds:
            _log.info('There are existing pending builds for:')
            for builder in sorted(builders_with_pending_builds):
                _log.info('  %s', builder)
        builders_with_no_results = self.builders_with_no_results(builds)

        if options.trigger_jobs and builders_with_no_results:
            self.trigger_builds(builders_with_no_results)
            _log.info('Please re-run webkit-patch rebaseline-cl once all pending try jobs have finished.')
            return 1

        if builders_with_no_results and not options.fill_missing:
            _log.error('The following builders have no results:')
            for builder in builders_with_no_results:
                _log.error('  %s', builder)
            return 1

        _log.debug('Getting results for issue %d.', issue_number)
        builds_to_results = self._fetch_results(builds)
        if not options.fill_missing and len(builds_to_results) < len(builds):
            return 1

        test_baseline_set = TestBaselineSet(tool)
        if args:
            for test in args:
                for build in builds:
                    if not builds_to_results.get(build):
                        continue
                    test_baseline_set.add(test, build)
        else:
            test_baseline_set = self._make_test_baseline_set(
                builds_to_results,
                only_changed_tests=options.only_changed_tests)

        if options.fill_missing:
            self.fill_in_missing_results(test_baseline_set)

        _log.debug('Rebaselining: %s', test_baseline_set)

        if not options.dry_run:
            self.rebaseline(options, test_baseline_set)
        return 0
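Taken together, these examples exercise only a small surface of WPTManifest: the object is constructed from the text of MANIFEST.json and queried with all_urls() (Example #2), ensure_manifest(host) makes sure MANIFEST.json exists and is up to date before tests or expectations are processed (Example #9 tests this; Example #7 performs a similar copy-then-generate sequence at the port level), and generate_manifest(host, dest_path) runs the manifest command over a WPT directory (Examples #5, #6, #7 and #11). A minimal interface sketch inferred only from those call sites (the real class in webkitpy has more methods and different internals) could look like:

import json


class WPTManifest(object):
    """Sketch of the interface used in the examples above; not the real class."""

    def __init__(self, json_content):
        # Example #2 constructs the object from the text of MANIFEST.json.
        self.raw_dict = json.loads(json_content)

    def all_urls(self):
        # Example #2 iterates over every test URL listed in the manifest.
        raise NotImplementedError

    @staticmethod
    def ensure_manifest(host):
        # Examples #1, #3, #4 and others: make sure MANIFEST.json exists before
        # tests or expectations are processed (Example #9 shows the command
        # that regeneration is expected to run).
        raise NotImplementedError

    @staticmethod
    def generate_manifest(host, dest_path):
        # Examples #5, #6, #7 and #11: run the manifest command with dest_path
        # as the tests root, producing dest_path/MANIFEST.json.
        raise NotImplementedError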