Example 1
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update('Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

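        # Shard the collected tests for this process; tests that fall into
        # other chunks are later removed from the expectations (see below).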
        test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names)

        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(tests_in_other_chunks)

        self._printer.print_found(
            len(all_test_names), len(test_names), len(tests_to_run),
            self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # Don't retry failures if an explicit list of tests was passed in.
            should_retry_failures = len(paths) < len(test_names)
            # Retry failures 3 times by default.
            if should_retry_failures:
                self._options.num_retries = 3
        else:
            should_retry_failures = self._options.num_retries > 0

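        # Run the tests (single pass or watch loop); the finally block stops
        # the servers and cleans up even if the run is interrupted.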
        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip, should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry, only_include_failing=True)

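        # The exit code is the number of regressions, capped at
        # MAX_FAILURES_EXIT_STATUS.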
        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory, 'results.html')
            self._copy_results_html_file(self._results_directory, 'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory, 'results.html'))
                self._printer.print_results(time.time() - start_time, initial_results)

        return test_run_results.RunDetails(
            exit_code, summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
Example 2
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

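        # Shard the collected tests for this process, then apply the requested
        # test ordering.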
        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # Restore the test order to user specified order.
            # base.tests() may change the order as it returns tests in the
            # real, external/wpt, virtual order.
            if paths:
                test_names = self._restore_order(paths, test_names)

        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # If --test-list is passed, or if no test narrowing is specified,
            # default to 3 retries. Otherwise [e.g. if tests are being passed by
            # name], default to 0 retries.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0

        should_retry_failures = self._options.num_retries > 0

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

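        # With no expectations loaded there is nothing to summarize against,
        # so return the raw results directly.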
        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                                               all_retry_results)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._artifacts_directory,
                                         'results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if (self._options.show_results
                        and (exit_code or initial_results.total_failures)):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._artifacts_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results)
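
Both examples clamp the regression count before using it as the process exit code. Below is a minimal standalone sketch of just that step, for reference; the numeric value of MAX_FAILURES_EXIT_STATUS is an assumed placeholder, and the warning logged in the listings is reduced to a comment.

    # Sketch of the exit-code clamping shown in both examples above.
    # The constant's value is an assumed placeholder, not taken from the listings.
    MAX_FAILURES_EXIT_STATUS = 101

    def clamp_exit_code(num_regressions):
        """Return num_regressions as an exit code, capped at the maximum.

        The listings also log a warning when the cap is applied; that is
        omitted here to keep the sketch self-contained.
        """
        return min(num_regressions, MAX_FAILURES_EXIT_STATUS)

    assert clamp_exit_code(3) == 3
    assert clamp_exit_code(500) == MAX_FAILURES_EXIT_STATUS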