Example #1
    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.

        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
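Every example in this listing ends by packing its results into a test_run_results.RunDetails object. As a hedged illustration (not part of the snippet above), a caller would typically surface that object's exit_code to the shell; the names manager and args below are placeholders, and only the exit_code field is taken from the RunDetails(exit_code, ...) constructions shown in these examples.

    # Hypothetical caller sketch: surface the RunDetails exit code to the shell.
    # `manager` and `args` are placeholders, not names from the snippet above.
    import sys

    details = manager.run(args)      # returns test_run_results.RunDetails
    sys.exit(details.exit_code)      # exit_code is the first field every example sets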
Example #3
 def test_timeout_then_unexpected_pass(self):
     tests = ['failures/expected/image.html']
     expectations = test_expectations.TestExpectations(self.port, tests)
     initial_results = test_run_results.TestRunResults(
         expectations, len(tests))
     initial_results.add(
         get_result(
             'failures/expected/image.html',
             test_expectations.TIMEOUT,
             run_time=1), False, False)
     retry_results = test_run_results.TestRunResults(
         expectations, len(tests))
     retry_results.add(
         get_result(
             'failures/expected/image.html',
             test_expectations.PASS,
             run_time=0.1), False, False)
     summary = test_run_results.summarize_results(
         self.port,
         expectations,
         initial_results,
         retry_results,
         enabled_pixel_tests_in_retry=True,
         only_include_failing=True)
     self.assertEquals(summary['num_regressions'], 0)
     self.assertEquals(summary['num_passes'], 1)
Example #4
def summarized_results(port, expected, passing, flaky, include_passes=False):
    test_is_slow = False

    initial_results = run_results(port)
    if expected:
        initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
    elif passing:
        initial_results.add(get_result('passes/text.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow)
    else:
        initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)

        # we only list hang.html here, since normally this is WontFix
        initial_results.add(get_result('failures/expected/hang.html', test_expectations.TIMEOUT), expected, test_is_slow)

    if flaky:
        retry_results = run_results(port)
        retry_results.add(get_result('passes/text.html'), True, test_is_slow)
        retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow)
        retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow)
    else:
        retry_results = None

    return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results,
        enabled_pixel_tests_in_retry=False, include_passes=include_passes)
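The fixture above, like the similar fixtures in later examples, calls get_result() and run_results() without defining them. Below is a rough sketch of what those helpers likely look like, inferred purely from how they are called in this listing; the import paths follow the older webkitpy layout and the specific failure classes are assumptions, so treat it as an approximation rather than the real module.

    # Hedged sketch of the undefined helpers used by the unittest fixtures here.
    # Import paths and failure classes are assumptions (newer checkouts use blinkpy.*).
    from webkitpy.layout_tests.models import (test_expectations, test_failures,
                                               test_results, test_run_results)

    def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
        # Represent the requested outcome with a matching failure object.
        failures = []
        if result_type == test_expectations.TIMEOUT:
            failures = [test_failures.FailureTimeout()]
        elif result_type == test_expectations.CRASH:
            failures = [test_failures.FailureCrash()]
        elif result_type == test_expectations.AUDIO:
            failures = [test_failures.FailureAudioMismatch()]
        return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)

    def run_results(port, extra_skipped_tests=None):
        # Empty TestRunResults over the dummy tests these fixtures add results for;
        # this sketch ignores extra_skipped_tests.
        tests = ['passes/text.html', 'failures/expected/audio.html',
                 'failures/expected/timeout.html', 'failures/expected/crash.html',
                 'failures/expected/hang.html']
        expectations = test_expectations.TestExpectations(port, tests)
        return test_run_results.TestRunResults(expectations, len(tests))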
Example #5
 def test_summarized_results_flaky_pass_after_first_retry(self):
     test_name = 'passes/text.html'
     expectations = test_expectations.TestExpectations(
         self.port, [test_name])
     initial_results = test_run_results.TestRunResults(expectations, 1)
     initial_results.add(get_result(test_name, test_expectations.CRASH),
                         False, False)
     all_retry_results = [
         test_run_results.TestRunResults(expectations, 1),
         test_run_results.TestRunResults(expectations, 1),
         test_run_results.TestRunResults(expectations, 1)
     ]
     all_retry_results[0].add(
         get_result(test_name, test_expectations.TIMEOUT), False, False)
     all_retry_results[1].add(get_result(test_name, test_expectations.PASS),
                              True, False)
     all_retry_results[2].add(get_result(test_name, test_expectations.PASS),
                              True, False)
     summary = test_run_results.summarize_results(
         self.port,
         expectations,
         initial_results,
         all_retry_results,
         enabled_pixel_tests_in_retry=True)
     self.assertTrue(
         'is_unexpected' not in summary['tests']['passes']['text.html'])
     self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                       'PASS')
     self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                       'CRASH TIMEOUT PASS PASS')
     self.assertEquals(summary['num_flaky'], 1)
     self.assertEquals(summary['num_passes'], 0)
     self.assertEquals(summary['num_regressions'], 0)
Example #6
def summarized_results(port, expected, passing, flaky, only_include_failing=False, extra_skipped_tests=[]):
    test_is_slow = False

    initial_results = run_results(port, extra_skipped_tests)
    if expected:
        initial_results.add(get_result("passes/text.html", test_expectations.PASS), expected, test_is_slow)
        initial_results.add(get_result("failures/expected/audio.html", test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(
            get_result("failures/expected/timeout.html", test_expectations.TIMEOUT), expected, test_is_slow
        )
        initial_results.add(get_result("failures/expected/crash.html", test_expectations.CRASH), expected, test_is_slow)
    elif passing:
        skipped_result = get_result("passes/skipped/skip.html")
        skipped_result.type = test_expectations.SKIP
        initial_results.add(skipped_result, expected, test_is_slow)

        initial_results.add(get_result("passes/text.html", run_time=1), expected, test_is_slow)
        initial_results.add(get_result("failures/expected/audio.html"), expected, test_is_slow)
        initial_results.add(get_result("failures/expected/timeout.html"), expected, test_is_slow)
        initial_results.add(get_result("failures/expected/crash.html"), expected, test_is_slow)
    else:
        initial_results.add(
            get_result("passes/text.html", test_expectations.TIMEOUT, run_time=1), expected, test_is_slow
        )
        initial_results.add(
            get_result("failures/expected/audio.html", test_expectations.AUDIO, run_time=0.049), expected, test_is_slow
        )
        initial_results.add(
            get_result("failures/expected/timeout.html", test_expectations.CRASH, run_time=0.05), expected, test_is_slow
        )
        initial_results.add(
            get_result("failures/expected/crash.html", test_expectations.TIMEOUT), expected, test_is_slow
        )

        # we only list hang.html here, since normally this is WontFix
        initial_results.add(
            get_result("failures/expected/hang.html", test_expectations.TIMEOUT), expected, test_is_slow
        )

    if flaky:
        retry_results = run_results(port, extra_skipped_tests)
        retry_results.add(get_result("passes/text.html"), True, test_is_slow)
        retry_results.add(get_result("failures/expected/timeout.html"), True, test_is_slow)
        retry_results.add(get_result("failures/expected/crash.html"), True, test_is_slow)
    else:
        retry_results = None

    return test_run_results.summarize_results(
        port,
        initial_results.expectations,
        initial_results,
        retry_results,
        enabled_pixel_tests_in_retry=False,
        only_include_failing=only_include_failing,
    )
Example #7
 def test_timeout_then_unexpected_pass(self):
     test_name = 'failures/expected/text.html'
     expectations = test_expectations.TestExpectations(self.port, [test_name])
     initial_results = test_run_results.TestRunResults(expectations, 1)
     initial_results.add(get_result(test_name, test_expectations.TIMEOUT, run_time=1), False, False)
     all_retry_results = [test_run_results.TestRunResults(expectations, 1),
                          test_run_results.TestRunResults(expectations, 1),
                          test_run_results.TestRunResults(expectations, 1)]
     all_retry_results[0].add(get_result(test_name, test_expectations.LEAK, run_time=0.1), False, False)
     all_retry_results[1].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
     all_retry_results[2].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
     summary = test_run_results.summarize_results(
         self.port, expectations, initial_results, all_retry_results,
         enabled_pixel_tests_in_retry=True)
     self.assertTrue('is_unexpected' in summary['tests']['failures']['expected']['text.html'])
     self.assertEquals(summary['tests']['failures']['expected']['text.html']['expected'], 'FAIL')
     self.assertEquals(summary['tests']['failures']['expected']['text.html']['actual'], 'TIMEOUT LEAK PASS PASS')
     self.assertEquals(summary['num_passes'], 1)
     self.assertEquals(summary['num_regressions'], 0)
     self.assertEquals(summary['num_flaky'], 0)
Example #8
 def test_summarized_results_flaky_pass_after_first_retry(self):
     test_name = 'passes/text.html'
     expectations = test_expectations.TestExpectations(self.port, [test_name])
     initial_results = test_run_results.TestRunResults(expectations, 1)
     initial_results.add(get_result(test_name, test_expectations.CRASH), False, False)
     all_retry_results = [test_run_results.TestRunResults(expectations, 1),
                          test_run_results.TestRunResults(expectations, 1),
                          test_run_results.TestRunResults(expectations, 1)]
     all_retry_results[0].add(get_result(test_name, test_expectations.TIMEOUT), False, False)
     all_retry_results[1].add(get_result(test_name, test_expectations.PASS), True, False)
     all_retry_results[2].add(get_result(test_name, test_expectations.PASS), True, False)
     summary = test_run_results.summarize_results(
         self.port, expectations, initial_results, all_retry_results,
         enabled_pixel_tests_in_retry=True)
     self.assertTrue('is_unexpected' not in summary['tests']['passes']['text.html'])
     self.assertEquals(summary['tests']['passes']['text.html']['expected'], 'PASS')
     self.assertEquals(summary['tests']['passes']['text.html']['actual'], 'CRASH TIMEOUT PASS PASS')
     self.assertEquals(summary['num_flaky'], 1)
     self.assertEquals(summary['num_passes'], 0)
     self.assertEquals(summary['num_regressions'], 0)
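For reference, the assertions in the two tests above imply the following shape for the dict returned by summarize_results. This fragment is illustrative only and reuses the summary value built in the flaky-pass test immediately above.

    # Illustrative only: reading back the fields the assertions above exercise.
    entry = summary['tests']['passes']['text.html']         # results nested by path component
    assert entry['expected'] == 'PASS'                       # the expectation string
    assert entry['actual'] == 'CRASH TIMEOUT PASS PASS'      # one token per attempt, retries appended
    assert 'is_unexpected' not in entry                      # recovered-on-retry tests are not flagged
    assert (summary['num_flaky'], summary['num_passes'], summary['num_regressions']) == (1, 0, 0)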
Example #9
def summarized_results(port, expected, passing, flaky, only_include_failing=False, extra_skipped_tests=[]):
    test_is_slow = False

    initial_results = run_results(port, extra_skipped_tests)
    if expected:
        initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
    elif passing:
        skipped_result = get_result('passes/skipped/skip.html')
        skipped_result.type = test_expectations.SKIP
        initial_results.add(skipped_result, expected, test_is_slow)

        initial_results.add(get_result('passes/text.html', run_time=1), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow)
    else:
        initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT, run_time=1), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO, run_time=0.049), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.CRASH, run_time=0.05), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)

        # we only list hang.html here, since normally this is WontFix
        initial_results.add(get_result('failures/expected/hang.html', test_expectations.SKIP), expected, test_is_slow)

    if flaky:
        retry_results = run_results(port, extra_skipped_tests)
        retry_results.add(get_result('passes/text.html'), True, test_is_slow)
        retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow)
        retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow)
    else:
        retry_results = None

    return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False, only_include_failing=only_include_failing)
Example #10
 def test_timeout_then_unexpected_pass(self):
     tests = ['failures/expected/image.html']
     expectations = test_expectations.TestExpectations(self.port, tests)
     initial_results = test_run_results.TestRunResults(
         expectations, len(tests))
     initial_results.add(
         get_result('failures/expected/image.html',
                    test_expectations.TIMEOUT,
                    run_time=1), False, False)
     retry_results = test_run_results.TestRunResults(
         expectations, len(tests))
     retry_results.add(
         get_result('failures/expected/image.html',
                    test_expectations.PASS,
                    run_time=0.1), False, False)
     summary = test_run_results.summarize_results(
         self.port,
         expectations,
         initial_results,
         retry_results,
         enabled_pixel_tests_in_retry=True,
         only_include_failing=True)
     self.assertEquals(summary['num_regressions'], 0)
     self.assertEquals(summary['num_passes'], 1)
Example #11
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(
                int(self._options.child_processes))

            initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                              self._options.repeat_each,
                                              self._options.iterations,
                                              num_workers)

            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed(
                )

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info(
                        'Retrying %s, attempt %d of %d...',
                        grammar.pluralize('unexpected failure',
                                          len(tests_to_retry)), retry_attempt,
                        self._options.num_retries)

                    retry_results = self._run_tests(
                        tests_to_retry,
                        tests_to_skip=set(),
                        repeat_each=1,
                        iterations=1,
                        num_workers=num_workers,
                        retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(
                    self._filesystem.join(self._results_directory,
                                          "full_results.json"),
                    self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html
                                      and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time,
                                            initial_results,
                                            summarized_failing_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
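The run() method above relies on _tests_to_retry() to decide what feeds each retry attempt. One plausible shape for that helper, inferred from attributes used elsewhere in this listing (unexpected_results_by_name, result.type) rather than taken from the snippet itself, is:

    # Hedged sketch; the real method may filter differently.
    def _tests_to_retry(self, run_results):
        # Retry unexpected results, except unexpected passes, which need no retry.
        return [result.test_name
                for result in run_results.unexpected_results_by_name.values()
                if result.type != test_expectations.PASS]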
Example #12
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(int(self._options.child_processes))

            initial_results = self._run_tests(
                tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                num_workers)

            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info('Retrying %s, attempt %d of %d...',
                              grammar.pluralize('unexpected failure', len(tests_to_retry)),
                              retry_attempt, self._options.num_retries)

                    retry_results = self._run_tests(tests_to_retry,
                                                    tests_to_skip=set(),
                                                    repeat_each=1,
                                                    iterations=1,
                                                    num_workers=num_workers,
                                                    retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry, only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
                                          self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)

        return test_run_results.RunDetails(
            exit_code, summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
Example #13
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update(
                'Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            all_test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            all_test_names.sort()
            random.Random(self._options.seed).shuffle(all_test_names)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            all_test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(
            tests_in_other_chunks)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory,
                                         'results.html')
            self._copy_results_html_file(self._results_directory,
                                         'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
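A side note on the '--order=random' branch above: sorting first and then shuffling with a seeded random.Random makes the order a pure function of the seed, so a failing order can be reproduced later. A minimal standalone illustration (the seed value is arbitrary):

    import random

    names = ['b/test-3.html', 'a/test-1.html', 'a/test-2.html']

    def shuffled(seed):
        ordered = sorted(names)               # sort first, as run() does above
        random.Random(seed).shuffle(ordered)  # seeded shuffle => deterministic order
        return ordered

    assert shuffled(4) == shuffled(4)         # same seed reproduces the same order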
Example #14
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        try:
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
Example #15
File: manager.py Project: EQ4/h5vcc
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        start_time = time.time()
        try:
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (initial_results.unexpected_results_by_name or
                                               (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
                                           summarized_results, initial_results, retry_results)
Example #16
def summarized_results(port, expected, passing, flaky, only_include_failing=False, extra_skipped_tests=[], fail_on_retry=False):
    test_is_slow = False

    all_retry_results = []
    initial_results = run_results(port, extra_skipped_tests)
    if expected:
        initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/leak.html', test_expectations.LEAK), expected, test_is_slow)
    elif passing:
        skipped_result = get_result('passes/skipped/skip.html')
        skipped_result.type = test_expectations.SKIP
        initial_results.add(skipped_result, expected, test_is_slow)

        initial_results.add(get_result('passes/text.html', run_time=1), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/leak.html'), expected, test_is_slow)
    else:
        initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT, run_time=1), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html',
                                       test_expectations.CRASH, run_time=0.049), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html',
                                       test_expectations.TEXT, run_time=0.05), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(get_result('failures/expected/leak.html', test_expectations.TIMEOUT), expected, test_is_slow)

        # we only list keyboard.html here, since normally this is WontFix
        initial_results.add(get_result('failures/expected/keyboard.html', test_expectations.SKIP), expected, test_is_slow)

        initial_results.add(get_result('failures/expected/text.html', test_expectations.IMAGE), expected, test_is_slow)

        all_retry_results = [run_results(port, extra_skipped_tests),
                             run_results(port, extra_skipped_tests),
                             run_results(port, extra_skipped_tests)]

        def add_result_to_all_retries(new_result, expected):
            for run_result in all_retry_results:
                run_result.add(new_result, expected, test_is_slow)

        if flaky:
            add_result_to_all_retries(get_result('passes/text.html', test_expectations.PASS), True)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html', test_expectations.AUDIO), True)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html', test_expectations.LEAK), True)
            add_result_to_all_retries(
                get_result('failures/expected/timeout.html', test_expectations.AUDIO), True)

            all_retry_results[0].add(
                get_result('failures/expected/crash.html', test_expectations.AUDIO),
                False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/crash.html', test_expectations.CRASH),
                True, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/crash.html', test_expectations.LEAK),
                False, test_is_slow)

            all_retry_results[0].add(
                get_result('failures/expected/text.html', test_expectations.TEXT),
                True, test_is_slow)

        else:
            add_result_to_all_retries(
                get_result('passes/text.html', test_expectations.TIMEOUT), False)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html', test_expectations.LEAK), False)
            add_result_to_all_retries(
                get_result('failures/expected/crash.html', test_expectations.TIMEOUT), False)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html', test_expectations.TIMEOUT), False)

            all_retry_results[0].add(
                get_result('failures/expected/timeout.html', test_expectations.AUDIO),
                False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/timeout.html', test_expectations.CRASH),
                False, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/timeout.html', test_expectations.LEAK),
                False, test_is_slow)

    return test_run_results.summarize_results(
        port, initial_results.expectations, initial_results, all_retry_results,
        enabled_pixel_tests_in_retry=False,
        only_include_failing=only_include_failing)
Example #17
def summarized_results(port,
                       expected,
                       passing,
                       flaky,
                       only_include_failing=False,
                       extra_skipped_tests=[],
                       fail_on_retry=False):
    test_is_slow = False

    all_retry_results = []
    initial_results = run_results(port, extra_skipped_tests)
    if expected:
        initial_results.add(
            get_result('passes/text.html', test_expectations.PASS), expected,
            test_is_slow)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/crash.html',
                       test_expectations.CRASH), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/leak.html', test_expectations.LEAK),
            expected, test_is_slow)
    elif passing:
        skipped_result = get_result('passes/skipped/skip.html')
        skipped_result.type = test_expectations.SKIP
        initial_results.add(skipped_result, expected, test_is_slow)

        initial_results.add(get_result('passes/text.html', run_time=1),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/leak.html'),
                            expected, test_is_slow)
    else:
        initial_results.add(
            get_result('passes/text.html',
                       test_expectations.TIMEOUT,
                       run_time=1), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       test_expectations.CRASH,
                       run_time=0.049), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       test_expectations.TEXT,
                       run_time=0.05), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/crash.html',
                       test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/leak.html',
                       test_expectations.TIMEOUT), expected, test_is_slow)

        # we only list keyboard.html here, since normally this is WontFix
        initial_results.add(
            get_result('failures/expected/keyboard.html',
                       test_expectations.SKIP), expected, test_is_slow)

        initial_results.add(
            get_result('failures/expected/text.html', test_expectations.IMAGE),
            expected, test_is_slow)

        all_retry_results = [
            run_results(port, extra_skipped_tests),
            run_results(port, extra_skipped_tests),
            run_results(port, extra_skipped_tests)
        ]

        def add_result_to_all_retries(new_result, expected):
            for run_result in all_retry_results:
                run_result.add(new_result, expected, test_is_slow)

        if flaky:
            add_result_to_all_retries(
                get_result('passes/text.html', test_expectations.PASS), True)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html',
                           test_expectations.AUDIO), True)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html',
                           test_expectations.LEAK), True)
            add_result_to_all_retries(
                get_result('failures/expected/timeout.html',
                           test_expectations.AUDIO), True)

            all_retry_results[0].add(
                get_result('failures/expected/crash.html',
                           test_expectations.AUDIO), False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/crash.html',
                           test_expectations.CRASH), True, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/crash.html',
                           test_expectations.LEAK), False, test_is_slow)

            all_retry_results[0].add(
                get_result('failures/expected/text.html',
                           test_expectations.TEXT), True, test_is_slow)

        else:
            add_result_to_all_retries(
                get_result('passes/text.html', test_expectations.TIMEOUT),
                False)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html',
                           test_expectations.LEAK), False)
            add_result_to_all_retries(
                get_result('failures/expected/crash.html',
                           test_expectations.TIMEOUT), False)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html',
                           test_expectations.TIMEOUT), False)

            all_retry_results[0].add(
                get_result('failures/expected/timeout.html',
                           test_expectations.AUDIO), False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/timeout.html',
                           test_expectations.CRASH), False, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/timeout.html',
                           test_expectations.LEAK), False, test_is_slow)

    return test_run_results.summarize_results(
        port,
        initial_results.expectations,
        initial_results,
        all_retry_results,
        enabled_pixel_tests_in_retry=False,
        only_include_failing=only_include_failing)
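A hedged usage sketch for the fixture above: these unit tests normally drive it with a mock host and the synthetic 'test' port, so the import path and port name below are assumptions that vary between webkitpy and blinkpy checkouts.

    # Assumed fixture wiring; adjust the import path for your checkout.
    from webkitpy.common.host_mock import MockHost

    host = MockHost()
    port = host.port_factory.get('test')

    # expected=False, passing=False takes the unexpected-failure branch;
    # flaky=True makes the retries recover, so tests are reported as flaky.
    summary = summarized_results(port, expected=False, passing=False, flaky=True)
    print(summary['num_flaky'], summary['num_regressions'])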
Example #18
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        start_time = time.time()
        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

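            # Work out which unexpected failures to retry; crashes are only
            # retried when the port opts in.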
            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            if should_retry_failures and tests_to_retry and not initial_results.interrupted:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

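                # Undo the temporary pixel-test override once the retry
                # pass has finished.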
                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
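        # Both summaries feed the JSON output files; the failing-only
        # summary also drives the console output and the exit code.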
        summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_failing_results)

        exit_code = self._port.exit_code_from_summarized_results(summarized_failing_results)
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
Example #19
0
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised when the file given by --test-list doesn't exist.
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
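        # force_expectations_pass (--force) makes the parser treat every
        # test as expected to pass.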
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                int(self._options.child_processes),
                retrying=False)

            tests_to_retry = self._tests_to_retry(
                initial_results,
                include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when the run was interrupted by the user
            # or stopped by the failure-limit exception.
            retry_failures = self._options.retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = (
                    self._force_pixel_tests_if_needed())

                _log.info('')
                _log.info("Retrying %s ..." %
                          pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
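        # A second, larger summary (including passes, timings and modifiers)
        # is built only when results will be uploaded to a results server.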
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(
                self._port,
                self._expectations,
                initial_results,
                retry_results,
                enabled_pixel_tests_in_retry,
                include_passes=True,
                include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results,
                                    summarized_results)

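        # Dry runs keep the sentinel exit code of -1; otherwise the exit
        # code comes from the summarized results, unless the run was
        # interrupted by the user.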
        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results,
                                    results_including_passes, start_time,
                                    end_time)

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                        initial_results.unexpected_results_by_name or
                    (self._options.full_results_html
                     and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(
                    summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results,
                                           initial_results, retry_results,
                                           enabled_pixel_tests_in_retry)
def summarized_results(port, expected, passing, flaky, include_passes=False):
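    """Build summarized results for the expected, unexpectedly passing, and
    unexpectedly failing scenarios exercised by the unit tests."""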
    initial_results = run_results(port)
    if expected:
        initial_results.add(
            get_result('passes/text.html', test_expectations.PASS), expected)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       test_expectations.AUDIO), expected)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       test_expectations.TIMEOUT), expected)
        initial_results.add(
            get_result('failures/expected/crash.html',
                       test_expectations.CRASH), expected)

        if port._options.pixel_tests:
            initial_results.add(
                get_result('failures/expected/pixel-fail.html',
                           test_expectations.IMAGE), expected)
        else:
            initial_results.add(
                get_result('failures/expected/pixel-fail.html',
                           test_expectations.PASS), expected)

        if port._options.world_leaks:
            initial_results.add(
                get_result('failures/expected/leak.html',
                           test_expectations.LEAK), expected)
        else:
            initial_results.add(
                get_result('failures/expected/leak.html',
                           test_expectations.PASS), expected)

    elif passing:
        initial_results.add(get_result('passes/text.html'), expected)
        initial_results.add(get_result('failures/expected/audio.html'),
                            expected)
        initial_results.add(get_result('failures/expected/timeout.html'),
                            expected)
        initial_results.add(get_result('failures/expected/crash.html'),
                            expected)

        if port._options.pixel_tests:
            initial_results.add(
                get_result('failures/expected/pixel-fail.html'), expected)
        else:
            initial_results.add(
                get_result('failures/expected/pixel-fail.html',
                           test_expectations.IMAGE), expected)

        if port._options.world_leaks:
            initial_results.add(get_result('failures/expected/leak.html'),
                                expected)
        else:
            initial_results.add(
                get_result('failures/expected/leak.html',
                           test_expectations.PASS), expected)
    else:
        initial_results.add(
            get_result('passes/text.html', test_expectations.TIMEOUT),
            expected)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       test_expectations.AUDIO), expected)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       test_expectations.CRASH), expected)
        initial_results.add(
            get_result('failures/expected/crash.html',
                       test_expectations.TIMEOUT), expected)
        initial_results.add(
            get_result('failures/expected/pixel-fail.html',
                       test_expectations.TIMEOUT), expected)
        initial_results.add(
            get_result('failures/expected/leak.html', test_expectations.CRASH),
            expected)

        # We only list hang.html here, since it is normally WontFix.
        initial_results.add(
            get_result('failures/expected/hang.html',
                       test_expectations.TIMEOUT), expected)

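    # A flaky run is modeled as a retry pass in which every previously
    # failing test comes back with its expected result.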
    if flaky:
        retry_results = run_results(port)
        retry_results.add(get_result('passes/text.html'), True)
        retry_results.add(get_result('failures/expected/timeout.html'), True)
        retry_results.add(get_result('failures/expected/crash.html'), True)
        retry_results.add(get_result('failures/expected/pixel-fail.html'),
                          True)
        retry_results.add(get_result('failures/expected/leak.html'), True)
    else:
        retry_results = None

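    # The expectations argument is a dict here; the None key presumably
    # maps to the default device configuration.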
    return test_run_results.summarize_results(
        port, {None: initial_results.expectations},
        initial_results,
        retry_results,
        enabled_pixel_tests_in_retry=False,
        include_passes=include_passes)