Code example #1
    def test_summarized_results_flaky_pass_after_first_retry(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(self.port)
        initial_results = test_run_results.TestRunResults(
            expectations, 1, None)
        initial_results.add(get_result(test_name, ResultType.Crash), False,
                            False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None)
        ]
        all_retry_results[0].add(get_result(test_name, ResultType.Timeout),
                                 False, False)
        all_retry_results[1].add(get_result(test_name, ResultType.Pass), True,
                                 False)
        all_retry_results[2].add(get_result(test_name, ResultType.Pass), True,
                                 False)
        summary = test_run_results.summarize_results(self.port, expectations,
                                                     initial_results,
                                                     all_retry_results)
        self.assertTrue(
            'is_unexpected' not in summary['tests']['passes']['text.html'])
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH TIMEOUT PASS PASS')
        self.assertEquals(summary['num_flaky'], 1)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 0)
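
The unit-test snippets above rely on a `get_result` helper that is defined elsewhere in the same test file and not shown here. A minimal sketch of what such a helper plausibly looks like, assuming it wraps `test_results.TestResult` from the same blinkpy tooling; the module paths and the `TestResult` signature are assumptions from the usual blinkpy layout, not confirmed by these snippets:

# Sketch of the assumed helper, not the actual Chromium implementation.
from blinkpy.web_tests.models import test_results  # assumed module path
from blinkpy.web_tests.models.typ_types import ResultType  # assumed module path


def get_result(test_name, result_type=ResultType.Pass, run_time=0):
    # Build a bare TestResult and stamp the desired outcome on it.
    # Mutating `.type` this way is consistent with the shown usage
    # (see skipped_result in example #9).
    result = test_results.TestResult(test_name, test_run_time=run_time)
    result.type = result_type
    return result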
Code example #2
    def test_summarized_results_with_iterations(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(self.port)
        initial_results = test_run_results.TestRunResults(
            expectations, 3, None)
        initial_results.add(get_result(test_name, ResultType.Crash), False,
                            False)
        initial_results.add(get_result(test_name, ResultType.Failure), False,
                            False)
        initial_results.add(get_result(test_name, ResultType.Timeout), False,
                            False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 2, None)
        ]
        all_retry_results[0].add(get_result(test_name, ResultType.Failure),
                                 False, False)
        all_retry_results[0].add(get_result(test_name, ResultType.Failure),
                                 False, False)

        summary = test_run_results.summarize_results(self.port, expectations,
                                                     initial_results,
                                                     all_retry_results)
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH FAIL TIMEOUT FAIL FAIL')
        self.assertEquals(summary['num_flaky'], 0)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 1)
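
Example #2 differs from #1 only in the iteration counts: the second argument to TestRunResults appears to be the number of results the run will hold (three initial iterations, then one retry batch of two), and every iteration's outcome is concatenated into the 'actual' string. A compact restatement of that aggregation (illustrative only):

# initial iterations:  CRASH, FAIL, TIMEOUT
# one retry batch:     FAIL, FAIL
actual = ' '.join(['CRASH', 'FAIL', 'TIMEOUT'] + ['FAIL', 'FAIL'])
assert actual == 'CRASH FAIL TIMEOUT FAIL FAIL'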
Code example #3
    def test_timeout_then_unexpected_pass(self):
        test_name = 'failures/expected/text.html'
        expectations = test_expectations.TestExpectations(self.port)
        initial_results = test_run_results.TestRunResults(
            expectations, 1, None)
        initial_results.add(
            get_result(test_name, ResultType.Timeout, run_time=1), False,
            False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None)
        ]
        all_retry_results[0].add(
            get_result(test_name, ResultType.Failure, run_time=0.1), False,
            False)
        all_retry_results[1].add(
            get_result(test_name, ResultType.Pass, run_time=0.1), False, False)
        all_retry_results[2].add(
            get_result(test_name, ResultType.Pass, run_time=0.1), False, False)
        summary = test_run_results.summarize_results(self.port, expectations,
                                                     initial_results,
                                                     all_retry_results)
        self.assertIn('is_unexpected',
                      summary['tests']['failures']['expected']['text.html'])
        self.assertEquals(
            summary['tests']['failures']['expected']['text.html']['expected'],
            'FAIL')
        self.assertEquals(
            summary['tests']['failures']['expected']['text.html']['actual'],
            'TIMEOUT FAIL PASS PASS')
        self.assertEquals(summary['num_passes'], 1)
        self.assertEquals(summary['num_regressions'], 0)
        self.assertEquals(summary['num_flaky'], 0)
Code example #4
    def test_summarized_results_with_iterations(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(
            self.port, [test_name])
        initial_results = test_run_results.TestRunResults(expectations, 3)
        initial_results.add(get_result(test_name, test_expectations.CRASH),
                            False, False)
        initial_results.add(get_result(test_name, test_expectations.IMAGE),
                            False, False)
        initial_results.add(get_result(test_name, test_expectations.TIMEOUT),
                            False, False)
        all_retry_results = [test_run_results.TestRunResults(expectations, 2)]
        all_retry_results[0].add(get_result(test_name, test_expectations.TEXT),
                                 False, False)
        all_retry_results[0].add(get_result(test_name, test_expectations.LEAK),
                                 False, False)

        summary = test_run_results.summarize_results(
            self.port,
            expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry=True)
        print(summary)
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH IMAGE TIMEOUT TEXT LEAK')
        self.assertEquals(summary['num_flaky'], 0)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 1)
Code example #5
    def test_summarized_results_flaky_pass_after_first_retry(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(
            self.port, [test_name])
        initial_results = test_run_results.TestRunResults(expectations, 1)
        initial_results.add(get_result(test_name, test_expectations.CRASH),
                            False, False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 1),
            test_run_results.TestRunResults(expectations, 1),
            test_run_results.TestRunResults(expectations, 1)
        ]
        all_retry_results[0].add(
            get_result(test_name, test_expectations.TIMEOUT), False, False)
        all_retry_results[1].add(get_result(test_name, test_expectations.PASS),
                                 True, False)
        all_retry_results[2].add(get_result(test_name, test_expectations.PASS),
                                 True, False)
        summary = test_run_results.summarize_results(
            self.port,
            expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry=True)
        self.assertTrue(
            'is_unexpected' not in summary['tests']['passes']['text.html'])
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH TIMEOUT PASS PASS')
        self.assertEquals(summary['num_flaky'], 1)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 0)
Code example #6
    def test_timeout_then_unexpected_pass(self):
        test_name = 'failures/expected/text.html'
        expectations = test_expectations.TestExpectations(self.port, [test_name])
        initial_results = test_run_results.TestRunResults(expectations, 1)
        initial_results.add(get_result(test_name, test_expectations.TIMEOUT, run_time=1), False, False)
        all_retry_results = [test_run_results.TestRunResults(expectations, 1),
                             test_run_results.TestRunResults(expectations, 1),
                             test_run_results.TestRunResults(expectations, 1)]
        all_retry_results[0].add(get_result(test_name, test_expectations.LEAK, run_time=0.1), False, False)
        all_retry_results[1].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
        all_retry_results[2].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
        summary = test_run_results.summarize_results(
            self.port, expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry=True)
        self.assertIn('is_unexpected', summary['tests']['failures']['expected']['text.html'])
        self.assertEquals(summary['tests']['failures']['expected']['text.html']['expected'], 'FAIL')
        self.assertEquals(summary['tests']['failures']['expected']['text.html']['actual'], 'TIMEOUT LEAK PASS PASS')
        self.assertEquals(summary['num_passes'], 1)
        self.assertEquals(summary['num_regressions'], 0)
        self.assertEquals(summary['num_flaky'], 0)
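
Examples #4 through #6 are older snapshots of the same tests: they predate the ResultType enum, use the integer constants on the legacy test_expectations module, and pass an enabled_pixel_tests_in_retry argument that the newer summarize_results no longer takes. Comparing the paired examples suggests the following rough correspondence (inferred from these snippets, not an authoritative mapping):

# Legacy test_expectations constant  ->  newer ResultType value
# PASS                               ->  ResultType.Pass
# TEXT / IMAGE / AUDIO / LEAK        ->  ResultType.Failure (shown as FAIL)
# TIMEOUT                            ->  ResultType.Timeout
# CRASH                              ->  ResultType.Crash
# SKIP                               ->  ResultType.Skip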
Code example #7
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update('Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # Raised if the file given to --test-list doesn't exist.
            return test_run_results.RunDetails(exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names)

        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(tests_in_other_chunks)

        self._printer.print_found(
            len(all_test_names), len(test_names), len(tests_to_run),
            self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # Don't retry failures if an explicit list of tests was passed in.
            should_retry_failures = len(paths) < len(test_names)
            # Retry failures 3 times by default.
            if should_retry_failures:
                self._options.num_retries = 3
        else:
            should_retry_failures = self._options.num_retries > 0

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip, should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry, only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory, 'results.html')
            self._copy_results_html_file(self._results_directory, 'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory, 'results.html'))
                self._printer.print_results(time.time() - start_time, initial_results)

        return test_run_results.RunDetails(
            exit_code, summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
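
One detail worth lifting out of run() above is how the failing-test count becomes the process exit code: num_regressions is used directly but clamped to exit_codes.MAX_FAILURES_EXIT_STATUS so the shell status stays meaningful. A standalone restatement of that rule (the helper name is ours, purely illustrative):

def regressions_to_exit_code(num_regressions, max_failures_status):
    # Mirrors the clamping in run(): report the regression count as the
    # exit code, but never exceed the runner's maximum failure status.
    return min(num_regressions, max_failures_status)

# e.g. regressions_to_exit_code(summary['num_regressions'],
#                               exit_codes.MAX_FAILURES_EXIT_STATUS)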
Code example #8
File: manager.py  Project: Azile-Ttenneb/chromium-1
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # Raised if the file given to --test-list doesn't exist.
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # Restore the test order to user specified order.
            # base.tests() may change the order as it returns tests in the
            # real, external/wpt, virtual order.
            if paths:
                test_names = self._restore_order(paths, test_names)

        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # If --test-list is passed, or if no test narrowing is specified,
            # default to 3 retries. Otherwise [e.g. if tests are being passed by
            # name], default to 0 retries.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0

        should_retry_failures = self._options.num_retries > 0

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                                               all_retry_results)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._artifacts_directory,
                                         'results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if (self._options.show_results
                        and (exit_code or initial_results.total_failures)):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._artifacts_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results)
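
The newer run() also changes when retries happen by default. The rule embedded above is compact enough to restate as a function (an illustrative sketch, not an actual manager.py helper):

def default_num_retries(options, paths, test_names):
    # Mirrors example #8: respect an explicit --num-retries; otherwise
    # retry 3 times when a test list was given or when the command line
    # did not narrow the run down to individually named tests.
    if options.num_retries is not None:
        return options.num_retries
    if options.test_list or len(paths) < len(test_names):
        return 3
    return 0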
Code example #9
def summarized_results(port,
                       expected,
                       passing,
                       flaky,
                       only_include_failing=False,
                       extra_skipped_tests=None):
    test_is_slow = False

    all_retry_results = []
    initial_results = run_results(port, extra_skipped_tests)
    if expected:
        initial_results.add(get_result('passes/text.html', ResultType.Pass),
                            expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/audio.html', ResultType.Failure),
            expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/timeout.html', ResultType.Timeout),
            expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/crash.html', ResultType.Crash),
            expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/leak.html', ResultType.Failure),
            expected, test_is_slow)
    elif passing:
        skipped_result = get_result('passes/skipped/skip.html')
        skipped_result.type = ResultType.Skip
        initial_results.add(skipped_result, True, test_is_slow)

        initial_results.add(get_result('passes/text.html', run_time=1),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/leak.html'),
                            expected, test_is_slow)
    else:
        initial_results.add(
            get_result('passes/text.html', ResultType.Timeout, run_time=1),
            expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       ResultType.Crash,
                       run_time=0.049), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       ResultType.Failure,
                       run_time=0.05), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/crash.html', ResultType.Timeout),
            expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/leak.html', ResultType.Timeout),
            expected, test_is_slow)

        # We only list keyboard.html here, since normally it is WontFix.
        initial_results.add(
            get_result('failures/expected/keyboard.html', ResultType.Skip),
            expected, test_is_slow)

        initial_results.add(
            get_result('failures/expected/text.html', ResultType.Failure),
            expected, test_is_slow)

        all_retry_results = [
            run_results(port, extra_skipped_tests),
            run_results(port, extra_skipped_tests),
            run_results(port, extra_skipped_tests)
        ]

        def add_result_to_all_retries(new_result, expected):
            for run_result in all_retry_results:
                run_result.add(new_result, expected, test_is_slow)

        if flaky:
            add_result_to_all_retries(
                get_result('passes/text.html', ResultType.Pass), True)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html', ResultType.Failure),
                True)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html', ResultType.Failure),
                True)
            add_result_to_all_retries(
                get_result('failures/expected/timeout.html',
                           ResultType.Failure), True)

            all_retry_results[0].add(
                get_result('failures/expected/crash.html', ResultType.Failure),
                False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/crash.html', ResultType.Crash),
                True, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/crash.html', ResultType.Failure),
                False, test_is_slow)

            all_retry_results[0].add(
                get_result('failures/expected/text.html', ResultType.Failure),
                True, test_is_slow)

        else:
            add_result_to_all_retries(
                get_result('passes/text.html', ResultType.Timeout), False)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html', ResultType.Failure),
                False)
            add_result_to_all_retries(
                get_result('failures/expected/crash.html', ResultType.Timeout),
                False)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html', ResultType.Timeout),
                False)

            all_retry_results[0].add(
                get_result('failures/expected/timeout.html',
                           ResultType.Failure), False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/timeout.html', ResultType.Crash),
                False, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/timeout.html',
                           ResultType.Failure), False, test_is_slow)

    return test_run_results.summarize_results(
        port,
        initial_results.expectations,
        initial_results,
        all_retry_results,
        only_include_failing=only_include_failing)
Code example #10
def summarized_results(port,
                       expected,
                       passing,
                       flaky,
                       only_include_failing=False,
                       extra_skipped_tests=None):
    test_is_slow = False

    all_retry_results = []
    initial_results = run_results(port, extra_skipped_tests)
    if expected:
        initial_results.add(
            get_result('passes/text.html', test_expectations.PASS), expected,
            test_is_slow)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       test_expectations.AUDIO), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/crash.html',
                       test_expectations.CRASH), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/leak.html', test_expectations.LEAK),
            expected, test_is_slow)
    elif passing:
        skipped_result = get_result('passes/skipped/skip.html')
        skipped_result.type = test_expectations.SKIP
        initial_results.add(skipped_result, expected, test_is_slow)

        initial_results.add(get_result('passes/text.html', run_time=1),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/audio.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/timeout.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/crash.html'),
                            expected, test_is_slow)
        initial_results.add(get_result('failures/expected/leak.html'),
                            expected, test_is_slow)
    else:
        initial_results.add(
            get_result('passes/text.html',
                       test_expectations.TIMEOUT,
                       run_time=1), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/audio.html',
                       test_expectations.CRASH,
                       run_time=0.049), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/timeout.html',
                       test_expectations.TEXT,
                       run_time=0.05), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/crash.html',
                       test_expectations.TIMEOUT), expected, test_is_slow)
        initial_results.add(
            get_result('failures/expected/leak.html',
                       test_expectations.TIMEOUT), expected, test_is_slow)

        # We only list keyboard.html here, since normally it is WontFix.
        initial_results.add(
            get_result('failures/expected/keyboard.html',
                       test_expectations.SKIP), expected, test_is_slow)

        initial_results.add(
            get_result('failures/expected/text.html', test_expectations.IMAGE),
            expected, test_is_slow)

        all_retry_results = [
            run_results(port, extra_skipped_tests),
            run_results(port, extra_skipped_tests),
            run_results(port, extra_skipped_tests)
        ]

        def add_result_to_all_retries(new_result, expected):
            for run_result in all_retry_results:
                run_result.add(new_result, expected, test_is_slow)

        if flaky:
            add_result_to_all_retries(
                get_result('passes/text.html', test_expectations.PASS), True)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html',
                           test_expectations.AUDIO), True)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html',
                           test_expectations.LEAK), True)
            add_result_to_all_retries(
                get_result('failures/expected/timeout.html',
                           test_expectations.AUDIO), True)

            all_retry_results[0].add(
                get_result('failures/expected/crash.html',
                           test_expectations.AUDIO), False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/crash.html',
                           test_expectations.CRASH), True, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/crash.html',
                           test_expectations.LEAK), False, test_is_slow)

            all_retry_results[0].add(
                get_result('failures/expected/text.html',
                           test_expectations.TEXT), True, test_is_slow)

        else:
            add_result_to_all_retries(
                get_result('passes/text.html', test_expectations.TIMEOUT),
                False)
            add_result_to_all_retries(
                get_result('failures/expected/audio.html',
                           test_expectations.LEAK), False)
            add_result_to_all_retries(
                get_result('failures/expected/crash.html',
                           test_expectations.TIMEOUT), False)
            add_result_to_all_retries(
                get_result('failures/expected/leak.html',
                           test_expectations.TIMEOUT), False)

            all_retry_results[0].add(
                get_result('failures/expected/timeout.html',
                           test_expectations.AUDIO), False, test_is_slow)
            all_retry_results[1].add(
                get_result('failures/expected/timeout.html',
                           test_expectations.CRASH), False, test_is_slow)
            all_retry_results[2].add(
                get_result('failures/expected/timeout.html',
                           test_expectations.LEAK), False, test_is_slow)

    return test_run_results.summarize_results(
        port,
        initial_results.expectations,
        initial_results,
        all_retry_results,
        enabled_pixel_tests_in_retry=False,
        only_include_failing=only_include_failing)
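
Both versions of the summarized_results fixture are typically driven from unit tests that assert on the aggregate counters in the returned dict. A hypothetical caller (make_test_port stands in for whatever mock-port construction the real test harness uses; it is not defined in these snippets):

port = make_test_port()  # assumed helper returning a test/mock Port
summary = summarized_results(port, expected=True, passing=False, flaky=False)
# With every result expected, we'd expect no flakes and no regressions.
assert summary['num_flaky'] == 0
assert summary['num_regressions'] == 0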