Example #1
    def _update_or_add_new_baselines(self, driver_output, failures):
        """Updates or adds new baselines for the test if necessary."""
        if (test_failures.has_failure_type(test_failures.FailureTimeout,
                                           failures)
                or test_failures.has_failure_type(test_failures.FailureCrash,
                                                  failures)):
            return
        self._save_baseline_data(
            driver_output.text, '.txt',
            test_failures.has_failure_type(test_failures.FailureMissingResult,
                                           failures))
        self._save_baseline_data(
            driver_output.audio, '.wav',
            test_failures.has_failure_type(test_failures.FailureMissingAudio,
                                           failures))
        self._save_baseline_data(
            driver_output.image, '.png',
            test_failures.has_failure_type(test_failures.FailureMissingImage,
                                           failures))
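
All of the `_update_or_add_new_baselines` variants lean on `test_failures.has_failure_type` to decide whether a baseline needs to be written. The helper itself is not part of these listings; the sketch below shows the check it presumably performs, with a simplified stand-in failure hierarchy (the real blinkpy failure classes carry much more state).

# Minimal sketch only: empty marker classes stand in for the real
# blinkpy test_failures classes.
class TestFailure(object):
    pass


class FailureTimeout(TestFailure):
    pass


class FailureCrash(TestFailure):
    pass


def has_failure_type(failure_type, failures):
    """Returns True if any failure in `failures` is of `failure_type`."""
    return any(isinstance(failure, failure_type) for failure in failures)


# Example: a crashed run should never produce new baselines.
failures = [FailureCrash()]
assert has_failure_type(FailureCrash, failures)
assert not has_failure_type(FailureTimeout, failures)
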
Example #2
    def _update_or_add_new_baselines(self, driver_output, failures):
        """Updates or adds new baselines for the test if necessary."""
        if (test_failures.has_failure_type(test_failures.FailureTimeout, failures) or
            test_failures.has_failure_type(test_failures.FailureCrash, failures)):
            return
        # We usually don't want to create a new baseline if there isn't one
        # existing (which usually means this baseline isn't necessary, e.g.
        # an image-only test without text expectation files). However, in the
        # following cases, we do:
        # 1. The failure is MISSING; a baseline is apparently needed.
        # 2. A testharness.js test fails assertions: testharness.js tests
        #    without baselines are implicitly expected to pass all assertions;
        #    if there are failed assertions we need to create a new baseline.
        #    Note that the created baseline might be redundant, but users can
        #    optimize them later with optimize-baselines.
        if self._is_all_pass_testharness_text_not_needing_baseline(driver_output.text):
            driver_output.text = None
        self._save_baseline_data(
            driver_output.text, '.txt',
            test_failures.has_failure_type(test_failures.FailureMissingResult, failures) or
            test_failures.has_failure_type(test_failures.FailureTestHarnessAssertion, failures))
        self._save_baseline_data(
            driver_output.audio, '.wav',
            test_failures.has_failure_type(test_failures.FailureMissingAudio, failures))

        expected_png = driver_output.image
        if self._reference_files:
            _log.warning('Can not rebaseline the image baseline of reftest %s', self._test_name)
            # Let _save_baseline_data remove the '-expected.png' if it exists.
            expected_png = None
        self._save_baseline_data(
            expected_png, '.png',
            test_failures.has_failure_type(test_failures.FailureMissingImage, failures))
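
Example #2 relies on `_save_baseline_data(data, extension, force_create)` both to write new `-expected.*` files and, when given `None` (as for reftest images), to delete a stale baseline. The helper is not shown here; the following is only a rough sketch of the contract the calling code and its comments appear to assume, using a hypothetical path argument instead of the real port/filesystem objects.

import os


def save_baseline_data(baseline_path_without_ext, data, extension,
                       force_create_new_baseline):
    """Sketch of the contract assumed by _update_or_add_new_baselines.

    `baseline_path_without_ext` is a hypothetical stand-in for the path the
    real code derives from the test name (e.g. 'foo/bar/test-expected').
    """
    path = baseline_path_without_ext + extension
    baseline_exists = os.path.exists(path)

    if data is None:
        # e.g. reftest images: remove an obsolete '-expected.png' if present.
        if baseline_exists:
            os.remove(path)
        return

    # Only create a brand-new baseline when explicitly forced (MISSING
    # results, failed testharness.js assertions); otherwise only update
    # baselines that already exist.
    if baseline_exists or force_create_new_baseline:
        mode = 'wb' if isinstance(data, bytes) else 'w'
        with open(path, mode) as baseline_file:
            baseline_file.write(data)
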
Example #3
    def _update_or_add_new_baselines(self, driver_output, failures):
        """Updates or adds new baselines for the test if necessary."""
        if (test_failures.has_failure_type(test_failures.FailureTimeout,
                                           failures)
                or test_failures.has_failure_type(test_failures.FailureCrash,
                                                  failures)):
            return
        # We usually don't want to create a new baseline if there isn't one
        # existing (which usually means this baseline isn't necessary, e.g.
        # an image-first test without text expectation files). However, in the
        # following cases, we do:
        # 1. The failure is MISSING; a baseline is apparently needed.
        # 2. A testharness.js test fails assertions: testharness.js tests
        #    without baselines are implicitly expected to pass all assertions;
        #    if there are failed assertions we need to create a new baseline.
        #    Note that the created baseline might be redundant, but users can
        #    optimize them later with optimize-baselines.
        self._save_baseline_data(
            driver_output.text, '.txt',
            test_failures.has_failure_type(test_failures.FailureMissingResult,
                                           failures)
            or test_failures.has_failure_type(
                test_failures.FailureTestHarnessAssertion, failures))
        self._save_baseline_data(
            driver_output.audio, '.wav',
            test_failures.has_failure_type(test_failures.FailureMissingAudio,
                                           failures))
        self._save_baseline_data(
            driver_output.image, '.png',
            test_failures.has_failure_type(test_failures.FailureMissingImage,
                                           failures))
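
The comment in Example #3 (and the `_is_all_pass_testharness_text_not_needing_baseline` call in Example #2) hinges on being able to tell an "all PASS" testharness.js result apart from one with failed assertions. A rough, simplified detector over the textual output might look like the sketch below; the real blinkpy helpers handle more edge cases (console messages, optional baselines, and so on).

def is_all_pass_testharness_text(text):
    """Sketch: True if `text` looks like a testharness.js result in which
    every assertion passed, i.e. a text baseline would be redundant."""
    if not text:
        return False
    lines = [line.strip() for line in text.splitlines() if line.strip()]
    if not lines or 'testharness.js' not in lines[0]:
        return False  # Not testharness.js output at all.
    # Every result line must be a PASS; FAIL/TIMEOUT/NOTRUN lines mean a
    # baseline is needed to record the expected failure.
    return all(line.startswith('PASS') or line.startswith('Harness')
               or 'testharness.js' in line
               for line in lines)


sample = ('This is a testharness.js-based test.\n'
          'PASS first assertion\n'
          'Harness: the test ran to completion.\n')
assert is_all_pass_testharness_text(sample)
assert not is_all_pass_testharness_text(sample.replace('PASS', 'FAIL'))
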
Example #4
def summarize_results(port_obj,
                      expectations,
                      initial_results,
                      all_retry_results,
                      only_include_failing=False):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'skipped': The number of skipped tests
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_passes': The number of expected and unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
    """
    results = {}
    results['version'] = 3
    all_retry_results = all_retry_results or []

    tbe = initial_results.tests_by_expectation

    results['skipped'] = len(tbe[ResultType.Skip])

    # TODO(dpranke): Some or all of these counters can be removed.
    num_passes = 0
    num_flaky = 0
    num_regressions = 0

    # Calculate the number of failures by types (only in initial results).
    num_failures_by_type = {}
    for expected_result in initial_results.tests_by_expectation:
        tests = initial_results.tests_by_expectation[expected_result]
        num_failures_by_type[expected_result] = len(tests)
    results['num_failures_by_type'] = num_failures_by_type

    # Combine all iterations and retries together into a dictionary with the
    # following structure:
    #    { test_name: [ (result, is_unexpected), ... ], ... }
    # where result is a single TestResult, is_unexpected is a boolean
    # representing whether the result is unexpected in that run.
    merged_results_by_name = collections.defaultdict(list)
    for test_run_results in [initial_results] + all_retry_results:
        # all_results does not include SKIP, so we need results_by_name.
        for test_name, result in test_run_results.results_by_name.iteritems():
            if result.type == ResultType.Skip:
                is_unexpected = test_name in test_run_results.unexpected_results_by_name
                merged_results_by_name[test_name].append(
                    (result, is_unexpected))

        # results_by_name only includes the last result, so we need all_results.
        for result in test_run_results.all_results:
            test_name = result.test_name
            is_unexpected = test_name in test_run_results.unexpected_results_by_name
            merged_results_by_name[test_name].append((result, is_unexpected))

    # Finally, compute the tests dict.
    tests = {}
    for test_name, merged_results in merged_results_by_name.iteritems():
        initial_result = merged_results[0][0]

        if only_include_failing and initial_result.type == ResultType.Skip:
            continue
        exp = expectations.get_expectations(test_name)
        expected_results, bugs = exp.results, exp.reason
        expected = ' '.join(expected_results)
        actual = []
        actual_types = []
        crash_sites = []

        all_pass = True
        has_expected = False
        has_unexpected = False
        has_unexpected_pass = False
        has_stderr = False
        for result, is_unexpected in merged_results:
            actual.append(result.type)
            actual_types.append(result.type)
            crash_sites.append(result.crash_site)

            if result.type != ResultType.Pass:
                all_pass = False
            if result.has_stderr:
                has_stderr = True
            if is_unexpected:
                has_unexpected = True
                if result.type == ResultType.Pass:
                    has_unexpected_pass = True
            else:
                has_expected = True

        # TODO(crbug.com/855255): This code calls a test flaky if it has both
        # expected and unexpected runs (NOT pass and failure); this is generally
        # wrong (really it should just be if there are multiple kinds of results),
        # but this works in the normal case because a test will only be retried
        # if a result is unexpected, and if you get an expected result on the
        # retry, then you did get multiple results. This fails if you get
        # one kind of unexpected failure initially and another kind of
        # unexpected failure on the retry (e.g., TIMEOUT CRASH), or if you
        # explicitly run a test multiple times and get multiple expected results.
        is_flaky = has_expected and has_unexpected

        test_dict = {}
        test_dict['expected'] = expected
        test_dict['actual'] = ' '.join(actual)

        # If a flag was added then add flag specific test expectations to the per test field
        flag_exp = expectations.get_flag_expectations(test_name)
        if flag_exp:
            base_exp = expectations.get_base_expectations(test_name)
            test_dict['flag_expectations'] = list(flag_exp.results)
            test_dict['base_expectations'] = list(base_exp.results)

        # Fields below are optional. To avoid bloating the output results json
        # too much, only add them when they are True or non-empty.

        if is_flaky:
            num_flaky += 1
            test_dict['is_flaky'] = True
        elif all_pass or has_unexpected_pass:
            # We count two situations as a "pass":
            # 1. All test runs pass (which is obviously non-flaky, but does not
            #    imply whether the runs are expected, e.g. they can be all
            #    unexpected passes).
            # 2. The test isn't flaky and has at least one unexpected pass
            #    (which implies all runs are unexpected). One tricky example
            #    that doesn't satisfy #1 is that if a test is expected to
            #    crash but in fact fails and then passes, it will be counted
            #    as "pass".
            num_passes += 1
            if not has_stderr and only_include_failing:
                continue
        elif has_unexpected:
            # Either no retries or all retries failed unexpectedly.
            num_regressions += 1

        rounded_run_time = round(initial_result.test_run_time, 1)
        if rounded_run_time:
            test_dict['time'] = rounded_run_time

        if exp.is_slow_test:
            test_dict['is_slow_test'] = True

        if has_stderr:
            test_dict['has_stderr'] = True

        if bugs:
            test_dict['bugs'] = bugs.split()

        if initial_result.reftest_type:
            test_dict.update(reftest_type=list(initial_result.reftest_type))

        crash_sites = [site for site in crash_sites if site]
        if len(crash_sites) > 0:
            test_dict['crash_site'] = crash_sites[0]

        if test_failures.has_failure_type(test_failures.FailureTextMismatch,
                                          initial_result.failures):
            for failure in initial_result.failures:
                if isinstance(failure, test_failures.FailureTextMismatch):
                    test_dict['text_mismatch'] = \
                        failure.text_mismatch_category()
                    break

        # Note: is_unexpected and is_regression are intended to reflect the
        # *last* result. In the normal use case (stop retrying failures
        # once they pass), this is equivalent to saying that all of the
        # results were unexpected failures.
        last_result = actual_types[-1]
        if not expectations.matches_an_expected_result(test_name, last_result):
            test_dict['is_unexpected'] = True
            if last_result != ResultType.Pass:
                test_dict['is_regression'] = True

        if initial_result.has_repaint_overlay:
            test_dict['has_repaint_overlay'] = True

        test_dict.update(_interpret_test_failures(initial_result.failures))
        for retry_result, is_unexpected in merged_results[1:]:
            # TODO(robertma): Why do we only update unexpected retry failures?
            if is_unexpected:
                test_dict.update(
                    _interpret_test_failures(retry_result.failures))

        for test_result, _ in merged_results:
            for artifact_name, artifacts in \
                test_result.artifacts.artifacts.items():
                artifact_dict = test_dict.setdefault('artifacts', {})
                artifact_dict.setdefault(artifact_name, []).extend(artifacts)

        # Store test hierarchically by directory. e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        #
        # becomes
        # foo: {
        #     bar: {
        #         baz.html: test_dict,
        #         baz1.html: test_dict
        #     }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_regressions'] = num_regressions
    # Does results.html have enough information to compute this itself? (by
    # checking total number of results vs. total number of tests?)
    results['interrupted'] = initial_results.interrupted
    results['layout_tests_dir'] = port_obj.web_tests_dir()
    results['seconds_since_epoch'] = int(time.time())
    results['build_number'] = port_obj.get_option('build_number')
    results['builder_name'] = port_obj.get_option('builder_name')
    if port_obj.get_option('order') == 'random':
        results['random_order_seed'] = port_obj.get_option('seed')
    results['path_delimiter'] = '/'

    # If there is a flag name then add the flag name field
    if expectations.flag_name:
        results['flag_name'] = expectations.flag_name

    # Don't do this by default since it takes >100ms.
    # It's only used for rebaselining and uploading data to the flakiness dashboard.
    results['chromium_revision'] = ''
    if port_obj.get_option('builder_name'):
        path = port_obj.repository_path()
        git = port_obj.host.git(path=path)
        if git:
            results['chromium_revision'] = str(git.commit_position(path))
        else:
            _log.warning(
                'Failed to determine chromium commit position for %s, '
                'leaving "chromium_revision" key blank in full_results.json.',
                path)

    return results
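
Both versions of `summarize_results` store each `test_dict` in a path trie (see the `foo/bar/baz.html` comment above the final loop). Pulled out of the function, that nesting step reduces to the small, self-contained sketch below; the helper name is ours, not blinkpy's.

def add_path_to_trie(trie, test_name, test_dict):
    """Sketch of the hierarchical storage used for results['tests']:
    'foo/bar/baz.html' becomes trie['foo']['bar']['baz.html'] = test_dict."""
    parts = test_name.split('/')
    current_map = trie
    for part in parts[:-1]:
        current_map = current_map.setdefault(part, {})
    current_map[parts[-1]] = test_dict


tests = {}
add_path_to_trie(tests, 'foo/bar/baz.html', {'actual': 'PASS'})
add_path_to_trie(tests, 'foo/bar/baz1.html', {'actual': 'FAIL'})
assert tests == {'foo': {'bar': {'baz.html': {'actual': 'PASS'},
                                 'baz1.html': {'actual': 'FAIL'}}}}
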
Example #5
def summarize_results(port_obj,
                      expectations,
                      initial_results,
                      all_retry_results,
                      enabled_pixel_tests_in_retry,
                      only_include_failing=False):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'fixable': The number of fixable tests (NOW - PASS)
        'skipped': The number of skipped tests (NOW & SKIPPED)
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_passes': The number of expected and unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
    """
    results = {}
    results['version'] = 3
    all_retry_results = all_retry_results or []

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] -
                             tbe[test_expectations.PASS])
    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
    results['skipped'] = len(tbt[test_expectations.NOW]
                             & tbe[test_expectations.SKIP])

    # TODO(dpranke): Some or all of these counters can be removed.
    num_passes = 0
    num_flaky = 0
    num_regressions = 0

    keywords = test_expectations.TestExpectations.EXPECTATIONS_TO_STRING

    # Calculate the number of failures by types (only in initial results).
    num_failures_by_type = {}
    for expectation in initial_results.tests_by_expectation:
        tests = initial_results.tests_by_expectation[expectation]
        if expectation != test_expectations.WONTFIX:
            tests &= tbt[test_expectations.NOW]
        num_failures_by_type[keywords[expectation]] = len(tests)
    results['num_failures_by_type'] = num_failures_by_type

    # Combine all iterations and retries together into a dictionary with the
    # following structure:
    #    { test_name: [ (result, is_unexpected), ... ], ... }
    # where result is a single TestResult, is_unexpected is a boolean
    # representing whether the result is unexpected in that run.
    merged_results_by_name = collections.defaultdict(list)
    for test_run_results in [initial_results] + all_retry_results:
        # all_results does not include SKIP, so we need results_by_name.
        for test_name, result in test_run_results.results_by_name.iteritems():
            if result.type == test_expectations.SKIP:
                is_unexpected = test_name in test_run_results.unexpected_results_by_name
                merged_results_by_name[test_name].append(
                    (result, is_unexpected))

        # results_by_name only includes the last result, so we need all_results.
        for result in test_run_results.all_results:
            test_name = result.test_name
            is_unexpected = test_name in test_run_results.unexpected_results_by_name
            merged_results_by_name[test_name].append((result, is_unexpected))

    # Finally, compute the tests dict.
    tests = {}
    for test_name, merged_results in merged_results_by_name.iteritems():
        initial_result = merged_results[0][0]

        if only_include_failing and initial_result.type == test_expectations.SKIP:
            continue

        expected = expectations.get_expectations_string(test_name)
        actual = []
        actual_types = []
        crash_sites = []

        all_pass = True
        has_expected = False
        has_unexpected = False
        has_unexpected_pass = False
        has_stderr = False
        for result, is_unexpected in merged_results:
            actual.append(keywords[result.type])
            actual_types.append(result.type)
            crash_sites.append(result.crash_site)

            if result.type != test_expectations.PASS:
                all_pass = False
            if result.has_stderr:
                has_stderr = True
            if is_unexpected:
                has_unexpected = True
                if result.type == test_expectations.PASS:
                    has_unexpected_pass = True
            else:
                has_expected = True
        # A test is flaky if it has both expected and unexpected runs (NOT pass
        # and failure).
        is_flaky = has_expected and has_unexpected

        if len(set(actual)) == 1:
            actual = [actual[0]]
            actual_types = [actual_types[0]]

        if is_flaky:
            num_flaky += 1
        elif all_pass or has_unexpected_pass:
            # We count two situations as a "pass":
            # 1. All test runs pass (which is obviously non-flaky, but does not
            #    imply whether the runs are expected, e.g. they can be all
            #    unexpected passes).
            # 2. The test isn't flaky and has at least one unexpected pass
            #    (which implies all runs are unexpected). One tricky example
            #    that doesn't satisfy #1 is that if a test is expected to
            #    crash but in fact fails and then passes, it will be counted
            #    as "pass".
            num_passes += 1
            if not has_stderr and only_include_failing:
                continue
        elif has_unexpected and result.type != test_expectations.SKIP:
            # Either no retries or all retries failed unexpectedly.
            # TODO(robertma): When will there be unexpected skip? Do we really
            # want to ignore them when counting regressions?
            num_regressions += 1

        test_dict = {}

        test_dict['expected'] = expected
        test_dict['actual'] = ' '.join(actual)

        # Fields below are optional. To avoid bloating the output results json
        # too much, only add them when they are True or non-empty.

        rounded_run_time = round(initial_result.test_run_time, 1)
        if rounded_run_time:
            test_dict['time'] = rounded_run_time

        if has_stderr:
            test_dict['has_stderr'] = True

        expectation_line = expectations.model().get_expectation_line(test_name)
        bugs = expectation_line.bugs
        if bugs:
            test_dict['bugs'] = bugs
        if expectation_line.flag_expectations:
            test_dict['flag_expectations'] = expectation_line.flag_expectations

        base_expectations = expectation_line.base_expectations
        if base_expectations:
            test_dict['base_expectations'] = base_expectations

        if initial_result.reftest_type:
            test_dict.update(reftest_type=list(initial_result.reftest_type))

        crash_sites = [site for site in crash_sites if site]
        if len(crash_sites) > 0:
            test_dict['crash_site'] = crash_sites[0]

        if test_failures.has_failure_type(test_failures.FailureTextMismatch,
                                          initial_result.failures):
            for failure in initial_result.failures:
                if isinstance(failure, test_failures.FailureTextMismatch):
                    test_dict[
                        'text_mismatch'] = failure.text_mismatch_category()
                    break

        def is_expected(actual_result):
            return expectations.matches_an_expected_result(
                test_name, actual_result,
                port_obj.get_option('pixel_tests')
                or initial_result.reftest_type,
                port_obj.get_option('enable_sanitizer'))

        # Note: is_unexpected is intended to capture the *last* result. In the
        # normal use case (stop retrying failures once they pass), this is
        # equivalent to checking if none of the results is expected.
        if not any(
                is_expected(actual_result) for actual_result in actual_types):
            test_dict['is_unexpected'] = True

        if initial_result.has_repaint_overlay:
            test_dict['has_repaint_overlay'] = True

        test_dict.update(_interpret_test_failures(initial_result.failures))
        for retry_result, is_unexpected in merged_results[1:]:
            # TODO(robertma): Why do we only update unexpected retry failures?
            if is_unexpected:
                test_dict.update(
                    _interpret_test_failures(retry_result.failures))

        # Store test hierarchically by directory. e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        #
        # becomes
        # foo: {
        #     bar: {
        #         baz.html: test_dict,
        #         baz1.html: test_dict
        #     }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_regressions'] = num_regressions
    # Does results.html have enough information to compute this itself? (by
    # checking total number of results vs. total number of tests?)
    results['interrupted'] = initial_results.interrupted
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
    results['seconds_since_epoch'] = int(time.time())
    results['build_number'] = port_obj.get_option('build_number')
    results['builder_name'] = port_obj.get_option('builder_name')
    if port_obj.get_option('order') == 'random':
        results['random_order_seed'] = port_obj.get_option('seed')
    results['path_delimiter'] = '/'
    results['flag_name'] = expectations.model().get_flag_name()

    # Don't do this by default since it takes >100ms.
    # It's only used for rebaselining and uploading data to the flakiness dashboard.
    results['chromium_revision'] = ''
    if port_obj.get_option('builder_name'):
        path = port_obj.repository_path()
        git = port_obj.host.git(path=path)
        if git:
            results['chromium_revision'] = str(git.commit_position(path))
        else:
            _log.warning(
                'Failed to determine chromium commit position for %s, '
                'leaving "chromium_revision" key blank in full_results.json.',
                path)

    return results
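
The pass/flaky/regression bookkeeping is the part of `summarize_results` that changed least between the two versions. Reduced to a standalone helper (the helper name and the `(result_type, is_unexpected)` tuples below are illustrative only), the classification described in the comments roughly amounts to:

def classify_runs(merged_results):
    """Sketch: classify one test's merged runs the way summarize_results does.

    `merged_results` is a list of (result_type, is_unexpected) tuples, e.g.
    [('FAIL', True), ('PASS', False)].  Returns 'flaky', 'pass',
    'regression' or 'other'.
    """
    all_pass = all(result_type == 'PASS' for result_type, _ in merged_results)
    has_expected = any(not unexpected for _, unexpected in merged_results)
    has_unexpected = any(unexpected for _, unexpected in merged_results)
    has_unexpected_pass = any(
        unexpected and result_type == 'PASS'
        for result_type, unexpected in merged_results)

    # A test counts as flaky if it has both expected and unexpected runs
    # (see the TODO(crbug.com/855255) comment for the known caveats).
    if has_expected and has_unexpected:
        return 'flaky'
    if all_pass or has_unexpected_pass:
        return 'pass'
    if has_unexpected:
        # Example #5 additionally excludes unexpectedly skipped tests here.
        return 'regression'
    return 'other'


# Unexpected failure, then an expected pass on retry -> flaky.
assert classify_runs([('FAIL', True), ('PASS', False)]) == 'flaky'
# Expected to crash, but fails and then passes (both unexpected) -> pass.
assert classify_runs([('FAIL', True), ('PASS', True)]) == 'pass'
# Fails unexpectedly on every run -> regression.
assert classify_runs([('FAIL', True), ('FAIL', True)]) == 'regression'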