Example #1
 def update_test_expectations(self, deleted_tests, renamed_tests):
     """Updates the TestExpectations file entries for tests that have been deleted or renamed."""
     port = self.host.port_factory.get()
     test_expectations = TestExpectations(port, include_overrides=False)
     # Tests for which files don't exist aren't stored in TestExpectationsModel,
     # so methods like TestExpectations.remove_expectation_line don't work; instead
     # we can run through the TestExpectationLine objects that were parsed.
     # FIXME: This won't work for removed or renamed directories with test expectations
     # that are directories rather than individual tests.
     new_lines = []
     changed_lines = []
     for expectation_line in test_expectations.expectations():
         if expectation_line.name in deleted_tests:
             continue
         if expectation_line.name in renamed_tests:
             expectation_line.name = renamed_tests[expectation_line.name]
             # Parsing the file leaves a "path does not exist" warning on lines whose
             # tests have been renamed, and lines that carry warnings are written out
             # using their original string. Clearing the warnings forces the line to
             # be re-serialized when the file is written.
             expectation_line.warnings = []
             changed_lines.append(expectation_line)
         new_lines.append(expectation_line)
     self.host.filesystem.write_text_file(
         port.path_to_generic_test_expectations_file(),
         TestExpectations.list_to_string(
             new_lines, reconstitute_only_these=changed_lines))
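
A note on the pattern: most examples on this page follow the same read, filter, and rewrite flow shown above: build a TestExpectations for a port, walk or edit its parsed lines, and serialize them back with TestExpectations.list_to_string. The sketch below is an illustration of that flow, not project code; the host object and the deleted_tests argument are assumptions standing in for the ones used in Example #1.

    def remove_deleted_tests(host, deleted_tests):
        """Sketch: rewrite the generic TestExpectations file without entries for deleted tests."""
        port = host.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)

        # Keep every parsed TestExpectationLine whose test name is not in the deleted set.
        kept_lines = [line for line in expectations.expectations()
                      if line.name not in deleted_tests]

        # Serialize the surviving lines back out, mirroring the list_to_string
        # calls used throughout these examples.
        host.filesystem.write_text_file(
            port.path_to_generic_test_expectations_file(),
            TestExpectations.list_to_string(kept_lines, reconstitute_only_these=[]))
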
Example #2
    def _update_expectations(self):
        """Updates all test expectations that are affected by the move.
        """
        _log.info('Updating expectations')
        test_expectations = TestExpectations(self._port, include_overrides=False, model_all_expectations=True)

        for expectation in self._get_expectations(test_expectations.model(), self._origin):
            path = expectation.path
            if self._is_child_path(self._origin, path):
                # If the existing expectation is a child of the moved path, we simply replace it
                # with an expectation for the updated path.
                new_path = self._move_path(path, self._origin, self._destination)
                _log.debug('Updating expectation for %s to %s' % (path, new_path))
                test_expectations.remove_expectation_line(path)
                test_expectations.add_expectation_line(self._clone_expectation_line_for_path(expectation, new_path))
            else:
                # If the existing expectation is not a child of the moved path, we have to leave it
                # in place. But we also add a new expectation for the destination path.
                new_path = self._destination
                _log.warning('Copying expectation for %s to %s. You should check that these expectations are still correct.' %
                             (path, new_path))
                test_expectations.add_expectation_line(self._clone_expectation_line_for_path(expectation, new_path))

        expectations_file = self._port.path_to_generic_test_expectations_file()
        self._filesystem.write_text_file(expectations_file,
                                         TestExpectations.list_to_string(test_expectations._expectations, reconstitute_only_these=[]))
        self._scm.add(self._filesystem.relpath(expectations_file, self._scm.checkout_root))
Example #3
    def _update_expectations_files(self, lines_to_remove):
        # FIXME: This routine is way too expensive. We're creating N ports and N TestExpectations
        # objects and (re-)writing the actual expectations file N times, for each test we update.
        # We should be able to update everything in memory, once, and then write the file out a single time.
        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                path = port.path_to_generic_test_expectations_file()
                expectations = TestExpectations(port, include_overrides=False)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        expectationsString = expectations.remove_configuration_from_test(test, test_configuration)
                self._tool.filesystem.write_text_file(path, expectationsString)

            for port_name in self._tool.port_factory.all_port_names():
                port = self._tool.port_factory.get(port_name)
                generic_expectations = TestExpectations(port, tests=[test], include_overrides=False)
                if self._port_skips_test(port, test, generic_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            expectationsString = generic_expectations.remove_configuration_from_test(
                                test, test_configuration
                            )
                    generic_path = port.path_to_generic_test_expectations_file()
                    self._tool.filesystem.write_text_file(generic_path, expectationsString)
Example #4
 def _tests_to_rebaseline(port):
     tests_to_rebaseline = []
     for path, value in port.expectations_dict().items():
         expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
         for test in expectations.get_rebaselining_failures():
             tests_to_rebaseline.append(test)
     return tests_to_rebaseline
Example #5
    def _update_expectations_files(self, port_name):
        port = self._tool.port_factory.get(port_name)

        expectations = TestExpectations(port)
        for path in port.expectations_dict():
            if self._tool.filesystem.exists(path):
                self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures(), path))
Example #6
 def _tests_to_rebaseline(self, port):
     tests_to_rebaseline = {}
     expectations = TestExpectations(port, include_overrides=True)
     for test in expectations.get_rebaselining_failures():
         suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
         tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
     return tests_to_rebaseline
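
The dictionary built above maps each test that needs a rebaseline to the list of baseline suffixes to regenerate. A hypothetical result is sketched below; the test names and suffixes are placeholders, and BASELINE_SUFFIX_LIST is the fallback constant used in the code above.

    # Hypothetical shape of the dictionary returned by _tests_to_rebaseline (illustrative names only).
    tests_to_rebaseline = {
        'fast/dom/example.html': ['txt', 'png'],                   # suffixes derived from the test's expectations
        'paint/invalidation/example.html': BASELINE_SUFFIX_LIST,   # fallback when no suffixes were derived
    }
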
Example #7
    def _update_expectations_files(self, port_name):
        port = self._tool.port_factory.get(port_name)

        expectations = TestExpectations(port)
        for path in port.expectations_dict():
            if self._tool.filesystem.exists(path):
                self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures(), path))
Example #8
    def _update_expectations_files(self, lines_to_remove):
        # FIXME: This routine is way too expensive. We're creating O(n ports) TestExpectations objects.
        # This is slow and uses a lot of memory.
        tests = lines_to_remove.keys()
        to_remove = []

        # This is so we remove lines for builders that skip this test, e.g. Android skips most
        # tests and we don't want to leave stray [ Android ] lines in TestExpectations.
        # This is only necessary for "webkit-patch rebaseline" and for rebaselining expected
        # failures from garden-o-matic. rebaseline-expectations and auto-rebaseline will always
        # pass the exact set of ports to rebaseline.
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            generic_expectations = TestExpectations(port, tests=tests, include_overrides=False)
            full_expectations = TestExpectations(port, tests=tests, include_overrides=True)
            for test in tests:
                if port.skips_test(test, generic_expectations, full_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            to_remove.append((test, test_configuration))

        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        to_remove.append((test, test_configuration))

        port = self._tool.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)
        expectations_string = expectations.remove_configurations(to_remove)
        path = port.path_to_generic_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectations_string)
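
For reference, the lines_to_remove argument consumed by _update_expectations_files maps a test name to the builders whose expectation lines should be dropped. The entries below are hypothetical placeholders, not values taken from the project.

    # Hypothetical input for _update_expectations_files (illustrative names only).
    lines_to_remove = {
        'fast/dom/example.html': ['WebKit Linux', 'WebKit Win'],
        'paint/invalidation/example.html': ['WebKit Mac'],
    }
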
Example #9
 def test_skips_test_port_doesnt_skip_smoke_tests(self):
     port = self.make_port(with_tests=True)
     port.default_smoke_test_only = lambda: False
     self.assertFalse(port.skips_test(
         'failures/expected/image.html',
         generic_expectations=TestExpectations(port, include_overrides=False),
         full_expectations=TestExpectations(port, include_overrides=True)))
Example #10
 def execute(self, options, args, tool):
     port = tool.port_factory.get()
     full_port_name = port.determine_full_port_name(tool, options,
                                                    port.port_name)
     expectations = BotTestExpectationsFactory().expectations_for_port(
         full_port_name)
     print TestExpectations.list_to_string(expectations.expectation_lines())
Example #11
    def _update_expectations_files(self, lines_to_remove):
        # FIXME: This routine is way too expensive. We're creating O(n ports) TestExpectations objects.
        # This is slow and uses a lot of memory.
        tests = lines_to_remove.keys()
        to_remove = []

        # This is so we remove lines for builders that skip this test, e.g. Android skips most
        # tests and we don't want to leave stray [ Android ] lines in TestExpectations.
        # This is only necessary for "webkit-patch rebaseline" and for rebaselining expected
        # failures from garden-o-matic. rebaseline-expectations and auto-rebaseline will always
        # pass the exact set of ports to rebaseline.
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            generic_expectations = TestExpectations(port, tests=tests, include_overrides=False)
            full_expectations = TestExpectations(port, tests=tests, include_overrides=True)
            for test in tests:
                if port.skips_test(test, generic_expectations, full_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            to_remove.append((test, test_configuration))

        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        to_remove.append((test, test_configuration))

        port = self._tool.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)
        expectations_string = expectations.remove_configurations(to_remove)
        path = port.path_to_generic_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectations_string)
Example #12
    def _update_expectations_files(self, lines_to_remove):
        tests = lines_to_remove.keys()
        to_remove = []

        # This is so we remove lines for builders that skip this test.
        # For example, Android skips most tests and we don't want to leave
        # stray [ Android ] lines in TestExpectations.
        # This is only necessary for "blink_tool.py rebaseline".
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            for test in tests:
                if port.skips_test(test):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            to_remove.append((test, test_configuration))

        for test in lines_to_remove:
            for port_name in lines_to_remove[test]:
                port = self._tool.port_factory.get(port_name)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        to_remove.append((test, test_configuration))

        port = self._tool.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)
        expectations_string = expectations.remove_configurations(to_remove)
        path = port.path_to_generic_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectations_string)
Example #13
 def _tests_to_rebaseline(self, port):
     tests_to_rebaseline = {}
     expectations = TestExpectations(port, include_overrides=True)
     for test in expectations.get_rebaselining_failures():
         tests_to_rebaseline[test] = TestExpectations.suffixes_for_expectations(
             expectations.get_expectations(test))
     return tests_to_rebaseline
Example #14
 def _tests_to_rebaseline(port):
     tests_to_rebaseline = {}
     for path, value in port.expectations_dict().items():
         expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
         for test in expectations.get_rebaselining_failures():
             suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
             tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
     return tests_to_rebaseline
Example #15
 def _tests_to_rebaseline(port):
     tests_to_rebaseline = {}
     for path, value in port.expectations_dict().items():
         expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
         for test in expectations.get_rebaselining_failures():
             suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
             tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
     return tests_to_rebaseline
Example #16
 def test_skips_test_in_smoke_tests(self):
     port = self.make_port(with_tests=True)
     port.default_smoke_test_only = lambda: True
     port.host.filesystem.write_text_file(port.path_to_smoke_tests_file(), 'passes/text.html\n')
     self.assertTrue(port.skips_test(
         'failures/expected/image.html',
         generic_expectations=TestExpectations(port, include_overrides=False),
         full_expectations=TestExpectations(port, include_overrides=True)))
Example #17
    def _update_expectations_file(self, builder_name, test_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        expectations = TestExpectations(port, include_overrides=False)

        for test_configuration in port.all_test_configurations():
            if test_configuration.version == port.test_configuration().version:
                expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)

        self._tool.filesystem.write_text_file(port.path_to_test_expectations_file(), expectationsString)
Example #18
    def _update_expectations_file(self, builder_name, test_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        expectations = TestExpectations(port, include_overrides=False)

        for test_configuration in port.all_test_configurations():
            if test_configuration.version == port.test_configuration().version:
                expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)

        self._tool.filesystem.write_text_file(port.path_to_test_expectations_file(), expectationsString)
Example #19
    def _copy_existing_baseline(self, port_name, test_name, suffix):
        """Copies the baseline for the given builder to all "predecessor" directories."""
        baseline_directory = self._tool.port_factory.get(
            port_name).baseline_version_dir()
        ports = [
            self._port_for_primary_baseline(baseline) for baseline in
            self._immediate_predecessors_in_fallback(baseline_directory)
        ]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, '.' + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug('No existing baseline for %s.', test_name)
                continue

            new_baseline = self._tool.filesystem.join(
                port.baseline_version_dir(),
                self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug('Existing baseline at %s, not copying over it.',
                           new_baseline)
                continue

            generic_expectations = TestExpectations(port,
                                                    tests=[test_name],
                                                    include_overrides=False)
            full_expectations = TestExpectations(port,
                                                 tests=[test_name],
                                                 include_overrides=True)
            # TODO(qyearsley): Change Port.skips_test so that this can be simplified.
            if SKIP in full_expectations.get_expectations(test_name):
                _log.debug('%s is skipped (perhaps temporarily) on %s.',
                           test_name, port.name())
                continue
            if port.skips_test(test_name, generic_expectations,
                               full_expectations):
                _log.debug('%s is skipped on %s.', test_name, port.name())
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug('Copying baseline from %s to %s.', old_baseline,
                       new_baseline)
            self._tool.filesystem.maybe_make_directory(
                self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
Example #20
 def test_skips_test_skip_in_full_expectations(self):
     port = self.make_port(with_tests=True)
     port.default_smoke_test_only = lambda: False
     port.host.filesystem.write_text_file(
         port.host.filesystem.join(port.layout_tests_dir(), 'NeverFixTests'),
         'Bug(test) failures/expected/image.html [ WontFix ]\n')
     self.assertTrue(port.skips_test(
         'failures/expected/image.html',
         generic_expectations=TestExpectations(port, include_overrides=False),
         full_expectations=TestExpectations(port, include_overrides=True)))
Example #21
    def _update_expectations_file(self, port_name):
        port = self._tool.port_factory.get(port_name)

        # FIXME: This will intentionally skip over any REBASELINE expectations that were in an overrides file.
        # This is not good, but avoids having the overrides getting written into the main file.
        # See https://bugs.webkit.org/show_bug.cgi?id=88456 for context. This will no longer be needed
        # once we properly support cascading expectations files.
        expectations = TestExpectations(port, include_overrides=False)
        path = port.path_to_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures()))
Example #22
    def _update_expectations_file(self, port_name):
        port = self._tool.port_factory.get(port_name)

        # FIXME: This will intentionally skip over any REBASELINE expectations that were in an overrides file.
        # This is not good, but avoids having the overrides getting written into the main file.
        # See https://bugs.webkit.org/show_bug.cgi?id=88456 for context. This will no longer be needed
        # once we properly support cascading expectations files.
        expectations = TestExpectations(port, include_overrides=False)
        path = port.path_to_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures()))
Example #23
 def _update_expectations_files(self, lines_to_remove):
     for test in lines_to_remove:
         for builder in lines_to_remove[test]:
             port = self._tool.port_factory.get_from_builder_name(builder)
             path = port.path_to_generic_test_expectations_file()
             expectations = TestExpectations(port, include_overrides=False)
             for test_configuration in port.all_test_configurations():
                 if test_configuration.version == port.test_configuration().version:
                     expectationsString = expectations.remove_configuration_from_test(test, test_configuration)
             self._tool.filesystem.write_text_file(path, expectationsString)
Example #24
 def skipped_specifiers(self, test_name):
     """Returns a list of platform specifiers for which the test is skipped."""
     # TODO(qyearsley): Change Port.skips_test so that this can be simplified.
     specifiers = []
     for port in self.all_try_builder_ports():
         generic_expectations = TestExpectations(port, tests=[test_name], include_overrides=False)
         full_expectations = TestExpectations(port, tests=[test_name], include_overrides=True)
         if port.skips_test(test_name, generic_expectations, full_expectations):
             specifiers.append(self.host.builders.version_specifier_for_port_name(port.name()))
     return specifiers
Example #25
    def _port_skips_test(self, port, test, generic_expectations):
        fs = port.host.filesystem
        if port.default_smoke_test_only():
            smoke_test_filename = fs.join(port.layout_tests_dir(), 'SmokeTests')
            if fs.exists(smoke_test_filename) and test not in fs.read_text_file(smoke_test_filename):
                return True

        full_expectations = TestExpectations(port, tests=[test], include_overrides=True)
        return (SKIP in full_expectations.get_expectations(test) and
                SKIP not in generic_expectations.get_expectations(test))
Example #26
 def _make_test_baseline_set(self, tests):
     test_baseline_set = TestBaselineSet(self._tool)
     for builder_name in self._release_builders():
         port_name = self._tool.builders.port_name_for_builder_name(builder_name)
         port = self._tool.port_factory.get(port_name)
         expectations = TestExpectations(port, include_overrides=True)
         for test in expectations.get_needs_rebaseline_failures():
             if test not in tests:
                 continue
             test_baseline_set.add(test, Build(builder_name))
     return test_baseline_set
Example #27
 def did_run_as_expected(self):
     actual_results = self._actual_as_tokens()
     expected_results = self._expected_as_tokens()
     # FIXME: We should only call remove_pixel_failures when this JSONResult
     # came from a test run without pixel tests!
     if not TestExpectations.has_pixel_failures(actual_results):
         expected_results = TestExpectations.remove_pixel_failures(expected_results)
     for actual_result in actual_results:
         if not TestExpectations.result_was_expected(actual_result, expected_results, False):
             return False
     return True
Example #28
    def _update_expectations_files(self, port_name):
        port = self._tool.port_factory.get(port_name)

        expectations = TestExpectations(port)

        rebaseline_tests = expectations.get_rebaselining_failures()
        filtered_rebaseline_tests = [test for test in rebaseline_tests if not port.reference_files(test)]

        for path in port.expectations_dict():
            if self._tool.filesystem.exists(path):
                self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(filtered_rebaseline_tests, path))
Example #29
 def _update_expectations_files(self, lines_to_remove):
     for test in lines_to_remove:
         for builder in lines_to_remove[test]:
             port = self._tool.port_factory.get_from_builder_name(builder)
             path = port.path_to_generic_test_expectations_file()
             expectations = TestExpectations(port, include_overrides=False)
             for test_configuration in port.all_test_configurations():
                 if test_configuration.version == port.test_configuration().version:
                     expectationsString = expectations.remove_configuration_from_test(
                         test, test_configuration)
             self._tool.filesystem.write_text_file(path, expectationsString)
Example #30
 def did_run_as_expected(self):
     actual_results = self._actual_as_tokens()
     expected_results = self._expected_as_tokens()
     # FIXME: We should only call remove_pixel_failures when this JSONResult
     # came from a test run without pixel tests!
     if not TestExpectations.has_pixel_failures(actual_results):
         expected_results = TestExpectations.remove_pixel_failures(
             expected_results)
     for actual_result in actual_results:
         if not TestExpectations.result_was_expected(
                 actual_result, expected_results, False, False):
             return False
     return True
Example #31
    def _port_skips_test(self, port, test, generic_expectations):
        fs = port.host.filesystem
        if port.default_smoke_test_only():
            smoke_test_filename = fs.join(port.layout_tests_dir(),
                                          'SmokeTests')
            if fs.exists(
                    smoke_test_filename) and test not in fs.read_text_file(
                        smoke_test_filename):
                return True

        full_expectations = TestExpectations(port,
                                             tests=[test],
                                             include_overrides=True)
        return (SKIP in full_expectations.get_expectations(test)
                and SKIP not in generic_expectations.get_expectations(test))
Example #32
 def test_update_summary_with_result(self):
     host = MockHost()
     port = host.port_factory.get('test-win-xp')
     test = 'failures/expected/reftest.html'
     expectations = TestExpectations(
         port,
         tests=[test],
         expectations='WONTFIX : failures/expected/reftest.html = IMAGE',
         test_config=port.test_configuration())
     # Reftests expected to be image mismatch should be respected when pixel_tests=False.
     manager = Manager(port=port,
                       options=MockOptions(
                           pixel_tests=False,
                           exit_after_n_failures=None,
                           exit_after_n_crashes_or_timeouts=None),
                       printer=Mock())
     manager._expectations = expectations
     result_summary = ResultSummary(expectations=expectations,
                                    test_files=[test])
     result = TestResult(
         test_name=test,
         failures=[test_failures.FailureReftestMismatchDidNotOccur()])
     manager._update_summary_with_result(result_summary, result)
     self.assertEquals(1, result_summary.expected)
     self.assertEquals(0, result_summary.unexpected)
Example #33
    def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
        flaky_results = set()

        # Always include pass as an expected result. Passes will never turn the bot red.
        # This fixes cases where the expectations have an implicit Pass, e.g. [ Slow ].
        latest_expectations = [PASS]
        if self.results_json.EXPECTATIONS_KEY in results_entry:
            expectations_list = results_entry[
                self.results_json.EXPECTATIONS_KEY].split(' ')
            latest_expectations += [
                self._result_to_enum(expectation)
                for expectation in expectations_list
            ]

        for result_item in results_entry[self.results_json.RESULTS_KEY]:
            _, result_types_str = self.results_json.occurances_and_type_from_result_item(
                result_item)

            result_types = []
            for result_type in result_types_str:
                # TODO(ojan): Remove this if-statement once crbug.com/514378 is fixed.
                if result_type not in self.NON_RESULT_TYPES:
                    result_types.append(
                        self.results_json.expectation_for_type(result_type))

            # It didn't flake if it didn't retry.
            if len(result_types) <= 1:
                continue

            # If the test ran as expected after only one retry, it's not very flaky.
            # It's only very flaky if it failed the first run and the first retry
            # and then ran as expected in one of the subsequent retries.
            # If there are only two entries, then that means it failed on the first
            # try and ran as expected on the second because otherwise we'd have
            # a third entry from the next try.
            if only_ignore_very_flaky and len(result_types) == 2:
                continue

            has_unexpected_results = False
            for result_type in result_types:
                result_enum = self._result_to_enum(result_type)
                # TODO(ojan): We really should be grabbing the expected results from the time
                # of the run instead of looking at the latest expected results. That's a lot
                # more complicated though. So far we've been looking at the aggregated
                # results_small.json off test_results.appspot, which has all the information
                # for the last 100 runs. In order to do this, we'd need to look at the
                # individual runs' full_results.json, which would be slow and more complicated.
                # The only thing we lose by not fixing this is that a test that was flaky
                # and got fixed will still get printed out until 100 runs have passed.
                if not TestExpectations.result_was_expected(
                        result_enum,
                        latest_expectations,
                        test_needs_rebaselining=False):
                    has_unexpected_results = True
                    break

            if has_unexpected_results:
                flaky_results = flaky_results.union(set(result_types))

        return flaky_results
Example #34
    def execute(self, options, args, tool):
        factory = self.expectations_factory()

        # FIXME: WebKit Linux 32 and WebKit Linux have the same specifiers;
        # if we include both of them, we'll get duplicate lines. Ideally
        # Linux 32 would have unique specifiers.
        most_builders = builders.all_builder_names()
        if 'WebKit Linux 32' in most_builders:
            most_builders.remove('WebKit Linux 32')

        lines = self._collect_expectation_lines(most_builders, factory)
        lines.sort(key=lambda line: line.path)

        port = tool.port_factory.get()
        # Skip any tests which are mentioned in the dashboard but not in our checkout:
        fs = tool.filesystem
        lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines)

        # Note: This includes all flaky tests from the dashboard, even ones mentioned
        # in existing TestExpectations. We could certainly load existing TestExpectations
        # and filter accordingly, or update existing TestExpectations instead of FlakyTests.
        flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests')
        flaky_tests_contents = self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines)
        fs.write_text_file(flaky_tests_path, flaky_tests_contents)
        print "Updated %s" % flaky_tests_path

        if options.upload:
            return self._commit_and_upload(tool, options)
Example #35
 def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
     if builder_name not in self.builder_data():
         return set()
     actual_results = self.builder_data()[builder_name].actual_results(test)
     if not actual_results:
         return set()
     return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)
Example #36
 def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
     if builder_name not in self.builder_data():
         return set()
     actual_results = self.builder_data()[builder_name].actual_results(test)
     if not actual_results:
         return set()
     return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)
Example #37
    def execute(self, options, args, tool):
        factory = self.expectations_factory()

        # FIXME: WebKit Linux 32 and WebKit Linux have the same specifiers;
        # if we include both of them, we'll get duplicate lines. Ideally
        # Linux 32 would have unique specifiers.
        most_builders = builders.all_builder_names()
        if 'WebKit Linux 32' in most_builders:
            most_builders.remove('WebKit Linux 32')

        lines = self._collect_expectation_lines(most_builders, factory)
        lines.sort(key=lambda line: line.path)

        port = tool.port_factory.get()
        # Skip any tests which are mentioned in the dashboard but not in our checkout:
        fs = tool.filesystem
        lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines)

        # Note: This includes all flaky tests from the dashboard, even ones mentioned
        # in existing TestExpectations. We could certainly load existing TestExpectations
        # and filter accordingly, or update existing TestExpectations instead of FlakyTests.
        flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests')
        flaky_tests_contents = self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines)
        fs.write_text_file(flaky_tests_path, flaky_tests_contents)
        print "Updated %s" % flaky_tests_path

        if options.upload:
            return self._commit_and_upload(tool, options)
Example #38
 def _model(self, options, port_name, tests):
     port = self._tool.port_factory.get(port_name, options)
     expectations_path = port.path_to_test_expectations_file()
     if not expectations_path in self._expectation_models:
         self._expectation_models[expectations_path] = TestExpectations(
             port, tests).model()
     return self._expectation_models[expectations_path]
Example #39
    def _copy_existing_baseline(self, builder_name, test_name, suffix):
        baseline_directory = self._baseline_directory(builder_name)
        ports = [
            self._port_for_primary_baseline(baseline) for baseline in
            self._immediate_predecessors_in_fallback(baseline_directory)
        ]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(
                port.baseline_path(),
                self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." %
                           new_baseline)
                continue

            expectations = TestExpectations(port, [test_name])
            if SKIP in expectations.get_expectations(test_name):
                _log.debug("%s is skipped on %s." % (test_name, port.name()))
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s." %
                       (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(
                self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm_later(new_baseline)
Example #40
    def _copy_existing_baseline(self, builder_name, test_name, suffix):
        baseline_directory = self._baseline_directory(builder_name)
        ports = [
            self._port_for_primary_baseline(baseline)
            for baseline in self._immediate_predecessors_in_fallback(baseline_directory)
        ]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s.", test_name)
                continue

            new_baseline = self._tool.filesystem.join(
                port.baseline_version_dir(), self._file_name_for_expected_result(test_name, suffix)
            )
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it.", new_baseline)
                continue

            generic_expectations = TestExpectations(port, tests=[test_name], include_overrides=False)
            full_expectations = TestExpectations(port, tests=[test_name], include_overrides=True)
            # TODO(qyearsley): Change Port.skips_test so that this can be simplified.
            if SKIP in full_expectations.get_expectations(test_name):
                _log.debug("%s is skipped (perhaps temporarily) on %s.", test_name, port.name())
                continue
            if port.skips_test(test_name, generic_expectations, full_expectations):
                _log.debug("%s is skipped on %s.", test_name, port.name())
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s.", old_baseline, new_baseline)
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
Example #41
 def _suffixes_for_actual_failures(self, test, builder_name,
                                   existing_suffixes):
     if builder_name not in self.builder_data():
         return set()
     test_result = self.builder_data()[builder_name].result_for_test(test)
     if not test_result:
         return set()
     return set(existing_suffixes) & TestExpectations.suffixes_for_test_result(test_result)
Example #42
    def get_test_prefix_list(self, tests):
        test_prefix_list = {}
        lines_to_remove = {}

        for builder_name in self._release_builders():
            port_name = builders.port_name_for_builder_name(builder_name)
            port = self._tool.port_factory.get(port_name)
            expectations = TestExpectations(port, include_overrides=True)
            for test in expectations.get_needs_rebaseline_failures():
                if test not in tests:
                    continue

                if test not in test_prefix_list:
                    lines_to_remove[test] = []
                    test_prefix_list[test] = {}
                lines_to_remove[test].append(builder_name)
                test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST

        return test_prefix_list, lines_to_remove
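
get_test_prefix_list returns two parallel dictionaries keyed by test name: one mapping each test to per-builder baseline suffixes, the other listing the builders whose expectation lines should later be removed. A hypothetical return value, with placeholder names, is sketched below.

    # Hypothetical return value of get_test_prefix_list (illustrative names only).
    test_prefix_list = {
        'fast/dom/example.html': {
            'WebKit Linux': BASELINE_SUFFIX_LIST,   # rebaseline every default suffix on this builder
        },
    }
    lines_to_remove = {
        'fast/dom/example.html': ['WebKit Linux'],
    }
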
Example #43
    def get_test_prefix_list(self, tests):
        test_prefix_list = {}
        lines_to_remove = {}

        for builder_name in self._release_builders():
            port_name = builders.port_name_for_builder_name(builder_name)
            port = self._tool.port_factory.get(port_name)
            expectations = TestExpectations(port, include_overrides=True)
            for test in expectations.get_needs_rebaseline_failures():
                if test not in tests:
                    continue

                if test not in test_prefix_list:
                    lines_to_remove[test] = []
                    test_prefix_list[test] = {}
                lines_to_remove[test].append(builder_name)
                test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST

        return test_prefix_list, lines_to_remove
Example #44
    def _copy_existing_baseline(self, builder_name, test_name, suffix):
        baseline_directory = self._baseline_directory(builder_name)
        ports = [
            self._port_for_primary_baseline(baseline)
            for baseline in self._immediate_predecessors_in_fallback(baseline_directory)
        ]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(
                port.baseline_path(), self._file_name_for_expected_result(test_name, suffix)
            )
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
                continue

            expectations = TestExpectations(port, [test_name])
            if SKIP in expectations.get_expectations(test_name):
                _log.debug("%s is skipped on %s." % (test_name, port.name()))
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm(new_baseline)
Example #45
    def execute(self, options, args, tool):
        if not options.paths and not args and not options.all:
            print "You must either specify one or more test paths or --all."
            return

        if options.platform:
            port_names = fnmatch.filter(tool.port_factory.all_port_names(),
                                        options.platform)
            if not port_names:
                default_port = tool.port_factory.get(options.platform)
                if default_port:
                    port_names = [default_port.name()]
                else:
                    print "No port names match '%s'" % options.platform
                    return
            else:
                default_port = tool.port_factory.get(port_names[0])
        else:
            default_port = tool.port_factory.get(options=options)
            port_names = [default_port.name()]

        if options.paths:
            files = default_port.expectations_files()
            layout_tests_dir = default_port.layout_tests_dir()
            for file in files:
                if file.startswith(layout_tests_dir):
                    file = file.replace(layout_tests_dir, 'LayoutTests')
                print file
            return

        tests = set(default_port.tests(args))
        for port_name in port_names:
            port = tool.port_factory.get(port_name, options)
            model = TestExpectations(port, tests).model()
            tests_to_print = self._filter_tests(options, model, tests)
            lines = [
                model.get_expectation_line(test)
                for test in sorted(tests_to_print)
            ]
            if port_name != port_names[0]:
                print
            print '\n'.join(self._format_lines(options, port_name, lines))
Example #46
    def execute(self, options, args, tool):
        factory = self.expectations_factory()
        lines = self._collect_expectation_lines(builders.all_builder_names(), factory)
        lines.sort(key=lambda line: line.path)

        port = tool.port_factory.get()
        # Skip any tests which are mentioned in the dashboard but not in our checkout:
        fs = tool.filesystem
        lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines)

        print self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines)  # pylint: disable=E1601
Example #47
    def _update_expectations_file(self, builder_name, test_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)

        # Since rebaseline-test-internal can be called multiple times in parallel,
        # we need to ensure that we're not trying to update the expectations file
        # concurrently as well.
        # FIXME: We should rework the code to not need this; maybe just download
        # the files in parallel and rebaseline local files serially?
        try:
            path = port.path_to_test_expectations_file()
            lock = self._tool.make_file_lock(path + '.lock')
            lock.acquire_lock()
            expectations = TestExpectations(port, include_overrides=False)
            for test_configuration in port.all_test_configurations():
                if test_configuration.version == port.test_configuration().version:
                    expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)

            self._tool.filesystem.write_text_file(path, expectationsString)
        finally:
            lock.release_lock()
Example #48
    def write_test_expectations(self, test_expectations, test_expectations_file):
        """Writes the given TestExpectations object to the filesystem.

        Args:
            test_expectations: The TestExpectations object to write.
            test_expectations_file: The full file path of the Blink
                TestExpectations file. This file will be overwritten.
        """
        self._host.filesystem.write_text_file(
            test_expectations_file,
            TestExpectations.list_to_string(test_expectations, reconstitute_only_these=[]))
Example #49
 def _tests_to_rebaseline(self, port):
     tests_to_rebaseline = {}
     expectations = TestExpectations(port, include_overrides=True)
     expectations.parse_all_expectations()
     for test in expectations.get_rebaselining_failures():
         tests_to_rebaseline[test] = TestExpectations.suffixes_for_expectations(expectations.model().get_expectations(test))
     return tests_to_rebaseline
Example #50
    def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
        flaky_results = set()

        # Always include pass as an expected result. Passes will never turn the bot red.
        # This fixes cases where the expectations have an implicit Pass, e.g. [ Slow ].
        latest_expectations = [PASS]
        if self.results_json.EXPECTATIONS_KEY in results_entry:
            expectations_list = results_entry[self.results_json.EXPECTATIONS_KEY].split(' ')
            latest_expectations += [self._result_to_enum(expectation) for expectation in expectations_list]

        for result_item in results_entry[self.results_json.RESULTS_KEY]:
            _, result_types_str = self.results_json.occurances_and_type_from_result_item(result_item)

            result_types = []
            for result_type in result_types_str:
                # TODO(ojan): Remove this if-statement once crbug.com/514378 is fixed.
                if result_type not in self.NON_RESULT_TYPES:
                    result_types.append(self.results_json.expectation_for_type(result_type))

            # It didn't flake if it didn't retry.
            if len(result_types) <= 1:
                continue

            # If the test ran as expected after only one retry, it's not very flaky.
            # It's only very flaky if it failed the first run and the first retry
            # and then ran as expected in one of the subsequent retries.
            # If there are only two entries, then that means it failed on the first
            # try and ran as expected on the second because otherwise we'd have
            # a third entry from the next try.
            second_result_type_enum_value = self._result_to_enum(result_types[1])
            if only_ignore_very_flaky and len(result_types) == 2:
                continue

            has_unexpected_results = False
            for result_type in result_types:
                result_enum = self._result_to_enum(result_type)
                # TODO(ojan): We really should be grabbing the expected results from the time
                # of the run instead of looking at the latest expected results. That's a lot
                # more complicated though. So far we've been looking at the aggregated
                # results_small.json off test_results.appspot, which has all the information
                # for the last 100 runs. In order to do this, we'd need to look at the
                # individual runs' full_results.json, which would be slow and more complicated.
                # The only thing we lose by not fixing this is that a test that was flaky
                # and got fixed will still get printed out until 100 runs have passed.
                if not TestExpectations.result_was_expected(result_enum, latest_expectations, test_needs_rebaselining=False):
                    has_unexpected_results = True
                    break

            if has_unexpected_results:
                flaky_results = flaky_results.union(set(result_types))

        return flaky_results
Example #51
    def get_updated_test_expectations(self):
        """Filters out passing lines from TestExpectations file.

        Reads the current TestExpectations file and, using results from the
        build bots, removes lines that are passing. That is, removes lines that
        were not needed to keep the bots green.

        Returns:
            A list of test expectation lines with the passing lines filtered out.
        """

        test_expectations = TestExpectations(self._port, include_overrides=False).expectations()
        for expectation in self._expectations_to_remove():
            index = test_expectations.index(expectation)
            test_expectations.remove(expectation)

            # Remove associated comments and whitespace if we've removed the last expectation under
            # a comment block. Only remove a comment block if it's not separated from the test
            # expectation line by whitespace.
            self._remove_associated_comments_and_whitespace(test_expectations, index)

        return test_expectations
Example #52
    def execute(self, options, args, tool):
        if not options.paths and not args and not options.all:
            print "You must either specify one or more test paths or --all."
            return

        if options.platform:
            port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
            if not port_names:
                default_port = tool.port_factory.get(options.platform)
                if default_port:
                    port_names = [default_port.name()]
                else:
                    print "No port names match '%s'" % options.platform
                    return
            else:
                default_port = tool.port_factory.get(port_names[0])
        else:
            default_port = tool.port_factory.get(options=options)
            port_names = [default_port.name()]

        if options.paths:
            files = default_port.expectations_files()
            layout_tests_dir = default_port.layout_tests_dir()
            for file in files:
                if file.startswith(layout_tests_dir):
                    file = file.replace(layout_tests_dir, 'LayoutTests')
                print file
            return

        tests = set(default_port.tests(args))
        for port_name in port_names:
            port = tool.port_factory.get(port_name, options)
            model = TestExpectations(port, tests).model()
            tests_to_print = self._filter_tests(options, model, tests)
            lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
            if port_name != port_names[0]:
                print
            print '\n'.join(self._format_lines(options, port_name, lines))
Example #53
    def execute(self, options, args, tool):
        factory = self.expectations_factory(tool.builders)
        lines = self._collect_expectation_lines(tool.builders.all_builder_names(), factory)
        lines.sort(key=lambda line: line.path)

        port = tool.port_factory.get()
        # Skip any tests which are mentioned in the dashboard but not in our checkout:
        fs = tool.filesystem
        lines = [line for line in lines if fs.exists(fs.join(port.layout_tests_dir(), line.path))]

        test_names = [line.name for line in lines]
        flakiness_dashboard_url = self.FLAKINESS_DASHBOARD_URL % ','.join(test_names)
        expectations_string = TestExpectations.list_to_string(lines)

        print self.OUTPUT % (self.HEADER, expectations_string, flakiness_dashboard_url)
Example #54
 def update_expectations(self, failure_info_list):
     expectation_lines = self._parser.parse(self._path_to_test_expectations_file, self._tool.filesystem.read_text_file(self._path_to_test_expectations_file))
     editor = TestExpectationsEditor(expectation_lines, self)
     updated_expectation_lines = []
     # FIXME: Group failures by testName+failureTypeList.
     for failure_info in failure_info_list:
         expectation_set = set(filter(lambda expectation: expectation is not None,
                                      map(TestExpectations.expectation_from_string, failure_info['failureTypeList'])))
         assert(expectation_set)
         test_name = failure_info['testName']
         assert(test_name)
         builder_name = failure_info['builderName']
         affected_test_configuration_set = self._extrapolator.extrapolate_test_configurations(builder_name)
         updated_expectation_lines.extend(editor.update_expectation(test_name, affected_test_configuration_set, expectation_set))
     self._tool.filesystem.write_text_file(self._path_to_test_expectations_file, TestExpectations.list_to_string(expectation_lines, self._converter, reconstitute_only_these=updated_expectation_lines))
Example #55
    def unexpected_results_by_path(self):
        """For tests with unexpected results, returns original expectations + results."""
        def exp_to_string(exp):
            return (TestExpectations.EXPECTATIONS_TO_STRING.get(exp, None) or
                    TestExpectations.MODIFIERS_TO_STRING.get(exp, None)).upper()

        def string_to_exp(string):
            # Needs a bit more logic than the method above,
            # since a PASS is 0 and evaluates to False.
            result = TestExpectations.EXPECTATIONS.get(string.lower(), None)
            if not result is None:
                return result
            result = TestExpectations.MODIFIERS.get(string.lower(), None)
            if not result is None:
                return result
            raise ValueError(string)

        unexpected_results_by_path = {}
        for test_path, entry in self.results_json.walk_results():
            # Expectations for this test. No expectation defaults to PASS.
            exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, u'PASS')

            # All run-length-encoded results for this test.
            results_dict = entry.get(self.results_json.RESULTS_KEY, {})

            # Set of expectations for this test.
            expectations = set(map(string_to_exp, exp_string.split(' ')))

            # Set of distinct results for this test.
            result_types = self._flaky_types_in_results(results_dict)

            # Distinct results as non-encoded strings.
            result_strings = map(self.results_json.expectation_for_type, result_types)

            # Distinct resulting expectations.
            result_exp = map(string_to_exp, result_strings)

            expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)

            additional_expectations = set(e for e in result_exp if not expected(e))

            # Test did not have unexpected results.
            if not additional_expectations:
                continue

            expectations.update(additional_expectations)
            unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))
        return unexpected_results_by_path
Example #56
    def execute(self, options, args, tool):
        factory = self.expectations_factory()
        lines = self._collect_expectation_lines(builders.all_builder_names(), factory)
        lines.sort(key=lambda line: line.path)

        port = tool.port_factory.get()
        # Skip any tests which are mentioned in the dashboard but not in our checkout:
        fs = tool.filesystem
        lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines)

        # Note: This includes all flaky tests from the dashboard, even ones mentioned
        # in existing TestExpectations. We could certainly load existing TestExpectations
        # and filter accordingly, or update existing TestExpectations instead of FlakyTests.
        flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests')
        flaky_tests_contents = self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines)
        fs.write_text_file(flaky_tests_path, flaky_tests_contents)
        print "Updated %s" % flaky_tests_path

        if options.upload:
            return self._commit_and_upload(tool, options)
Example #57
    def _suffixes_for_actual_failures(self, test, build, existing_suffixes):
        """Gets the baseline suffixes for actual mismatch failures in some results.

        Args:
            test: A full test path string.
            build: A Build object.
            existing_suffixes: A collection of all suffixes to consider.

        Returns:
            A set of file suffix strings.
        """
        results = self._tool.buildbot.fetch_results(build)
        if not results:
            _log.debug("No results found for build %s", build)
            return set()
        test_result = results.result_for_test(test)
        if not test_result:
            _log.debug("No test result for test %s in build %s", test, build)
            return set()
        return set(existing_suffixes) & TestExpectations.suffixes_for_test_result(test_result)
Example #58
 def _update_single_test_expectations_file(self, path, expectation_lines, deleted_tests, renamed_tests):
     """Updates single test expectations file."""
     # FIXME: This won't work for removed or renamed directories with test expectations
     # that are directories rather than individual tests.
     new_lines = []
     changed_lines = []
     for expectation_line in expectation_lines:
         if expectation_line.name in deleted_tests:
             continue
         if expectation_line.name in renamed_tests:
             expectation_line.name = renamed_tests[expectation_line.name]
             # Parsing the file leaves a "path does not exist" warning on lines whose
             # tests have been renamed, and lines that carry warnings are written out
             # using their original string. Clearing the warnings forces the line to
             # be re-serialized when the file is written.
             expectation_line.warnings = []
             changed_lines.append(expectation_line)
         new_lines.append(expectation_line)
     new_file_contents = TestExpectations.list_to_string(new_lines, reconstitute_only_these=changed_lines)
     self.host.filesystem.write_text_file(path, new_file_contents)