Example #1
import unittest

# Imports for FailureMap, RegressionWindow, Build and MockBuilder are
# omitted in this excerpt.
class FailureMapTest(unittest.TestCase):
    builder1 = MockBuilder("Builder1")
    builder2 = MockBuilder("Builder2")

    build1a = Build(builder1, build_number=22, revision=1233, is_green=True)
    build1b = Build(builder1, build_number=23, revision=1234, is_green=False)
    build2a = Build(builder2, build_number=89, revision=1233, is_green=True)
    build2b = Build(builder2, build_number=90, revision=1235, is_green=False)

    regression_window1 = RegressionWindow(build1a,
                                          build1b,
                                          failing_tests=[u'test1', u'test1'])
    regression_window2 = RegressionWindow(build2a,
                                          build2b,
                                          failing_tests=[u'test1'])

    def _make_failure_map(self):
        failure_map = FailureMap()
        failure_map.add_regression_window(self.builder1,
                                          self.regression_window1)
        failure_map.add_regression_window(self.builder2,
                                          self.regression_window2)
        return failure_map

    def test_failing_revisions(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])

    def test_new_failures(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: False)
        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])

    def test_new_failures_with_old_revisions(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: revision == 1234)
        self.assertEqual(failure_map.failing_revisions(), [])

    def test_new_failures_with_more_old_revisions(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: revision == 1235)
        self.assertEqual(failure_map.failing_revisions(), [1234])

    def test_tests_failing_for(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.tests_failing_for(1234), [u'test1'])

    def test_failing_tests(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.failing_tests(), set([u'test1']))
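
The fixtures above depend on MockBuilder and Build, which are defined elsewhere. A minimal stand-in, inferred only from the constructor calls and accessors visible in these examples (the real classes have a richer interface), might look like this:

# Hypothetical stand-ins, inferred only from how the examples use them;
# not the real MockBuilder/Build implementations.
class MockBuilder(object):
    def __init__(self, name):
        self._name = name

    def name(self):
        return self._name


class Build(object):
    def __init__(self, builder, build_number, revision, is_green):
        self._builder = builder
        self._number = build_number
        self._revision = revision
        self._is_green = is_green

    def revision(self):
        return self._revision

    def is_green(self):
        return self._is_green
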
Example #2
    def _explain_failures_for_builder(self, builder, start_revision):
        print("Examining failures for \"%s\", starting at r%s" %
              (builder.name(), start_revision))
        revision_to_test = start_revision
        build = builder.build_for_revision(revision_to_test,
                                           allow_failed_lookups=True)
        layout_test_results = build.layout_test_results()
        if not layout_test_results:
            # FIXME: This could be made more user friendly.
            print(
                "Failed to load layout test results from %s; can't continue. (start revision = r%s)"
                % (build.results_url(), start_revision))
            return 1

        results_to_explain = set(layout_test_results.failing_tests())
        last_build_with_results = build
        print("Starting at %s" % revision_to_test)
        while results_to_explain and not self._done_explaining():
            revision_to_test -= 1
            new_build = builder.build_for_revision(revision_to_test,
                                                   allow_failed_lookups=True)
            if not new_build:
                print("No build for %s" % revision_to_test)
                continue
            build = new_build
            latest_results = build.layout_test_results()
            if not latest_results:
                print("No results build %s (r%s)" %
                      (build._number, build.revision()))
                continue
            failures = set(latest_results.failing_tests())
            if len(failures) >= 500:
                # FIXME: We may need to move this logic into the LayoutTestResults class.
                # The buildbot stops runs after 500 failures so we don't have full results to work with here.
                print("Too many failures in build %s (r%s), ignoring." %
                      (build._number, build.revision()))
                continue
            fixed_results = results_to_explain - failures
            if not fixed_results:
                print(
                    "No change in build %s (r%s), %s unexplained failures (%s in this build)"
                    % (build._number, build.revision(),
                       len(results_to_explain), len(failures)))
                last_build_with_results = build
                continue
            self.explained_failures.update(fixed_results)
            regression_window = RegressionWindow(build,
                                                 last_build_with_results)
            self._print_blame_information_for_transition(
                regression_window, fixed_results)
            last_build_with_results = build
            results_to_explain -= fixed_results
        if results_to_explain:
            print("Failed to explain failures: %s" % results_to_explain)
            return 1
        print("Explained all results for %s" % builder.name())
        return 0
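
The method above also relies on two members that do not appear in this excerpt: self.explained_failures and self._done_explaining(). A hypothetical minimal host class, shown only to make those dependencies concrete (the class name, attribute layout, and cut-off value are assumptions, not the actual implementation), could be:

# Hypothetical host class; names other than explained_failures and
# _done_explaining, and the cut-off value, are assumptions.
class FailureExplainer(object):
    def __init__(self, max_explanations=10):
        # Failing tests already attributed to a regression window.
        self.explained_failures = set()
        self._max_explanations = max_explanations

    def _done_explaining(self):
        # _explain_failures_for_builder stops walking back through builds
        # once enough failures have been explained.
        return len(self.explained_failures) >= self._max_explanations
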
Example #3
    def find_regression_window(self, red_build, look_back_limit=30):
        if not red_build or red_build.is_green():
            return RegressionWindow(None, None)
        common_failures = None
        current_build = red_build
        build_after_current_build = None
        look_back_count = 0
        while current_build:
            if current_build.is_green():
                # current_build can't possibly have any failures in common
                # with red_build because it's green.
                break
            results = current_build.layout_test_results()
            # We treat a lack of results as if all the tests failed.
            # This occurs, for example, when we can't compile at all.
            if results:
                failures = set(results.failing_tests())
                if common_failures is None:
                    common_failures = failures
                else:
                    common_failures = common_failures.intersection(failures)
                    if not common_failures:
                        # current_build doesn't have any failures in common with
                        # the red build we're worried about.  We assume that any
                        # failures in current_build were due to flakiness.
                        break
            look_back_count += 1
            if look_back_count > look_back_limit:
                return RegressionWindow(None,
                                        current_build,
                                        failing_tests=common_failures)
            build_after_current_build = current_build
            current_build = current_build.previous_build()
        # We must iterate at least once because red_build is red.
        assert build_after_current_build
        # current_build must either be green or have no failures in common
        # with the red build, so we've found our failure transition.
        return RegressionWindow(current_build,
                                build_after_current_build,
                                failing_tests=common_failures)
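
The same walk-back-and-intersect strategy can be shown in isolation. The sketch below is standalone and purely illustrative (the function and its input format are not from the original code): given builds ordered newest to oldest, it intersects failing-test sets until a green build or an empty intersection marks the transition.

def find_transition(builds):
    """builds: list of (is_green, failing_tests) tuples, newest first.

    Returns (index_of_oldest_red_build_in_the_streak, common_failures),
    mirroring the walk-back strategy of find_regression_window above.
    """
    assert builds and not builds[0][0], "expected a red build at the head"
    common_failures = None
    oldest_red_index = 0
    for index, (is_green, failing_tests) in enumerate(builds):
        if is_green:
            break  # a green build ends the failure streak
        failures = set(failing_tests)
        if common_failures is None:
            common_failures = failures
        else:
            common_failures &= failures
            if not common_failures:
                break  # nothing in common; earlier failures were flaky
        oldest_red_index = index
    return oldest_red_index, common_failures


# The regression was introduced between the green build and the build at
# index 1, which still shares both failures with the newest build.
builds = [
    (False, ["fast/dom/a.html", "fast/js/b.html"]),  # newest, red
    (False, ["fast/dom/a.html", "fast/js/b.html"]),
    (True, []),                                      # green
]
print(find_transition(builds))  # -> (1, set of the two shared failures)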