def test_set_bug_duplicate(failure_lines, classified_failures, test_matcher):
    """Setting a bug that already belongs to another ClassifiedFailure must
    merge into the existing one, collapse duplicate matches, and delete the
    now-redundant ClassifiedFailure."""
    target = classified_failures[0]
    target.bug_number = 1234
    target.save()

    existing_match = failure_lines[0].matches.all()[0]
    existing_match.score = 0.7
    existing_match.save()

    # Create a second match on the same failure line, pointing at
    # classified_failures[1]; once [1] is merged into [0] this would collide
    # with existing_match on (failure_line_id, classified_failure_id).
    extra_match = FailureMatch(failure_line=failure_lines[0],
                               classified_failure=classified_failures[1],
                               matcher=test_matcher.db_object,
                               score=0.8)
    extra_match.save()
    assert len(failure_lines[0].matches.all()) == 2

    merged = classified_failures[1].set_bug(1234)
    assert merged == classified_failures[0]
    assert merged.bug_number == 1234

    for line in failure_lines:
        line.refresh_from_db()
    existing_match.refresh_from_db()

    # The best classification on the other line still points at the survivor.
    assert failure_lines[1].best_classification == classified_failures[0]

    # The colliding matches were collapsed into a single row...
    surviving_matches = failure_lines[0].matches.all()
    assert len(surviving_matches) == 1
    # ...which kept the higher of the two scores.
    assert surviving_matches[0].score == Decimal("0.8")

    # The merged-away ClassifiedFailure was deleted.
    assert len(
        ClassifiedFailure.objects.filter(id=classified_failures[1].id)) == 0
Example no. 2
0
def classified_failures(request, jm, eleven_jobs_stored, initial_data, failure_lines):
    """Fixture: create one ClassifiedFailure (with a best-scoring FailureMatch)
    for every failure line that belongs to job 1, returning them as a list."""
    from treeherder.model.models import ClassifiedFailure, FailureMatch, Matcher
    from treeherder.autoclassify import detectors

    first_job = jm.get_job(1)[0]

    class TreeherderUnitTestDetector(detectors.Detector):
        def __call__(self, failure_lines):
            pass

    test_matcher = Matcher.objects.register_detector(TreeherderUnitTestDetector)

    def finalize():
        # Undo the global detector registration so later tests start clean.
        Matcher._detector_funcs = {}
        Matcher._matcher_funcs = {}
    request.addfinalizer(finalize)

    created = []
    for line in failure_lines:
        if line.job_guid != first_job["job_guid"]:
            continue
        classified = ClassifiedFailure()
        classified.save()
        FailureMatch(failure_line=line,
                     classified_failure=classified,
                     matcher=test_matcher.db_object,
                     score=1.0,
                     is_best=True).save()
        created.append(classified)

    return created
def test_set_bug_duplicate(failure_lines, classified_failures, test_matcher):
    """set_bug() on a ClassifiedFailure whose bug number already exists on
    another must merge the two, dedupe matches, and delete the duplicate."""
    survivor = classified_failures[0]
    doomed = classified_failures[1]
    survivor.bug_number = 1234
    survivor.save()

    original_match = failure_lines[0].matches.all()[0]
    original_match.score = 0.7
    original_match.save()

    # This match will clash on (failure_line_id, classified_failure_id)
    # once `doomed` is folded into `survivor`.
    FailureMatch(
        failure_line=failure_lines[0],
        classified_failure=doomed,
        matcher=test_matcher.db_object,
        score=0.8).save()
    assert len(failure_lines[0].matches.all()) == 2

    result = doomed.set_bug(1234)
    assert result == survivor
    assert result.bug_number == 1234

    for line in failure_lines:
        line.refresh_from_db()
    original_match.refresh_from_db()

    # The best classification on the other line still points at the survivor.
    assert failure_lines[1].best_classification == survivor

    # Only one match remains on the first line...
    remaining = failure_lines[0].matches.all()
    assert len(remaining) == 1
    # ...and it kept the better of the two scores.
    assert remaining[0].score == Decimal("0.8")

    # The duplicate ClassifiedFailure has been deleted.
    assert len(ClassifiedFailure.objects.filter(id=doomed.id)) == 0
Example no. 4
0
def classified_failures(request, jm, eleven_jobs_stored, failure_lines):
    """Fixture: one ClassifiedFailure per failure line of job 1, each linked
    by a best-scoring FailureMatch from a throwaway test detector."""
    from treeherder.model.models import ClassifiedFailure, FailureMatch, Matcher
    from treeherder.autoclassify import detectors

    target_job = jm.get_job(1)[0]

    class TreeherderUnitTestDetector(detectors.Detector):
        def __call__(self, failure_lines):
            pass

    test_matcher = Matcher.objects.register_detector(
        TreeherderUnitTestDetector)

    def finalize():
        # Clear global matcher/detector registries so other tests are isolated.
        Matcher._detector_funcs = {}
        Matcher._matcher_funcs = {}

    request.addfinalizer(finalize)

    results = []
    for line in failure_lines:
        if line.job_guid == target_job["job_guid"]:
            classified = ClassifiedFailure()
            classified.save()
            FailureMatch(failure_line=line,
                         classified_failure=classified,
                         matcher=test_matcher.db_object,
                         score=1.0,
                         is_best=True).save()
            results.append(classified)

    return results
Example no. 5
0
def match_errors(repository, job_guid):
    """Run every registered matcher over the job's unmatched failure lines,
    record each resulting FailureMatch, then flag the best match per line
    (subject to the autoclassification cutoff ratio)."""
    unmatched = FailureLine.objects.unmatched_for_job(repository, job_guid)

    if not unmatched:
        return

    lines_with_matches = set()

    for matcher in Matcher.objects.registered_matchers():
        for match in matcher(unmatched):
            line = match.failure_line
            line.matches.add(
                FailureMatch(score=match.score,
                             matcher=matcher.db_object,
                             classified_failure=match.classified_failure))
            line.save()
            logger.info("Matched failure %i with intermittent %i" %
                        (line.id, match.classified_failure.id))
            lines_with_matches.add(line)

        # Stop running further matchers once every line has a match.
        if all_lines_matched(unmatched):
            break

    for line in lines_with_matches:
        # TODO: store all matches
        best = line.best_match(AUTOCLASSIFY_CUTOFF_RATIO)
        if best:
            best.is_best = True
            best.save()
Example no. 6
0
    def _update(self, data, email, many=True):
        """Apply user-verified best classifications to failure lines.

        data: iterable of dicts, each with "id" (failure line id) and
              "best_classification" (ClassifiedFailure id, or None).
        email: email of the verifying user, recorded against affected jobs.
        many: when False, return a single serialized line instead of a list.

        Returns a (payload, http_status) tuple.
        """
        by_project = defaultdict(list)

        ids = []
        failure_line_ids = set()
        classification_ids = set()

        for item in data:
            # Bug fix: validate presence BEFORE converting. The original
            # called int(item.get("id")) first, so a missing id raised
            # TypeError and the None check below could never fire.
            raw_id = item.get("id")
            if raw_id is None:
                return "No failure line id provided", 400
            line_id = int(raw_id)

            failure_line_ids.add(line_id)

            if "best_classification" not in item:
                return "No classification id provided", 400

            classification_id = item.get("best_classification")

            if classification_id is not None:
                classification_ids.add(classification_id)

            ids.append((line_id, classification_id))

        failure_lines = as_dict(
            FailureLine.objects.prefetch_related('classified_failures').filter(
                id__in=failure_line_ids), "id")

        if len(failure_lines) != len(failure_line_ids):
            missing = failure_line_ids - set(failure_lines.keys())
            # Bug fix: the ids are ints; str.join requires strings.
            return "No failure line with id: {0}".format(
                ", ".join(str(item) for item in missing)), 404

        classifications = as_dict(
            ClassifiedFailure.objects.filter(id__in=classification_ids), "id")

        if len(classifications) != len(classification_ids):
            missing = classification_ids - set(classifications.keys())
            # Bug fix: stringify ids before joining (see above).
            return "No classification with id: {0}".format(
                ", ".join(str(item) for item in missing)), 404

        for line_id, classification_id in ids:
            failure_line = failure_lines[line_id]
            if classification_id is not None:
                classification = classifications[classification_id]
            else:
                classification = None

            by_project[failure_line.repository.name].append(
                failure_line.job_guid)

            failure_line.best_classification = classification
            failure_line.best_is_verified = True
            failure_line.save()

            # Record a manual match when the verified classification was not
            # already associated with this line by an automatic matcher.
            if (classification is not None and classification
                    not in failure_line.classified_failures.all()):
                manual_detector = Matcher.objects.get(name="ManualDetector")
                match = FailureMatch(failure_line=failure_line,
                                     classified_failure=classification,
                                     matcher=manual_detector,
                                     score=1.0)
                match.save()

        for project, job_guids in by_project.iteritems():
            with JobsModel(project) as jm:
                jobs = jm.get_job_ids_by_guid(job_guids)
                for job in jobs.values():
                    jm.update_after_verification(job["id"], email)

        # Force failure line to be reloaded, including .classified_failures
        rv = FailureLine.objects.prefetch_related(
            'classified_failures').filter(id__in=failure_line_ids)

        if not many:
            rv = rv[0]

        return serializers.FailureLineNoStackSerializer(rv,
                                                        many=many).data, 200
Example no. 7
0
    def _update(self, data, email, many=True):
        """Apply user-verified best classifications to failure lines.

        data: iterable of dicts, each with "id" (failure line id) and
              "best_classification" (ClassifiedFailure id, or None).
        email: email of the verifying user, recorded against affected jobs.
        many: when False, return a single serialized line instead of a list.

        Returns a (payload, http_status) tuple.
        """
        by_project = defaultdict(list)

        ids = []
        failure_line_ids = set()
        classification_ids = set()

        for item in data:
            # Bug fix: validate presence BEFORE converting. The original
            # called int(item.get("id")) first, so a missing id raised
            # TypeError and the None check below could never fire.
            raw_id = item.get("id")
            if raw_id is None:
                return "No failure line id provided", 400
            line_id = int(raw_id)

            failure_line_ids.add(line_id)

            if "best_classification" not in item:
                return "No classification id provided", 400

            classification_id = item.get("best_classification")

            if classification_id is not None:
                classification_ids.add(classification_id)

            ids.append((line_id, classification_id))

        failure_lines = as_dict(
            FailureLine.objects.prefetch_related('classified_failures').filter(
                id__in=failure_line_ids), "id")

        if len(failure_lines) != len(failure_line_ids):
            missing = failure_line_ids - set(failure_lines.keys())
            # Bug fix: the ids are ints; str.join requires strings.
            return "No failure line with id: {0}".format(
                ", ".join(str(item) for item in missing)), 404

        classifications = as_dict(
            ClassifiedFailure.objects.filter(id__in=classification_ids), "id")

        if len(classifications) != len(classification_ids):
            missing = classification_ids - set(classifications.keys())
            # Bug fix: stringify ids before joining (see above).
            return "No classification with id: {0}".format(
                ", ".join(str(item) for item in missing)), 404

        for line_id, classification_id in ids:
            failure_line = failure_lines[line_id]
            if classification_id is not None:
                classification = classifications[classification_id]
            else:
                classification = None

            by_project[failure_line.repository.name].append(failure_line.job_guid)

            failure_line.best_classification = classification
            failure_line.best_is_verified = True
            failure_line.save()

            # Record a manual match when the verified classification was not
            # already associated with this line by an automatic matcher.
            if (classification is not None and
                classification not in failure_line.classified_failures.all()):
                manual_detector = Matcher.objects.get(name="ManualDetector")
                match = FailureMatch(failure_line=failure_line,
                                     classified_failure=classification,
                                     matcher=manual_detector,
                                     score=1.0)
                match.save()

        for project, job_guids in by_project.iteritems():
            with JobsModel(project) as jm:
                jobs = jm.get_job_ids_by_guid(job_guids)
                for job in jobs.values():
                    jm.update_after_verification(job["id"], email)

        # Force failure line to be reloaded, including .classified_failures
        rv = FailureLine.objects.prefetch_related('classified_failures').filter(
            id__in=failure_line_ids)

        if not many:
            rv = rv[0]

        return serializers.FailureLineNoStackSerializer(rv, many=many).data, 200