Example #1
def classified_failures(test_job, text_log_errors_failure_lines, test_matcher,
                        failure_classifications):
    from django.conf import settings
    from treeherder.model.models import ClassifiedFailure
    from treeherder.services.elasticsearch import refresh_index

    _, failure_lines = text_log_errors_failure_lines

    classified_failures = []

    for failure_line in failure_lines:
        if failure_line.job_guid == test_job.guid:
            classified_failure = ClassifiedFailure()
            classified_failure.save()

            failure_line.error.set_classification(
                test_matcher,
                classified_failure,
                mark_best=True,
            )

            classified_failures.append(classified_failure)

    if settings.ELASTICSEARCH_URL:
        refresh_index()

    return classified_failures
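This fixture only touches Elasticsearch when ELASTICSEARCH_URL is configured, calling refresh_index() so that any documents written or updated while classifying are visible to search before the test body runs. refresh_index itself is not shown in these examples; a minimal sketch of such a wrapper built on elasticsearch-py, reusing the es_conn and INDEX_NAME names that appear in the later examples (the body is an assumption, not the actual treeherder implementation):

def refresh_index():
    # Assumption: es_conn is an elasticsearch.Elasticsearch client and
    # INDEX_NAME is the index the failure-line documents are written to.
    # An explicit refresh makes documents indexed during the test visible
    # to search immediately.
    es_conn.indices.refresh(index=INDEX_NAME)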
Example #2
def test_cycle_all_but_one_job(test_repository, failure_classifications,
                               sample_data, sample_push, mock_log_parser,
                               elasticsearch, failure_lines):
    """
    Test cycling all but one job in a group of jobs to confirm there are no
    unexpected deletions
    """

    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push, False)

    # one job should not be deleted, set its submit time to now
    job_not_deleted = Job.objects.get(id=2)
    job_not_deleted.submit_time = datetime.datetime.now()
    job_not_deleted.save()

    extra_objects = {
        'failure_lines': (FailureLine,
                          create_failure_lines(job_not_deleted,
                                               [(test_line, {}),
                                                (test_line, {
                                                    "subtest": "subtest2"
                                                })])),
        'job_details': (JobDetail, [
            JobDetail.objects.create(job=job_not_deleted,
                                     title='test',
                                     value='testvalue')
        ])
    }

    # set the other jobs' submit time to one week ago
    cycle_date_ts = datetime.datetime.now() - datetime.timedelta(weeks=1)
    for job in Job.objects.all().exclude(id=job_not_deleted.id):
        job.submit_time = cycle_date_ts
        job.save()
    num_job_logs_to_be_deleted = JobLog.objects.all().exclude(
        id=job_not_deleted.id).count()
    num_job_logs_before = JobLog.objects.count()

    call_command('cycle_data',
                 'from:treeherder',
                 sleep_time=0,
                 days=1,
                 debug=True)

    assert Job.objects.count() == 1
    assert JobLog.objects.count() == (num_job_logs_before -
                                      num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        assert (set(item.id for item in object_type.objects.all()) == set(
            item.id for item in objects))

    if settings.ELASTICSEARCH_URL:
        refresh_index()
        # get all documents
        indexed_ids = set(int(item['_id']) for item in all_documents())
        expected = set(item.id for item in extra_objects["failure_lines"][1])
        assert indexed_ids == expected
Example #3
def elasticsearch(request):
    from treeherder.services.elasticsearch import reinit_index, refresh_index

    if not settings.ELASTICSEARCH_URL:
        return

    reinit_index()
    refresh_index()
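As listed, this conftest helper has lost its decorator; in the test suite it would be registered as a pytest fixture so that a test gets a freshly re-created index simply by requesting it by name. A hedged sketch of the registration and a consuming test (the decorator placement and the example test are illustrations, not code from the project):

import pytest

@pytest.fixture
def elasticsearch(request):
    ...  # body as in Example #3: reinit_index() followed by refresh_index()

# A test opts in to a clean index by naming the fixture as a parameter, as the
# cycling and storage tests in the other examples here do.
def test_uses_clean_index(elasticsearch):
    ...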
Example #4
def test_cycle_all_but_one_job(test_repository, failure_classifications, sample_data,
                               sample_push, mock_log_parser, elasticsearch,
                               failure_lines):
    """
    Test cycling all but one job in a group of jobs to confirm there are no
    unexpected deletions
    """

    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push, False)

    # one job should not be deleted, set its submit time to now
    job_not_deleted = Job.objects.get(id=2)
    job_not_deleted.submit_time = datetime.datetime.now()
    job_not_deleted.save()

    extra_objects = {
        'failure_lines': (FailureLine,
                          create_failure_lines(
                              job_not_deleted,
                              [(test_line, {}),
                               (test_line, {"subtest": "subtest2"})])),
        'job_details': (JobDetail, [JobDetail.objects.create(
            job=job_not_deleted,
            title='test',
            value='testvalue')])
    }

    # set the other jobs' submit time to one week ago
    cycle_date_ts = datetime.datetime.now() - datetime.timedelta(weeks=1)
    for job in Job.objects.all().exclude(id=job_not_deleted.id):
        job.submit_time = cycle_date_ts
        job.save()
    num_job_logs_to_be_deleted = JobLog.objects.all().exclude(
        id=job_not_deleted.id).count()
    num_job_logs_before = JobLog.objects.count()

    call_command('cycle_data', sleep_time=0, days=1, debug=True)
    refresh_index()

    assert Job.objects.count() == 1
    assert JobLog.objects.count() == (num_job_logs_before -
                                      num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        assert (set(item.id for item in object_type.objects.all()) ==
                set(item.id for item in objects))

    # get all documents
    indexed_ids = set(int(item['_id']) for item in all_documents())
    expected = set(item.id for item in extra_objects["failure_lines"][1])
    assert indexed_ids == expected
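The final assertions read every indexed document back through all_documents(), which is not defined in these snippets. A plausible sketch using the scan helper from elasticsearch-py (the body, es_conn and INDEX_NAME are assumptions):

from elasticsearch.helpers import scan

def all_documents():
    # Yield every document in the failure-line index; each hit carries the
    # '_id' and '_source' keys the assertions above rely on.
    return scan(es_conn, index=INDEX_NAME, query={'query': {'match_all': {}}})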
Example #5
def test_store_no_subtest(elasticsearch):
    doc = {
        'job_guid': '1234',
        'test': 'test',
        'status': 'FAIL',
        'expected': 'PASS',
        'message': 'Example',
    }
    es_conn.index(INDEX_NAME, DOC_TYPE, doc)

    refresh_index()

    docs = list(all_documents())
    assert 'subtest' not in docs[0]['_source']
    assert count_index() == 1
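count_index() is the other helper asserted on here and is also not defined in these snippets; a minimal sketch under the same assumptions (an es_conn client and an INDEX_NAME constant):

def count_index():
    # Number of documents currently visible in the index; the test calls
    # refresh_index() beforehand so the freshly indexed document is counted.
    return es_conn.count(index=INDEX_NAME)['count']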
Example #6
def create_failure_lines(job, failure_line_list,
                         start_line=0):
    failure_lines = []
    for i, (base_data, updates) in enumerate(failure_line_list[start_line:]):
        data = {"job_guid": job.guid,
                "repository": job.repository,
                "line": i + start_line}
        data.update(base_data)
        data.update(updates)
        failure_line = FailureLine(**data)
        failure_line.save()
        failure_line.elastic_search_insert()
        failure_lines.append(failure_line)

    if settings.ELASTICSEARCH_URL:
        refresh_index()

    return failure_lines
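The failure_line_list argument is a sequence of (base_data, overrides) pairs: each base dict is merged with its overrides and a running line number before the FailureLine is saved and indexed. Example #2 calls the helper in exactly this way; a short usage sketch (test_line stands for a dict of base FailureLine fields defined elsewhere in the test fixtures):

failure_lines = create_failure_lines(
    job_not_deleted,
    [(test_line, {}),                        # first line, base data unchanged
     (test_line, {"subtest": "subtest2"})])  # second line overrides the subtest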
Example #7
File: utils.py  Project: ziqri11/treeherder
def create_failure_lines(job, failure_line_list, start_line=0):
    failure_lines = []
    for i, (base_data, updates) in enumerate(failure_line_list[start_line:]):
        data = {
            "job_guid": job.guid,
            "repository": job.repository,
            "line": i + start_line
        }
        data.update(base_data)
        data.update(updates)
        failure_line = FailureLine(**data)
        failure_line.save()
        failure_line.elastic_search_insert()
        failure_lines.append(failure_line)

    if settings.ELASTICSEARCH_URL:
        refresh_index()

    return failure_lines
Example #8
def classified_failures(test_job, text_log_errors_failure_lines, test_matcher,
                        failure_classifications):
    from django.conf import settings
    from treeherder.model.models import ClassifiedFailure
    from treeherder.services.elasticsearch import refresh_index

    _, failure_lines = text_log_errors_failure_lines

    classified_failures = []

    for failure_line in failure_lines:
        if failure_line.job_guid == test_job.guid:
            classified_failure = ClassifiedFailure.objects.create()

            failure_line.error.create_match(test_matcher, classified_failure)
            mark_best_classification(failure_line.error, classified_failure)

            classified_failures.append(classified_failure)

    if settings.ELASTICSEARCH_URL:
        refresh_index()

    return classified_failures
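Example #8 is a variant of the fixture in Example #1: the single set_classification(..., mark_best=True) call is replaced by an explicit create_match() followed by mark_best_classification(), while the conditional refresh_index() at the end stays the same.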