Example #1
def test_cycle_all_data(test_repository, failure_classifications, sample_data,
                        sample_resultset, mock_log_parser, failure_lines):
    """
    Test cycling the sample data
    """
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_resultset, False)

    # set the submit time to be a week before today
    cycle_date_ts = datetime.datetime.now() - datetime.timedelta(weeks=1)
    for job in Job.objects.all():
        job.submit_time = cycle_date_ts
        job.save()

    call_command('cycle_data', sleep_time=0, days=1)

    refresh_all()

    # There should be no jobs or failure lines after cycling
    assert Job.objects.count() == 0
    assert FailureLine.objects.count() == 0
    assert JobDetail.objects.count() == 0
    assert JobLog.objects.count() == 0

    # There should be nothing in elastic search after cycling
    assert TestFailureLine.search().count() == 0
Example #2
def test_cycle_all_data_in_chunks(test_repository, failure_classifications, sample_data,
                                  sample_resultset, mock_log_parser):
    """
    Test cycling the sample data in chunks.
    """
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_resultset, False)

    # build a date that will cause the data to be cycled
    cycle_date_ts = datetime.datetime.now() - datetime.timedelta(weeks=1)
    for job in Job.objects.all():
        job.submit_time = cycle_date_ts
        job.save()

    create_failure_lines(Job.objects.get(id=1),
                         [(test_line, {})] * 7)

    assert TestFailureLine.search().count() > 0

    call_command('cycle_data', sleep_time=0, days=1, chunk_size=3)
    refresh_all()

    # There should be no jobs after cycling
    assert Job.objects.count() == 0
    assert FailureLine.objects.count() == 0
    assert JobDetail.objects.count() == 0
    assert TestFailureLine.search().count() == 0
Example #3
def elasticsearch(request):
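    """Reset Elasticsearch: delete each doctype's index (ignoring 404s), refresh, then re-create the mapping."""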
    from treeherder.model.search import connection, doctypes, refresh_all

    for item in doctypes():
        connection.indices.delete(item._doc_type.index, ignore=404)
        refresh_all()
        item.init()
Example #4
def test_cycle_all_but_one_job(test_repository, failure_classifications,
                               sample_data, sample_resultset, mock_log_parser,
                               elasticsearch, failure_lines):
    """
    Test cycling all but one job in a group of jobs to confirm there are no
    unexpected deletions
    """

    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_resultset,
                                False)

    # one job should not be deleted, set its submit time to now
    job_not_deleted = Job.objects.get(id=2)
    job_not_deleted.submit_time = datetime.datetime.now()
    job_not_deleted.save()

    extra_objects = {
        'failure_lines': (FailureLine,
                          create_failure_lines(job_not_deleted,
                                               [(test_line, {}),
                                                (test_line, {
                                                    "subtest": "subtest2"
                                                })])),
        'job_details': (JobDetail, [
            JobDetail.objects.create(job=job_not_deleted,
                                     title='test',
                                     value='testvalue')
        ])
    }

    # set the other jobs' submit times to a week ago
    cycle_date_ts = datetime.datetime.now() - datetime.timedelta(weeks=1)
    for job in Job.objects.all().exclude(id=job_not_deleted.id):
        job.submit_time = cycle_date_ts
        job.save()
    num_job_logs_to_be_deleted = JobLog.objects.all().exclude(
        job__id=job_not_deleted.id).count()
    num_job_logs_before = JobLog.objects.count()

    call_command('cycle_data', sleep_time=0, days=1, debug=True)
    refresh_all()

    assert Job.objects.count() == 1
    assert JobLog.objects.count() == (num_job_logs_before -
                                      num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        assert (set(item.id for item in object_type.objects.all()) == set(
            item.id for item in objects))

    assert set(
        int(item.meta.id)
        for item in TestFailureLine.search().execute()) == set(
            item.id for item in extra_objects["failure_lines"][1])
Example #5
def test_store_no_subtest(elasticsearch):
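    """Storing a TestFailureLine without a subtest should persist an empty string."""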
    doc = search.TestFailureLine(job_guid="1234",
                                 test="test",
                                 status="FAIL",
                                 expected="PASS",
                                 message="Example")
    doc.save()
    assert doc.subtest == ""
    search.refresh_all()

    docs = search.TestFailureLine.search().execute()
    assert len(docs) == 1
    assert docs[0].subtest == ""
Example #6
def test_cycle_all_but_one_job(test_repository, failure_classifications, sample_data,
                               sample_resultset, mock_log_parser, elasticsearch,
                               failure_lines):
    """
    Test cycling all but one job in a group of jobs to confirm there are no
    unexpected deletions
    """

    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(test_repository, job_data, sample_resultset, False)

    # one job should not be deleted, set its submit time to now
    job_not_deleted = Job.objects.get(id=2)
    job_not_deleted.submit_time = datetime.datetime.now()
    job_not_deleted.save()

    extra_objects = {
        'failure_lines': (FailureLine,
                          create_failure_lines(
                              job_not_deleted,
                              [(test_line, {}),
                               (test_line, {"subtest": "subtest2"})])),
        'job_details': (JobDetail, [JobDetail.objects.create(
            job=job_not_deleted,
            title='test',
            value='testvalue')])
    }

    # set the other jobs' submit times to a week ago
    cycle_date_ts = datetime.datetime.now() - datetime.timedelta(weeks=1)
    for job in Job.objects.all().exclude(id=job_not_deleted.id):
        job.submit_time = cycle_date_ts
        job.save()
    num_job_logs_to_be_deleted = JobLog.objects.all().exclude(
        job__id=job_not_deleted.id).count()
    num_job_logs_before = JobLog.objects.count()

    call_command('cycle_data', sleep_time=0, days=1, debug=True)
    refresh_all()

    assert Job.objects.count() == 1
    assert JobLog.objects.count() == (num_job_logs_before -
                                      num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        assert (set(item.id for item in object_type.objects.all()) ==
                set(item.id for item in objects))

    assert set(int(item.meta.id)
               for item in TestFailureLine.search().execute()) == set(
                   item.id for item in extra_objects["failure_lines"][1])
Example #7
def create_failure_lines(repository, job_guid, failure_line_list):
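    """Create FailureLine rows for job_guid from (base_data, updates) pairs,
    saving each to the database and to Elasticsearch, and return them.
    """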
    failure_lines = []
    for i, (base_data, updates) in enumerate(failure_line_list):
        data = {"job_guid": job_guid,
                "repository": repository,
                "line": i}
        data.update(base_data)
        data.update(updates)
        failure_line = FailureLine(**data)
        failure_line.save()
        failure_line.elastic_search_insert()
        failure_lines.append(failure_line)

    refresh_all()

    return failure_lines
Example #8
def classified_failures(test_job, failure_lines, test_matcher,
                        failure_classifications):
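    """Create a ClassifiedFailure for each of test_job's failure lines and set
    it as the line's best classification.
    """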
    from treeherder.model.models import ClassifiedFailure
    from treeherder.model.search import refresh_all

    classified_failures = []

    for failure_line in failure_lines:
        if failure_line.job_guid == test_job.guid:
            classified_failure = ClassifiedFailure()
            classified_failure.save()
            failure_line.set_classification(test_matcher.db_object,
                                            classified_failure,
                                            mark_best=True)
            classified_failures.append(classified_failure)

    refresh_all()
    return classified_failures
Example #9
def test_cycle_all_data_in_chunks(jm, sample_data,
                                  sample_resultset, test_repository, mock_log_parser):
    """
    Test cycling the sample data in chunks.
    """
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(jm, job_data, sample_resultset, False)

    # build a date that will cause the data to be cycled
    time_now = time.time()
    cycle_date_ts = int(time_now - 7 * 24 * 3600)

    jm.execute(
        proc="jobs_test.updates.set_jobs_submit_timestamp",
        placeholders=[cycle_date_ts]
    )

    jobs_to_be_deleted = jm.execute(
        proc="jobs_test.selects.get_jobs_for_cycling",
        placeholders=[time_now - 24 * 3600]
    )

    job = jm.get_job(jobs_to_be_deleted[0]['id'])[0]
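    # attach several failure lines to a job that is about to be cycled; they
    # should be removed (from the DB and Elasticsearch) along with it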
    create_failure_lines(test_repository,
                         job["job_guid"],
                         [(test_line, {})] * 7)

    jobs_before = jm.execute(proc="jobs_test.selects.jobs")

    assert TestFailureLine.search().params(search_type="count").execute().hits.total > 0

    call_command('cycle_data', sleep_time=0, days=1, chunk_size=3)
    refresh_all()

    jobs_after = jm.execute(proc="jobs_test.selects.jobs")

    assert len(jobs_after) == len(jobs_before) - len(jobs_to_be_deleted)

    # There should be no jobs after cycling
    assert len(jobs_after) == 0
    assert Job.objects.count() == 0
    assert FailureLine.objects.count() == 0
    assert JobDetail.objects.count() == 0
    assert TestFailureLine.search().params(search_type="count").execute().hits.total == 0
Example #10
def test_cycle_all_data_in_chunks(jm, sample_data,
                                  sample_resultset, test_repository, mock_log_parser):
    """
    Test cycling the sample data in chunks.
    """
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(jm, job_data, sample_resultset, False)

    # build a date that will cause the data to be cycled
    time_now = time.time()
    cycle_date_ts = int(time_now - 7 * 24 * 3600)

    jm.execute(
        proc="jobs_test.updates.set_jobs_last_modified",
        placeholders=[cycle_date_ts]
    )

    jobs_to_be_deleted = jm.execute(
        proc="jobs_test.selects.get_jobs_for_cycling",
        placeholders=[time_now - 24 * 3600]
    )

    job = jm.get_job(jobs_to_be_deleted[0]['id'])[0]
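    # attach several failure lines to a job that is about to be cycled; they
    # should be removed (from the DB and Elasticsearch) along with it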
    create_failure_lines(test_repository,
                         job["job_guid"],
                         [(test_line, {})] * 7)

    jobs_before = jm.execute(proc="jobs_test.selects.jobs")

    assert TestFailureLine.search().params(search_type="count").execute().hits.total > 0

    call_command('cycle_data', sleep_time=0, days=1, chunk_size=3)
    refresh_all()

    jobs_after = jm.execute(proc="jobs_test.selects.jobs")

    assert len(jobs_after) == len(jobs_before) - len(jobs_to_be_deleted)

    # There should be no jobs after cycling
    assert len(jobs_after) == 0
    assert Job.objects.count() == 0
    assert FailureLine.objects.count() == 0
    assert JobDetail.objects.count() == 0
    assert TestFailureLine.search().params(search_type="count").execute().hits.total == 0
Example #11
def classified_failures(request, jm, eleven_jobs_stored, failure_lines,
                        test_matcher, failure_classifications):
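    """Create a ClassifiedFailure for each failure line belonging to job 1 and
    set it as the line's best classification.
    """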
    from treeherder.model.models import ClassifiedFailure
    from treeherder.model.search import refresh_all

    job_1 = jm.get_job(1)[0]

    classified_failures = []

    for failure_line in failure_lines:
        if failure_line.job_guid == job_1["job_guid"]:
            classified_failure = ClassifiedFailure()
            classified_failure.save()
            failure_line.set_classification(test_matcher.db_object, classified_failure,
                                            mark_best=True)
            classified_failures.append(classified_failure)

    refresh_all()
    return classified_failures
Example #12
def test_cycle_all_data(jm, sample_data,
                        sample_resultset, test_repository, mock_log_parser,
                        failure_lines):
    """
    Test cycling the sample data
    """
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(jm, job_data, sample_resultset, False)

    time_now = time.time()
    cycle_date_ts = time_now - 7 * 24 * 3600

    jm.execute(
        proc="jobs_test.updates.set_jobs_submit_timestamp",
        placeholders=[cycle_date_ts]
    )

    jobs_to_be_deleted = jm.execute(
        proc="jobs_test.selects.get_jobs_for_cycling",
        placeholders=[time_now - 24 * 3600]
    )

    jobs_before = jm.execute(proc="jobs_test.selects.jobs")

    call_command('cycle_data', sleep_time=0, days=1)

    refresh_all()

    jobs_after = jm.execute(proc="jobs_test.selects.jobs")

    assert len(jobs_after) == len(jobs_before) - len(jobs_to_be_deleted)

    # There should be no jobs or failure lines after cycling
    assert len(jobs_after) == 0
    assert FailureLine.objects.count() == 0
    assert Job.objects.count() == 0
    assert JobDetail.objects.count() == 0
    assert JobLog.objects.count() == 0

    # There should be nothing in elastic search after cycling
    assert TestFailureLine.search().params(search_type="count").execute().hits.total == 0
Example #13
def test_cycle_all_data(jm, sample_data,
                        sample_resultset, test_repository, mock_log_parser,
                        failure_lines):
    """
    Test cycling the sample data
    """
    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(jm, job_data, sample_resultset, False)

    time_now = time.time()
    cycle_date_ts = time_now - 7 * 24 * 3600

    jm.execute(
        proc="jobs_test.updates.set_jobs_last_modified",
        placeholders=[cycle_date_ts]
    )

    jobs_to_be_deleted = jm.execute(
        proc="jobs_test.selects.get_jobs_for_cycling",
        placeholders=[time_now - 24 * 3600]
    )

    jobs_before = jm.execute(proc="jobs_test.selects.jobs")

    call_command('cycle_data', sleep_time=0, days=1)

    refresh_all()

    jobs_after = jm.execute(proc="jobs_test.selects.jobs")

    assert len(jobs_after) == len(jobs_before) - len(jobs_to_be_deleted)

    # There should be no jobs or failure lines after cycling
    assert len(jobs_after) == 0
    assert FailureLine.objects.count() == 0
    assert Job.objects.count() == 0
    assert JobDetail.objects.count() == 0
    assert JobLog.objects.count() == 0

    # There should be nothing in elastic search after cycling
    assert TestFailureLine.search().params(search_type="count").execute().hits.total == 0
Example #14
def test_cycle_one_job(jm, sample_data,
                       sample_resultset, test_repository, mock_log_parser,
                       elasticsearch, failure_lines):
    """
    Test cycling one job in a group of jobs to confirm there are no
    unexpected deletions
    """

    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(jm, job_data, sample_resultset, False)

    job_not_deleted = jm.get_job(2)[0]

    extra_objects = {
        'failure_lines': (FailureLine,
                          create_failure_lines(test_repository,
                                               job_not_deleted["job_guid"],
                                               [(test_line, {}),
                                                (test_line, {"subtest": "subtest2"})])),
        'job_details': (JobDetail, [JobDetail.objects.create(
            job=Job.objects.get(guid=job_not_deleted["job_guid"]),
            title='test',
            value='testvalue')])
    }

    time_now = time.time()
    cycle_date_ts = int(time_now - 7 * 24 * 3600)

    jm.execute(
        proc="jobs_test.updates.set_jobs_last_modified",
        placeholders=[time_now]
    )

    jm.execute(
        proc="jobs_test.updates.set_one_job_last_modified_timestamp",
        placeholders=[cycle_date_ts]
    )

    jobs_to_be_deleted = jm.execute(
        proc="jobs_test.selects.get_one_job_for_cycling",
        placeholders=[1]
    )
    num_job_logs_to_be_deleted = JobLog.objects.filter(
        job__project_specific_id__in=[job['id'] for job in
                                      jobs_to_be_deleted]).count()

    jobs_before = jm.execute(proc="jobs_test.selects.jobs")
    job_logs_before = JobLog.objects.count()

    call_command('cycle_data', sleep_time=0, days=1, debug=True)
    refresh_all()

    jobs_after = jm.execute(proc="jobs_test.selects.jobs")

    # Confirm that the target result set has no jobs in the
    # jobs table
    jobs_to_be_deleted_after = jm.execute(
        proc="jobs_test.selects.get_one_job_for_cycling",
        placeholders=[1]
    )

    assert len(jobs_to_be_deleted_after) == 0

    assert len(jobs_after) == len(jobs_before) - len(jobs_to_be_deleted)
    assert len(jobs_after) == Job.objects.count()

    assert JobLog.objects.count() == (job_logs_before -
                                      num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        assert (set(item.id for item in object_type.objects.all()) ==
                set(item.id for item in objects))

    assert set(int(item.meta.id)
               for item in TestFailureLine.search().execute()) == set(
                   item.id for item in extra_objects["failure_lines"][1])
Example #15
def test_cycle_one_job(jm, sample_data, sample_resultset, test_repository,
                       mock_log_parser, elasticsearch, failure_lines):
    """
    Test cycling one job in a group of jobs to confirm there are no
    unexpected deletions
    """

    job_data = sample_data.job_data[:20]
    test_utils.do_job_ingestion(jm, job_data, sample_resultset, False)

    job_not_deleted = jm.get_job(2)[0]

    extra_objects = {
        'failure_lines':
        (FailureLine,
         create_failure_lines(test_repository, job_not_deleted["job_guid"],
                              [(test_line, {}),
                               (test_line, {
                                   "subtest": "subtest2"
                               })])),
        'job_details': (JobDetail, [
            JobDetail.objects.create(
                job=Job.objects.get(guid=job_not_deleted["job_guid"]),
                title='test',
                value='testvalue')
        ])
    }

    time_now = time.time()
    cycle_date_ts = int(time_now - 7 * 24 * 3600)

    jm.execute(proc="jobs_test.updates.set_jobs_submit_timestamp",
               placeholders=[time_now])

    jm.execute(proc="jobs_test.updates.set_one_job_submit_timestamp",
               placeholders=[cycle_date_ts])

    jobs_to_be_deleted = jm.execute(
        proc="jobs_test.selects.get_one_job_for_cycling", placeholders=[1])
    num_job_logs_to_be_deleted = JobLog.objects.filter(
        job__project_specific_id__in=[job['id']
                                      for job in jobs_to_be_deleted]).count()

    jobs_before = jm.execute(proc="jobs_test.selects.jobs")
    job_logs_before = JobLog.objects.count()

    call_command('cycle_data', sleep_time=0, days=1, debug=True)
    refresh_all()

    jobs_after = jm.execute(proc="jobs_test.selects.jobs")

    # Confirm that the target result set has no jobs in the
    # jobs table
    jobs_to_be_deleted_after = jm.execute(
        proc="jobs_test.selects.get_one_job_for_cycling", placeholders=[1])

    assert len(jobs_to_be_deleted_after) == 0

    assert len(jobs_after) == len(jobs_before) - len(jobs_to_be_deleted)
    assert len(jobs_after) == Job.objects.count()

    assert JobLog.objects.count() == (job_logs_before -
                                      num_job_logs_to_be_deleted)

    for (object_type, objects) in extra_objects.values():
        assert (set(item.id for item in object_type.objects.all()) == set(
            item.id for item in objects))

    assert set(
        int(item.meta.id)
        for item in TestFailureLine.search().execute()) == set(
            item.id for item in extra_objects["failure_lines"][1])