Example #1
def test_create_error_summary(failure_classifications,
                              jobs_with_local_log, sample_push,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_push_data(test_repository, sample_push)

    jobs = jobs_with_local_log
    for job in jobs:
        job['job']['result'] = "testfailed"
        job['revision'] = sample_push[0]['revision']

    store_job_data(test_repository, jobs)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = {"search", "search_terms", "bugs"}
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
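The assertions above pin down the shape of each bug_suggestions entry. A minimal illustrative entry (only the keys come from the test; the values are invented):

# Illustrative bug_suggestions entry; only the keys are taken from the
# test above, the values are made up for the sketch.
example_suggestion = {
    "search": "TEST-UNEXPECTED-FAIL | test_foo.html | assertion failed",
    "search_terms": ["test_foo.html"],
    "bugs": [],  # stays an empty list when no bugs match the search terms
}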
Example #2
def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser):
    store_push_data(test_repository, sample_push)

    num_jobs = 11
    jobs = sample_data.job_data[0:num_jobs]

    max_index = len(sample_push) - 1
    push_index = 0
    task_id_index = 0

    blobs = []
    for blob in jobs:

        if push_index > max_index:
            push_index = 0

        # Modify job structure to sync with the push sample data
        if 'sources' in blob:
            del blob['sources']

        blob['revision'] = sample_push[push_index]['revision']
        blob['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcXL{}'.format(task_id_index)
        blob['taskcluster_retry_id'] = '0'
        blobs.append(blob)

        push_index += 1
        task_id_index += 1
    return blobs
Example #3
    def process(self, message_body, exchange):
        transformer = self.get_transformer_class(exchange)(message_body)
        try:
            newrelic.agent.add_custom_parameter("url", transformer.repo_url)
            newrelic.agent.add_custom_parameter("branch", transformer.branch)
            repo = Repository.objects.get(url=transformer.repo_url,
                                          branch=transformer.branch,
                                          active_status="active")
            newrelic.agent.add_custom_parameter("repository", repo.name)

        except ObjectDoesNotExist:
            repo_info = transformer.get_info()
            repo_info.update({
                "url": transformer.repo_url,
                "branch": transformer.branch,
            })
            newrelic.agent.record_custom_event("skip_unknown_repository",
                                               repo_info)
            logger.warn("Skipping unsupported repo: {} {}".format(
                transformer.repo_url,
                transformer.branch))
            return

        transformed_data = transformer.transform(repo.name)

        logger.info("Storing push for {} {} {}".format(
            repo.name,
            transformer.repo_url,
            transformer.branch))
        store_push_data(repo, [transformed_data])
Example #4
def test_push_list_single_long_revision_stored_long(client, sample_push, test_repository):
    """
    test retrieving a push list with store long revision, filtered by a single long revision
    """
    long_revision = "21fb3eed1b5f3456789012345678901234567890"

    # store a push with long revision
    push = copy.deepcopy(sample_push[0])
    push["revisions"][0]["revision"] = long_revision
    store_push_data(test_repository, [push])

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
        {"revision": long_revision}
    )
    assert resp.status_code == 200
    results = resp.json()['results']
    meta = resp.json()['meta']
    assert len(results) == 1
    assert set([ph["revision"] for ph in results]) == {sample_push[0]['revision']}
    assert(meta == {
        'count': 1,
        'revision': long_revision,
        'filter_params': {
            'revisions_long_revision': long_revision
        },
        'repository': test_repository.name}
    )
Example #5
def push_with_three_jobs(sample_data, sample_push, test_repository):
    """
    Stores a number of jobs in the same push.
    """
    num_jobs = 3
    push = sample_push[0]
    jobs = copy.deepcopy(sample_data.job_data[0:num_jobs])

    # Only store data for the first push....
    store_push_data(test_repository, [push])

    blobs = []
    for blob in jobs:
        # Modify job structure to sync with the push sample data
        if 'sources' in blob:
            del blob['sources']

        # Skip log references since they do not work correctly in pending state.
        if 'log_references' in blob['job']:
            del blob['job']['log_references']

        blob['revision'] = push['revision']
        blob['job']['state'] = 'pending'
        blobs.append(blob)

    # Store and process the jobs so they are present in the tables.
    store_job_data(test_repository, blobs)
    return Push.objects.get(repository=test_repository,
                            revision=push['revision'])
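A hypothetical consumer of this fixture (the assertions are illustrative, not taken from the source) might read:

def test_push_has_three_pending_jobs(push_with_three_jobs):
    # the fixture stored three jobs in the 'pending' state on one push
    jobs = Job.objects.filter(push=push_with_three_jobs)
    assert jobs.count() == 3
    assert all(job.state == 'pending' for job in jobs)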
Example #6
def test_ingest_running_to_retry_to_success_sample_job(test_repository,
                                                       failure_classifications,
                                                       sample_data,
                                                       sample_push,
                                                       mock_log_parser,
                                                       ingestion_cycles):
    # verifies that retries to success work, no matter how jobs are batched
    store_push_data(test_repository, sample_push)

    job_datum = copy.deepcopy(sample_data.job_data[0])
    job_datum['revision'] = sample_push[0]['revision']

    job = job_datum['job']
    job_guid_root = job['job_guid']

    job_data = []
    for (state, result, job_guid) in [
            ('running', 'unknown', job_guid_root),
            ('completed', 'retry',
             job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
            ('completed', 'success', job_guid_root)]:
        new_job_datum = copy.deepcopy(job_datum)
        new_job_datum['job']['state'] = state
        new_job_datum['job']['result'] = result
        new_job_datum['job']['job_guid'] = job_guid
        job_data.append(new_job_datum)

    for (i, j) in ingestion_cycles:
        store_job_data(test_repository, job_data[i:j])

    assert Job.objects.count() == 2
    assert Job.objects.get(id=1).result == 'retry'
    assert Job.objects.get(id=2).result == 'success'
    assert JobLog.objects.count() == 2
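The retry job_guid above follows a simple convention: the root guid plus an underscore and the last five digits of the job's end timestamp. A standalone sketch with invented values:

# Sketch of the retry-guid convention relied on by the test above;
# the concrete values here are invented.
job_guid_root = "abcd1234"
end_timestamp = 1376092800
retry_guid = job_guid_root + "_" + str(end_timestamp)[-5:]
assert retry_guid == "abcd1234_92800"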
Example #7
def test_push_list_without_jobs(client,
                                test_repository,
                                sample_push):
    """
    test retrieving a push list without jobs
    """
    store_push_data(test_repository, sample_push)

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_code == 200

    # The .json() method of the Django test client doesn't handle unicode properly on
    # Python 2, so we have to deserialize ourselves. TODO: Clean up once on Python 3.
    data = json.loads(resp.content)
    results = data['results']
    assert len(results) == 10
    assert all([('platforms' not in result) for result in results])

    meta = data['meta']

    assert meta == {
        u'count': len(results),
        u'filter_params': {},
        u'repository': test_repository.name
    }
Example #8
    def process(self, message_body, exchange, root_url):
        transformer = self.get_transformer_class(exchange)(message_body)
        try:
            newrelic.agent.add_custom_parameter("url", transformer.repo_url)
            newrelic.agent.add_custom_parameter("branch", transformer.branch)
            repos = Repository.objects
            if transformer.branch:
                repos = repos.filter(branch__regex="(^|,)%s($|,)" %
                                     transformer.branch)
            else:
                repos = repos.filter(branch=None)
            repo = repos.get(url=transformer.repo_url, active_status="active")
            newrelic.agent.add_custom_parameter("repository", repo.name)
        except ObjectDoesNotExist:
            repo_info = transformer.get_info()
            repo_info.update({
                "url": transformer.repo_url,
                "branch": transformer.branch,
            })
            newrelic.agent.record_custom_event("skip_unknown_repository",
                                               repo_info)
            logger.warning("Skipping unsupported repo: %s %s",
                           transformer.repo_url, transformer.branch)
            return

        transformed_data = transformer.transform(repo.name)

        logger.info("Storing push for %s %s %s", repo.name,
                    transformer.repo_url, transformer.branch)
        store_push_data(repo, [transformed_data])
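The branch filter regex exists because Repository.branch can evidently hold a comma-separated list of branches. A quick standalone check of the pattern (test strings invented):

import re

# The pattern must match the branch as a whole item of a comma-separated
# list, not as a substring of another branch name.
pattern = "(^|,)%s($|,)" % "mybranch"
assert re.search(pattern, "mybranch")
assert re.search(pattern, "dev,mybranch,prod")
assert not re.search(pattern, "notmybranch")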
Example #9
    def process(self, message_body, exchange):
        transformer = self.get_transformer_class(exchange)(message_body)
        try:
            newrelic.agent.add_custom_parameter("url", transformer.repo_url)
            newrelic.agent.add_custom_parameter("branch", transformer.branch)
            repos = Repository.objects
            if transformer.branch:
                repos = repos.filter(branch__regex="(^|,)%s($|,)" % transformer.branch)
            else:
                repos = repos.filter(branch=None)
            repo = repos.get(url=transformer.repo_url, active_status="active")
            newrelic.agent.add_custom_parameter("repository", repo.name)

        except ObjectDoesNotExist:
            repo_info = transformer.get_info()
            repo_info.update({
                "url": transformer.repo_url,
                "branch": transformer.branch,
            })
            newrelic.agent.record_custom_event("skip_unknown_repository",
                                               repo_info)
            logger.warning("Skipping unsupported repo: %s %s",
                           transformer.repo_url,
                           transformer.branch)
            return

        transformed_data = transformer.transform(repo.name)

        logger.info("Storing push for %s %s %s",
                    repo.name,
                    transformer.repo_url,
                    transformer.branch)
        store_push_data(repo, [transformed_data])
Example #10
def test_push_list_single_long_revision_stored_long(webapp, sample_push, test_repository):
    """
    test retrieving a push list with store long revision, filtered by a single long revision
    """
    long_revision = "21fb3eed1b5f3456789012345678901234567890"

    # store a push with long revision
    push = copy.deepcopy(sample_push[0])
    push["revisions"][0]["revision"] = long_revision
    store_push_data(test_repository, [push])

    resp = webapp.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
        {"revision": long_revision}
    )
    assert resp.status_int == 200
    results = resp.json['results']
    meta = resp.json['meta']
    assert len(results) == 1
    assert set([ph["revision"] for ph in results]) == {sample_push[0]['revision']}
    assert(meta == {
        'count': 1,
        'revision': long_revision,
        'filter_params': {
            'revisions_long_revision': long_revision
        },
        'repository': test_repository.name}
    )
Example #11
def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser):
    store_push_data(test_repository, sample_push)

    num_jobs = 11
    jobs = sample_data.job_data[0:num_jobs]

    max_index = len(sample_push) - 1
    push_index = 0

    blobs = []
    for blob in jobs:

        if push_index > max_index:
            push_index = 0

        # Modify job structure to sync with the push sample data
        if 'sources' in blob:
            del blob['sources']

        blob['revision'] = sample_push[push_index]['revision']

        blobs.append(blob)

        push_index += 1
    return blobs
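The push_index bookkeeping above is a manual round-robin over the pushes; an equivalent, more compact formulation (a sketch, not from the source):

# Equivalent round-robin assignment of jobs to pushes using modulo
# indexing instead of the manual index reset.
blobs = []
for i, blob in enumerate(jobs):
    blob.pop('sources', None)  # drop 'sources' if present
    blob['revision'] = sample_push[i % len(sample_push)]['revision']
    blobs.append(blob)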
Example #12
def test_ingest_buildbot_tier1_job(test_repository, sample_data, sample_push,
                                   failure_classifications, mock_log_parser):
    """Tier is set to 1 if no lower_tier_signatures is used (ie: TaskCluster)"""
    job_data = sample_data.job_data[:1]
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 1
Example #13
def test_ingest_job_default_tier(test_repository, sample_data, sample_push,
                                 failure_classifications, mock_log_parser):
    """Tier is set to 1 by default"""
    job_data = sample_data.job_data[:1]
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 1
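A natural companion test would override the default tier. Assuming the job blob accepts an explicit 'tier' key (the field name is an assumption, not confirmed by these examples), it would look roughly like:

def test_ingest_job_with_tier(test_repository, sample_data, sample_push,
                              failure_classifications, mock_log_parser):
    """Tier is taken from the job blob when set explicitly (sketch)"""
    job_data = sample_data.job_data[:1]
    job_data[0]['job']['tier'] = 3  # 'tier' key is assumed, not confirmed
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 3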
Example #14
def test_bad_date_value_ingestion(test_repository, failure_classifications,
                                  sample_push, mock_log_parser):
    """
    Test ingesting a job blob with bad date value

    """
    blob = job_data(start_timestamp="foo", revision=sample_push[0]['revision'])

    store_push_data(test_repository, sample_push[:1])
    store_job_data(test_repository, [blob])
Example #15
def test_push_list_empty_push_still_show(client, sample_push, test_repository):
    """
    test retrieving a push list, when the push has no jobs.
    should show.
    """
    store_push_data(test_repository, sample_push)

    resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
    assert resp.status_code == 200
    data = resp.json()
    assert len(data['results']) == 10
Example #16
def test_push_list_empty_push_still_show(webapp, sample_push, test_repository):
    """
    test retrieving a push list, when the push has no jobs.
    should show.
    """
    store_push_data(test_repository, sample_push)

    resp = webapp.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
    )
    assert resp.status_int == 200
    assert len(resp.json['results']) == 10
Example #17
def test_push_list_empty_push_still_show(client, sample_push, test_repository):
    """
    test retrieving a push list, when the push has no jobs.
    should show.
    """
    store_push_data(test_repository, sample_push)

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_code == 200
    # The .json() method of the Django test client doesn't handle unicode properly on
    # Python 2, so we have to deserialize ourselves. TODO: Clean up once on Python 3.
    data = json.loads(resp.content)
    assert len(data['results']) == 10
Example #18
def test_ingest_running_to_retry_sample_job(
    test_repository,
    failure_classifications,
    sample_data,
    sample_push,
    mock_log_parser,
    same_ingestion_cycle,
):
    """Process a single job structure in the job_data.txt file"""
    store_push_data(test_repository, sample_push)

    job_data = copy.deepcopy(sample_data.job_data[:1])
    job = job_data[0]['job']
    job_data[0]['revision'] = sample_push[0]['revision']
    job['state'] = 'running'
    job['result'] = 'unknown'

    def _simulate_retry_job(job):
        job['state'] = 'completed'
        job['result'] = 'retry'
        # convert the job_guid to what it would be on a retry
        job['job_guid'] = job['job_guid'] + "_" + str(
            job['end_timestamp'])[-5:]
        return job

    if same_ingestion_cycle:
        # now we simulate the complete version of the job coming in (on the
        # same push)
        new_job_datum = copy.deepcopy(job_data[0])
        new_job_datum['job'] = _simulate_retry_job(new_job_datum['job'])
        job_data.append(new_job_datum)
        store_job_data(test_repository, job_data)
    else:
        # store the job in the initial state
        store_job_data(test_repository, job_data)

        # now we simulate the complete version of the job coming in and
        # ingest a second time
        job = _simulate_retry_job(job)
        store_job_data(test_repository, job_data)

    assert Job.objects.count() == 1
    job = Job.objects.get(id=1)
    assert job.result == 'retry'
    # guid should be the retry one
    assert job.guid == job_data[-1]['job']['job_guid']
Example #19
def test_parse_log(test_repository, failure_classifications, jobs_with_local_log, sample_push):
    """
    check that 2 job_artifacts get inserted when running a parse_log task for
    a successful job and that JobDetail objects get created
    """

    store_push_data(test_repository, sample_push)

    jobs = jobs_with_local_log
    for job in jobs:
        # make this a successful job, to check it's still parsed for errors
        job['job']['result'] = "success"
        job['revision'] = sample_push[0]['revision']

    store_job_data(test_repository, jobs)

    # this log generates 4 job detail objects at present
    assert JobDetail.objects.count() == 4
Example #20
def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
    test_repository,
    failure_classifications,
    sample_data,
    sample_push,
    mock_log_parser,
    ingestion_cycles,
):
    # this verifies that if we ingest multiple retries:
    # (1) nothing errors out
    # (2) we end up with three jobs (the original + 2 retry jobs)

    store_push_data(test_repository, sample_push)

    job_datum = copy.deepcopy(sample_data.job_data[0])
    job_datum['revision'] = sample_push[0]['revision']

    job = job_datum['job']
    job_guid_root = job['job_guid']

    job_data = []
    for (state, result, job_guid) in [
        ('running', 'unknown', job_guid_root),
        ('completed', 'retry',
         job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
        ('completed', 'retry', job_guid_root + "_12345"),
        ('completed', 'success', job_guid_root),
    ]:
        new_job_datum = copy.deepcopy(job_datum)
        new_job_datum['job']['state'] = state
        new_job_datum['job']['result'] = result
        new_job_datum['job']['job_guid'] = job_guid
        job_data.append(new_job_datum)

    for (i, j) in ingestion_cycles:
        ins = job_data[i:j]
        store_job_data(test_repository, ins)

    assert Job.objects.count() == 3
    assert Job.objects.get(id=1).result == 'retry'
    assert Job.objects.get(id=2).result == 'retry'
    assert Job.objects.get(id=3).result == 'success'
    assert JobLog.objects.count() == 3
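ingestion_cycles is evidently a parametrized fixture yielding (start, end) slice bounds into job_data. Plausible parameter sets for the four job updates above (values illustrative, not taken from the source):

# Plausible ingestion_cycles values; each tuple is a (start, end) slice
# into job_data, so each inner list batches the four updates differently.
possible_cycles = [
    [(0, 1), (1, 2), (2, 4)],  # one, one, then the last two together
    [(0, 2), (2, 4)],          # two batches of two
    [(0, 4)],                  # everything in a single store_job_data call
]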
Example #21
def test_push_list_without_jobs(client, test_repository, sample_push):
    """
    test retrieving a push list without jobs
    """
    store_push_data(test_repository, sample_push)

    resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
    assert resp.status_code == 200
    data = resp.json()
    results = data['results']
    assert len(results) == 10
    assert all([('platforms' not in result) for result in results])

    meta = data['meta']

    assert meta == {
        u'count': len(results),
        u'filter_params': {},
        u'repository': test_repository.name,
    }
Example #22
def test_push_list_filter_by_date(client, test_repository, sample_push):
    """
    test retrieving a push list, filtered by a date range
    """
    for (i, datestr) in zip(
            [3, 4, 5, 6, 7],
            ["2013-08-09", "2013-08-10", "2013-08-11", "2013-08-12", "2013-08-13"]):
        sample_push[i]['push_timestamp'] = utils.to_timestamp(
            utils.to_datetime(datestr))

    store_push_data(test_repository, sample_push)

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name}), {
            "startdate": "2013-08-10",
            "enddate": "2013-08-13"
        })
    assert resp.status_code == 200
    # The .json() method of the Django test client doesn't handle unicode properly on
    # Python 2, so we have to deserialize ourselves. TODO: Clean up once on Python 3.
    data = json.loads(resp.content)
    results = data['results']
    meta = data['meta']
    assert len(results) == 4
    assert set([rs["revision"] for rs in results]) == {
        u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
        u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
        u'a69390334818373e2d7e6e9c8d626a328ed37d47',
        u'f361dcb60bbedaa01257fbca211452972f7a74b2'
    }
    assert (meta == {
        u'count': 4,
        u'enddate': u'2013-08-13',
        u'filter_params': {
            u'push_timestamp__gte': 1376092800.0,
            u'push_timestamp__lt': 1376438400.0
        },
        u'repository': test_repository.name,
        u'startdate': u'2013-08-10'
    })
Example #23
def test_push_list_filter_by_date(client, test_repository, sample_push):
    """
    test retrieving a push list, filtered by a date range
    """
    for (i, datestr) in zip(
            [3, 4, 5, 6, 7],
            ["2013-08-09", "2013-08-10", "2013-08-11", "2013-08-12", "2013-08-13"]):
        sample_push[i]['push_timestamp'] = utils.to_timestamp(
            utils.to_datetime(datestr))

    store_push_data(test_repository, sample_push)

    resp = client.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
        {
            "startdate": "2013-08-10",
            "enddate": "2013-08-13"
        },
    )
    assert resp.status_code == 200
    data = resp.json()
    results = data['results']
    meta = data['meta']
    assert len(results) == 4
    assert set([rs["revision"] for rs in results]) == {
        u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
        u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
        u'a69390334818373e2d7e6e9c8d626a328ed37d47',
        u'f361dcb60bbedaa01257fbca211452972f7a74b2',
    }
    assert meta == {
        u'count': 4,
        u'enddate': u'2013-08-13',
        u'filter_params': {
            u'push_timestamp__gte': 1376092800.0,
            u'push_timestamp__lt': 1376438400.0,
        },
        u'repository': test_repository.name,
        u'startdate': u'2013-08-10',
    }
Example #24
def test_ingest_job_revision_hash_blank_revision(test_repository,
                                                 failure_classifications,
                                                 sample_data, mock_log_parser,
                                                 sample_push):

    # Given a push with a revision_hash value that is NOT the
    # top revision SHA, ingest a job with a different revision_hash, but a
    # matching revision SHA.  Ensure the job still goes to the right push.
    rs_revision_hash = "12345abc"
    push = sample_push[0].copy()
    push["revision_hash"] = rs_revision_hash
    store_push_data(test_repository, [push])

    first_job = sample_data.job_data[0]
    first_job["revision_hash"] = rs_revision_hash
    first_job["revision"] = ""
    store_job_data(test_repository, [first_job])

    assert Job.objects.count() == 1
    assert Job.objects.get(id=1).push_id == Push.objects.values_list(
        'id', flat=True).get(revision_hash=rs_revision_hash)
Example #25
def test_push_list_filter_by_date(webapp,
                                  test_repository,
                                  sample_push):
    """
    test retrieving a push list, filtered by a date range
    """
    for (i, datestr) in zip([3, 4, 5, 6, 7], ["2013-08-09", "2013-08-10",
                                              "2013-08-11", "2013-08-12",
                                              "2013-08-13"]):
        sample_push[i]['push_timestamp'] = utils.to_timestamp(
            utils.to_datetime(datestr))

    store_push_data(test_repository, sample_push)

    resp = webapp.get(
        reverse("push-list", kwargs={"project": test_repository.name}),
        {"startdate": "2013-08-10", "enddate": "2013-08-13"}
    )
    assert resp.status_int == 200
    results = resp.json['results']
    meta = resp.json['meta']
    assert len(results) == 4
    assert set([rs["revision"] for rs in results]) == {
        u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
        u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
        u'a69390334818373e2d7e6e9c8d626a328ed37d47',
        u'f361dcb60bbedaa01257fbca211452972f7a74b2'
    }
    assert(meta == {
        u'count': 4,
        u'enddate': u'2013-08-13',
        u'filter_params': {
            u'push_timestamp__gte': 1376092800.0,
            u'push_timestamp__lt': 1376438400.0
        },
        u'repository': test_repository.name,
        u'startdate': u'2013-08-10'}
    )
Example #26
def test_ingest_job_with_revision_hash(test_repository,
                                       failure_classifications, sample_data,
                                       mock_log_parser, sample_push):
    """
    Test ingesting a job with only a revision hash, no revision.  And the
    revision_hash must NOT be the same SHA value as the top revision.

    This can happen if a user submits a new push in the API with their
    own revision_hash value.  If we just use the latest revision value, then
    their subsequent job submissions with the revision_hash they generated
    will fail and the jobs will be skipped.
    """
    revision_hash = "12345abc"
    push = sample_push[0].copy()
    push["revision_hash"] = revision_hash
    store_push_data(test_repository, [push])

    first_job = sample_data.job_data[0]
    first_job["revision_hash"] = revision_hash
    del first_job["revision"]
    store_job_data(test_repository, [first_job])

    assert Job.objects.count() == 1
Example #27
def test_ingest_retry_sample_job_no_running(test_repository,
                                            failure_classifications,
                                            sample_data, sample_push,
                                            mock_log_parser):
    """Process a single job structure in the job_data.txt file"""
    job_data = copy.deepcopy(sample_data.job_data[:1])
    job = job_data[0]['job']
    job_data[0]['revision'] = sample_push[0]['revision']

    store_push_data(test_repository, sample_push)

    # complete version of the job coming in
    job['state'] = 'completed'
    job['result'] = 'retry'
    # convert the job_guid to what it would be on a retry
    retry_guid = job['job_guid'] + "_" + str(job['end_timestamp'])[-5:]
    job['job_guid'] = retry_guid

    store_job_data(test_repository, job_data)

    assert Job.objects.count() == 1
    job = Job.objects.get(id=1)
    assert job.result == 'retry'
    assert job.guid == retry_guid
Example #28
def test_push_list_without_jobs(webapp,
                                test_repository,
                                sample_push):
    """
    test retrieving a push list without jobs
    """
    store_push_data(test_repository, sample_push)

    resp = webapp.get(
        reverse("push-list", kwargs={"project": test_repository.name})
    )
    assert resp.status_int == 200

    results = resp.json['results']
    assert len(results) == 10
    assert all([('platforms' not in result) for result in results])

    meta = resp.json['meta']

    assert meta == {
        u'count': len(results),
        u'filter_params': {},
        u'repository': test_repository.name
    }
Example #29
def retry_mock(exc=None, countdown=None):
    assert isinstance(exc, MissingPushException)
    thread_data.retries += 1
    store_push_data(test_repository, [rs])
    return orig_retry(exc=exc, countdown=countdown)
Example #30
def try_push_stored(try_repository, sample_push):
    store_push_data(try_repository, sample_push)

    return sample_push
Example #31
def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
    """
    Ingest ``job_data`` which will be JSON job blobs.

    ``verify_data`` - whether or not to run the ingested jobs
                      through the verifier.
    """
    store_push_data(test_repository, sample_push)

    max_index = len(sample_push) - 1
    push_index = 0

    # Structures to test if we stored everything
    build_platforms_ref = set()
    machine_platforms_ref = set()

    machines_ref = set()
    options_ref = set()
    job_types_ref = set()
    products_ref = set()
    pushes_ref = set()
    log_urls_ref = set()
    superseded_job_guids = set()
    artifacts_ref = {}

    blobs = []
    for blob in job_data:

        if push_index > max_index:
            push_index = 0

        # Modify job structure to sync with the push sample data
        if 'sources' in blob:
            del blob['sources']

        blob['revision'] = sample_push[push_index]['revision']

        blobs.append(blob)

        push_index += 1

        # Build data structures to confirm everything is stored
        # as expected
        if verify_data:
            job = blob['job']

            build_platforms_ref.add("-".join([
                job.get('build_platform', {}).get('os_name', 'unknown'),
                job.get('build_platform', {}).get('platform', 'unknown'),
                job.get('build_platform', {}).get('architecture', 'unknown')
            ]))

            machine_platforms_ref.add("-".join([
                job.get('machine_platform', {}).get('os_name', 'unknown'),
                job.get('machine_platform', {}).get('platform', 'unknown'),
                job.get('machine_platform', {}).get('architecture', 'unknown')
            ]))

            machines_ref.add(job.get('machine', 'unknown'))

            options_ref = options_ref.union(
                job.get('option_collection', {}).keys())

            job_types_ref.add(job.get('name', 'unknown'))
            products_ref.add(job.get('product_name', 'unknown'))
            pushes_ref.add(blob['revision'])

            log_url_list = job.get('log_references', [])
            for log_data in log_url_list:
                log_urls_ref.add(log_data['url'])

            artifact_name = job.get('artifact', {}).get('name')
            if artifact_name:
                artifacts_ref[artifact_name] = job.get('artifact')

            superseded = blob.get('superseded', [])
            superseded_job_guids.update(superseded)

    # Store the modified json blobs
    store_job_data(test_repository, blobs)

    if verify_data:
        # Confirm stored data matches what's in the reference data structs
        verify_build_platforms(build_platforms_ref)
        verify_machine_platforms(machine_platforms_ref)
        verify_machines(machines_ref)
        verify_options(options_ref)
        verify_job_types(job_types_ref)
        verify_products(products_ref)
        verify_pushes(test_repository, pushes_ref)
        verify_log_urls(test_repository, log_urls_ref)
        verify_superseded(superseded_job_guids)
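A hypothetical caller of this helper, mirroring the ingestion tests elsewhere in this section (fixture names as used above):

def test_ingest_all_sample_jobs(test_repository, failure_classifications,
                                sample_data, sample_push, mock_log_parser):
    """Ingest and verify every sample job blob (illustrative test)."""
    do_job_ingestion(test_repository, sample_data.job_data, sample_push)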
Example #32
def push_stored(test_repository, sample_push):
    store_push_data(test_repository, sample_push)

    return sample_push
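push_stored reads like a pytest fixture from a conftest module; a sketch of how it would be registered and consumed (the decorator and the consuming test are assumptions, not taken from the source):

import pytest

@pytest.fixture
def push_stored(test_repository, sample_push):
    store_push_data(test_repository, sample_push)
    return sample_push

def test_pushes_stored(push_stored, test_repository):
    # illustrative assertion: every sample push now has a Push row
    assert Push.objects.filter(repository=test_repository).count() == len(push_stored)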