Code example #1
def test_ingest_job_with_updated_job_group(test_repository, failure_classifications,
                                           sample_data, mock_log_parser,
                                           push_stored):
    """
    The job_type and job_group for a job are independent of any other
    job_type and job_group combination.
    """
    first_job_datum = sample_data.job_data[0]
    first_job_datum["job"]["group_name"] = "first group name"
    first_job_datum["job"]["group_symbol"] = "1"
    first_job_guid = "first-unique-job-guid"
    first_job_datum["job"]["job_guid"] = first_job_guid
    first_job_datum["revision"] = push_stored[0]["revision"]
    store_job_data(test_repository, [first_job_datum])
    first_job = Job.objects.get(guid=first_job_guid)

    second_job_datum = copy.deepcopy(first_job_datum)
    # create a new guid to ingest the job again
    second_job_guid = "second-unique-job-guid"
    second_job_datum["job"]["job_guid"] = second_job_guid
    second_job_datum["job"]["group_name"] = "second group name"
    second_job_datum["job"]["group_symbol"] = "2"
    second_job_datum["revision"] = push_stored[0]["revision"]

    store_job_data(test_repository, [second_job_datum])

    second_job = Job.objects.get(guid=second_job_guid)

    assert second_job.job_group.name == second_job_datum["job"]["group_name"]
    assert first_job.job_group.name == first_job_datum["job"]["group_name"]
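Note: most examples on this page build job payloads by hand. For quick reference, the minimal shape passed to store_job_data is a list of dicts like the sketch below (assembled from the tests on this page; it is not the full ingestion schema, and all values are placeholders):

job_datum = {
    'project': 'test-repo',              # repository name
    'revision': 'abc123',                # revision of an already-stored push
    'job': {
        'job_guid': 'some-unique-guid',  # must be unique per job
        'state': 'completed',            # 'pending', 'running' or 'completed'
        # optional keys seen below: 'result', 'tier', 'log_references', 'artifacts'
    },
}
store_job_data(test_repository, [job_datum])  # always takes a list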
Code example #2
def completed_jobs_stored(test_repository, failure_classifications, completed_job, push_stored):
    """
    stores a list of buildapi completed jobs
    """
    completed_job['revision'] = push_stored[0]['revision']
    completed_job.update({'project': test_repository.name})
    store_job_data(test_repository, [completed_job])
Code example #3
File: job_loader.py, Project: sshepanski7/treeherder
    def process_job_list(self, all_jobs_list):
        if not isinstance(all_jobs_list, list):
            all_jobs_list = [all_jobs_list]

        validated_jobs = self._get_validated_jobs_by_project(all_jobs_list)

        for project, job_list in validated_jobs.items():
            newrelic.agent.add_custom_parameter("project", project)
            try:
                repository = Repository.objects.get(name=project)

                storeable_job_list = []
                for pulse_job in job_list:
                    if pulse_job["state"] != "unscheduled":
                        try:
                            self.validate_revision(repository, pulse_job)
                            storeable_job_list.append(
                                self.transform(pulse_job))
                        except AttributeError:
                            logger.warn("Skipping job due to bad attribute",
                                        exc_info=1)

                store_job_data(repository, storeable_job_list)

            except Repository.DoesNotExist:
                logger.info("Job with unsupported project: {}".format(project))
Code example #4
def pending_jobs_stored(test_repository, failure_classifications, pending_job, push_stored):
    """
    stores a list of buildapi pending jobs into the jobs store
    """
    pending_job.update(push_stored[0])
    pending_job.update({'project': test_repository.name})
    store_job_data(test_repository, [pending_job])
Code example #5
def running_jobs_stored(test_repository, failure_classifications, running_job, push_stored):
    """
    stores a list of buildapi running jobs
    """
    running_job.update(push_stored[0])
    running_job.update({'project': test_repository.name})
    store_job_data(test_repository, [running_job])
Code example #6
File: test_builds.py, Project: Yogita98/treeherder
def test_get_build_failures(
    failure_classifications, test_push, test_repository, sample_data, mock_log_parser
):
    jobs = sample_data.job_data[20:25]
    likely_build_regression_labels = ['B2G Emulator Image Build']

    for blob in jobs:
        blob['revision'] = test_push.revision
        blob['result'] = 'busted'
        blob['taskcluster_metadata__task_id'] = 'V3SVuxO8TFy37En_6HcXLs'
        blob['taskcluster_metadata__retry_id'] = '0'
        blob['job_type__name'] = blob['job']['name']
        blob['job_type__symbol'] = blob['job']['job_symbol']
        blob['machine_platform__platform'] = blob['job']['machine_platform']['platform']
        blob['job_group__name'] = None
        blob['job_group__symbol'] = blob['job']['group_symbol']
    store_job_data(test_repository, jobs)

    result, build_failures, in_progress = get_build_failures(
        test_push, likely_build_regression_labels, jobs
    )

    assert in_progress == 0
    assert result == 'fail'
    assert len(build_failures) == 2
Code example #7
File: job_loader.py, Project: ziqri11/treeherder
    def process_job(self, pulse_job, root_url):
        if self._is_valid_job(pulse_job):
            try:
                project = pulse_job["origin"]["project"]
                newrelic.agent.add_custom_parameter("project", project)

                repository = Repository.objects.get(name=project)

                if repository.tc_root_url != root_url:
                    logger.warning(
                        "Skipping job for %s with incorrect root_url %s",
                        repository.name, root_url)
                    return

                if pulse_job["state"] != "unscheduled":
                    try:
                        self.validate_revision(repository, pulse_job)
                        transformed_job = self.transform(pulse_job)
                        store_job_data(repository, [transformed_job])
                        # Returning the transformed_job is only for testing purposes
                        return transformed_job
                    except AttributeError:
                        logger.warning("Skipping job due to bad attribute",
                                       exc_info=1)
            except Repository.DoesNotExist:
                logger.info("Job with unsupported project: %s", project)
Code example #8
def test_store_job_pending_to_completed_with_unparsed_log(
    test_repository, push_stored, failure_classifications, activate_responses
):

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {'job_guid': job_guid, 'state': 'running'},
    }
    store_job_data(test_repository, [job_data])
    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # the second time, post a log that will get parsed
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [
                {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'}
            ],
        },
    }
    store_job_data(test_repository, [job_data])

    # should have a full set of text log errors
    assert TextLogError.objects.count() == 4
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4
Code example #9
File: job_loader.py, Project: kapy2010/treeherder
    def process_job_list(self, all_jobs_list):
        if not isinstance(all_jobs_list, list):
            all_jobs_list = [all_jobs_list]

        validated_jobs = self._get_validated_jobs_by_project(all_jobs_list)

        for project, job_list in validated_jobs.items():
            newrelic.agent.add_custom_parameter("project", project)
            try:
                repository = Repository.objects.get(name=project)

                storeable_job_list = []
                for pulse_job in job_list:
                    if pulse_job["state"] != "unscheduled":
                        try:
                            self.clean_revision(repository, pulse_job)
                            storeable_job_list.append(
                                self.transform(pulse_job)
                            )
                        except AttributeError:
                            logger.warn("Skipping job due to bad attribute",
                                        exc_info=1)

                store_job_data(repository, storeable_job_list)

            except Repository.DoesNotExist:
                logger.info("Job with unsupported project: {}".format(project))
Code example #10
File: test_tasks.py, Project: jmaher/treeherder
def test_create_error_summary(
    failure_classifications, jobs_with_local_log, sample_push, test_repository
):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_push_data(test_repository, sample_push)

    jobs = jobs_with_local_log
    for job in jobs:
        job['job']['result'] = "testfailed"
        job['revision'] = sample_push[0]['revision']

    store_job_data(test_repository, jobs)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = set(["search", "path_end", "search_terms", "bugs", "line_number"])
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
Code example #11
def test_store_job_with_parsed_log(test_repository, push_stored,
                                   failure_classifications,
                                   monkeypatch):
    """
    test submitting a job with a pre-parsed log gets job_log_url
    parse_status of "parsed" and does not parse, even though no text_log_summary
    exists.

    This is for the case where they may want to submit it at a later time.
    """

    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }]
        }
    }

    store_job_data(test_repository, [job_data])

    # ensure the parsing didn't happen
    assert mock_parse.called is False
Code example #12
def test_ingest_job_with_updated_job_group(test_repository, failure_classifications,
                                           sample_data, mock_log_parser,
                                           push_stored):
    """
    When a job_type is associated with a job group on data ingestion,
    that association will not be updated when ingesting a new job with
    the same job_type but a different job_group.
    """
    first_job_datum = sample_data.job_data[0]
    first_job_datum["job"]["group_name"] = "first group name"
    first_job_datum["job"]["group_symbol"] = "1"
    first_job_datum["revision"] = push_stored[0]["revision"]
    store_job_data(test_repository, [first_job_datum])

    second_job_datum = copy.deepcopy(first_job_datum)
    # create a new guid to ingest the job again
    second_job_guid = "a-unique-job-guid"
    second_job_datum["job"]["job_guid"] = second_job_guid
    second_job_datum["job"]["group_name"] = "second group name"
    second_job_datum["job"]["group_symbol"] = "2"
    second_job_datum["revision"] = push_stored[0]["revision"]

    store_job_data(test_repository, [second_job_datum])

    second_job = Job.objects.get(guid=second_job_guid)

    first_job_group_name = first_job_datum["job"]["group_name"]
    second_job_group_name = second_job.job_type.job_group.name

    assert first_job_group_name == second_job_group_name

    # make sure also we didn't create a new job group
    with pytest.raises(JobGroup.DoesNotExist):
        JobGroup.objects.get(name="second group name")
Code example #13
def test_ingest_running_to_retry_to_success_sample_job(
        test_repository, failure_classifications, sample_data, sample_push,
        mock_log_parser, ingestion_cycles):
    # verifies that retries to success work, no matter how jobs are batched
    store_push_data(test_repository, sample_push)

    job_datum = copy.deepcopy(sample_data.job_data[0])
    job_datum['revision'] = sample_push[0]['revision']

    job = job_datum['job']
    job_guid_root = job['job_guid']

    job_data = []
    for (state, result,
         job_guid) in [('running', 'unknown', job_guid_root),
                       ('completed', 'retry',
                        job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
                       ('completed', 'success', job_guid_root)]:
        new_job_datum = copy.deepcopy(job_datum)
        new_job_datum['job']['state'] = state
        new_job_datum['job']['result'] = result
        new_job_datum['job']['job_guid'] = job_guid
        job_data.append(new_job_datum)

    for (i, j) in ingestion_cycles:
        store_job_data(test_repository, job_data[i:j])

    assert Job.objects.count() == 2
    assert Job.objects.get(id=1).result == 'retry'
    assert Job.objects.get(id=2).result == 'success'
    assert JobLog.objects.count() == 2
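Note the guid convention this test relies on: the superseded (retried) run is stored under the root guid plus an underscore and the last five digits of the job's end_timestamp, while the final run reuses the root guid, so the two ingest as separate Job rows. A worked sketch with hypothetical values:

job_guid_root = 'abcd1234'
end_timestamp = 1518458000
retry_guid = job_guid_root + "_" + str(end_timestamp)[-5:]
assert retry_guid == 'abcd1234_58000'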
Code example #14
def test_create_error_summary(failure_classifications,
                              jobs_with_local_log, sample_resultset,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_result_set_data(test_repository, sample_resultset)

    jobs = jobs_with_local_log
    for job in jobs:
        job['job']['result'] = "testfailed"
        job['revision'] = sample_resultset[0]['revision']

    store_job_data(test_repository, jobs)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = set(["search", "search_terms", "bugs"])
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
Code example #15
def push_with_three_jobs(sample_data, sample_push, test_repository):
    """
    Stores a number of jobs in the same push.
    """
    num_jobs = 3
    push = sample_push[0]
    jobs = copy.deepcopy(sample_data.job_data[0:num_jobs])

    # Only store data for the first push....
    store_push_data(test_repository, [push])

    blobs = []
    for index, blob in enumerate(jobs):
        # Modify job structure to sync with the push sample data
        if 'sources' in blob:
            del blob['sources']

        # Skip log references since they do not work correctly in pending state.
        if 'log_references' in blob['job']:
            del blob['job']['log_references']

        blob['revision'] = push['revision']
        blob['job']['state'] = 'pending'
        blobs.append(blob)

    # Store and process the jobs so they are present in the tables.
    store_job_data(test_repository, blobs)
    return Push.objects.get(repository=test_repository,
                            revision=push['revision'])
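A sketch of how a test might consume this fixture (the test body is hypothetical; Job is imported as in the other conftest examples on this page):

from treeherder.model.models import Job

def test_push_has_three_pending_jobs(push_with_three_jobs):
    # all three ingested jobs should belong to the returned Push
    assert Job.objects.filter(push=push_with_three_jobs).count() == 3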
Code example #16
File: conftest.py, Project: SebastinSanty/treeherder
def push_with_three_jobs(sample_data, sample_resultset, test_repository):
    """
    Stores a number of jobs in the same resultset.
    """
    num_jobs = 3
    resultset = sample_resultset[0]
    jobs = copy.deepcopy(sample_data.job_data[0:num_jobs])

    # Only store data for the first resultset....
    store_result_set_data(test_repository, [resultset])

    blobs = []
    for index, blob in enumerate(jobs):
        # Modify job structure to sync with the resultset sample data
        if 'sources' in blob:
            del blob['sources']

        # Skip log references since they do not work correctly in pending state.
        if 'log_references' in blob['job']:
            del blob['job']['log_references']

        blob['revision'] = resultset['revision']
        blob['job']['state'] = 'pending'
        blobs.append(blob)

    # Store and process the jobs so they are present in the tables.
    store_job_data(test_repository, blobs)
    return Push.objects.get(repository=test_repository,
                            revision=resultset['revision'])
Code example #17
File: job_loader.py, Project: tusharuiit/treeherder
    def process_job(self, pulse_job, root_url):
        if self._is_valid_job(pulse_job):
            try:
                project = pulse_job["origin"]["project"]
                newrelic.agent.add_custom_parameter("project", project)

                repository = Repository.objects.get(name=project)
                if repository.active_status != 'active':
                    (real_task_id, _) = task_and_retry_ids(pulse_job["taskId"])
                    logger.debug(
                        "Task %s belongs to a repository that is not active.",
                        real_task_id)
                    return

                if pulse_job["state"] != "unscheduled":
                    try:
                        self.validate_revision(repository, pulse_job)
                        transformed_job = self.transform(pulse_job)
                        store_job_data(repository, [transformed_job])
                        # Returning the transformed_job is only for testing purposes
                        return transformed_job
                    except AttributeError:
                        logger.warning("Skipping job due to bad attribute",
                                       exc_info=1)
            except Repository.DoesNotExist:
                logger.info("Job with unsupported project: %s", project)
Code example #18
    def process_job_list(self, job):
        try:
            jsonschema.validate(job, job_json_schema)
        except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
            logger.error(
                "JSON Schema validation error during job ingestion: {}".format(
                    e))

        try:
            repository = Repository.objects.get(name="project")

            storeable_job_list = []

            if job["state"] != "unscheduled":
                try:
                    self.clean_revision(repository, job)
                    storeable_job_list.append(self.transform(job))
                except AttributeError:
                    logger.warn("Skipping job due to bad attribute",
                                exc_info=1)

                store_job_data(repository, storeable_job_list)

        except Repository.DoesNotExist:
            logger.info("Job with unsupported project: {}".format(job))
Code example #19
def test_jobs(eleven_job_blobs, failure_classifications, test_repository):
    job_blobs = [j for j in eleven_job_blobs if 'superseded' not in j]
    for i, job in enumerate(JOB_DATA):
        job_blobs[i]['job'].update(job)
        print(job_blobs[i])
    store_job_data(test_repository, job_blobs[0:len(JOB_DATA)])
    return [Job.objects.get(id=i) for i in range(1, len(JOB_DATA) + 1)]
Code example #20
def test_ingest_job_with_updated_job_group(test_repository, failure_classifications,
                                           sample_data, mock_log_parser,
                                           result_set_stored):
    """
    When a job_type is associated with a job group on data ingestion,
    that association will not be updated when ingesting a new job with
    the same job_type but a different job_group.
    """
    first_job_datum = sample_data.job_data[0]
    first_job_datum["job"]["group_name"] = "first group name"
    first_job_datum["job"]["group_symbol"] = "1"
    first_job_datum["revision"] = result_set_stored[0]["revision"]
    store_job_data(test_repository, [first_job_datum])

    second_job_datum = copy.deepcopy(first_job_datum)
    # create a new guid to ingest the job again
    second_job_guid = "a-unique-job-guid"
    second_job_datum["job"]["job_guid"] = second_job_guid
    second_job_datum["job"]["group_name"] = "second group name"
    second_job_datum["job"]["group_symbol"] = "2"
    second_job_datum["revision"] = result_set_stored[0]["revision"]

    store_job_data(test_repository, [second_job_datum])

    second_job = Job.objects.get(guid=second_job_guid)

    first_job_group_name = first_job_datum["job"]["group_name"]
    second_job_group_name = second_job.job_type.job_group.name

    assert first_job_group_name == second_job_group_name

    # make sure also we didn't create a new job group
    with pytest.raises(JobGroup.DoesNotExist):
        JobGroup.objects.get(name="second group name")
Code example #21
def test_get_build_failures_with_parent(failure_classifications, test_push,
                                        test_repository, sample_data,
                                        mock_log_parser):
    parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
    parent_push = Push.objects.create(
        revision=parent_revision,
        repository=test_repository,
        author='*****@*****.**',
        time=datetime.datetime.now(),
    )

    jobs = sample_data.job_data[20:25]

    for blob in jobs:
        blob['revision'] = test_push.revision
        blob['job']['result'] = 'busted'
        blob['job']['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcXLs'
        blob['job']['taskcluster_retry_id'] = '0'
    store_job_data(test_repository, jobs)

    parent_jobs = copy.deepcopy(jobs)
    for idx, blob in enumerate(parent_jobs, start=1):
        blob['revision'] = parent_push.revision
        blob['job']['job_guid'] = '{}{}'.format(parent_push.revision, idx)

    store_job_data(test_repository, parent_jobs)

    build_failures = get_build_failures(test_push, parent_push)
    first_build_failure = build_failures[0]

    assert len(build_failures) == 5
    assert first_build_failure['failedInParent']
Code example #22
def test_ingest_job_default_tier(test_repository, sample_data, sample_push,
                                 failure_classifications, mock_log_parser):
    """Tier is set to 1 by default"""
    job_data = sample_data.job_data[:1]
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 1
Code example #23
def test_ingest_buildbot_tier1_job(test_repository, sample_data, sample_push,
                                   failure_classifications, mock_log_parser):
    """Tier is set to 1 if no lower_tier_signatures is used (ie: TaskCluster)"""
    job_data = sample_data.job_data[:1]
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 1
Code example #24
def test_store_perf_artifact_multiple(test_repository, failure_classifications,
                                      push_stored):
    PerformanceFramework.objects.get_or_create(name='cheezburger',
                                               enabled=True)
    perfobj = {
        "framework": {"name": "cheezburger"},
        "suites": [{
            "name": "cheezburger metrics",
            "value": 10.0,
            "subtests": [
                {"name": "test1", "value": 20.0},
                {"name": "test2", "value": 30.0},
            ],
        }],
    }
    perfobj2 = copy.deepcopy(perfobj)
    perfobj2['suites'][0]['name'] = "cheezburger metrics 2"
    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {"performance_data": [perfobj, perfobj2]},
                'type': 'json',
                'name': 'performance_data',
                'job_guid': job_guid,
            }],
        },
    })

    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())

    # we'll just validate that we got the expected number of results
    # (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.all().count() == 6
    assert PerformanceDatum.objects.all().count() == 6
Code example #25
File: conftest.py, Project: edmorley/treeherder
def running_jobs_stored(
        test_repository, failure_classifications, running_job,
        push_stored):
    """
    stores a list of buildapi running jobs
    """
    running_job.update(push_stored[0])
    running_job.update({'project': test_repository.name})
    store_job_data(test_repository, [running_job])
Code example #26
def test_ingesting_skip_existing(test_repository, failure_classifications,
                                 sample_data, sample_push, mock_log_parser):
    """Remove single existing job prior to loading"""
    job_data = sample_data.job_data[:1]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push)

    store_job_data(test_repository, sample_data.job_data[:2])

    assert Job.objects.count() == 2
Code example #27
File: conftest.py, Project: edmorley/treeherder
def completed_jobs_stored(
        test_repository, failure_classifications, completed_job,
        push_stored):
    """
    stores a list of buildapi completed jobs
    """
    completed_job['revision'] = push_stored[0]['revision']
    completed_job.update({'project': test_repository.name})
    store_job_data(test_repository, [completed_job])
Code example #28
File: conftest.py, Project: edmorley/treeherder
def pending_jobs_stored(
        test_repository, failure_classifications, pending_job,
        push_stored):
    """
    stores a list of buildapi pending jobs into the jobs store
    """
    pending_job.update(push_stored[0])
    pending_job.update({'project': test_repository.name})
    store_job_data(test_repository, [pending_job])
Code example #29
def test_store_perf_artifact(test_repository, failure_classifications,
                             push_stored):
    PerformanceFramework.objects.get_or_create(name='cheezburger',
                                               enabled=True)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {
                    "performance_data": {
                        "framework": {"name": "cheezburger"},
                        "suites": [{
                            "name": "cheezburger metrics",
                            "value": 10.0,
                            "subtests": [
                                {"name": "test1", "value": 20.0},
                                {"name": "test2", "value": 30.0},
                            ],
                        }],
                    }
                },
                'type': 'json',
                'name': 'performance_data',
                'job_guid': job_guid,
            }],
        },
    }

    store_job_data(test_repository, [job_data])

    # we'll just validate that we got the expected number of results
    # (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.all().count() == 3
    assert PerformanceDatum.objects.all().count() == 3
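The expected counts in this test and in the two-suite variant above are consistent with one performance signature (and one datum) per suite plus one per subtest: 1 * (1 + 2) = 3 here, 2 * (1 + 2) = 6 for two suites of two subtests each. A sketch of that arithmetic, inferred from the asserts rather than from the ingestion code itself:

def expected_perf_rows(suites, subtests_per_suite):
    # one suite-level row plus one per subtest, for each suite
    return suites * (1 + subtests_per_suite)

assert expected_perf_rows(1, 2) == 3
assert expected_perf_rows(2, 2) == 6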
Code example #30
def test_bad_date_value_ingestion(test_repository, failure_classifications,
                                  sample_push, mock_log_parser):
    """
    Test ingesting a job blob with bad date value

    """
    blob = job_data(start_timestamp="foo", revision=sample_push[0]['revision'])

    store_push_data(test_repository, sample_push[:1])
    store_job_data(test_repository, [blob])
Code example #31
File: conftest.py, Project: imbstack/treeherder
def retriggered_job(test_job, eleven_job_blobs):
    # a copy of test_job with a different guid, representing a "retrigger"
    from treeherder.model.models import Job
    original = eleven_job_blobs[0]
    retrigger = copy.deepcopy(original)
    retrigger['job']['job_guid'] = "f1c75261017c7c5ce3000931dce4c442fe0a129a"

    store_job_data(test_job.repository, [retrigger])

    return Job.objects.get(guid=retrigger['job']['job_guid'])
Code example #32
File: conftest.py, Project: SebastinSanty/treeherder
def retriggered_job(test_job, eleven_job_blobs):
    # a copy of test_job with a different guid, representing a "retrigger"
    from treeherder.model.models import Job
    original = eleven_job_blobs[0]
    retrigger = copy.deepcopy(original)
    retrigger['job']['job_guid'] = "f1c75261017c7c5ce3000931dce4c442fe0a129a"

    store_job_data(test_job.repository, [retrigger])

    return Job.objects.get(guid=retrigger['job']['job_guid'])
Code example #33
def test_store_job_with_unparsed_log(test_repository, failure_classifications,
                                     push_stored, monkeypatch,
                                     activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """

    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary

    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    errorsummary = add_log_response(
        "mochitest-browser-chrome_errorsummary.log")

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [
                {
                    'url': log_url,
                    'name': 'live_backing_log',
                    'parse_status': 'pending'
                },
                {
                    'url': errorsummary,
                    'name': 'mochi_errorsummary.log',
                    'parse_status': 'pending'
                },
            ],
        },
    }
    store_job_data(test_repository, [job_data])

    # should have 3 errors
    assert TextLogError.objects.count() == 3
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 3 error summary lines
    assert len(get_error_summary(Job.objects.get(id=1))) == 3
Code example #34
def test_ingest_buildbot_tier2_job(test_repository, sample_data, sample_push,
                                   failure_classifications, mock_log_parser):
    """Tier is set to 2 if it matches the signature object"""
    job_data = sample_data.job_data[:1]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push)
    job = Job.objects.all().first()
    lower_tier_signatures = {job.signature.signature: 2}
    job_data_2 = copy.deepcopy(job_data)
    job_data_2[0]['job']['job_guid'] = "foo"
    store_job_data(test_repository, job_data_2, lower_tier_signatures)
    job2 = Job.objects.get(guid="foo")
    assert job2.tier == 2
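This is the only example that passes store_job_data's optional third argument: a mapping from job signature hashes to the tier to assign at ingestion. Call shape sketch (the hash below is a placeholder, not a real signature):

lower_tier_signatures = {'0123456789abcdef': 2}  # signature hash -> tier
store_job_data(test_repository, job_data, lower_tier_signatures)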
Code example #35
File: conftest.py, Project: tomrittervg/treeherder
def completed_jobs_stored(test_repository, failure_classifications,
                          completed_jobs, push_stored):
    """
    stores a list of buildapi completed jobs
    """
    completed_jobs['revision'] = push_stored[0]['revision']
    completed_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(completed_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Code example #36
def test_store_job_with_tier(test_repository, failure_classifications, push_stored):
    """test submitting a job with tier specified"""
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {'job_guid': job_guid, 'state': 'completed', 'tier': 3},
    }

    store_job_data(test_repository, [job_data])

    job = Job.objects.get(guid=job_guid)
    assert job.tier == 3
Code example #37
File: conftest.py, Project: tomrittervg/treeherder
def running_jobs_stored(test_repository, failure_classifications, running_jobs,
                        push_stored):
    """
    stores a list of buildapi running jobs
    """
    running_jobs.update(push_stored[0])
    running_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(running_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Code example #38
    def create(self, request, project):
        """
        This method adds a job to a given resultset.
        """
        try:
            repository = Repository.objects.get(name=project)
        except ObjectDoesNotExist:
            return Response("No repository with name: {0}".format(project),
                            status=HTTP_404_NOT_FOUND)

        store_job_data(repository, request.data)

        return Response({'message': 'Job successfully updated'})
Code example #39
File: jobs.py, Project: MikeLing/treeherder
    def create(self, request, project):
        """
        This method adds a job to a given push.
        """
        try:
            repository = Repository.objects.get(name=project)
        except ObjectDoesNotExist:
            return Response("No repository with name: {0}".format(project),
                            status=HTTP_404_NOT_FOUND)

        store_job_data(repository, request.data)

        return Response({'message': 'Job successfully updated'})
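Both create() variants expose the same payload format as store_job_data over REST. A hypothetical client call, assuming the viewset is routed under /api/project/<project>/jobs/ (the route itself is not shown in these snippets):

import requests

resp = requests.post(
    'https://treeherder.example.com/api/project/mozilla-central/jobs/',
    json=[job_datum],  # same shape as the store_job_data payloads above
)
resp.raise_for_status()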
Code example #40
File: conftest.py, Project: bclary/treeherder
def running_jobs_stored(
        test_repository, failure_classifications, running_jobs,
        push_stored):
    """
    stores a list of buildapi running jobs
    """
    running_jobs.update(push_stored[0])
    running_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(running_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Code example #41
File: conftest.py, Project: bclary/treeherder
def completed_jobs_stored(
        test_repository, failure_classifications, completed_jobs,
        push_stored):
    """
    stores a list of buildapi completed jobs
    """
    completed_jobs['revision'] = push_stored[0]['revision']
    completed_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(completed_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Code example #42
File: conftest.py, Project: bclary/treeherder
def pending_jobs_stored(
        test_repository, failure_classifications, pending_jobs,
        push_stored):
    """
    stores a list of buildapi pending jobs into the jobs store
    """

    pending_jobs.update(push_stored[0])
    pending_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(pending_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Code example #43
def test_store_job_with_text_log_summary_artifact_pending(
        test_repository,
        failure_classifications,
        push_stored,
        monkeypatch,
        text_log_summary_dict,
        ):
    """
    test submitting a job with a log set to pending, but with a text_log_summary.

    This should detect the artifact, not parse, and just mark the log as parsed,
    then generate bug suggestions.
    """

    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })

    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
Code example #44
File: job_loader.py, Project: ahal/treeherder-service
    def process_job(self, pulse_job):
        if self._is_valid_job(pulse_job):
            try:
                project = pulse_job["origin"]["project"]
                newrelic.agent.add_custom_parameter("project", project)

                repository = Repository.objects.get(name=project)

                if pulse_job["state"] != "unscheduled":
                    try:
                        self.validate_revision(repository, pulse_job)
                        store_job_data(repository, [self.transform(pulse_job)])
                    except AttributeError:
                        logger.warning("Skipping job due to bad attribute", exc_info=1)
            except Repository.DoesNotExist:
                logger.info("Job with unsupported project: %s", project)
Code example #45
def test_store_job_with_default_tier(test_repository, failure_classifications, push_stored):
    """test submitting a job with no tier specified gets default"""
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
        }
    }

    store_job_data(test_repository, [job_data])

    job = Job.objects.get(guid=job_guid)
    assert job.tier == 1
Code example #46
def test_parse_log(test_repository, failure_classifications, jobs_with_local_log, sample_resultset):
    """
    check that 2 job_artifacts get inserted when running a parse_log task for
    a successful job and that JobDetail objects get created
    """

    store_result_set_data(test_repository, sample_resultset)

    jobs = jobs_with_local_log
    for job in jobs:
        # make this a successful job, to check it's still parsed for errors
        job['job']['result'] = "success"
        job['revision'] = sample_resultset[0]['revision']

    store_job_data(test_repository, jobs)

    # this log generates 4 job detail objects at present
    assert JobDetail.objects.count() == 4
Code example #47
def test_store_job_with_text_log_summary_artifact_parsed(
        test_repository,
        failure_classifications,
        push_stored,
        monkeypatch,
        text_log_summary_dict,
        ):
    """
    test submitting a job with a pre-parsed log gets parse_status of
    "parsed" and doesn't parse the log, but we get the expected set of
    text log steps/errors and bug suggestions.
    """

    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    }

    store_job_data(test_repository, [job_data])

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
Code example #48
def test_store_job_pending_to_completed_with_unparsed_log(test_repository, push_stored,
                                                          failure_classifications,
                                                          activate_responses):

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    })
    tjc.add(tj)
    store_job_data(test_repository, tjc.get_collection_data())
    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # the second time, post a log that will get parsed
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    store_job_data(test_repository, tjc.get_collection_data())

    # should have a full set of text log errors
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
Code example #49
def test_store_job_with_default_tier(test_repository, failure_classifications, push_stored):
    """test submitting a job with no tier specified gets default"""

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
        }
    })
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())

    job = Job.objects.get(guid=job_guid)
    assert job.tier == 1
Code example #50
File: buildapi.py, Project: SebastinSanty/treeherder
def store_jobs(job_collections, chunk_size):
    errors = []
    for repository_name, jobs in job_collections.items():
        for collection in jobs.get_chunks(chunk_size=chunk_size):
            try:
                repository = Repository.objects.get(
                    name=repository_name)
                collection.validate()
                store_job_data(repository, collection.get_collection_data())
            except Exception:
                newrelic.agent.record_exception()
                errors.append({
                    "project": repository_name,
                    "collection": "job",
                    "message": traceback.format_exc()
                })

    if errors:
        raise common.CollectionNotStoredException(errors)
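Failures here are deferred rather than fatal: each chunk is stored independently, every exception is recorded, and a single CollectionNotStoredException carrying all of the error records is raised at the end, so one bad chunk cannot block the rest. A usage sketch (chunk size illustrative):

try:
    store_jobs(job_collections, chunk_size=50)
except common.CollectionNotStoredException as exc:
    # exc aggregates one error record per failed chunk
    logger.error(exc)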
Code example #51
def test_store_job_with_unparsed_log(test_repository, failure_classifications,
                                     push_stored, monkeypatch, activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """

    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    store_job_data(test_repository, tjc.get_collection_data())

    # should have 2 errors
    assert TextLogError.objects.count() == 2
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 2 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(id=1))) == 2