예제 #1
0
def test_store_job_pending_to_completed_with_unparsed_log(
    test_repository, push_stored, failure_classifications, activate_responses
):
    """
    Storing a running job with no logs must create no text log errors
    and no bug suggestions; re-storing the same job as completed with a
    pending log reference must trigger log parsing and populate both.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {'job_guid': job_guid, 'state': 'running'},
    }
    store_job_data(test_repository, [job_data])
    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # the second time, post a log that will get parsed
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [
                {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'}
            ],
        },
    }
    store_job_data(test_repository, [job_data])

    # should have a full set of text log errors
    # (the sample log is known to contain 4 error lines)
    assert TextLogError.objects.count() == 4
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4
예제 #2
0
def test_create_error_summary(failure_classifications,
                              jobs_with_local_log, sample_resultset,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_result_set_data(test_repository, sample_resultset)

    jobs = jobs_with_local_log
    for job in jobs:
        # force a failing result so the log parser emits error lines
        job['job']['result'] = "testfailed"
        job['revision'] = sample_resultset[0]['revision']

    store_job_data(test_repository, jobs)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = {"search", "search_terms", "bugs"}
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
예제 #3
0
def test_create_error_summary(
    failure_classifications, jobs_with_local_log, sample_push, test_repository
):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_push_data(test_repository, sample_push)

    jobs = jobs_with_local_log
    for job in jobs:
        # force a failing result so the log parser emits error lines
        job['job']['result'] = "testfailed"
        job['revision'] = sample_push[0]['revision']

    store_job_data(test_repository, jobs)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = {"search", "path_end", "search_terms", "bugs", "line_number"}
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
예제 #4
0
File: jobs.py — Project: indygreg/treeherder
    def text_log_summary(self, request, project, pk=None):
        """
        Get a list of test failure lines for the job

        Returns 404 when the job or its text log summary does not exist;
        raises ValueError if more than one summary exists for the job.
        """
        try:
            # pk here is the project-specific job id, not the global Job pk
            job = Job.objects.get(repository__name=project,
                                  project_specific_id=pk)
        except ObjectDoesNotExist:
            return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)

        summary = TextLogSummary.objects.filter(
            job_guid=job.guid
        ).prefetch_related("lines").all()

        # exactly one summary is expected per job; more indicates bad data
        if len(summary) > 1:
            raise ValueError("Got multiple TextLogSummaries for the same job")

        if not summary:
            return Response("No text_log_summary generated for job with id: {0}".format(pk),
                            status=HTTP_404_NOT_FOUND)

        summary = summary[0]

        # map error line numbers to their raw log text so the serialized
        # summary lines can be annotated with the actual line contents
        lines_by_number = {error.line_number: error.line for error in
                           TextLogError.objects.filter(step__job=job)}

        rv = serializers.TextLogSummarySerializer(summary).data
        rv["bug_suggestions"] = get_error_summary(job)

        # NOTE(review): assumes every summary line_number has a matching
        # TextLogError row — a missing one would raise KeyError; confirm.
        for line in rv["lines"]:
            line["line"] = lines_by_number[line["line_number"]]

        return Response(rv)
예제 #5
0
def test_post_job_pending_to_completed_with_unparsed_log(
        test_repository, push_stored, failure_classifications,
        activate_responses, mock_post_json):
    """
    Posting a running job (no logs) through the client must create no
    text log errors or bug suggestions; posting the same job again as
    completed with a pending log must trigger parsing of that log.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)
    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # the second time, post a log that will get parsed
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid':
            job_guid,
            'state':
            'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)

    # should have a full set of text log errors
    # (this sample log yields 2 error lines when parsed as buildbot_text)
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
def test_post_job_pending_to_completed_with_unparsed_log(test_repository,
                                                         result_set_stored,
                                                         failure_classifications,
                                                         mock_post_json):
    """
    Result-set-fixture variant: a running job (no logs) posts cleanly
    with no errors, then the completed submission with a pending local
    log gets parsed into text log errors and bug suggestions.

    NOTE(review): this function has the same name as the definition
    immediately above it; at module level the later one shadows the
    earlier — likely two scraped revisions of the same test.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)
    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 0

    # the second time, post a log that will get parsed
    log_url = "file://{0}".format(
        SampleData().get_log_path("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz"))
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)

    # should have a full set of text log errors
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
예제 #7
0
    def bug_suggestions(self, request, project, pk=None):
        """
        Gets a set of bug suggestions for this job
        """
        # Resolve the job within the named repository; a missing job is
        # reported as a 404 rather than an unhandled exception.
        try:
            job = Job.objects.get(repository__name=project, id=pk)
        except ObjectDoesNotExist:
            message = "No job with id: {0}".format(pk)
            return Response(message, status=HTTP_404_NOT_FOUND)

        # Delegate to the shared error-summary computation.
        return Response(get_error_summary(job))
예제 #8
0
def store_text_log_summary_artifact(job, text_log_summary_artifact):
    """
    Store the contents of the text log summary artifact

    Creates one TextLogError row per entry in the artifact's ``errors``
    list, then computes the bug suggestions to warm the cache.
    """
    errors = json.loads(text_log_summary_artifact['blob'])['errors']

    # all-or-nothing: either every error line is stored or none are
    with transaction.atomic():
        for error in errors:
            obj, created = TextLogError.objects.get_or_create(
                job=job,
                line_number=error['linenumber'],
                line=astral_filter(error['line']),
            )
            if not created:
                # the same (line_number, line) pair appeared twice in the
                # artifact; keep the first and log rather than fail
                logger.warning('duplicate error lines processed for job %s',
                               job.id)

    # get error summary immediately (to warm the cache)
    error_summary.get_error_summary(job)
예제 #9
0
File: jobs.py — Project: MikeLing/treeherder
    def bug_suggestions(self, request, project, pk=None):
        """
        Gets a set of bug suggestions for this job
        """
        try:
            job = Job.objects.get(repository__name=project, id=pk)
        except ObjectDoesNotExist:
            # No such job in this repository: 404 with an explanatory body.
            return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)

        suggestions = get_error_summary(job)
        return Response(suggestions)
예제 #10
0
def test_store_job_with_unparsed_log(test_repository, failure_classifications,
                                     push_stored, monkeypatch,
                                     activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """

    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary

    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    errorsummary = add_log_response(
        "mochitest-browser-chrome_errorsummary.log")

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid':
            job_guid,
            'state':
            'completed',
            'log_references': [
                {
                    'url': log_url,
                    'name': 'live_backing_log',
                    'parse_status': 'pending'
                },
                {
                    'url': errorsummary,
                    'name': 'mochi_errorsummary.log',
                    'parse_status': 'pending'
                },
            ],
        },
    }
    store_job_data(test_repository, [job_data])

    # should have 3 errors
    # NOTE(review): comment previously said 4, contradicting the assert
    # below — aligned with the code; confirm the intended count.
    assert TextLogError.objects.count() == 3
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 3 error summary lines
    assert len(get_error_summary(Job.objects.get(id=1))) == 3
예제 #11
0
def store_text_log_summary_artifact(job, text_log_summary_artifact):
    """
    Store the contents of the text log summary artifact

    Creates a TextLogStep row per step in the artifact's ``step_data``
    (with its errors, if any), then warms the bug-suggestion cache.
    """
    step_data = json.loads(text_log_summary_artifact['blob'])['step_data']
    # invert the model's RESULTS choices: human-readable name -> db value
    result_map = {v: k for (k, v) in TextLogStep.RESULTS}
    # all-or-nothing: either every step/error is stored or none are
    with transaction.atomic():
        for step in step_data['steps']:
            # truncate the step name to the model field's max length
            name = step['name'][:TextLogStep._meta.get_field('name').
                                max_length]
            # process start/end times if we have them
            # we currently don't support timezones in treeherder, so
            # just ignore that when importing/updating the bug to avoid
            # a ValueError (though by default the text log summaries
            # we produce should have time expressed in UTC anyway)
            time_kwargs = {}
            for tkey in ('started', 'finished'):
                if step.get(tkey):
                    time_kwargs[tkey] = dateutil.parser.parse(step[tkey],
                                                              ignoretz=True)

            log_step = TextLogStep.objects.create(
                job=job,
                started_line_number=step['started_linenumber'],
                finished_line_number=step['finished_linenumber'],
                name=name,
                result=result_map[step['result']],
                **time_kwargs,
            )

            if step.get('errors'):
                for error in step['errors']:
                    TextLogError.objects.create(
                        job=job,
                        step=log_step,
                        line_number=error['linenumber'],
                        line=astral_filter(error['line']),
                    )

    # get error summary immediately (to warm the cache)
    error_summary.get_error_summary(job)
예제 #12
0
def test_store_job_pending_to_completed_with_unparsed_log(test_repository, push_stored,
                                                          failure_classifications,
                                                          activate_responses):
    """
    Storing a running job with no logs must create no text log errors
    or bug suggestions; re-storing it as completed with a pending
    buildbot_text log must trigger parsing and populate both.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    }
    store_job_data(test_repository, [job_data])
    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # the second time, post a log that will get parsed
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }
    store_job_data(test_repository, [job_data])

    # should have a full set of text log errors
    # (this sample log yields 2 error lines when parsed as buildbot_text)
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
예제 #13
0
def test_post_job_with_text_log_summary_artifact_pending(
    test_repository,
    failure_classifications,
    push_stored,
    monkeypatch,
    mock_post_json,
    text_log_summary_dict,
):
    """
    test submitting a job with a log set to pending, but with a text_log_summary.

    This should detect the artifact, not parse, and just mark the log as parsed,
    then generate bug suggestions.
    """

    # patch the step parser so any parse attempt would be observable
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid':
            job_guid,
            'state':
            'completed',
            'log_references': [{
                'url':
                'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })

    tjc.add(tj)

    post_collection(test_repository.name, tjc)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
예제 #14
0
def store_text_log_summary_artifact(job, text_log_summary_artifact):
    """
    Store the contents of the text log summary artifact

    Creates a TextLogStep row per step in the artifact's ``step_data``
    (with its errors attached to the step), then warms the
    bug-suggestion cache for the job.
    """
    step_data = json.loads(
        text_log_summary_artifact['blob'])['step_data']
    # invert the model's RESULTS choices: human-readable name -> db value
    result_map = {v: k for (k, v) in TextLogStep.RESULTS}
    # all-or-nothing: either every step/error is stored or none are
    with transaction.atomic():
        for step in step_data['steps']:
            # truncate the step name to the model field's max length
            name = step['name'][:TextLogStep._meta.get_field('name').max_length]
            # process start/end times if we have them
            # we currently don't support timezones in treeherder, so
            # just ignore that when importing/updating the bug to avoid
            # a ValueError (though by default the text log summaries
            # we produce should have time expressed in UTC anyway)
            time_kwargs = {}
            for tkey in ('started', 'finished'):
                if step.get(tkey):
                    time_kwargs[tkey] = dateutil.parser.parse(
                        step[tkey], ignoretz=True)

            log_step = TextLogStep.objects.create(
                job=job,
                started_line_number=step['started_linenumber'],
                finished_line_number=step['finished_linenumber'],
                name=name,
                result=result_map[step['result']],
                **time_kwargs)

            if step.get('errors'):
                for error in step['errors']:
                    # errors link only to the step here (the step links
                    # to the job), unlike the variant that also sets job=
                    TextLogError.objects.create(
                        step=log_step,
                        line_number=error['linenumber'],
                        line=astral_filter(error['line']))

    # get error summary immediately (to warm the cache)
    error_summary.get_error_summary(job)
def test_post_job_with_text_log_summary_artifact_parsed(
    test_project,
    monkeypatch,
    result_set_stored,
    mock_post_json,
    text_log_summary_dict,
):
    """
    test submitting a job with a pre-parsed log gets parse_status of
    "parsed" and doesn't parse the log, but we get the expected set of
    text log steps/errors and bug suggestions.
    """

    # patch the step parser so any parse attempt would be observable
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid':
            job_guid,
            'state':
            'completed',
            'log_references': [{
                'url':
                'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    tjc.add(tj)

    post_collection(test_project, tjc)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def test_post_job_with_text_log_summary_artifact_pending(
        test_repository,
        failure_classifications,
        result_set_stored,
        monkeypatch,
        mock_post_json,
        text_log_summary_dict,
        ):
    """
    test submitting a job with a log set to pending, but with a text_log_summary.

    This should detect the artifact, not parse, and just mark the log as parsed,
    then generate bug suggestions.
    """

    # patch the step parser so any parse attempt would be observable
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })

    tjc.add(tj)

    post_collection(test_repository.name, tjc)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
예제 #17
0
def test_post_job_with_unparsed_log(test_repository, failure_classifications,
                                    push_stored, mock_post_json, monkeypatch,
                                    activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """

    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid':
            job_guid,
            'state':
            'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)

    # should have 2 errors
    assert TextLogError.objects.count() == 2
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 2 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(id=1))) == 2
def test_post_job_with_text_log_summary_artifact_parsed(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        text_log_summary_dict,
        ):
    """
    test submitting a job with a pre-parsed log gets parse_status of
    "parsed" and doesn't parse the log, but we get the expected set of
    text log steps/errors and bug suggestions.
    """

    # patch the step parser so any parse attempt would be observable
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    tjc.add(tj)

    post_collection(test_project, tjc)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def test_post_job_with_unparsed_log(test_repository, failure_classifications,
                                    result_set_stored, mock_post_json,
                                    monkeypatch):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """

    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    # serve the sample log straight from disk via a file:// URL
    log_url = "file://{0}".format(
        SampleData().get_log_path("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz"))

    tjc = client.TreeherderJobCollection()
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)

    # should have 2 errors
    assert TextLogError.objects.count() == 2
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 2 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(id=1))) == 2
예제 #20
0
def test_create_error_summary(jm, jobs_with_local_log, sample_resultset,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    jm.store_result_set_data(sample_resultset)

    jobs = jobs_with_local_log
    for job in jobs:
        # force a failing result so the log parser emits error lines
        job['job']['result'] = "testfailed"
        job['revision'] = sample_resultset[0]['revision']

    jm.store_job_data(jobs)

    job_id = jm.get_dhub().execute(
        proc="jobs_test.selects.row_by_guid",
        placeholders=[jobs[0]['job']['job_guid']]
    )[0]['id']

    job_artifacts = jm.get_dhub().execute(
        proc="jobs_test.selects.job_artifact",
        placeholders=[job_id]
    )

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # should no longer have any bug suggestions artifacts
    assert len(job_artifacts) == 0

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = {"search", "search_terms", "bugs"}
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
예제 #21
0
def test_create_error_summary(jm, jobs_with_local_log, sample_resultset,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    jm.store_result_set_data(sample_resultset)

    jobs = jobs_with_local_log
    for job in jobs:
        # force a failing result so the log parser emits error lines
        job['job']['result'] = "testfailed"
        job['revision'] = sample_resultset[0]['revision']

    jm.store_job_data(jobs)

    job_id = jm.get_dhub().execute(proc="jobs_test.selects.row_by_guid",
                                   placeholders=[jobs[0]['job']['job_guid']
                                                 ])[0]['id']

    job_artifacts = jm.get_dhub().execute(
        proc="jobs_test.selects.job_artifact", placeholders=[job_id])

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # should no longer have any bug suggestions artifacts
    assert len(job_artifacts) == 0

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = {"search", "search_terms", "bugs"}
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
예제 #22
0
File: jobs.py — Project: rugby110/treeherder
    def text_log_summary(self, request, project, pk=None):
        """
        Get a list of test failure lines for the job

        Returns 404 when the job or its text log summary does not exist;
        raises ValueError if more than one summary exists for the job.
        """
        try:
            # pk here is the project-specific job id, not the global Job pk
            job = Job.objects.get(repository__name=project,
                                  project_specific_id=pk)
        except ObjectDoesNotExist:
            return Response("No job with id: {0}".format(pk),
                            status=HTTP_404_NOT_FOUND)

        summary = TextLogSummary.objects.filter(
            job_guid=job.guid).prefetch_related("lines").all()

        # exactly one summary is expected per job; more indicates bad data
        if len(summary) > 1:
            raise ValueError("Got multiple TextLogSummaries for the same job")

        if not summary:
            return Response(
                "No text_log_summary generated for job with id: {0}".format(
                    pk),
                status=HTTP_404_NOT_FOUND)

        summary = summary[0]

        # map error line numbers to their raw log text so the serialized
        # summary lines can be annotated with the actual line contents
        lines_by_number = {
            error.line_number: error.line
            for error in TextLogError.objects.filter(step__job=job)
        }

        rv = serializers.TextLogSummarySerializer(summary).data
        rv["bug_suggestions"] = get_error_summary(job)

        # NOTE(review): assumes every summary line_number has a matching
        # TextLogError row — a missing one would raise KeyError; confirm.
        for line in rv["lines"]:
            line["line"] = lines_by_number[line["line_number"]]

        return Response(rv)
예제 #23
0
def test_store_job_artifacts_by_add_artifact(
        test_repository,
        failure_classifications,
        push_stored,
        monkeypatch,
        ):
    """Submit a job whose artifacts were attached via ``add_artifact``.

    The log arrives pre-parsed (parse_status "parsed"), so the log parser
    must never run, while the submitted ``text_log_summary`` artifact must
    still drive the generation of bug suggestions.
    """
    # Spy on the parser so we can prove it is never invoked below.
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    log_url = "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log"

    # Pre-parsed text_log_summary payload: one step carrying a single error.
    text_log_summary = {
        "logurl": log_url,
        "step_data": {
            "steps": [{
                "name": "foobar",
                "result": "testfailed",
                "started_linenumber": 1,
                "finished_linenumber": 100000,
                "started": "2016-07-13 16:09:31",
                "finished": "2016-07-13 16:09:31",
                "errors": [
                    {"line": "TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.", "linenumber": 64435}
                ],
            }],
        },
    }
    job_info = {"job_details": [{"title": "mytitle", "value": "myvalue"}]}
    private_build = {"build_url": "feh", "chunk": 1, "config_file": "mah"}

    artifacts = [
        {'name': 'text_log_summary', 'type': 'json',
         'blob': json.dumps(text_log_summary), 'job_guid': job_guid},
        {'name': 'Job Info', 'type': 'json',
         'blob': json.dumps(job_info), 'job_guid': job_guid},
        {'name': 'privatebuild', 'type': 'json',
         'blob': json.dumps(private_build), 'job_guid': job_guid},
    ]

    store_job_data(test_repository, [{
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        "job": {
            "artifacts": artifacts,
            "job_guid": job_guid,
            "log_references": [{
                "name": "autophone-nexus-one-1.log",
                "parse_status": "parsed",
                "url": log_url,
            }],
            "state": "completed",
        },
    }])

    # The Job Info artifact produced exactly one JobDetail row.
    assert JobDetail.objects.count() == 1
    detail = JobDetail.objects.get(job__guid=job_guid)
    assert model_to_dict(detail) == {
        'id': 1,
        'job': 1,
        'title': 'mytitle',
        'value': 'myvalue',
        'url': None
    }

    # The pre-parsed summary was ingested as a single step...
    assert TextLogStep.objects.count() == 1
    step = TextLogStep.objects.get(job__guid=job_guid)
    assert model_to_dict(step) == {
        'id': 1,
        'job': 1,
        'started': datetime.datetime(2016, 7, 13, 16, 9, 31),
        'finished': datetime.datetime(2016, 7, 13, 16, 9, 31),
        'name': 'foobar',
        'result': 1,
        'started_line_number': 1,
        'finished_line_number': 100000
    }

    # ...holding the single submitted error line.
    assert TextLogError.objects.count() == 1
    text_log_error = TextLogError.objects.get(step__job__guid=job_guid)
    assert model_to_dict(text_log_error) == {
        'id': 1,
        'line': 'TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.',
        'line_number': 64435,
        'step': 1,
    }

    # Bug suggestions were still generated from the pre-parsed summary.
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 1

    check_job_log(test_repository, job_guid, JobLog.PARSED)

    # The real parser never ran for the pre-parsed log.
    assert mock_parse.called is False
def test_post_job_artifacts_by_add_artifact(
    test_project,
    monkeypatch,
    result_set_stored,
    mock_post_json,
):
    """Post a job through the client API with ``add_artifact`` artifacts.

    The log is submitted pre-parsed (parse_status "parsed"), so the log
    parser must never run; the posted ``text_log_summary`` artifact should
    still trigger generation of bug suggestions.
    """
    # Spy on the parser so we can prove it is never invoked below.
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    log_url = "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log"

    tj = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        "job": {
            "artifacts": [],
            "job_guid": job_guid,
            "log_references": [{
                "name": "autophone-nexus-one-1.log",
                "parse_status": "parsed",
                "url": log_url,
            }],
            "state": "completed",
        },
    })

    # Pre-parsed text_log_summary payload: one step carrying a single error.
    tls_blob = json.dumps({
        "logurl": log_url,
        "step_data": {
            "steps": [{
                "name": "foobar",
                "result": "testfailed",
                "started_linenumber": 1,
                "finished_linenumber": 100000,
                "started": "2016-07-13 16:09:31",
                "finished": "2016-07-13 16:09:31",
                "errors": [{
                    "line": "TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.",
                    "linenumber": 64435,
                }],
            }],
        },
    })
    ji_blob = json.dumps({
        "job_details": [{"title": "mytitle", "value": "myvalue"}],
    })
    bapi_blob = json.dumps({"buildername": "merd"})
    pb_blob = json.dumps({
        "build_url": "feh",
        "chunk": 1,
        "config_file": "mah",
    })

    tj.add_artifact("text_log_summary", "json", tls_blob)
    tj.add_artifact("Job Info", "json", ji_blob)
    tj.add_artifact("buildapi", "json", bapi_blob)
    tj.add_artifact("privatebuild", "json", pb_blob)

    tjc = client.TreeherderJobCollection()
    tjc.add(tj)
    post_collection(test_project, tjc)

    # The Job Info artifact produced exactly one JobDetail row.
    assert JobDetail.objects.count() == 1
    detail = JobDetail.objects.get(job__guid=job_guid)
    assert model_to_dict(detail) == {
        'id': 1,
        'job': 1,
        'title': 'mytitle',
        'value': 'myvalue',
        'url': None
    }

    # The pre-parsed summary was ingested as a single step...
    assert TextLogStep.objects.count() == 1
    step = TextLogStep.objects.get(job__guid=job_guid)
    assert model_to_dict(step) == {
        'id': 1,
        'job': 1,
        'started': datetime.datetime(2016, 7, 13, 16, 9, 31),
        'finished': datetime.datetime(2016, 7, 13, 16, 9, 31),
        'name': 'foobar',
        'result': 1,
        'started_line_number': 1,
        'finished_line_number': 100000
    }

    # ...holding the single submitted error line.
    assert TextLogError.objects.count() == 1
    error = TextLogError.objects.get(step__job__guid=job_guid)
    assert model_to_dict(error) == {
        'id': 1,
        'line': 'TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.',
        'line_number': 64435,
        'step': 1,
        'failure_line': None,
        'best_classification': None,
        'best_is_verified': False
    }

    # Bug suggestions were still generated from the pre-parsed summary.
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 1

    check_job_log(test_project, job_guid, JobLog.PARSED)

    # The real parser never ran for the pre-parsed log.
    assert mock_parse.called is False