# Example 1
def test_store_job_with_unparsed_log(test_repository, failure_classifications,
                                     push_stored, monkeypatch,
                                     activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """

    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary

    # patch the module attribute so the log-parsing path calls our wrapper
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    # register mock HTTP responses serving the two sample logs
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    errorsummary = add_log_response(
        "mochitest-browser-chrome_errorsummary.log")

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    # completed job carrying two unparsed log references: the raw text log
    # and the errorsummary log
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid':
            job_guid,
            'state':
            'completed',
            'log_references': [
                {
                    'url': log_url,
                    'name': 'live_backing_log',
                    'parse_status': 'pending'
                },
                {
                    'url': errorsummary,
                    'name': 'mochi_errorsummary.log',
                    'parse_status': 'pending'
                },
            ],
        },
    }
    store_job_data(test_repository, [job_data])

    # should have 3 text log errors from the parsed logs
    assert TextLogError.objects.count() == 3
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 3 error summary lines
    assert len(get_error_summary(Job.objects.get(id=1))) == 3
def do_test(log):
    """
    Test a single log with the ``JobArtifactBuilder``.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = add_log_response("{}.txt.gz".format(log))

    artifact_builder = BuildbotJobArtifactBuilder(url)
    collection = ArtifactBuilderCollection(url, builders=artifact_builder)
    collection.parse()

    actual = collection.artifacts[artifact_builder.name]
    expected = test_utils.load_exp("{0}.jobartifact.json".format(log))

    # To regenerate the ``exp`` fixture files in bulk, dump ``actual`` as
    # JSON via SampleData().get_log_path("{0}.jobartifact.json".format(log)).

    # compare artifact-by-artifact so a mismatch pinpoints the failing entry
    assert len(actual) == len(expected)
    for key, artifact in actual.items():
        assert artifact == expected[key]
# Example 3
def test_store_job_pending_to_completed_with_unparsed_log(
    test_repository, push_stored, failure_classifications, activate_responses
):
    """A running job with no logs gains parsed errors once completed with a log."""
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # phase 1: submit the job as running, with no log references
    running_job = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {'job_guid': job_guid, 'state': 'running'},
    }
    store_job_data(test_repository, [running_job])

    # nothing has been parsed yet
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # phase 2: resubmit as completed, now carrying a parseable log
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    completed_job = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [
                {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'}
            ],
        },
    }
    store_job_data(test_repository, [completed_job])

    # the parsed log yields the full set of text log errors and suggestions
    assert TextLogError.objects.count() == 4
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    url = add_log_response(
        "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50.txt.gz"
    )
    collection = ArtifactBuilderCollection(url)
    # mark every parser finished up front so parse() emits only the skeleton
    for b in collection.builders:
        b.parser.complete = True
    collection.parse()

    expected = {
        "text_log_summary": {
            "step_data": {"steps": [], "errors_truncated": False},
            "logurl": url,
        },
        "Job Info": {"job_details": [], "logurl": url},
    }
    assert expected == collection.artifacts
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    url = add_log_response(
        "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50.txt.gz"
    )
    lpc = ArtifactBuilderCollection(url)
    # force every builder's parser into the completed state before parsing,
    # so the artifacts contain only the empty scaffolding
    for artifact_builder in lpc.builders:
        artifact_builder.parser.complete = True
    lpc.parse()

    empty_step_data = {"steps": [], "errors_truncated": False}
    assert lpc.artifacts == {
        "text_log_summary": {
            "step_data": empty_step_data,
            "logurl": url,
        },
        "Job Info": {
            "job_details": [],
            "logurl": url,
        },
    }
# Example 6
def do_test(log):
    """
    Test a single log with the ``JobArtifactBuilder``.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = add_log_response("{}.txt.gz".format(log))

    builder = BuildbotJobArtifactBuilder(url)
    parsed = ArtifactBuilderCollection(url, builders=builder)
    parsed.parse()

    produced = parsed.artifacts[builder.name]
    recorded = test_utils.load_exp("{0}.jobartifact.json".format(log))

    # When adding new fixtures, dump ``produced`` as indented JSON to
    # SampleData().get_log_path("{0}.jobartifact.json".format(log)).

    # same number of artifacts, and each one matches its recorded twin
    assert len(produced) == len(recorded)
    for name, artifact in produced.items():
        assert artifact == recorded[name]
# Example 7
def jobs_with_local_log(activate_responses):
    """Return sample job data whose first log reference points at a mocked URL."""
    sample_data = SampleData()
    url = add_log_response(
        "mozilla-inbound_ubuntu64_vm-debug_test-mochitest-other-bm53-tests1-linux-build122.txt.gz"
    )
    job = sample_data.job_data[0]
    # redirect the job's log reference to the locally-served copy
    job['job']['log_references'][0]['url'] = url
    return [job]
# Example 8
def jobs_with_local_log(activate_responses):
    """Build a one-element job list whose log URL is served by the mock responses."""
    data = SampleData()
    local_url = add_log_response(
        "mozilla-inbound_ubuntu64_vm-debug_test-mochitest-other-bm53-tests1-linux-build122.txt.gz"
    )
    first_job = data.job_data[0]
    # swap the remote log url for the local mock so parsing stays offline
    first_job['job']['log_references'][0]['url'] = local_url
    return [first_job]
# Example 9
def test_post_job_pending_to_completed_with_unparsed_log(
        test_repository, push_stored, failure_classifications,
        activate_responses, mock_post_json):
    """Posting a completed job with a log, after a running one, parses the log."""
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # phase 1: the job is running and has no log references
    running_payload = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    }
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob(running_payload))
    post_collection(test_repository.name, collection)

    # nothing has been parsed yet
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # phase 2: the job completes, now carrying a parseable buildbot log
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    completed_payload = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob(completed_payload))
    post_collection(test_repository.name, collection)

    # the parsed log yields the full set of text log errors and suggestions
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
# Example 10
def test_post_job_with_unparsed_log(test_repository, failure_classifications,
                                    push_stored, mock_post_json, monkeypatch,
                                    activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """
    # wrap get_error_summary so we can observe whether it was invoked
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)

    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    payload = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob(payload))
    post_collection(test_repository.name, collection)

    # the log parser produced two text log errors
    assert TextLogError.objects.count() == 2
    # get_error_summary was called to warm the bug suggestions cache
    assert mock_get_error_summary.called
    # two bug suggestion lines were generated
    assert len(get_error_summary(Job.objects.get(id=1))) == 2
def test_post_job_pending_to_completed_with_unparsed_log(test_repository,
                                                         push_stored,
                                                         failure_classifications,
                                                         activate_responses,
                                                         mock_post_json):
    """A job posted first as running, then completed with a log, gets parsed."""
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # first post: running state, no logs attached
    first_collection = client.TreeherderJobCollection()
    first_collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    }))
    post_collection(test_repository.name, first_collection)

    # no text log errors or bug suggestions exist yet
    assert TextLogError.objects.count() == 0
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 0

    # second post: completed state with a log that will get parsed
    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    second_collection = client.TreeherderJobCollection()
    second_collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }))
    post_collection(test_repository.name, second_collection)

    # parsing the log produced the full set of errors and suggestions
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
def test_post_job_with_unparsed_log(test_repository, failure_classifications,
                                    push_stored, mock_post_json,
                                    monkeypatch, activate_responses):
    """
    test submitting a job with an unparsed log parses the log,
    generates an appropriate set of text log steps, and calls
    get_error_summary (to warm the bug suggestions cache)
    """
    # record calls to get_error_summary while delegating to the real function
    spy = MagicMock(name='get_error_summary', wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        spy)

    log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    job_payload = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }
    tjc = client.TreeherderJobCollection()
    tjc.add(client.TreeherderJob(job_payload))
    post_collection(test_repository.name, tjc)

    # two text log errors were extracted from the log
    assert TextLogError.objects.count() == 2
    # the bug suggestions cache was warmed via get_error_summary
    assert spy.called
    # two error summary lines (aka bug suggestions) were generated
    assert len(get_error_summary(Job.objects.get(id=1))) == 2
def test_performance_log_parsing():
    """
    Validate that we can parse a generic performance artifact
    """
    # (logfile, expected number of performance artifacts); the first two
    # logs carry one artifact each, the third carries two
    cases = [
        ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
        ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
        ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2),
    ]
    for logfile, expected_count in cases:
        url = add_log_response(logfile)
        builder = BuildbotPerformanceDataArtifactBuilder(url=url)
        collection = ArtifactBuilderCollection(url, builders=[builder])
        collection.parse()

        artifact = collection.artifacts[builder.name]
        assert len(artifact['performance_data']) == expected_count
        # every emitted datum must conform to the Perfherder schema
        for datum in artifact['performance_data']:
            validate(datum, PERFHERDER_SCHEMA)
def test_performance_log_parsing():
    """
    Validate that we can parse a generic performance artifact
    """
    # each sample log paired with how many performance artifacts it contains
    for logfile, artifact_count in (
            ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
            ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
            ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2)):
        url = add_log_response(logfile)

        perf_builder = BuildbotPerformanceDataArtifactBuilder(url=url)
        lpc = ArtifactBuilderCollection(url, builders=[perf_builder])
        lpc.parse()

        produced = lpc.artifacts[perf_builder.name]['performance_data']
        assert len(produced) == artifact_count
        # each performance datum must validate against the Perfherder schema
        for perf_datum in produced:
            validate(perf_datum, PERFHERDER_SCHEMA)
def do_test(log):
    """
    Test a single log.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = add_log_response("{}.txt.gz".format(log))

    view_builder = BuildbotLogViewArtifactBuilder(url)
    collection = ArtifactBuilderCollection(url, builders=view_builder)
    collection.parse()

    actual = collection.artifacts[view_builder.name]
    expected = test_utils.load_exp("{0}.logview.json".format(log))

    # To regenerate fixtures, dump ``actual`` as JSON (indent=2) to
    # SampleData().get_log_path("{0}.logview.json".format(log)).

    assert actual == expected  # , diff(expected, actual)
def do_test(log):
    """
    Test a single log.

    ``log`` - the url prefix of the log to test.  Also searches for the
              result file with the same prefix.
    """
    url = add_log_response("{}.txt.gz".format(log))

    logview_builder = BuildbotLogViewArtifactBuilder(url)
    parsed = ArtifactBuilderCollection(url, builders=logview_builder)
    parsed.parse()

    produced = parsed.artifacts[logview_builder.name]
    recorded = test_utils.load_exp("{0}.logview.json".format(log))

    # New fixtures can be created by writing ``produced`` as indented JSON
    # to SampleData().get_log_path("{0}.logview.json".format(log)).

    assert produced == recorded  # , diff(recorded, produced)