def test_same_signature_multiple_performance_frameworks(
        test_repository, perf_job):
    """Identical perf data submitted under two different frameworks must
    yield one signature per framework, each with exactly one datum."""
    frameworks = ["some-perf-framework", "another-perf-framework"]
    for name in frameworks:
        PerformanceFramework.objects.create(name=name, enabled=True)
        raw_datum = sample_perf_datum(name)

        # the perf data adapter expects deserialized performance data
        payload = copy.copy(raw_datum)
        payload['blob'] = json.dumps({'performance_data': payload['blob']})

        store_performance_artifact(perf_job, payload)

    # we should have 2 performance signature objects, one for each framework
    # and one datum for each signature
    for name in frameworks:
        signature = PerformanceSignature.objects.get(
            framework__name=name,
            repository=test_repository,
            suite="some-perf-suite",
            test="some-perf-test",
        )
        datum = PerformanceDatum.objects.get(signature=signature)
        assert datum.value == 20.0
def _generate_perf_data_range(test_repository,
                              generic_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None,
                              reverse_push_range=False):
    """Ingest a 30-push series of perf data for the "cheezburger" framework.

    The subtest value is 1 for the first 15 pushes and 2 for the last 15,
    giving downstream alert tests a step change to detect.  Optional extra
    metadata can be merged into the suite and/or subtest dicts.
    """
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(
            name=framework_name, enabled=enable_framework)

    base_time = int(time.time())

    indexes = range(30)
    if reverse_push_range:
        indexes = reversed(indexes)

    values = [1] * 15 + [2] * 15
    for idx, value in zip(indexes, values):
        push = Push.objects.create(
            repository=test_repository,
            revision='abcdefgh%s' % idx,
            author='*****@*****.**',
            time=datetime.datetime.fromtimestamp(base_time + idx))
        job = create_generic_job('myguid%s' % idx, test_repository,
                                 push.id, generic_reference_data)

        suite = {
            'name': 'cheezburger metrics',
            'subtests': [{'name': 'test1', 'value': value}],
        }
        if add_suite_value:
            suite['value'] = value
        if extra_suite_metadata:
            suite.update(extra_suite_metadata)
        if extra_subtest_metadata:
            suite['subtests'][0].update(extra_subtest_metadata)

        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': framework_name},
                'suites': [suite],
            },
        }

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        store_performance_artifact(job, submit_datum)
def test_measurement_unit_can_be_updated(test_repository, later_perf_push,
                                         perf_job, generic_reference_data,
                                         sample_perf_artifact,
                                         sample_perf_artifact_with_new_unit):
    """A later submission carrying a new measurement unit updates the
    summary and the resubmitted subtest signature, while sibling subtest
    signatures keep their original unit."""
    _, first_payload = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, first_payload)

    _, second_payload = _prepare_test_data(sample_perf_artifact_with_new_unit)
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id, generic_reference_data)
    store_performance_artifact(later_job, second_payload)

    summary = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='')
    changed_subtest = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='test1')
    assert summary.measurement_unit == UPDATED_MEASUREMENT_UNIT
    assert changed_subtest.measurement_unit == UPDATED_MEASUREMENT_UNIT

    # no side effects when parent/sibling signatures
    # change measurement units
    untouched_subtest = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='test2')
    assert untouched_subtest.measurement_unit == MEASUREMENT_UNIT
def test_ingest_workflow(test_repository, perf_push, later_perf_push, perf_job,
                         generic_reference_data, sample_perf_artifact):
    """End-to-end ingestion check: every suite summary and every subtest in
    the sample artifact produces the expected signature and datum."""
    perf_datum, submit_datum = _prepare_test_data(sample_perf_artifact)

    store_performance_artifact(perf_job, submit_datum)

    assert PerformanceSignature.objects.all().count() == 8
    assert PerformanceFramework.objects.all().count() == 1
    framework = PerformanceFramework.objects.first()
    assert framework.name == FRAMEWORK_NAME

    framework_name = perf_datum['framework']['name']
    for suite in perf_datum['suites']:
        # verify the suite summary first, then each of its subtests
        _verify_signature(
            test_repository.name, framework_name, suite['name'], '',
            'my_option_hash', 'my_platform',
            suite.get('lowerIsBetter', True), suite.get('extraOptions'),
            suite.get('unit'), perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(
                test_repository.name, framework_name, suite['name'],
                subtest['name'], 'my_option_hash', 'my_platform',
                subtest.get('lowerIsBetter', True), suite.get('extraOptions'),
                suite.get('unit'), perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)
Beispiel #5
0
def store_job_artifacts(artifact_data):
    """
    Store a list of job artifacts. All of the datums in artifact_data need
    to be in the following format:

        {
            'type': 'json',
            'name': 'my-artifact-name',
            # blob can be any kind of structured data
            'blob': { 'stuff': [1, 2, 3, 4, 5] },
            'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
        }

    Falsy, unnamed, guid-less or unknown-job artifacts are logged and
    skipped; known artifact names are dispatched to their store helpers.
    """
    for artifact in artifact_data:
        # Determine what type of artifact we have received.
        # Guard clause: bug fix — the old trailing `else` logged
        # `artifact_name`, which is unbound (NameError) when the very first
        # artifact is falsy and stale on later iterations; log the artifact
        # value itself instead.
        if not artifact:
            logger.error(
                'store_job_artifacts: artifact type %s not understood',
                artifact)
            continue

        artifact_name = artifact.get('name')
        if not artifact_name:
            logger.error(
                "load_job_artifacts: Unnamed job artifact, skipping")
            continue
        job_guid = artifact.get('job_guid')
        if not job_guid:
            logger.error(
                "load_job_artifacts: Artifact '%s' with no "
                "job guid set, skipping",
                artifact_name,
            )
            continue

        try:
            job = Job.objects.get(guid=job_guid)
        except Job.DoesNotExist:
            logger.error('load_job_artifacts: No job_id for guid %s',
                         job_guid)
            continue

        if artifact_name == 'performance_data':
            store_performance_artifact(job, artifact)
        elif artifact_name == 'text_log_summary':
            try:
                store_text_log_summary_artifact(job, artifact)
            except IntegrityError:
                # duplicate insert: the log was most likely parsed already
                logger.warning(
                    "Couldn't insert text log information "
                    "for job with guid %s, this probably "
                    "means the job was already parsed",
                    job_guid,
                )
        else:
            logger.warning(
                "Unknown artifact type: %s submitted with job %s",
                artifact_name, job.guid)
Beispiel #6
0
def store_job_artifacts(artifact_data):
    """
    Store a list of job artifacts. All of the datums in artifact_data need
    to be in the following format:

        {
            'type': 'json',
            'name': 'my-artifact-name',
            # blob can be any kind of structured data
            'blob': { 'stuff': [1, 2, 3, 4, 5] },
            'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
        }

    Falsy, unnamed, guid-less or unknown-job artifacts are logged and
    skipped; known artifact names are dispatched to their store helpers.
    """
    for artifact in artifact_data:
        # Determine what type of artifact we have received.
        # Guard clause: bug fix — the old trailing `else` logged
        # `artifact_name`, which is unbound (NameError) when the very first
        # artifact is falsy and stale on later iterations; log the artifact
        # value itself instead.
        if not artifact:
            logger.error('store_job_artifacts: artifact type %s not understood',
                         artifact)
            continue

        artifact_name = artifact.get('name')
        if not artifact_name:
            logger.error("load_job_artifacts: Unnamed job artifact, skipping")
            continue
        job_guid = artifact.get('job_guid')
        if not job_guid:
            logger.error("load_job_artifacts: Artifact '%s' with no "
                         "job guid set, skipping", artifact_name)
            continue

        try:
            job = Job.objects.get(guid=job_guid)
        except Job.DoesNotExist:
            logger.error('load_job_artifacts: No job_id for guid %s', job_guid)
            continue

        if artifact_name == 'performance_data':
            store_performance_artifact(job, artifact)
        elif artifact_name == 'Job Info':
            store_job_info_artifact(job, artifact)
        elif artifact_name == 'text_log_summary':
            try:
                store_text_log_summary_artifact(job, artifact)
            except IntegrityError:
                # duplicate insert: the log was most likely parsed already
                logger.warning("Couldn't insert text log information "
                               "for job with guid %s, this probably "
                               "means the job was already parsed",
                               job_guid)
        elif artifact_name == 'buildapi':
            # record buildbot's request id so jobs can be correlated with
            # buildbot's own records
            buildbot_request_id = json.loads(artifact['blob']).get(
                'request_id')
            if buildbot_request_id:
                JobDetail.objects.update_or_create(
                    job=job,
                    title='buildbot_request_id',
                    value=str(buildbot_request_id))
        else:
            logger.warning("Unknown artifact type: %s submitted with job %s",
                           artifact_name, job.guid)
Beispiel #7
0
def _generate_perf_data_range(test_repository,
                              generic_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None,
                              reverse_push_range=False):
    """Submit perf data for 30 consecutive pushes; the measured value is 1
    for the first 15 pushes and 2 for the last 15 (a step change for alert
    tests to find).  Extra metadata can be merged into suite/subtest."""
    fw_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=fw_name,
                                            enabled=enable_framework)

    start = int(time.time())

    push_ids = range(30)
    if reverse_push_range:
        push_ids = reversed(push_ids)

    for i, value in zip(push_ids, [1] * 15 + [2] * 15):
        push_time = datetime.datetime.fromtimestamp(start + i)
        push = Push.objects.create(repository=test_repository,
                                   revision='abcdefgh%s' % i,
                                   author='*****@*****.**',
                                   time=push_time)
        job = create_generic_job('myguid%s' % i, test_repository, push.id,
                                 generic_reference_data)

        subtest = {'name': 'test1', 'value': value}
        suite = {'name': 'cheezburger metrics', 'subtests': [subtest]}
        if add_suite_value:
            suite['value'] = value
        if extra_suite_metadata:
            suite.update(extra_suite_metadata)
        if extra_subtest_metadata:
            subtest.update(extra_subtest_metadata)

        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {'framework': {'name': fw_name}, 'suites': [suite]},
        }

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        store_performance_artifact(job, submit_datum)
Beispiel #8
0
def test_hash_remains_unchanged(test_repository, perf_job, sample_perf_artifact):
    """Pin the signature-hash algorithm: any change to hash generation
    would silently orphan all historical perf data."""
    _, payload = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, payload)

    summary = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='')
    # Ensure we don't inadvertently change the way we generate signature hashes.
    assert summary.signature_hash == 'f451f0c9000a7f99e5dc2f05792bfdb0e11d0cac'
    subtest_hashes = PerformanceSignature.objects.filter(
        parent_signature=summary).values_list('signature_hash', flat=True)
    assert len(subtest_hashes) == 3
def test_timestamp_can_be_updated(test_repository, perf_job, later_perf_push,
                                  generic_reference_data,
                                  sample_perf_artifact):
    """Re-submitting the same datum from a later push bumps the
    signature's ``last_updated`` timestamp."""
    _, payload = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, payload)

    # same payload again, but attached to a job on a later push
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id, generic_reference_data)
    store_performance_artifact(later_job, payload)

    sig = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='test1')
    assert sig.last_updated == later_perf_push.time
def _generate_perf_data_range(
    test_repository,
    generic_reference_data,
    create_perf_framework=True,
    enable_framework=True,
    suite_provides_value=False,
    extra_suite_metadata=None,
    extra_subtest_metadata=None,
    reverse_push_range=False,
    job_tier=None,
):
    """Ingest a 30-push perf data series (values: 15x1 then 15x2) using
    the sample datum generator; suite/subtest metadata can be extended."""
    framework_name = "some-perf-framework"
    if create_perf_framework:
        PerformanceFramework.objects.create(
            name=framework_name, enabled=enable_framework)

    base_time = int(time.time())

    indices = range(30)
    if reverse_push_range:
        indices = reversed(indices)

    for idx, value in zip(indices, [1] * 15 + [2] * 15):
        push = Push.objects.create(
            repository=test_repository,
            revision=f"abcdefgh{idx}",
            author="*****@*****.**",
            time=datetime.datetime.fromtimestamp(base_time + idx),
        )
        job = create_generic_job(f"myguid{idx}", test_repository, push.id,
                                 generic_reference_data, tier=job_tier)
        datum = sample_perf_datum(framework_name, value)

        top_suite = datum['blob']['suites'][0]
        if suite_provides_value:
            top_suite['value'] = value
        if extra_suite_metadata:
            top_suite.update(extra_suite_metadata)
        if extra_subtest_metadata:
            top_suite['subtests'][0].update(extra_subtest_metadata)

        # the perf data adapter expects deserialized performance data
        payload = copy.copy(datum)
        payload['blob'] = json.dumps({'performance_data': payload['blob']})
        store_performance_artifact(job, payload)
def test_same_signature_multiple_performance_frameworks(
        test_repository, perf_job):
    """Submitting identical perf data under two frameworks yields a
    distinct signature (with a single datum) for each framework."""
    framework_names = ['cheezburger1', 'cheezburger2']
    for framework_name in framework_names:
        PerformanceFramework.objects.create(name=framework_name, enabled=True)
        blob = {
            'framework': {'name': framework_name},
            'suites': [{
                'name': 'cheezburger metrics',
                'subtests': [{'name': 'test1', 'value': 20.0, 'unit': 'ms'}],
            }],
        }
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': blob,
        }
        # the perf data adapter expects unserialized performance data
        payload = copy.copy(datum)
        payload['blob'] = json.dumps({'performance_data': blob})

        store_performance_artifact(perf_job, payload)

    # we should have 2 performance signature objects, one for each framework
    # and one datum for each signature
    for framework_name in framework_names:
        signature = PerformanceSignature.objects.get(
            framework__name=framework_name,
            repository=test_repository,
            suite='cheezburger metrics',
            test='test1',
        )
        assert PerformanceDatum.objects.get(signature=signature).value == 20.0
def test_changing_extra_options_decouples_perf_signatures(
        test_repository, later_perf_push, perf_job, generic_reference_data,
        sample_perf_artifact):
    """Changing a suite's extraOptions must create brand-new signatures
    rather than updating the existing ones."""
    modified_artifact = copy.deepcopy(sample_perf_artifact)
    modified_artifact['blob']['suites'][0]['extraOptions'] = [
        'different-extra-options']

    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id, generic_reference_data)
    _, original_payload = _prepare_test_data(sample_perf_artifact)
    _, modified_payload = _prepare_test_data(modified_artifact)

    store_performance_artifact(perf_job, original_payload)
    signatures_before = PerformanceSignature.objects.all().count()
    store_performance_artifact(later_job, modified_payload)

    # Perfherder treats perf data with new properties as entirely new data.
    # Thus, it creates new & separate signatures for them.
    assert signatures_before < PerformanceSignature.objects.all().count()
def test_same_signature_multiple_performance_frameworks(test_repository,
                                                        perf_job):
    """Each framework gets its own signature even for otherwise-identical
    perf data, and every signature ends up with exactly one datum."""
    names = ['cheezburger1', 'cheezburger2']
    for name in names:
        PerformanceFramework.objects.create(name=name, enabled=True)
        submit_datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': name},
                'suites': [{
                    'name': 'cheezburger metrics',
                    'subtests': [{'name': 'test1', 'value': 20.0}],
                }],
            },
        }
        # the perf data adapter expects unserialized performance data
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})

        store_performance_artifact(perf_job, submit_datum)

    # we should have 2 performance signature objects, one for each framework
    # and one datum for each signature
    for name in names:
        sig = PerformanceSignature.objects.get(framework__name=name,
                                               repository=test_repository,
                                               suite='cheezburger metrics',
                                               test='test1')
        assert PerformanceDatum.objects.get(signature=sig).value == 20.0
Beispiel #14
0
def test_load_generic_data(test_repository, perf_push, perf_job,
                           generic_reference_data):
    """Full ingestion round-trip for a hand-built perf artifact.

    Verifies signature/datum creation for 3 suites with 3+1+1 subtests
    (8 signatures total), then re-submits the same payload from a later
    push and checks the signature's ``last_updated`` advances.
    """
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name,
                                               enabled=True)

    # Hand-built artifact: one suite with explicit lowerIsBetter flags on
    # its subtests, plus two minimal suites to exercise the defaults.
    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {
                'name': framework_name
            },
            'suites': [{
                'name':
                'cheezburger metrics',
                'extraOptions': ['shell', 'e10s'],
                'lowerIsBetter':
                True,
                'value':
                10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0,
                    'lowerIsBetter': True
                }, {
                    'name': 'test2',
                    'value': 30.0,
                    'lowerIsBetter': False
                }, {
                    'name': 'test3',
                    'value': 40.0
                }]
            }, {
                'name': 'cheezburger metrics 2',
                'lowerIsBetter': False,
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0
                }]
            }, {
                'name': 'cheezburger metrics 3',
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0
                }]
            }]
        }
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps(
        {'performance_data': submit_datum['blob']})

    store_performance_artifact(perf_job, submit_datum)
    # 3 suite summaries + 5 subtests = 8 signatures
    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']

    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'], suite['name'], '',
                          'my_option_hash', 'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'), perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'], suite['name'],
                              subtest['name'], 'my_option_hash', 'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'), perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)

    # the first suite's summary signature should parent its 3 subtests
    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash',
                                                        flat=True)
    assert len(subtest_signatures) == 3

    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
    later_push = Push.objects.create(repository=test_repository,
                                     revision='1234abcd12',
                                     author='*****@*****.**',
                                     time=later_timestamp)
    later_job = create_generic_job('lateguid', test_repository, later_push.id,
                                   generic_reference_data)
    store_performance_artifact(later_job, submit_datum)
    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == later_timestamp
def test_load_generic_data(test_repository,
                           perf_push, perf_job, generic_reference_data):
    """Full ingestion round-trip for a hand-built perf artifact.

    Verifies signature/datum creation for 3 suites with 3+1+1 subtests
    (8 signatures total), then re-submits the same payload from a later
    push and checks the signature's ``last_updated`` advances.
    """
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name, enabled=True)

    # Hand-built artifact: one suite with explicit lowerIsBetter flags on
    # its subtests, plus two minimal suites to exercise the defaults.
    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {'name': framework_name},
            'suites': [
                {
                    'name': 'cheezburger metrics',
                    'extraOptions': ['shell', 'e10s'],
                    'lowerIsBetter': True,
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0,
                            'lowerIsBetter': True
                        },
                        {
                            'name': 'test2',
                            'value': 30.0,
                            'lowerIsBetter': False
                        },
                        {
                            'name': 'test3',
                            'value': 40.0
                        }
                    ]
                },
                {
                    'name': 'cheezburger metrics 2',
                    'lowerIsBetter': False,
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0
                        }
                    ]
                },
                {
                    'name': 'cheezburger metrics 3',
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0
                        }
                    ]
                }
            ]
        }
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps({
        'performance_data': submit_datum['blob']
    })

    store_performance_artifact(perf_job, submit_datum)
    # 3 suite summaries + 5 subtests = 8 signatures
    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']

    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'],
                          suite['name'],
                          '',
                          'my_option_hash',
                          'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'),
                          perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'],
                              suite['name'],
                              subtest['name'],
                              'my_option_hash',
                              'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'),
                              perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)

    # the first suite's summary signature should parent its 3 subtests
    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash', flat=True)
    assert len(subtest_signatures) == 3

    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
    later_push = Push.objects.create(
        repository=test_repository,
        revision='1234abcd12',
        author='*****@*****.**',
        time=later_timestamp)
    # NOTE(review): this call passes an extra positional argument `2`
    # between push id and reference data, unlike every sibling call site
    # of create_generic_job — presumably a tier; confirm the signature.
    later_job = create_generic_job('lateguid', test_repository,
                                   later_push.id, 2, generic_reference_data)
    store_performance_artifact(later_job, submit_datum)
    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == later_timestamp
Beispiel #16
0
def store_job_artifacts(artifact_data):
    """
    Store a list of job artifacts. All of the datums in artifact_data need
    to be in the following format:

        {
            'type': 'json',
            'name': 'my-artifact-name',
            # blob can be any kind of structured data
            'blob': { 'stuff': [1, 2, 3, 4, 5] },
            'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
        }

    Falsy, unnamed, guid-less or unknown-job artifacts are logged and
    skipped; known artifact names are dispatched to their store helpers.
    """
    # The index yielded by the previous ``enumerate`` was never used, so
    # plain iteration suffices.
    for artifact in artifact_data:
        # Determine what type of artifact we have received.
        # Guard clause: bug fix — the old trailing `else` logged
        # `artifact_name`, which is unbound (NameError) when the very first
        # artifact is falsy and stale on later iterations; log the artifact
        # value itself instead.
        if not artifact:
            logger.error('store_job_artifacts: artifact type %s not understood',
                         artifact)
            continue

        artifact_name = artifact.get('name')
        if not artifact_name:
            logger.error("load_job_artifacts: Unnamed job artifact, "
                         "skipping")
            continue
        job_guid = artifact.get('job_guid')
        if not job_guid:
            # lazy %-style logging args instead of eager .format(): the
            # message is only rendered if the record is actually emitted
            logger.error("load_job_artifacts: Artifact '%s' with no "
                         "job guid set, skipping", artifact_name)
            continue

        try:
            job = Job.objects.get(guid=job_guid)
        except Job.DoesNotExist:
            logger.error('load_job_artifacts: No job_id for guid %s',
                         job_guid)
            continue

        if artifact_name == 'performance_data':
            store_performance_artifact(job, artifact)
        elif artifact_name == 'Job Info':
            store_job_info_artifact(job, artifact)
        elif artifact_name == 'text_log_summary':
            try:
                store_text_log_summary_artifact(job, artifact)
            except IntegrityError:
                # duplicate insert: the log was most likely parsed already
                logger.warning("Couldn't insert text log information "
                               "for job with guid %s, this probably "
                               "means the job was already parsed",
                               job_guid)
        elif artifact_name == 'buildapi':
            # record buildbot's request id so jobs can be correlated with
            # buildbot's own records
            buildbot_request_id = json.loads(artifact['blob']).get(
                'request_id')
            if buildbot_request_id:
                JobDetail.objects.update_or_create(
                    job=job,
                    title='buildbot_request_id',
                    value=str(buildbot_request_id))
        else:
            logger.warning("Unknown artifact type: %s submitted with job %s",
                           artifact_name, job.guid)