Example 1
    def store_performance_artifact(
            self, job_ids, performance_artifact_placeholders):
        """
        Store the performance data
        """

        # Retrieve list of job signatures associated with the jobs
        job_data = self.get_job_signatures_from_ids(job_ids)

        # collect the distinct reference-data signatures for these jobs
        # (an eager set comprehension; map() over a side-effecting lambda
        # would be a lazy no-op on Python 3)
        job_ref_data_signatures = {
            job_data[job_guid]['signature'] for job_guid in job_data
        }

        for perf_data in performance_artifact_placeholders:
            job_guid = perf_data["job_guid"]
            ref_data_signature = job_data[job_guid]['signature']
            # At the moment there could be multiple signatures returned
            # by this, but let's just ignore that and take the first
            # if there are multiple (since the properties we care about should
            # be the same)
            ref_data = model_to_dict(ReferenceDataSignatures.objects.filter(
                signature=ref_data_signature,
                repository=self.project)[0])

            # adapt and load data into placeholder structures
            load_perf_artifacts(self.project, ref_data, job_data, perf_data)
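A related pitfall when building such a set: on Python 3, map() returns a lazy iterator, so a side-effecting lambda never runs unless the result is consumed; an eager set comprehension avoids this. A stdlib-only sketch (data stands in for the job_data dict):

# data is a stand-in for job_data: two jobs sharing one signature
data = {'guid1': {'signature': 'aaa'}, 'guid2': {'signature': 'aaa'}}

sigs = set()
map(lambda g: sigs.add(data[g]['signature']), data.keys())
assert sigs == set()  # on Python 3 the lambda never executed

sigs = {data[g]['signature'] for g in data}  # eager, deduplicated
assert sigs == {'aaa'}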
Example 2
    def store_performance_artifact(
            self, job_ids, performance_artifact_placeholders):
        """
        Store the performance data
        """

        # Retrieve list of job signatures associated with the jobs
        job_data = self.get_job_signatures_from_ids(job_ids)

        # collect the distinct reference-data signatures for these jobs
        job_ref_data_signatures = {
            job_data[job_guid]['signature'] for job_guid in job_data
        }

        # Retrieve associated data in reference_data_signatures
        reference_data = self.refdata_model.get_reference_data(
            list(job_ref_data_signatures))

        for perf_data in performance_artifact_placeholders:
            job_guid = perf_data["job_guid"]
            ref_data_signature = job_data[job_guid]['signature']
            ref_data = reference_data[ref_data_signature]

            if 'signature' in ref_data:
                del ref_data['signature']

            # adapt and load data into placeholder structures
            if perf_data['name'] == 'talos_data':
                load_talos_artifacts(self.project, ref_data, job_data, perf_data)
            else:
                load_perf_artifacts(self.project, ref_data, job_data, perf_data)
Example 3
    def store_performance_artifact(self, job_ids,
                                   performance_artifact_placeholders):
        """
        Store the performance data
        """

        # Retrieve list of job signatures associated with the jobs
        job_data = self.get_job_signatures_from_ids(job_ids)

        # collect the distinct reference-data signatures for these jobs
        job_ref_data_signatures = {
            job_data[job_guid]['signature'] for job_guid in job_data
        }

        # Retrieve associated data in reference_data_signatures
        reference_data = self.refdata_model.get_reference_data(
            list(job_ref_data_signatures))

        for perf_data in performance_artifact_placeholders:
            job_guid = perf_data["job_guid"]
            ref_data_signature = job_data[job_guid]['signature']
            ref_data = reference_data[ref_data_signature]

            if 'signature' in ref_data:
                del ref_data['signature']

            # adapt and load data into placeholder structures
            if perf_data['name'] == 'talos_data':
                load_talos_artifacts(self.project, ref_data, job_data,
                                     perf_data)
            else:
                load_perf_artifacts(self.project, ref_data, job_data,
                                    perf_data)
Example 4
    def store_performance_artifact(self, job_ids,
                                   performance_artifact_placeholders):
        """
        Store the performance data
        """

        # Retrieve list of job signatures associated with the jobs
        job_data = self.get_job_signatures_from_ids(job_ids)

        # collect the distinct reference-data signatures for these jobs
        job_ref_data_signatures = {
            job_data[job_guid]['signature'] for job_guid in job_data
        }

        for perf_data in performance_artifact_placeholders:
            job_guid = perf_data["job_guid"]
            ref_data_signature = job_data[job_guid]['signature']
            # At the moment there could be multiple signatures returned
            # by this, but let's just ignore that and take the first
            # if there are multiple (since the properties we care about should
            # be the same)
            ref_data = model_to_dict(
                ReferenceDataSignatures.objects.filter(
                    signature=ref_data_signature, repository=self.project)[0])

            # adapt and load data into placeholder structures
            load_perf_artifacts(self.project, ref_data, job_data, perf_data)
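The filter(...)[0] lookup in Examples 1 and 4 raises IndexError when no ReferenceDataSignatures row matches. A hedged sketch of a defensive variant; get_ref_data is a hypothetical helper, and the model's import path is an assumption about the surrounding project:

from django.forms.models import model_to_dict

# assumption: ReferenceDataSignatures is importable from the project's
# models module, e.g. `from treeherder.model.models import ReferenceDataSignatures`

def get_ref_data(signature, project):
    # .first() returns None on an empty queryset instead of raising
    # IndexError; per the comment in the examples, duplicates should
    # share the properties we care about, so the first match suffices
    row = ReferenceDataSignatures.objects.filter(
        signature=signature, repository=project).first()
    if row is None:
        raise ValueError('no reference data for signature %s' % signature)
    return model_to_dict(row)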
Example 5
def _generate_perf_data_range(test_project, test_repository,
                              perf_option_collection, perf_platform,
                              perf_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None):
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name, enabled=enable_framework)

    now = int(time.time())

    for (i, value) in zip(range(30), [1]*15 + [2]*15):
        push = Push.objects.create(
            repository=test_repository,
            revision='abcdefgh%s' % i,
            author='*****@*****.**',
            time=datetime.datetime.fromtimestamp(now+i))
        job = Job.objects.create(repository=test_repository,
                                 guid='myguid%s' % i,
                                 push=push,
                                 project_specific_id=i)
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': framework_name},
                'suites': [
                    {
                        'name': 'cheezburger metrics',
                        'subtests': [
                            {
                                'name': 'test1',
                                'value': value
                            }
                        ]
                    }
                ]
            }
        }
        if add_suite_value:
            datum['blob']['suites'][0]['value'] = value
        if extra_suite_metadata:
            datum['blob']['suites'][0].update(extra_suite_metadata)
        if extra_subtest_metadata:
            datum['blob']['suites'][0]['subtests'][0].update(
                extra_subtest_metadata)

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps({
            'performance_data': submit_datum['blob']
        })
        load_perf_artifacts(job, perf_reference_data, submit_datum)
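The copy.copy / json.dumps step at the end of each variant relies on shallow copying: rebinding submit_datum['blob'] to the serialized wrapper leaves the original datum untouched. A stdlib-only sketch of the round trip:

import copy
import json

datum = {'job_guid': 'fake_job_guid',
         'blob': {'framework': {'name': 'cheezburger'}}}

submit_datum = copy.copy(datum)           # shallow: values are shared
submit_datum['blob'] = json.dumps(        # rebinds the key; does not
    {'performance_data': submit_datum['blob']})  # mutate datum['blob']

assert isinstance(datum['blob'], dict)    # original still deserialized
assert json.loads(submit_datum['blob'])['performance_data'] == datum['blob']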
Example 6
def _generate_perf_data_range(test_project,
                              test_repository,
                              perf_option_collection,
                              perf_platform,
                              perf_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None):
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name,
                                            enabled=enable_framework)

    now = int(time.time())

    for (i, value) in zip(range(30), [1] * 15 + [2] * 15):
        push = Push.objects.create(repository=test_repository,
                                   revision='abcdefgh%s' % i,
                                   author='*****@*****.**',
                                   time=datetime.datetime.fromtimestamp(now + i))
        job = Job.objects.create(repository=test_repository,
                                 guid='myguid%s' % i,
                                 push=push,
                                 project_specific_id=i)
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {
                    'name': framework_name
                },
                'suites': [{
                    'name': 'cheezburger metrics',
                    'subtests': [{
                        'name': 'test1',
                        'value': value
                    }]
                }]
            }
        }
        if add_suite_value:
            datum['blob']['suites'][0]['value'] = value
        if extra_suite_metadata:
            datum['blob']['suites'][0].update(extra_suite_metadata)
        if extra_subtest_metadata:
            datum['blob']['suites'][0]['subtests'][0].update(
                extra_subtest_metadata)

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        load_perf_artifacts(job, perf_reference_data, submit_datum)
Example 7
def _generate_perf_data_range(test_project,
                              test_repository,
                              perf_option_collection,
                              perf_platform,
                              perf_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None):
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name,
                                            enabled=enable_framework)

    now = int(time.time())
    for (i, value) in zip(range(30), [1] * 15 + [2] * 15):
        perf_job_data = {
            'fake_job_guid': {
                'id': i,
                'result_set_id': i,
                'push_timestamp': now + i
            }
        }
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {
                    'name': framework_name
                },
                'suites': [{
                    'name': 'cheezburger metrics',
                    'subtests': [{
                        'name': 'test1',
                        'value': value
                    }]
                }]
            }
        }
        if add_suite_value:
            datum['blob']['suites'][0]['value'] = value
        if extra_suite_metadata:
            datum['blob']['suites'][0].update(extra_suite_metadata)
        if extra_subtest_metadata:
            datum['blob']['suites'][0]['subtests'][0].update(
                extra_subtest_metadata)

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        load_perf_artifacts(test_repository.name, perf_reference_data,
                            perf_job_data, submit_datum)
Example 8
def _generate_perf_data_range(test_project, test_repository,
                              perf_option_collection, perf_platform,
                              perf_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None):
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name, enabled=enable_framework)

    now = int(time.time())
    for (i, value) in zip(range(30), [1]*15 + [2]*15):
        perf_job_data = {
            'fake_job_guid': {
                'id': i,
                'result_set_id': i,
                'push_timestamp': now + i
            }
        }
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': framework_name},
                'suites': [
                    {
                        'name': 'cheezburger metrics',
                        'subtests': [
                            {
                                'name': 'test1',
                                'value': value
                            }
                        ]
                    }
                ]
            }
        }
        if add_suite_value:
            datum['blob']['suites'][0]['value'] = value
        if extra_suite_metadata:
            datum['blob']['suites'][0].update(extra_suite_metadata)
        if extra_subtest_metadata:
            datum['blob']['suites'][0]['subtests'][0].update(
                extra_subtest_metadata)

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps({
            'performance_data': submit_datum['blob']
        })
        load_perf_artifacts(test_repository.name, perf_reference_data,
                            perf_job_data, submit_datum)
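In every _generate_perf_data_range variant the series [1]*15 + [2]*15 simulates a step change exactly halfway through 30 pushes, presumably to give downstream analysis a clean jump to detect; zip pairs each push index with its value:

values = [1] * 15 + [2] * 15
series = list(zip(range(30), values))

assert len(series) == 30
assert series[14] == (14, 1)   # last push before the step
assert series[15] == (15, 2)   # first push after the step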
Example 9
def test_same_signature_multiple_performance_frameworks(test_project,
                                                        test_repository,
                                                        perf_option_collection,
                                                        perf_platform,
                                                        perf_job_data,
                                                        perf_reference_data):
    framework_names = ['cheezburger1', 'cheezburger2']
    for framework_name in framework_names:
        PerformanceFramework.objects.create(name=framework_name)
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': framework_name},
                'suites': [
                    {
                        'name': 'cheezburger metrics',
                        'subtests': [
                            {
                                'name': 'test1',
                                'value': 20.0,
                            }
                        ]
                    }
                ]
            }
        }
        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps({
            'performance_data': submit_datum['blob']
        })

        load_perf_artifacts(test_repository.name, perf_reference_data,
                            perf_job_data, submit_datum)

    # we should have 2 performance signature objects, one for each framework
    # and one datum for each signature
    for framework_name in framework_names:
        s = PerformanceSignature.objects.get(framework__name=framework_name,
                                             repository=test_repository,
                                             suite='cheezburger metrics',
                                             test='test1')
        d = PerformanceDatum.objects.get(signature=s)
        assert d.value == 20.0
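Why the test expects one PerformanceSignature per framework even though the suite and test names match: the signature's identity includes the framework. A hypothetical sketch of such hashing; the exact property set the real adapter hashes is an assumption here, not confirmed by these examples:

import hashlib

def signature_hash(framework, suite, test, option_hash, platform):
    # hypothetical: join the identifying properties and hash them
    key = '-'.join([framework, suite, test, option_hash, platform])
    return hashlib.sha1(key.encode('utf-8')).hexdigest()

h1 = signature_hash('cheezburger1', 'cheezburger metrics', 'test1',
                    'my_option_hash', 'my_platform')
h2 = signature_hash('cheezburger2', 'cheezburger metrics', 'test1',
                    'my_option_hash', 'my_platform')
assert h1 != h2   # a different framework name yields a distinct signature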
Example 10
    def store_performance_artifact(self, job, artifact):
        """
        Store a performance data artifact
        """

        # Retrieve list of job signatures associated with the jobs
        job_data = self.get_job_signatures_from_ids([job.project_specific_id])
        ref_data_signature_hash = job_data[job.guid]['signature']

        # At the moment there could be multiple signatures returned
        # by this, but let's just ignore that and take the first
        # if there are multiple (since the properties we care about should
        # be the same)
        ref_data = model_to_dict(
            ReferenceDataSignatures.objects.filter(
                signature=ref_data_signature_hash, repository=self.project)[0])

        # adapt and load data into placeholder structures
        load_perf_artifacts(job, ref_data, artifact)
Example 11
def test_same_signature_multiple_performance_frameworks(
        test_project, test_repository, perf_option_collection, perf_platform,
        perf_job_data, perf_reference_data):
    framework_names = ['cheezburger1', 'cheezburger2']
    for framework_name in framework_names:
        PerformanceFramework.objects.create(name=framework_name)
        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {
                    'name': framework_name
                },
                'suites': [{
                    'name': 'cheezburger metrics',
                    'subtests': [{
                        'name': 'test1',
                        'value': 20.0,
                    }]
                }]
            }
        }
        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})

        load_perf_artifacts(test_repository.name, perf_reference_data,
                            perf_job_data, submit_datum)

    # we should have 2 performance signature objects, one for each framework
    # and one datum for each signature
    for framework_name in framework_names:
        s = PerformanceSignature.objects.get(framework__name=framework_name,
                                             repository=test_repository,
                                             suite='cheezburger metrics',
                                             test='test1')
        d = PerformanceDatum.objects.get(signature=s)
        assert d.value == 20.0
Example 12
def test_load_generic_data(test_project, test_repository,
                           perf_option_collection, perf_platform,
                           perf_job_data, perf_reference_data):
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name)

    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {
                'name': framework_name
            },
            'suites': [{
                'name': 'cheezburger metrics',
                'lowerIsBetter': True,
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0,
                    'lowerIsBetter': True
                }, {
                    'name': 'test2',
                    'value': 30.0,
                    'lowerIsBetter': False
                }, {
                    'name': 'test3',
                    'value': 40.0
                }]
            }, {
                'name': 'cheezburger metrics 2',
                'lowerIsBetter': False,
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0
                }]
            }, {
                'name': 'cheezburger metrics 3',
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0
                }]
            }]
        }
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps(
        {'performance_data': submit_datum['blob']})

    load_perf_artifacts(test_repository.name, perf_reference_data,
                        perf_job_data, submit_datum)
    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']

    push_timestamp = perf_job_data['fake_job_guid']['push_timestamp']
    pushtime = datetime.datetime.fromtimestamp(push_timestamp)
    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'], suite['name'], '',
                          'my_option_hash', 'my_platform',
                          suite.get('lowerIsBetter', True), pushtime)
        _verify_datum(suite['name'], '', suite['value'], pushtime)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'], suite['name'],
                              subtest['name'], 'my_option_hash', 'my_platform',
                              subtest.get('lowerIsBetter', True), pushtime)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          pushtime)

    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash',
                                                        flat=True)
    assert len(subtest_signatures) == 3
    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    perf_job_data['fake_job_guid']['push_timestamp'] += 1
    load_perf_artifacts(test_repository.name, perf_reference_data,
                        perf_job_data, submit_datum)
    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == datetime.datetime.fromtimestamp(
        push_timestamp + 1)
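The assert 8 == PerformanceSignature.objects.all().count() above is derivable from the payload: each suite carries a summary value (one summary signature each, matching the _verify_signature calls with an empty test name) plus one signature per subtest. The arithmetic, with the subtest counts from the datum:

# suite shapes from the datum above: subtest counts 3, 1 and 1
suites = [
    {'name': 'cheezburger metrics', 'subtests': [{}, {}, {}]},
    {'name': 'cheezburger metrics 2', 'subtests': [{}]},
    {'name': 'cheezburger metrics 3', 'subtests': [{}]},
]
expected = sum(1 + len(s['subtests']) for s in suites)
assert expected == 8   # 3 suite summaries + 5 subtests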
Example 13
def test_load_generic_data(test_project, test_repository,
                           perf_option_collection, perf_platform,
                           perf_push, perf_job, perf_reference_data,
                           jm):
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name, enabled=True)

    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {'name': framework_name},
            'suites': [
                {
                    'name': 'cheezburger metrics',
                    'extraOptions': ['shell', 'e10s'],
                    'lowerIsBetter': True,
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0,
                            'lowerIsBetter': True
                        },
                        {
                            'name': 'test2',
                            'value': 30.0,
                            'lowerIsBetter': False
                        },
                        {
                            'name': 'test3',
                            'value': 40.0
                        }
                    ]
                },
                {
                    'name': 'cheezburger metrics 2',
                    'lowerIsBetter': False,
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0
                        }
                    ]
                },
                {
                    'name': 'cheezburger metrics 3',
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0
                        }
                    ]
                }
            ]
        }
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps({
        'performance_data': submit_datum['blob']
    })

    load_perf_artifacts(perf_job, perf_reference_data,
                        submit_datum)
    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']

    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'],
                          suite['name'],
                          '',
                          'my_option_hash',
                          'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'),
                          perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'],
                              suite['name'],
                              subtest['name'],
                              'my_option_hash',
                              'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'),
                              perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)

    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash', flat=True)
    assert len(subtest_signatures) == 3

    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
    later_push = Push.objects.create(
        repository=test_repository,
        revision='1234abcd12',
        author='*****@*****.**',
        time=later_timestamp)
    later_job = Job.objects.create(
        repository=test_repository,
        push=later_push,
        guid='laterguid',
        project_specific_id=2)
    load_perf_artifacts(later_job, perf_reference_data, submit_datum)
    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == later_timestamp
Example 14
    def test_load_generic_data(self):
        framework_name = "cheezburger"
        PerformanceFramework.objects.get_or_create(name=framework_name)

        (job_data, reference_data) = self._get_job_and_reference_data()
        datum = {
            "job_guid": self.JOB_GUID,
            "name": "test",
            "type": "test",
            "blob": {
                "framework": {"name": framework_name},
                "suites": [
                    {
                        "name": "cheezburger metrics",
                        "lowerIsBetter": True,
                        "value": 10.0,
                        "subtests": [
                            {
                                "name": "test1",
                                "value": 20.0,
                                "lowerIsBetter": True
                            },
                            {
                                "name": "test2",
                                "value": 30.0,
                                "lowerIsBetter": False
                            },
                            {
                                "name": "test3",
                                "value": 40.0
                            }
                        ]
                    }
                ]
            }
        }

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps({
            'performance_data': submit_datum['blob']
        })

        load_perf_artifacts(self.REPO_NAME, reference_data, job_data,
                            submit_datum)
        self.assertEqual(4, PerformanceSignature.objects.all().count())
        self.assertEqual(1, PerformanceFramework.objects.all().count())
        framework = PerformanceFramework.objects.all()[0]
        self.assertEqual(framework_name, framework.name)

        perf_datum = datum['blob']

        # verify summary, then subtests
        self._verify_signature_datum(perf_datum['framework']['name'],
                                     perf_datum['suites'][0]['name'],
                                     '',
                                     perf_datum['suites'][0]['lowerIsBetter'],
                                     perf_datum['suites'][0]['value'],
                                     datetime.datetime.fromtimestamp(
                                         self.PUSH_TIMESTAMP))
        for subtest in perf_datum['suites'][0]['subtests']:
            self._verify_signature_datum(perf_datum['framework']['name'],
                                         perf_datum['suites'][0]['name'],
                                         subtest['name'],
                                         subtest.get('lowerIsBetter', True),
                                         subtest['value'],
                                         datetime.datetime.fromtimestamp(
                                             self.PUSH_TIMESTAMP))

        # send another datum, a little later, verify that signature's
        # `last_updated` is changed accordingly
        job_data[self.JOB_GUID]['push_timestamp'] += 1
        load_perf_artifacts(self.REPO_NAME, reference_data, job_data,
                            submit_datum)
        signature = PerformanceSignature.objects.get(
            suite=perf_datum['suites'][0]['name'],
            test=perf_datum['suites'][0]['subtests'][0]['name'])
        self.assertEqual(signature.last_updated,
                         datetime.datetime.fromtimestamp(
                             self.PUSH_TIMESTAMP + 1))
Example 15
def test_load_generic_data(test_project, test_repository,
                           perf_option_collection, perf_platform,
                           perf_job_data, perf_reference_data):
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name)

    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {'name': framework_name},
            'suites': [
                {
                    'name': 'cheezburger metrics',
                    'lowerIsBetter': True,
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0,
                            'lowerIsBetter': True
                        },
                        {
                            'name': 'test2',
                            'value': 30.0,
                            'lowerIsBetter': False
                        },
                        {
                            'name': 'test3',
                            'value': 40.0
                        }
                    ]
                },
                {
                    'name': 'cheezburger metrics 2',
                    'lowerIsBetter': False,
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0
                        }
                    ]
                },
                {
                    'name': 'cheezburger metrics 3',
                    'value': 10.0,
                    'subtests': [
                        {
                            'name': 'test1',
                            'value': 20.0
                        }
                    ]
                }
            ]
        }
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps({
        'performance_data': submit_datum['blob']
    })

    load_perf_artifacts(test_repository.name, perf_reference_data,
                        perf_job_data, submit_datum)
    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']

    push_timestamp = perf_job_data['fake_job_guid']['push_timestamp']
    pushtime = datetime.datetime.fromtimestamp(push_timestamp)
    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'],
                          suite['name'],
                          '',
                          'my_option_hash',
                          'my_platform',
                          suite.get('lowerIsBetter', True),
                          pushtime)
        _verify_datum(suite['name'], '', suite['value'], pushtime)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'],
                              suite['name'],
                              subtest['name'],
                              'my_option_hash',
                              'my_platform',
                              subtest.get('lowerIsBetter', True),
                              pushtime)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          pushtime)

    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash', flat=True)
    assert len(subtest_signatures) == 3
    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    perf_job_data['fake_job_guid']['push_timestamp'] += 1
    load_perf_artifacts(test_repository.name, perf_reference_data,
                        perf_job_data, submit_datum)
    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == datetime.datetime.fromtimestamp(
        push_timestamp + 1)
Example 16
def test_load_generic_data(test_project, test_repository,
                           perf_option_collection, perf_platform, perf_push,
                           perf_job, perf_reference_data, jm):
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name,
                                               enabled=True)

    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {
                'name': framework_name
            },
            'suites': [{
                'name': 'cheezburger metrics',
                'extraOptions': ['shell', 'e10s'],
                'lowerIsBetter': True,
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0,
                    'lowerIsBetter': True
                }, {
                    'name': 'test2',
                    'value': 30.0,
                    'lowerIsBetter': False
                }, {
                    'name': 'test3',
                    'value': 40.0
                }]
            }, {
                'name': 'cheezburger metrics 2',
                'lowerIsBetter': False,
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0
                }]
            }, {
                'name': 'cheezburger metrics 3',
                'value': 10.0,
                'subtests': [{
                    'name': 'test1',
                    'value': 20.0
                }]
            }]
        }
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps(
        {'performance_data': submit_datum['blob']})

    load_perf_artifacts(perf_job, perf_reference_data, submit_datum)
    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']

    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'], suite['name'], '',
                          'my_option_hash', 'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'), perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'], suite['name'],
                              subtest['name'], 'my_option_hash', 'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'), perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)

    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash',
                                                        flat=True)
    assert len(subtest_signatures) == 3

    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
    later_push = Push.objects.create(repository=test_repository,
                                     revision='1234abcd12',
                                     author='*****@*****.**',
                                     time=later_timestamp)
    later_job = Job.objects.create(repository=test_repository,
                                   push=later_push,
                                   guid='laterguid',
                                   project_specific_id=2)
    load_perf_artifacts(later_job, perf_reference_data, submit_datum)
    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == later_timestamp