def store_performance_artifact(
        self, job_ids, performance_artifact_placeholders):
    """
    Store the performance data
    """
    # Retrieve list of job signatures associated with the jobs
    job_data = self.get_job_signatures_from_ids(job_ids)

    # Collect the distinct reference data signatures for those jobs (a set
    # comprehension rather than map() with a side-effecting lambda, which
    # is harder to read and would be a no-op under Python 3's lazy map)
    job_ref_data_signatures = {job_data[job_guid]['signature']
                               for job_guid in job_data}

    # Retrieve associated data in reference_data_signatures
    reference_data = self.refdata_model.get_reference_data(
        list(job_ref_data_signatures))

    for perf_data in performance_artifact_placeholders:
        job_guid = perf_data["job_guid"]
        ref_data_signature = job_data[job_guid]['signature']
        ref_data = reference_data[ref_data_signature]

        if 'signature' in ref_data:
            del ref_data['signature']

        # adapt and load data into placeholder structures
        if perf_data['name'] == 'talos_data':
            load_talos_artifacts(self.project, ref_data, job_data,
                                 perf_data)
        else:
            load_perf_artifacts(self.project, ref_data, job_data,
                                perf_data)
def store_performance_artifact(
        self, job_ids, performance_artifact_placeholders):
    """
    Store the performance data
    """
    # Retrieve list of job signatures associated with the jobs
    job_data = self.get_job_signatures_from_ids(job_ids)

    for perf_data in performance_artifact_placeholders:
        job_guid = perf_data["job_guid"]
        ref_data_signature = job_data[job_guid]['signature']

        # At the moment there could be multiple signatures returned
        # by this, but let's just ignore that and take the first if
        # there are multiple (since the properties we care about
        # should be the same). model_to_dict here is Django's
        # django.forms.models.model_to_dict; ReferenceDataSignatures is
        # assumed to be imported from this codebase's models module.
        ref_data = model_to_dict(
            ReferenceDataSignatures.objects.filter(
                signature=ref_data_signature,
                repository=self.project)[0])

        # adapt and load data into placeholder structures
        if perf_data['name'] == 'talos_data':
            load_talos_artifacts(self.project, ref_data, job_data,
                                 perf_data)
        else:
            load_perf_artifacts(self.project, ref_data, job_data,
                                perf_data)
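# For illustration only: a minimal sketch (not taken from the source) of the
# data shapes the store_performance_artifact variants above assume, inferred
# from their field accesses. All literal values here are hypothetical.

# get_job_signatures_from_ids() is assumed to return a mapping keyed by
# job_guid, each entry carrying that job's reference data signature (the
# tests below also read a push_timestamp from the same structure):
example_job_data = {
    'fake_job_guid': {
        'signature': '09e9f5e0c8cd23c5a3e02d824cf5d0d2b4c4b35e',  # hypothetical
        'push_timestamp': 1403543087,  # hypothetical epoch seconds
    },
}

# Each placeholder in performance_artifact_placeholders names its job and
# carries a JSON-serialized blob; the tests below build the blob with
# json.dumps({'talos_data': [...]}), so the same shape is assumed here:
example_perf_data = {
    'job_guid': 'fake_job_guid',
    'name': 'talos_data',  # any other name is routed to load_perf_artifacts
    'blob': '{"talos_data": []}',  # serialized payload; contents elided
}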
def test_load_talos_data(test_project, test_repository,
                         perf_option_collection, perf_platform,
                         perf_job_data, perf_reference_data):
    PerformanceFramework.objects.create(name='talos')
    talos_perf_data = SampleData.get_talos_perf_data()
    for talos_datum in talos_perf_data:
        datum = {
            "job_guid": "fake_job_guid",
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        load_talos_artifacts(test_repository.name, perf_reference_data,
                             perf_job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        assert expected_result_count == PerformanceSignature.objects.all().count()

        expected_push_timestamp = datetime.datetime.fromtimestamp(
            perf_job_data['fake_job_guid']['push_timestamp'])

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in and that we ingested lowerIsBetter ok (if it was there)
                subtest = talos_datum['summary']['subtests'][testname]
                assert round(subtest['filtered'], 2) == datum.value
                assert signature.lower_is_better == subtest.get(
                    'lowerIsBetter', True)
            else:
                # this is an old style talos blob without a summary. these are
                # going away, so I'm not going to bother testing the
                # correctness. however let's at least verify that some values
                # are being generated here
                assert datum.value
            assert datum.push_timestamp == expected_push_timestamp

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            assert round(float(results['mean']), 2) == datum.value
            assert datum.push_timestamp == expected_push_timestamp

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            assert round(talos_datum['summary']['suite'], 2) == datum.value
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            assert datum.value
        assert datum.push_timestamp == expected_push_timestamp

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
def test_load_talos_data(self):
    PerformanceFramework.objects.get_or_create(name='talos')
    talos_perf_data = SampleData.get_talos_perf_data()
    for talos_datum in talos_perf_data:
        (job_data, reference_data) = self._get_job_and_reference_data()

        datum = {
            "job_guid": self.JOB_GUID,
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})
        load_talos_artifacts(self.REPO_NAME, reference_data, job_data,
                             datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         PerformanceSignature.objects.all().count())

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in and that we ingested lowerIsBetter ok (if it was there)
                subtest = talos_datum['summary']['subtests'][testname]
                self.assertEqual(round(subtest['filtered'], 2),
                                 datum.value)
                self.assertEqual(signature.lower_is_better,
                                 subtest.get('lowerIsBetter', True))
            else:
                # this is an old style talos blob without a summary. these
                # are going away, so I'm not going to bother testing the
                # correctness. however let's at least verify that some
                # values are being generated here
                self.assertTrue(datum.value)
            self.assertEqual(datum.push_timestamp,
                             datetime.datetime.fromtimestamp(
                                 self.PUSH_TIMESTAMP))

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            self.assertEqual(round(float(results['mean']), 2), datum.value)
            self.assertEqual(datum.push_timestamp,
                             datetime.datetime.fromtimestamp(
                                 self.PUSH_TIMESTAMP))

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             datum.value)
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            self.assertTrue(datum.value)
        self.assertEqual(datum.push_timestamp,
                         datetime.datetime.fromtimestamp(
                             self.PUSH_TIMESTAMP))

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
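# For illustration only: a rough sketch (not taken from the source) of a
# talos blob as exercised by the tests above, reconstructed from the fields
# they assert on. Key names come from the assertions; every literal value
# is hypothetical.
example_talos_datum = {
    'testrun': {'suite': 'tp5o'},  # suite name; 'tp5o' is an assumed example
    # raw replicates per subtest; each key yields one PerformanceSignature
    'results': {
        'subtest1': [100.0, 102.0, 98.0],
        'subtest2': [250.0, 255.0, 245.0],
    },
    # newer blobs carry a summary; old-style blobs omit it entirely, in
    # which case the tests only check that some value was generated at all
    'summary': {
        'suite': 150.0,  # becomes the value of the summary series
        'subtests': {
            'subtest1': {'filtered': 100.0, 'lowerIsBetter': True},
            'subtest2': {'filtered': 250.0},  # lowerIsBetter defaults to True
        },
    },
    # optional counters, one extra performance series per counter; the tests
    # coerce results['mean'] with float(), so a string value is plausible
    'talos_counters': {
        'main_rss': {'mean': '1234567.0'},
    },
}

# With two subtests and one counter, the tests above would expect
# len(results) + 1 + len(talos_counters) == 4 signatures in total.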