def test_adapt_and_load():
    """Ingest every sample talos blob through TalosDataAdapter and verify
    that one performance artifact placeholder is produced per expected
    series: one per subtest, one summary series per blob, and one per
    talos counter (when present).
    """
    talos_perf_data = SampleData.get_talos_perf_data()
    tda = TalosDataAdapter()

    # These fixtures are loop-invariant; build them once instead of
    # recreating identical dicts on every iteration.
    job_data = {
        "oqiwy0q847365qiu": {
            "id": 1,
            "result_set_id": 1,
            "push_timestamp": 1402692388
        }
    }
    reference_data = {
        "property1": "value1",
        "property2": "value2",
        "property3": "value3"
    }

    result_count = 0
    for talos_datum in talos_perf_data:
        # was ``for datum in ...: datum = {...}`` -- the loop variable was
        # shadowed by the submission dict; use a distinct name instead.
        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # one series per subtest plus one extra result for the summary series
        result_count += len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            result_count += len(talos_datum["talos_counters"])

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [talos_datum]})

        tda.adapt_and_load(reference_data, job_data, datum)

    assert result_count == len(tda.performance_artifact_placeholders)
def test_load_talos_data(test_project, test_repository,
                         perf_option_collection, perf_platform,
                         perf_job_data, perf_reference_data):
    """End-to-end ingestion check: load each sample talos blob via
    load_talos_artifacts and verify the resulting PerformanceSignature
    and PerformanceDatum rows (subtests, counters, and summary series)."""
    PerformanceFramework.objects.create(name='talos')
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        submission = {
            "job_guid": "fake_job_guid",
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }
        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        submission['blob'] = json.dumps({'talos_data': [submission['blob']]})
        load_talos_artifacts(test_repository.name, perf_reference_data,
                             perf_job_data, submission)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1
        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        assert expected_result_count == PerformanceSignature.objects.all(
        ).count()

        expected_push_timestamp = datetime.datetime.fromtimestamp(
            perf_job_data['fake_job_guid']['push_timestamp'])

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            perf_datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in and that lowerIsBetter was ingested correctly
                # (defaulting to True when absent)
                subtest = talos_datum['summary']['subtests'][testname]
                assert round(subtest['filtered'], 2) == perf_datum.value
                assert signature.lower_is_better == subtest.get(
                    'lowerIsBetter', True)
            else:
                # old style talos blob without a summary; these are going
                # away, so just verify some value was generated
                assert perf_datum.value
            assert perf_datum.push_timestamp == expected_push_timestamp

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            perf_datum = PerformanceDatum.objects.get(signature=signature)
            assert round(float(results['mean']), 2) == perf_datum.value
            assert perf_datum.push_timestamp == expected_push_timestamp

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        perf_datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            assert round(talos_datum['summary']['suite'],
                         2) == perf_datum.value
        else:
            # old style talos blob without summary: at least check a value
            assert perf_datum.value
        assert perf_datum.push_timestamp == expected_push_timestamp

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
def test_adapt_and_load(self):
    """Adapt each sample talos blob and verify the signatures plus the
    per-signature series data accumulated by the adapter (subtests,
    counters, and the trailing summary placeholders)."""
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        submission = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }
        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }
        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        submission['blob'] = json.dumps({'talos_data': [submission['blob']]})

        tda = TalosDataAdapter()
        tda.adapt_and_load(reference_data, job_data, submission)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1
        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count, len(tda.signatures.keys()))

        # verify that we have signatures for the subtests
        signature_placeholders = copy.copy(
            tda.signature_property_placeholders)
        for (testname, results) in talos_datum["results"].iteritems():
            matches = [p for p in signature_placeholders if p[2] == testname]
            self.assertEqual(len(matches), 1)
            signature_hash = matches[0][0]
            perfdata = tda.signatures[signature_hash][0]
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in
                for measure in ['min', 'max', 'std', 'mean', 'median']:
                    self.assertEqual(
                        round(talos_datum['summary']['subtests'][testname][measure], 2),
                        perfdata[measure])
            else:
                # old style talos blob without a summary; these are going
                # away, so just verify some values are being generated
                for measure in ['min', 'max', 'std', 'mean', 'median']:
                    self.assertTrue(perfdata[measure])
            # filter out this signature from data to process
            signature_placeholders = [p for p in signature_placeholders
                                      if p[0] != signature_hash]

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            matches = [p for p in signature_placeholders if p[2] == counter]
            self.assertEqual(len(matches), 1)
            signature_hash = matches[0][0]
            perfdata = tda.signatures[signature_hash][0]
            for measure in ['max', 'mean']:
                self.assertEqual(round(float(results[measure]), 2),
                                 perfdata[measure])
            # filter out this signature from data to process
            signature_placeholders = [p for p in signature_placeholders
                                      if p[0] != signature_hash]

        # we should be left with just summary signature placeholders
        self.assertEqual(len(signature_placeholders), 2)
        perfdata = tda.signatures[signature_placeholders[0][0]][0]
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             perfdata['geomean'])
        else:
            # old style talos blob without summary; at least check that
            # the 'geomean' value was generated
            self.assertTrue(perfdata['geomean'])
def test_adapt_and_load(self):
    """Run every sample talos blob through a fresh TalosDataAdapter and
    check the adapter's signatures and series values against the blob."""
    # measure keys checked for subtest series and for counter series
    subtest_measures = ['min', 'max', 'std', 'mean', 'median']
    counter_measures = ['max', 'mean']

    for talos_datum in SampleData.get_talos_perf_data():
        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }
        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }
        submitted = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            "blob": json.dumps({'talos_data': [talos_datum]})
        }

        tda = TalosDataAdapter()
        tda.adapt_and_load(reference_data, job_data, submitted)

        # base: subtests + one extra result for the summary series,
        # plus one performance series per counter
        expected_result_count = len(talos_datum["results"]) + 1
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count, len(tda.signatures.keys()))

        # verify that we have signatures for the subtests
        remaining = copy.copy(tda.signature_property_placeholders)
        for (testname, results) in talos_datum["results"].iteritems():
            found = [p for p in remaining if p[2] == testname]
            self.assertEqual(len(found), 1)
            sig_hash = found[0][0]
            perfdata = tda.signatures[sig_hash][0]
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in
                subtest_summary = talos_datum['summary']['subtests'][testname]
                for measure in subtest_measures:
                    self.assertEqual(round(subtest_summary[measure], 2),
                                     perfdata[measure])
            else:
                # old style talos blob without a summary; these are going
                # away, so just verify some values are being generated
                for measure in subtest_measures:
                    self.assertTrue(perfdata[measure])
            # filter out this signature from data to process
            remaining = [p for p in remaining if p[0] != sig_hash]

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            found = [p for p in remaining if p[2] == counter]
            self.assertEqual(len(found), 1)
            sig_hash = found[0][0]
            perfdata = tda.signatures[sig_hash][0]
            for measure in counter_measures:
                self.assertEqual(round(float(results[measure]), 2),
                                 perfdata[measure])
            # filter out this signature from data to process
            remaining = [p for p in remaining if p[0] != sig_hash]

        # we should be left with just summary signature placeholders
        self.assertEqual(len(remaining), 2)
        perfdata = tda.signatures[remaining[0][0]][0]
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             perfdata['geomean'])
        else:
            # old style talos blob without summary; at least check that
            # the 'geomean' value was generated
            self.assertTrue(perfdata['geomean'])
def test_load_talos_data(self):
    """Load each sample talos blob into the database and verify the
    PerformanceSignature / PerformanceDatum rows for subtests, counters
    and the summary series."""
    PerformanceFramework.objects.get_or_create(name='talos')
    talos_perf_data = SampleData.get_talos_perf_data()

    # push timestamp is fixed for every datum we ingest; compute the
    # expected value once (original recomputed it at each assertion)
    expected_push_timestamp = datetime.datetime.fromtimestamp(
        self.PUSH_TIMESTAMP)

    for talos_datum in talos_perf_data:
        (job_data, reference_data) = self._get_job_and_reference_data()

        submission = {
            "job_guid": self.JOB_GUID,
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }
        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        submission['blob'] = json.dumps({'talos_data': [submission['blob']]})

        load_talos_artifacts(self.REPO_NAME, reference_data, job_data,
                             submission)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1
        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         PerformanceSignature.objects.all().count())

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            perf_datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in and that lowerIsBetter was ingested correctly
                # (defaulting to True when absent)
                subtest = talos_datum['summary']['subtests'][testname]
                self.assertEqual(round(subtest['filtered'], 2),
                                 perf_datum.value)
                self.assertEqual(signature.lower_is_better,
                                 subtest.get('lowerIsBetter', True))
            else:
                # old style talos blob without a summary; these are going
                # away, so just verify some value was generated
                self.assertTrue(perf_datum.value)
            self.assertEqual(perf_datum.push_timestamp,
                             expected_push_timestamp)

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            perf_datum = PerformanceDatum.objects.get(signature=signature)
            self.assertEqual(round(float(results['mean']), 2),
                             perf_datum.value)
            self.assertEqual(perf_datum.push_timestamp,
                             expected_push_timestamp)

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        perf_datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             perf_datum.value)
        else:
            # old style talos blob without summary; at least check a value
            self.assertTrue(perf_datum.value)
        self.assertEqual(perf_datum.push_timestamp,
                         expected_push_timestamp)

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
def test_load_talos_data(test_project, test_repository,
                         perf_option_collection, perf_platform,
                         perf_job_data, perf_reference_data):
    """Verify database ingestion of the sample talos blobs: signature
    counts, subtest values, counter values, summary series and push
    timestamps, cleaning up the perf tables between blobs."""
    PerformanceFramework.objects.create(name='talos')

    for talos_datum in SampleData.get_talos_perf_data():
        submission = {
            "job_guid": "fake_job_guid",
            "name": "test",
            "type": "test",
            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            "blob": json.dumps({'talos_data': [talos_datum]})
        }
        load_talos_artifacts(test_repository.name, perf_reference_data,
                             perf_job_data, submission)

        # base: subtests + one extra result for the summary series,
        # plus one performance series per counter
        expected_result_count = len(talos_datum["results"]) + 1
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        assert expected_result_count == PerformanceSignature.objects.all(
        ).count()

        expected_push_timestamp = datetime.datetime.fromtimestamp(
            perf_job_data['fake_job_guid']['push_timestamp'])

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            stored = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in and that lowerIsBetter was ingested correctly
                subtest = talos_datum['summary']['subtests'][testname]
                assert round(subtest['filtered'], 2) == stored.value
                assert signature.lower_is_better == subtest.get(
                    'lowerIsBetter', True)
            else:
                # old style talos blob without a summary; these are going
                # away, so just verify some value was generated
                assert stored.value
            assert stored.push_timestamp == expected_push_timestamp

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            stored = PerformanceDatum.objects.get(signature=signature)
            assert round(float(results['mean']), 2) == stored.value
            assert stored.push_timestamp == expected_push_timestamp

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        stored = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            assert round(talos_datum['summary']['suite'], 2) == stored.value
        else:
            # old style talos blob without summary: at least check a value
            assert stored.value
        assert stored.push_timestamp == expected_push_timestamp

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
def test_adapt_and_load():
    """Adapt every sample talos blob and spot-check the zlib-compressed
    performance artifact placeholders: the summary's geomean plus each
    subtest's summary measures must round-trip into the stored series.
    """
    talos_perf_data = SampleData.get_talos_perf_data()
    tda = TalosDataAdapter()

    # These fixtures are loop-invariant; build them once.
    job_data = {
        "oqiwy0q847365qiu": {
            "id": 1,
            "result_set_id": 1,
            "push_timestamp": 1402692388
        }
    }
    reference_data = {
        "property1": "value1",
        "property2": "value2",
        "property3": "value3"
    }

    result_count = 0
    for talos_datum in talos_perf_data:
        # was ``for datum in ...: datum = {...}`` -- the loop variable was
        # shadowed by the submission dict; use a distinct name instead.
        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # one extra result for the summary series
        result_count += len(talos_datum["results"]) + 1
        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            result_count += len(talos_datum["talos_counters"])

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [talos_datum]})
        tda.adapt_and_load(reference_data, job_data, datum)

        # we upload a summary with a suite and subtest values, +1 for suite.
        # BUGFIX: this used to test ``'summary' in datum['blob']`` AFTER the
        # blob had been serialized, i.e. a substring search on JSON text
        # rather than a dict key test; check the original dict instead.
        if 'summary' in talos_datum:
            results = json.loads(zlib.decompress(
                tda.performance_artifact_placeholders[-1][4]))
            data = json.loads(datum['blob'])['talos_data'][0]
            assert results["blob"]["performance_series"]["geomean"] == \
                data['summary']['suite']

            # deal with the subtests now
            for i in range(0, len(data['summary']['subtests'])):
                subresults = json.loads(zlib.decompress(
                    tda.performance_artifact_placeholders[-1 - i][4]))
                if 'subtest_signatures' in subresults["blob"]['signature_properties']:
                    # ignore summary signatures
                    continue
                subdata = data['summary']['subtests'][
                    subresults["blob"]['signature_properties']['test']]
                for datatype in ['min', 'max', 'mean', 'median', 'std']:
                    assert subdata[datatype] == \
                        subresults["blob"]["performance_series"][datatype]
                # 'value' is optional in the subtest summary
                if 'value' in subdata:
                    assert subdata['value'] == \
                        subresults["blob"]["performance_series"]['value']
        else:
            # FIXME: the talos data blob we're currently using contains
            # datums with summaries and those without; we should probably
            # test non-summarized data as well
            pass

    assert result_count == len(tda.performance_artifact_placeholders)
def test_adapt_and_load(self):
    """Adapt each sample talos blob into the database and verify the
    stored PerformanceSignature / PerformanceDatum rows for subtests,
    counters and the summary series."""
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        # delete any previously-created perf objects
        # FIXME: because of https://bugzilla.mozilla.org/show_bug.cgi?id=1133273
        # this can be really slow if we have a dev database with lots of
        # performance data in it (if the test succeeds, the transaction
        # will be rolled back so at least it won't pollute the production
        # database)
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }
        reference_data = {
            "option_collection_hash": self.OPTION_HASH,
            "machine_platform": self.MACHINE_PLATFORM,
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }
        submission = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            # Mimic production environment, the blobs are serialized
            # when the web service receives them
            "blob": json.dumps({'talos_data': [talos_datum]})
        }

        tda = TalosDataAdapter()
        tda.adapt_and_load(self.REPO_NAME, reference_data, job_data,
                           submission)

        # base: subtests + one extra result for the summary series,
        # plus one performance series per counter
        expected_result_count = len(talos_datum["results"]) + 1
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         PerformanceSignature.objects.all().count())

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            stored = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in
                self.assertEqual(
                    round(talos_datum['summary']['subtests'][testname]['filtered'], 2),
                    stored.value)
            else:
                # old style talos blob without a summary; these are going
                # away, so just verify some value was generated
                self.assertTrue(stored.value)

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            stored = PerformanceDatum.objects.get(signature=signature)
            self.assertEqual(round(float(results['mean']), 2), stored.value)

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='', suite=talos_datum['testrun']['suite'])
        stored = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             stored.value)
        else:
            # old style talos blob without summary; at least check a value
            self.assertTrue(stored.value)