def store_performance_artifact(
        self, job_ids, performance_artifact_placeholders):
    """
    Store the performance data
    """
    # Retrieve the list of job signatures associated with the jobs
    job_data = self.get_job_signatures_from_ids(job_ids)

    # Collect the distinct reference data signatures for these jobs
    job_ref_data_signatures = set(
        job_data[job_guid]['signature'] for job_guid in job_data
    )

    # Retrieve associated data in reference_data_signatures
    reference_data = self.refdata_model.get_reference_data(
        list(job_ref_data_signatures))

    tda = TalosDataAdapter()

    for perf_data in performance_artifact_placeholders:
        job_guid = perf_data["job_guid"]
        ref_data_signature = job_data[job_guid]['signature']
        ref_data = reference_data[ref_data_signature]

        if 'signature' in ref_data:
            del ref_data['signature']

        # adapt and load data into placeholder structures
        tda.adapt_and_load(self.project, ref_data, job_data, perf_data)

def store_performance_artifact(
        self, job_ids, performance_artifact_placeholders):
    """
    Store the performance data
    """
    # Retrieve the list of job signatures associated with the jobs
    job_data = self.get_job_signatures_from_ids(job_ids)

    # Collect the distinct reference data signatures for these jobs
    job_ref_data_signatures = set(
        job_data[job_guid]['signature'] for job_guid in job_data
    )

    # Retrieve associated data in reference_data_signatures
    reference_data = self.refdata_model.get_reference_data(
        list(job_ref_data_signatures))

    tda = TalosDataAdapter()

    for perf_data in performance_artifact_placeholders:
        job_guid = perf_data["job_guid"]
        ref_data_signature = job_data[job_guid]['signature']
        ref_data = reference_data[ref_data_signature]

        if 'signature' in ref_data:
            del ref_data['signature']

        # adapt and load data into placeholder structures
        tda.adapt_and_load(ref_data, job_data, perf_data)

    self.jobs_execute(
        proc="jobs.inserts.set_performance_artifact",
        debug_show=self.DEBUG,
        placeholders=tda.performance_artifact_placeholders,
        executemany=True)

    self.jobs_execute(
        proc='jobs.inserts.set_series_signature',
        debug_show=self.DEBUG,
        placeholders=tda.signature_property_placeholders,
        executemany=True)

    tda.submit_tasks(self.project)

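# Illustrative sketch (not part of the module): the shape of one performance
# artifact placeholder consumed above, inferred from the test fixtures below.
# The guid, suite/subtest names and values here are hypothetical.
import json

example_performance_artifact = {
    "job_guid": "oqiwy0q847365qiu",
    "name": "test",
    "type": "test",
    # the web service receives the talos blob already JSON-serialized,
    # wrapped in a 'talos_data' list
    "blob": json.dumps({
        "talos_data": [{
            "testrun": {"suite": "tp5"},          # hypothetical suite name
            "results": {"a_subtest": [1.0, 2.0]}  # raw replicates per subtest
        }]
    })
}
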
import json

# SampleData and TalosDataAdapter come from the project's test fixtures and
# performance ETL layer, respectively.


def test_adapt_and_load():
    talos_perf_data = SampleData.get_talos_perf_data()

    tda = TalosDataAdapter()

    result_count = 0
    for talos_datum in talos_perf_data:

        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # one result per subtest, plus one extra for the summary series
        result_count += len(datum['blob']["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in datum['blob']:
            result_count += len(datum['blob']["talos_counters"])

        # Mimic the production environment: the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        tda.adapt_and_load(reference_data, job_data, datum)

    assert result_count == len(tda.performance_artifact_placeholders)

def test_adapt_and_load(self):
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # Mimic the production environment: the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        tda = TalosDataAdapter()
        tda.adapt_and_load(reference_data, job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         len(tda.signatures.keys()))

        # verify that we have signatures for the subtests
        signature_placeholders = copy.copy(
            tda.signature_property_placeholders)
        for (testname, results) in talos_datum["results"].iteritems():
            signature_placeholder = filter(
                lambda p: p[2] == testname, signature_placeholders)
            self.assertEqual(len(signature_placeholder), 1)

            signature_hash = signature_placeholder[0][0]
            perfdata = tda.signatures[signature_hash][0]
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in
                for measure in ['min', 'max', 'std', 'mean', 'median']:
                    self.assertEqual(
                        round(talos_datum['summary']['subtests'][testname][measure], 2),
                        perfdata[measure])
            else:
                # this is an old style talos blob without a summary. these
                # are going away, so I'm not going to bother testing the
                # correctness. however, let's at least verify that some
                # values are being generated here
                for measure in ['min', 'max', 'std', 'mean', 'median']:
                    self.assertTrue(perfdata[measure])

            # filter out this signature from data to process
            signature_placeholders = filter(
                lambda p: p[0] != signature_hash, signature_placeholders)

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature_placeholder = filter(
                lambda p: p[2] == counter, signature_placeholders)
            self.assertEqual(len(signature_placeholder), 1)

            signature_hash = signature_placeholder[0][0]
            perfdata = tda.signatures[signature_hash][0]
            for measure in ['max', 'mean']:
                self.assertEqual(round(float(results[measure]), 2),
                                 perfdata[measure])

            # filter out this signature from data to process
            signature_placeholders = filter(
                lambda p: p[0] != signature_hash, signature_placeholders)

        # we should be left with just summary signature placeholders
        self.assertEqual(len(signature_placeholders), 2)
        perfdata = tda.signatures[signature_placeholders[0][0]][0]
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             perfdata['geomean'])
        else:
            # old style talos blob without a summary. again, going away,
            # but let's at least test that the 'geomean' value is generated
            self.assertTrue(perfdata['geomean'])

import json
import zlib


def test_adapt_and_load():
    talos_perf_data = SampleData.get_talos_perf_data()

    tda = TalosDataAdapter()

    result_count = 0
    for talos_datum in talos_perf_data:

        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # one result per subtest, plus one extra for the summary series
        result_count += len(datum['blob']["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in datum['blob']:
            result_count += len(datum['blob']["talos_counters"])

        # Mimic the production environment: the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        tda.adapt_and_load(reference_data, job_data, datum)

        # we upload a summary with suite and subtest values, +1 for the suite
        # (note: datum['blob'] is the serialized JSON string at this point,
        # so this is a substring check)
        if 'summary' in datum['blob']:
            results = json.loads(zlib.decompress(
                tda.performance_artifact_placeholders[-1][4]))
            data = json.loads(datum['blob'])['talos_data'][0]
            assert (results["blob"]["performance_series"]["geomean"] ==
                    data['summary']['suite'])

            # deal with the subtests now
            for i in range(0, len(data['summary']['subtests'])):
                subresults = json.loads(zlib.decompress(
                    tda.performance_artifact_placeholders[-1 - i][4]))
                if ('subtest_signatures' in
                        subresults["blob"]['signature_properties']):
                    # ignore summary signatures
                    continue

                subdata = data['summary']['subtests'][
                    subresults["blob"]['signature_properties']['test']]
                for datatype in ['min', 'max', 'mean', 'median', 'std']:
                    assert (subdata[datatype] ==
                            subresults["blob"]["performance_series"][datatype])
                if 'value' in subdata.keys():
                    assert (subdata['value'] ==
                            subresults["blob"]["performance_series"]['value'])
        else:
            # FIXME: the talos data blob we're currently using contains
            # datums with summaries and datums without; we should probably
            # test non-summarized data as well
            pass

    assert result_count == len(tda.performance_artifact_placeholders)

def test_adapt_and_load(self):
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        # delete any previously-created perf objects
        # FIXME: because of https://bugzilla.mozilla.org/show_bug.cgi?id=1133273
        # this can be really slow if we have a dev database with lots of
        # performance data in it (if the test succeeds, the transaction
        # will be rolled back, so at least it won't pollute the production
        # database)
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()

        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "option_collection_hash": self.OPTION_HASH,
            "machine_platform": self.MACHINE_PLATFORM,
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # Mimic the production environment: the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        tda = TalosDataAdapter()
        tda.adapt_and_load(self.REPO_NAME, reference_data, job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         PerformanceSignature.objects.all().count())

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            perf_datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values
                # made it in
                self.assertEqual(
                    round(talos_datum['summary']['subtests'][testname]['filtered'], 2),
                    perf_datum.value)
            else:
                # this is an old style talos blob without a summary. these
                # are going away, so I'm not going to bother testing the
                # correctness. however, let's at least verify that some
                # values are being generated here
                self.assertTrue(perf_datum.value)

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            perf_datum = PerformanceDatum.objects.get(signature=signature)
            self.assertEqual(round(float(results['mean']), 2),
                             perf_datum.value)

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='',
            suite=talos_datum['testrun']['suite'])
        perf_datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             perf_datum.value)
        else:
            # old style talos blob without a summary. again, going away,
            # but let's at least test that we have the value
            self.assertTrue(perf_datum.value)

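# A minimal sketch, inferred from the ORM calls in the test above, of the
# Django models it exercises; the project's real model definitions will
# differ and carry more fields, so treat field names and lengths here as
# assumptions.
from django.db import models


class PerformanceSignature(models.Model):
    suite = models.CharField(max_length=80)             # e.g. the talos suite
    test = models.CharField(max_length=80, blank=True)  # empty for the summary series


class PerformanceDatum(models.Model):
    signature = models.ForeignKey(PerformanceSignature)
    value = models.FloatField()  # the summarized value asserted against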