def test_get_performance_data(self):
    """Two mocked signatures should come back as two distinct series."""
    client = PerfherderClient()
    endpoint = client._get_endpoint_url(client.PERFORMANCE_DATA_ENDPOINT,
                                        project='mozilla-central')
    mocked_url = '{}?{}'.format(
        endpoint, 'signatures=signature1&signatures=signature2')
    payload = {
        'signature1': [{'value': 1}, {'value': 2}],
        'signature2': [{'value': 2}, {'value': 1}],
    }
    # match_querystring so the signatures query params are part of the match
    responses.add(responses.GET, mocked_url, json=payload,
                  match_querystring=True, status=200)

    series_list = client.get_performance_data(
        'mozilla-central', signatures=['signature1', 'signature2'])

    self.assertEqual(len(series_list), 2)
    self.assertEqual(series_list['signature1']['value'], [1, 2])
    self.assertEqual(series_list['signature2']['value'], [2, 1])
def test_get_performance_data(self, mock_get):
    """Two mocked signatures should come back as two distinct series."""
    payload = {
        'signature1': [{'value': 1}, {'value': 2}],
        'signature2': [{'value': 2}, {'value': 1}],
    }
    mock_get.return_value = self._get_mock_response(payload)

    client = PerfherderClient()
    series_list = client.get_performance_data(
        'mozilla-central', signatures=['signature1', 'signature2'])

    self.assertEqual(len(series_list), 2)
    self.assertEqual(series_list['signature1']['value'], [1, 2])
    self.assertEqual(series_list['signature2']['value'], [2, 1])
def test_get_performance_data(self):
    """Two mocked signatures should come back as two distinct series."""
    client = PerfherderClient()
    project_uri = client._get_project_uri('mozilla-central',
                                          client.PERFORMANCE_DATA_ENDPOINT)
    mocked_url = '{}?{}'.format(
        project_uri, 'signatures=signature1&signatures=signature2')
    payload = {
        'signature1': [{'value': 1}, {'value': 2}],
        'signature2': [{'value': 2}, {'value': 1}],
    }
    # match_querystring so the signatures query params are part of the match
    responses.add(responses.GET, mocked_url, json=payload,
                  match_querystring=True, status=200)

    series_list = client.get_performance_data(
        'mozilla-central', signatures=['signature1', 'signature2'])

    self.assertEqual(len(series_list), 2)
    self.assertEqual(series_list['signature1']['value'], [1, 2])
    self.assertEqual(series_list['signature2']['value'], [2, 1])
def test_get_performance_data(self, mock_get):
    """Two mocked signatures should come back as two distinct series."""
    mocked_content = {
        'signature1': [{'value': 1}, {'value': 2}],
        'signature2': [{'value': 2}, {'value': 1}],
    }
    mock_get.return_value = self._get_mock_response(mocked_content)

    series_list = PerfherderClient().get_performance_data(
        'mozilla-central', signatures=['signature1', 'signature2'])

    self.assertEqual(len(series_list), 2)
    self.assertEqual(series_list['signature1']['value'], [1, 2])
    self.assertEqual(series_list['signature2']['value'], [2, 1])
def handle(self, *args, **options): if options['server']: server_params = urlparse(options['server']) server_protocol = server_params.scheme server_host = server_params.netloc else: server_protocol = settings.TREEHERDER_REQUEST_PROTOCOL server_host = settings.TREEHERDER_REQUEST_HOST if not options['project']: raise CommandError("Must specify at least one project with " "--project") pc = PerfherderClient(protocol=server_protocol, host=server_host) option_collection_hash = pc.get_option_collection_hash() # print csv header print ','.join(["project", "platform", "signature", "series", "testrun_id", "push_timestamp", "change", "percent change", "t-value", "revision"]) for project in options['project']: if options['signature']: signatures = [options['signature']] signature_data = pc.get_performance_signatures( project, signatures=signatures, interval=options['time_interval']) else: signature_data = pc.get_performance_signatures( project, interval=options['time_interval']) signatures = [] signatures_to_ignore = set() # if doing everything, only handle summary series for (signature, properties) in signature_data.iteritems(): signatures.append(signature) if 'subtest_signatures' in properties: # Don't alert on subtests which have a summary signatures_to_ignore.update(properties['subtest_signatures']) signatures = [signature for signature in signatures if signature not in signatures_to_ignore] for signature in signatures: series = pc.get_performance_data( project, signatures=signature, interval=options['time_interval'])[signature] series_properties = signature_data.get(signature) data = [] for (result_set_id, timestamp, value) in zip( series['result_set_id'], series['push_timestamp'], series['value']): data.append(Datum(timestamp, value, testrun_id=result_set_id)) for r in detect_changes(data): if r.state == 'regression': resultsets = pc.get_resultsets(project, id=r.testrun_id) if len(resultsets): revision = resultsets[0]['revision'] else: revision = '' initial_value = 
r.historical_stats['avg'] new_value = r.forward_stats['avg'] if initial_value != 0: pct_change = 100.0 * abs(new_value - initial_value) / float(initial_value) else: pct_change = 0.0 delta = (new_value - initial_value) print ','.join(map( lambda v: str(v), [project, series_properties['machine_platform'], signature, self._get_series_description( option_collection_hash, series_properties), r.testrun_id, r.push_timestamp, delta, pct_change, r.t, revision[0:12]]))
def handle(self, *args, **options): if not options['project']: raise CommandError("Must specify at least one project with " "--project") pc = PerfherderClient(server_url=options['server']) option_collection_hash = pc.get_option_collection_hash() # print csv header print ','.join(["project", "platform", "signature", "series", "testrun_id", "push_timestamp", "change", "percent change", "t-value", "revision"]) for project in options['project']: if options['signature']: signatures = [options['signature']] signature_data = pc.get_performance_signatures( project, signatures=signatures, interval=options['time_interval']) else: signature_data = pc.get_performance_signatures( project, interval=options['time_interval']) signatures = [] signatures_to_ignore = set() # if doing everything, only handle summary series for (signature, properties) in signature_data.iteritems(): signatures.append(signature) if 'subtest_signatures' in properties: # Don't alert on subtests which have a summary signatures_to_ignore.update(properties['subtest_signatures']) signatures = [signature for signature in signatures if signature not in signatures_to_ignore] for signature in signatures: series = pc.get_performance_data( project, signatures=signature, interval=options['time_interval'])[signature] series_properties = signature_data.get(signature) data = [] for (result_set_id, timestamp, value) in zip( series['result_set_id'], series['push_timestamp'], series['value']): data.append(Datum(timestamp, value, testrun_id=result_set_id)) for r in detect_changes(data): if r.state == 'regression': resultsets = pc.get_resultsets(project, id=r.testrun_id) if len(resultsets): revision = resultsets[0]['revision'] else: revision = '' initial_value = r.historical_stats['avg'] new_value = r.forward_stats['avg'] if initial_value != 0: pct_change = 100.0 * abs(new_value - initial_value) / float(initial_value) else: pct_change = 0.0 delta = (new_value - initial_value) print ','.join(map( lambda v: str(v), [project, 
series_properties['machine_platform'], signature, self._get_series_description( option_collection_hash, series_properties), r.testrun_id, r.push_timestamp, delta, pct_change, r.t, revision[0:12]]))