def test_get_job_log_urls(test_repository, result_set_stored,
                          failure_classifications, generic_reference_data,
                          webapp):
    """Job log URLs can be filtered by one job id or by several at once."""
    first_job = create_generic_job('1234', test_repository, 1, 1,
                                   generic_reference_data)
    second_job = create_generic_job('5678', test_repository, 1, 2,
                                    generic_reference_data)
    JobLog.objects.create(job=first_job, name='test_log_1',
                          url='http://google.com', status=JobLog.PENDING)
    JobLog.objects.create(job=first_job, name='test_log_2',
                          url='http://yahoo.com', status=JobLog.PARSED)
    JobLog.objects.create(job=second_job, name='test_log_3',
                          url='http://yahoo.com', status=JobLog.PARSED)

    base_url = reverse('job-log-url-list',
                       kwargs={"project": test_repository.name})

    # only the two logs of the first job
    resp = webapp.get(base_url + '?job_id=1')
    assert resp.status_int == 200
    assert len(resp.json) == 2

    # all three logs across both jobs
    resp = webapp.get(base_url + '?job_id=1&job_id=2')
    assert resp.status_int == 200
    assert len(resp.json) == 3
def test_get_job_log_urls(test_repository, push_stored,
                          failure_classifications, generic_reference_data,
                          webapp):
    """Job log URLs can be filtered by one job id or by several at once."""
    first_job = create_generic_job('1234', test_repository, 1,
                                   generic_reference_data)
    second_job = create_generic_job('5678', test_repository, 1,
                                    generic_reference_data)
    JobLog.objects.create(job=first_job, name='test_log_1',
                          url='http://google.com', status=JobLog.PENDING)
    JobLog.objects.create(job=first_job, name='test_log_2',
                          url='http://yahoo.com', status=JobLog.PARSED)
    JobLog.objects.create(job=second_job, name='test_log_3',
                          url='http://yahoo.com', status=JobLog.PARSED)

    base_url = reverse('job-log-url-list',
                       kwargs={"project": test_repository.name})

    # only the two logs of the first job
    resp = webapp.get(base_url + '?job_id=1')
    assert resp.status_int == 200
    assert len(resp.json) == 2

    # all three logs across both jobs
    resp = webapp.get(base_url + '?job_id=1&job_id=2')
    assert resp.status_int == 200
    assert len(resp.json) == 3
def _generate_perf_data_range(test_repository, generic_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None,
                              reverse_push_range=False):
    """Store a 30-push range of performance data for the 'cheezburger'
    framework: the first 15 pushes carry value 1, the last 15 value 2.

    Optional keyword arguments control whether the framework row is
    created/enabled, whether the suite carries its own value, and allow
    extra metadata to be merged into the suite or its first subtest.
    """
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name,
                                            enabled=enable_framework)

    base_time = int(time.time())
    indices = range(30)
    if reverse_push_range:
        indices = reversed(indices)

    for idx, perf_value in zip(indices, [1] * 15 + [2] * 15):
        push = Push.objects.create(
            repository=test_repository,
            revision='abcdefgh%s' % idx,
            author='*****@*****.**',
            time=datetime.datetime.fromtimestamp(base_time + idx))
        job = create_generic_job('myguid%s' % idx, test_repository,
                                 push.id, generic_reference_data)

        suite = {
            'name': 'cheezburger metrics',
            'subtests': [{'name': 'test1', 'value': perf_value}],
        }
        if add_suite_value:
            suite['value'] = perf_value
        if extra_suite_metadata:
            suite.update(extra_suite_metadata)
        if extra_subtest_metadata:
            suite['subtests'][0].update(extra_subtest_metadata)

        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': framework_name},
                'suites': [suite],
            },
        }

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        store_performance_artifact(job, submit_datum)
def test_measurement_unit_can_be_updated(test_repository, later_perf_push,
                                         perf_job, generic_reference_data,
                                         sample_perf_artifact,
                                         sample_perf_artifact_with_new_unit):
    """Re-ingesting data with a new measurement unit updates only the
    signatures carried by the new artifact, leaving siblings untouched."""
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, submit_datum)

    _, updated_submit_datum = _prepare_test_data(
        sample_perf_artifact_with_new_unit)
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id,
                                   generic_reference_data)
    store_performance_artifact(later_job, updated_submit_datum)

    summary_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='')
    updated_subtest_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='test1')
    assert summary_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT
    assert (updated_subtest_signature.measurement_unit ==
            UPDATED_MEASUREMENT_UNIT)

    # no side effects when parent/sibling signatures
    # change measurement units
    not_changed_subtest_signature = PerformanceSignature.objects.get(
        suite='cheezburger metrics', test='test2')
    assert not_changed_subtest_signature.measurement_unit == MEASUREMENT_UNIT
def _generate_perf_data_range(test_repository, generic_reference_data,
                              create_perf_framework=True,
                              enable_framework=True,
                              add_suite_value=False,
                              extra_suite_metadata=None,
                              extra_subtest_metadata=None,
                              reverse_push_range=False):
    """Ingest 30 pushes of 'cheezburger' perf data (values: 15x 1 then
    15x 2), optionally creating the framework row, attaching a suite-level
    value, or merging extra metadata into the suite / first subtest."""
    framework_name = "cheezburger"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name,
                                            enabled=enable_framework)

    start = int(time.time())
    push_indices = range(30)
    if reverse_push_range:
        push_indices = reversed(push_indices)
    measured_values = [1] * 15 + [2] * 15

    for push_index, measured_value in zip(push_indices, measured_values):
        push = Push.objects.create(
            repository=test_repository,
            revision='abcdefgh%s' % push_index,
            author='*****@*****.**',
            time=datetime.datetime.fromtimestamp(start + push_index))
        job = create_generic_job('myguid%s' % push_index, test_repository,
                                 push.id, generic_reference_data)

        subtest = {'name': 'test1', 'value': measured_value}
        suite = {'name': 'cheezburger metrics', 'subtests': [subtest]}
        if add_suite_value:
            suite['value'] = measured_value
        if extra_suite_metadata:
            suite.update(extra_suite_metadata)
        if extra_subtest_metadata:
            subtest.update(extra_subtest_metadata)

        datum = {
            'job_guid': 'fake_job_guid',
            'name': 'test',
            'type': 'test',
            'blob': {
                'framework': {'name': framework_name},
                'suites': [suite],
            },
        }

        # the perf data adapter expects unserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        store_performance_artifact(job, submit_datum)
def test_timestamp_can_be_updated(test_repository, perf_job, later_perf_push,
                                  generic_reference_data,
                                  sample_perf_artifact):
    """Storing the same perf datum against a later push must bump the
    signature's `last_updated` to the later push's time."""
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    store_performance_artifact(perf_job, submit_datum)

    # send another datum, a little later, verify that signature is
    # changed accordingly
    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id,
                                   generic_reference_data)
    store_performance_artifact(later_job, submit_datum)

    signature = PerformanceSignature.objects.get(suite='cheezburger metrics',
                                                 test='test1')
    assert signature.last_updated == later_perf_push.time
def _generate_perf_data_range(
    test_repository,
    generic_reference_data,
    create_perf_framework=True,
    enable_framework=True,
    suite_provides_value=False,
    extra_suite_metadata=None,
    extra_subtest_metadata=None,
    reverse_push_range=False,
    job_tier=None,
):
    """Ingest 30 pushes of 'some-perf-framework' data (15x value 1 then
    15x value 2), with optional framework creation, suite-level value,
    extra suite/subtest metadata, reversed push order and a job tier."""
    framework_name = "some-perf-framework"
    if create_perf_framework:
        PerformanceFramework.objects.create(name=framework_name,
                                            enabled=enable_framework)

    start = int(time.time())
    indices = range(30)
    if reverse_push_range:
        indices = reversed(indices)

    for idx, measured_value in zip(indices, [1] * 15 + [2] * 15):
        push = Push.objects.create(
            repository=test_repository,
            revision=f"abcdefgh{idx}",
            author="*****@*****.**",
            time=datetime.datetime.fromtimestamp(start + idx),
        )
        job = create_generic_job(f"myguid{idx}", test_repository, push.id,
                                 generic_reference_data, tier=job_tier)
        datum = sample_perf_datum(framework_name, measured_value)

        suite = datum['blob']['suites'][0]
        if suite_provides_value:
            suite['value'] = measured_value
        if extra_suite_metadata:
            suite.update(extra_suite_metadata)
        if extra_subtest_metadata:
            suite['subtests'][0].update(extra_subtest_metadata)

        # the perf data adapter expects deserialized performance data
        submit_datum = copy.copy(datum)
        submit_datum['blob'] = json.dumps(
            {'performance_data': submit_datum['blob']})
        store_performance_artifact(job, submit_datum)
def test_changing_extra_options_decouples_perf_signatures(
        test_repository, later_perf_push, perf_job, generic_reference_data,
        sample_perf_artifact):
    """Changing a suite's extraOptions must yield brand-new signatures
    rather than updating the existing ones."""
    modified_artifact = copy.deepcopy(sample_perf_artifact)
    modified_artifact['blob']['suites'][0]['extraOptions'] = [
        'different-extra-options']

    later_job = create_generic_job('lateguid', test_repository,
                                   later_perf_push.id,
                                   generic_reference_data)
    _, submit_datum = _prepare_test_data(sample_perf_artifact)
    _, updated_submit_datum = _prepare_test_data(modified_artifact)

    store_performance_artifact(perf_job, submit_datum)
    initial_signature_amount = PerformanceSignature.objects.all().count()
    store_performance_artifact(later_job, updated_submit_datum)

    # Perfherder treats perf data with new properties as entirely new data.
    # Thus, it creates new & separate signatures for them.
    assert (PerformanceSignature.objects.all().count() >
            initial_signature_amount)
def test_job_details(test_repository, failure_classifications,
                     generic_reference_data, result_set_stored, webapp):
    """Exercise the jobdetail-list endpoint: unfiltered access is rejected,
    and filtering works by guid, job_id, job_id__in and repository/title.

    Fixed for Python 3: ``dict.iteritems()`` no longer exists and the
    ``print`` statement is now a function.
    """
    details = {
        'abcd': {'title': 'title', 'value': 'value1', 'url': None},
        'efgh': {'title': None, 'value': 'value2', 'url': None},
        'ijkl': {'title': 'title3', 'value': 'value3',
                 'url': 'https://localhost/foo'},
    }

    # create some job details for some fake jobs
    test_repository2 = Repository.objects.create(
        repository_group=test_repository.repository_group,
        name=test_repository.name + '_2',
        dvcs_type=test_repository.dvcs_type,
        url=test_repository.url + '_2',
        codebase=test_repository.codebase)
    i = 1
    for (job_guid, params) in details.items():
        if i < 3:
            repository = test_repository
        else:
            # renumber last
            repository = test_repository2
            i = 1
        print(i, repository)
        job = create_generic_job(job_guid, repository, 1, i,
                                 generic_reference_data)
        JobDetail.objects.create(job=job, **params)
        i += 1
    print(JobDetail.objects.filter(job__guid='abcd'))

    # trying to get them all should return an error
    resp = webapp.get(reverse('jobdetail-list'), expect_errors=True)
    assert resp.status_int == 400

    # filter to just get one guid at a time
    for guid_identifier in ['job_guid', 'job__guid']:
        for (guid, detail) in details.items():
            resp = webapp.get(
                reverse('jobdetail-list') +
                '?{}={}'.format(guid_identifier, guid))
            assert resp.status_int == 200
            assert len(resp.json['results']) == 1
            result = resp.json['results'][0]
            del result['job_guid']
            del result['job_id']
            assert result == details[guid]

    # filter to get first with (just) job_id
    resp = webapp.get(reverse('jobdetail-list') + '?job_id=1')
    assert resp.status_int == 200
    assert len(resp.json['results']) == 1
    assert set([v['job_guid'] for v in resp.json['results']]) == set(['abcd'])

    # filter to get the first and second with job_id__in and repository
    resp = webapp.get(
        reverse('jobdetail-list') +
        '?repository={}&job_id__in=1,2'.format(test_repository.name))
    assert resp.status_int == 200
    assert len(resp.json['results']) == 2
    assert set([v['job_guid'] for v in resp.json['results']]) == set(
        ['abcd', 'efgh'])

    # filter to get the last element with job_id__in and repository
    resp = webapp.get(
        reverse('jobdetail-list') +
        '?repository={}&job_id__in=3'.format(test_repository2.name))
    assert resp.status_int == 200
    assert len(resp.json['results']) == 1
    assert set([v['job_guid'] for v in resp.json['results']]) == set(['ijkl'])

    # make sure that filtering by repository with a job id in
    # a different repository returns no results
    resp = webapp.get(
        reverse('jobdetail-list') +
        '?repository={}&job_id__in=3'.format(test_repository.name))
    assert resp.status_int == 200
    assert len(resp.json['results']) == 0

    # add an extra one, but filter to just get those with a specific title.
    # we should only have one
    JobDetail.objects.create(title='title2',
                             job=Job.objects.get(guid='abcd'),
                             value='foo')
    resp = webapp.get(reverse('jobdetail-list') + '?title=title&job_guid=abcd')
    print(resp.json)
    assert resp.status_int == 200
    assert len(resp.json['results']) == 1
    assert set([v['job_guid'] for v in resp.json['results']]) == set(['abcd'])
def test_job_details(test_repository, failure_classifications,
                     generic_reference_data, push_stored, webapp):
    """Exercise the jobdetail-list endpoint: unfiltered access is rejected,
    and filtering works by guid, job_id, job_id__in, repository, title,
    value and push_id.

    Fixed for Python 3: ``dict.iteritems()`` no longer exists and the
    ``print`` statement is now a function.
    """
    details = {
        'abcd': {'title': 'title', 'value': 'value1', 'url': None},
        'efgh': {'title': None, 'value': 'value2', 'url': None},
        'ijkl': {'title': 'title3', 'value': 'value3',
                 'url': 'https://localhost/foo'},
    }

    # create some job details for some fake jobs
    test_repository2 = Repository.objects.create(
        repository_group=test_repository.repository_group,
        name=test_repository.name + '_2',
        dvcs_type=test_repository.dvcs_type,
        url=test_repository.url + '_2',
        codebase=test_repository.codebase)
    i = 1
    for (job_guid, params) in details.items():
        if i < 3:
            repository = test_repository
            push_id = 1
        else:
            # renumber last
            repository = test_repository2
            push_id = 2
            i = 1
        print(i, repository)
        job = create_generic_job(job_guid, repository, push_id, i,
                                 generic_reference_data)
        JobDetail.objects.create(job=job, **params)
        i += 1
    print(JobDetail.objects.filter(job__guid='abcd'))

    # trying to get them all should return an error
    resp = webapp.get(reverse('jobdetail-list'), expect_errors=True)
    assert resp.status_int == 400

    # filter to just get one guid at a time
    for guid_identifier in ['job_guid', 'job__guid']:
        for (guid, detail) in details.items():
            resp = webapp.get(
                reverse('jobdetail-list') +
                '?{}={}'.format(guid_identifier, guid))
            assert resp.status_int == 200
            assert len(resp.json['results']) == 1
            result = resp.json['results'][0]
            del result['job_guid']
            del result['job_id']
            assert result == details[guid]

    # filter to get first with (just) job_id
    resp = webapp.get(reverse('jobdetail-list') + '?job_id=1')
    assert resp.status_int == 200
    assert len(resp.json['results']) == 1
    assert set([v['job_guid'] for v in resp.json['results']]) == set(['abcd'])

    # filter to get the first and second with job_id__in and repository
    resp = webapp.get(
        reverse('jobdetail-list') +
        '?repository={}&job_id__in=1,2'.format(test_repository.name))
    assert resp.status_int == 200
    assert len(resp.json['results']) == 2
    assert set([v['job_guid'] for v in resp.json['results']]) == set(
        ['abcd', 'efgh'])

    # filter to get the last element with job_id__in and repository
    resp = webapp.get(
        reverse('jobdetail-list') +
        '?repository={}&job_id__in=3'.format(test_repository2.name))
    assert resp.status_int == 200
    assert len(resp.json['results']) == 1
    assert set([v['job_guid'] for v in resp.json['results']]) == set(['ijkl'])

    # make sure that filtering by repository with a job id in
    # a different repository returns no results
    resp = webapp.get(
        reverse('jobdetail-list') +
        '?repository={}&job_id__in=3'.format(test_repository.name))
    assert resp.status_int == 200
    assert len(resp.json['results']) == 0

    # add an extra one, but filter to just get those with a specific title.
    # we should only have one
    JobDetail.objects.create(title='title2',
                             job=Job.objects.get(guid='abcd'),
                             value='foo')
    resp = webapp.get(reverse('jobdetail-list') + '?title=title&job_guid=abcd')
    assert resp.status_int == 200
    assert len(resp.json['results']) == 1
    assert set([v['job_guid'] for v in resp.json['results']]) == set(['abcd'])

    # should also be able to filter by value
    resp = webapp.get(reverse('jobdetail-list') +
                      '?value=value1&job_guid=abcd')
    assert resp.status_int == 200
    assert resp.json['results'] == [{
        'job_guid': 'abcd',
        'job_id': 1,
        'title': 'title',
        'url': None,
        'value': 'value1'
    }]

    # Should be able to filter by push_id
    resp = webapp.get(reverse('jobdetail-list') + '?push_id=2')
    assert resp.status_int == 200
    assert resp.json['results'] == [{
        'job_guid': 'ijkl',
        'job_id': 3,
        'title': 'title3',
        'url': 'https://localhost/foo',
        'value': 'value3'
    }]
def perf_job(perf_push, failure_classifications, generic_reference_data):
    """Fixture: one generic job attached to the performance push."""
    repository = perf_push.repository
    return create_generic_job('myfunguid', repository, perf_push.id,
                              generic_reference_data)
def test_load_generic_data(test_repository, perf_push, perf_job,
                           generic_reference_data):
    """Storing a generic performance artifact creates the expected
    signatures/data, and re-storing it against a later push bumps the
    signature's `last_updated`."""
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name,
                                               enabled=True)

    suites = [
        {
            'name': 'cheezburger metrics',
            'extraOptions': ['shell', 'e10s'],
            'lowerIsBetter': True,
            'value': 10.0,
            'subtests': [
                {'name': 'test1', 'value': 20.0, 'lowerIsBetter': True},
                {'name': 'test2', 'value': 30.0, 'lowerIsBetter': False},
                {'name': 'test3', 'value': 40.0},
            ],
        },
        {
            'name': 'cheezburger metrics 2',
            'lowerIsBetter': False,
            'value': 10.0,
            'subtests': [{'name': 'test1', 'value': 20.0}],
        },
        {
            'name': 'cheezburger metrics 3',
            'value': 10.0,
            'subtests': [{'name': 'test1', 'value': 20.0}],
        },
    ]
    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {'name': framework_name},
            'suites': suites,
        },
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps(
        {'performance_data': submit_datum['blob']})

    store_performance_artifact(perf_job, submit_datum)

    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']
    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'],
                          suite['name'], '', 'my_option_hash', 'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'), perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'],
                              suite['name'], subtest['name'],
                              'my_option_hash', 'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'), perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)

    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash',
                                                        flat=True)
    assert len(subtest_signatures) == 3

    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
    later_push = Push.objects.create(repository=test_repository,
                                     revision='1234abcd12',
                                     author='*****@*****.**',
                                     time=later_timestamp)
    later_job = create_generic_job('lateguid', test_repository,
                                   later_push.id, generic_reference_data)
    store_performance_artifact(later_job, submit_datum)

    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == later_timestamp
def perf_job(perf_push, failure_classifications, generic_reference_data):
    """Fixture: one generic job attached to the performance push."""
    repository = perf_push.repository
    return create_generic_job('myfunguid', repository, perf_push.id, 1,
                              generic_reference_data)
def test_load_generic_data(test_repository, perf_push, perf_job,
                           generic_reference_data):
    """Storing a generic performance artifact creates the expected
    signatures/data, and re-storing it against a later push bumps the
    signature's `last_updated`."""
    framework_name = 'cheezburger'
    PerformanceFramework.objects.get_or_create(name=framework_name,
                                               enabled=True)

    suites = [
        {
            'name': 'cheezburger metrics',
            'extraOptions': ['shell', 'e10s'],
            'lowerIsBetter': True,
            'value': 10.0,
            'subtests': [
                {'name': 'test1', 'value': 20.0, 'lowerIsBetter': True},
                {'name': 'test2', 'value': 30.0, 'lowerIsBetter': False},
                {'name': 'test3', 'value': 40.0},
            ],
        },
        {
            'name': 'cheezburger metrics 2',
            'lowerIsBetter': False,
            'value': 10.0,
            'subtests': [{'name': 'test1', 'value': 20.0}],
        },
        {
            'name': 'cheezburger metrics 3',
            'value': 10.0,
            'subtests': [{'name': 'test1', 'value': 20.0}],
        },
    ]
    datum = {
        'job_guid': 'fake_job_guid',
        'name': 'test',
        'type': 'test',
        'blob': {
            'framework': {'name': framework_name},
            'suites': suites,
        },
    }

    # the perf data adapter expects unserialized performance data
    submit_datum = copy.copy(datum)
    submit_datum['blob'] = json.dumps(
        {'performance_data': submit_datum['blob']})

    store_performance_artifact(perf_job, submit_datum)

    assert 8 == PerformanceSignature.objects.all().count()
    assert 1 == PerformanceFramework.objects.all().count()
    framework = PerformanceFramework.objects.all()[0]
    assert framework_name == framework.name

    perf_datum = datum['blob']
    for suite in perf_datum['suites']:
        # verify summary, then subtests
        _verify_signature(test_repository.name,
                          perf_datum['framework']['name'],
                          suite['name'], '', 'my_option_hash', 'my_platform',
                          suite.get('lowerIsBetter', True),
                          suite.get('extraOptions'), perf_push.time)
        _verify_datum(suite['name'], '', suite['value'], perf_push.time)
        for subtest in suite['subtests']:
            _verify_signature(test_repository.name,
                              perf_datum['framework']['name'],
                              suite['name'], subtest['name'],
                              'my_option_hash', 'my_platform',
                              subtest.get('lowerIsBetter', True),
                              suite.get('extraOptions'), perf_push.time)
            _verify_datum(suite['name'], subtest['name'], subtest['value'],
                          perf_push.time)

    summary_signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'], test='')
    subtest_signatures = PerformanceSignature.objects.filter(
        parent_signature=summary_signature).values_list('signature_hash',
                                                        flat=True)
    assert len(subtest_signatures) == 3

    # send another datum, a little later, verify that signature's
    # `last_updated` is changed accordingly
    later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
    later_push = Push.objects.create(repository=test_repository,
                                     revision='1234abcd12',
                                     author='*****@*****.**',
                                     time=later_timestamp)
    later_job = create_generic_job('lateguid', test_repository,
                                   later_push.id, 2, generic_reference_data)
    store_performance_artifact(later_job, submit_datum)

    signature = PerformanceSignature.objects.get(
        suite=perf_datum['suites'][0]['name'],
        test=perf_datum['suites'][0]['subtests'][0]['name'])
    assert signature.last_updated == later_timestamp