def test_resultset_create(jm, test_repository, sample_resultset, mock_post_json):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    assert Push.objects.count() == 0

    # store the first two, so we submit all, but should properly not re-
    # add the others.
    jm.store_result_set_data(sample_resultset[:2])
    assert Push.objects.count() == 2

    trsc = TreeherderResultSetCollection()
    for rs in sample_resultset:
        rs.update({'author': 'John Doe'})
        trsc.add(trsc.get_resultset(rs))

    test_utils.post_collection(jm.project, trsc)

    # every sample resultset should now be stored exactly once (the two
    # pre-stored ones must not have been duplicated)
    assert Push.objects.count() == len(sample_resultset)
    assert set(Push.objects.values_list('revision', flat=True)) == {
        rs['revision'] for rs in sample_resultset}
def test_resultset_create(test_repository, sample_resultset, mock_post_json):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    assert Push.objects.count() == 0

    # store the first two, so we submit all, but should properly not re-
    # add the others.
    store_result_set_data(test_repository, sample_resultset[:2])
    assert Push.objects.count() == 2

    trsc = TreeherderResultSetCollection()
    for rs in sample_resultset:
        rs.update({'author': 'John Doe'})
        trsc.add(trsc.get_resultset(rs))

    test_utils.post_collection(test_repository.name, trsc)

    # every sample resultset should now be stored exactly once (the two
    # pre-stored ones must not have been duplicated)
    assert Push.objects.count() == len(sample_resultset)
    assert set(Push.objects.values_list('revision', flat=True)) == {
        rs['revision'] for rs in sample_resultset}
def test_post_job_with_text_log_summary_and_bug_suggestions_artifact(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        mock_error_summary,
        text_log_summary_dict,
        ):
    """
    Submitting a job with a pre-parsed log plus both artifacts must not
    re-parse the log or regenerate artifacts; the supplied ones are used
    as-is.
    """
    # stub out the parser and the error-summary generator so we can
    # verify neither gets invoked
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)
    mock_get_error_summary = MagicMock(name="get_error_summary_artifacts")
    monkeypatch.setattr(error_summary, 'get_error_summary_artifacts',
                        mock_get_error_summary)

    error_summary_blob = ["fee", "fie", "foe", "fum"]
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [
                {
                    "blob": json.dumps(text_log_summary_dict),
                    "type": "json",
                    "name": "text_log_summary",
                    "job_guid": job_guid
                },
                {
                    "blob": json.dumps(error_summary_blob),
                    "type": "json",
                    "name": "Bug suggestions",
                    "job_guid": job_guid
                },
            ]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    check_artifacts(test_project, job_guid, 'parsed', 2,
                    {'Bug suggestions', 'text_log_summary'},
                    error_summary_blob)

    # neither log parsing nor error-summary generation should have run
    assert mock_parse.called is False
    assert mock_get_error_summary.called is False
def test_post_job_with_buildapi_artifact(test_repository,
                                         failure_classifications,
                                         result_set_stored,
                                         mock_post_json):
    """
    Submitting a job carrying a buildapi artifact stores that artifact
    (and updates the job object accordingly).
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'artifacts': [],
            'job_guid': job_guid,
            'state': 'completed',
        }
    })
    job.add_artifact(
        "buildapi", "json",
        json.dumps({"buildername": "Windows 8 64-bit cheezburger",
                    "request_id": 1234}))
    collection.add(job)
    post_collection(test_repository.name, collection)

    assert Job.objects.count() == 1
    assert JobDetail.objects.count() == 1

    # the buildapi request id should have been captured as a job detail
    detail = JobDetail.objects.all()[0]
    assert detail.title == 'buildbot_request_id'
    assert detail.value == str(1234)
    assert detail.url is None
def test_post_job_with_default_tier(test_project, result_set_stored,
                                    mock_post_json):
    """Jobs submitted without an explicit tier should default to tier 1."""
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    with JobsModel(test_project) as jobs_model:
        matching = [x for x in jobs_model.get_job_list(0, 20)
                    if x['job_guid'] == job_guid]
        stored_job = matching[0]
        assert stored_job['tier'] == 1
def test_resultset_create(sample_resultset, jm, initial_data, mock_post_json):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    trsc = TreeherderResultSetCollection()
    for rs in sample_resultset:
        rs.update({'author': 'John Doe'})
        trsc.add(trsc.get_resultset(rs))

    test_utils.post_collection(jm.project, trsc)

    # the first sample resultset should now be retrievable by its
    # revision hash
    rows = jm.get_dhub().execute(
        proc="jobs_test.selects.resultset_by_rev_hash",
        placeholders=[sample_resultset[0]['revision_hash']]
    )
    assert len(rows) == 1
    assert rows[0]['revision_hash'] == sample_resultset[0]['revision_hash']

    jm.disconnect()
def test_post_job_with_buildapi_artifact(test_project, result_set_stored,
                                         mock_post_json):
    """
    Submitting a job carrying a buildapi artifact stores that artifact
    (and updates the job object accordingly).
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'artifacts': [],
            'job_guid': job_guid,
            'state': 'completed',
        }
    })
    job.add_artifact(
        "buildapi", "json",
        json.dumps({
            "buildername": "Windows 8 64-bit cheezburger",
            "request_id": 1234
        }))
    collection.add(job)
    post_collection(test_project, collection)

    assert Job.objects.count() == 1
    assert JobDetail.objects.count() == 1

    # the buildapi request id should have been captured as a job detail
    detail = JobDetail.objects.all()[0]
    assert detail.title == 'buildbot_request_id'
    assert detail.value == str(1234)
    assert detail.url is None
def test_post_talos_artifact(test_project, test_repository,
                             result_set_stored, mock_post_json):
    """Submitting a talos_data artifact ingests one performance series."""
    test_repository.save()

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {'talos_data': SampleData.get_minimal_talos_perf_data()},
                'type': 'json',
                'name': 'talos_data',
                'job_guid': job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    # we'll just validate that we got the expected number of results for
    # talos (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.count() == 1
    assert PerformanceDatum.objects.count() == 1
def test_post_perf_artifact_revision_hash(test_repository,
                                          failure_classifications,
                                          push_stored, mock_post_json):
    """A performance_data artifact submitted with a revision hash is ingested."""
    test_repository.save()
    PerformanceFramework.objects.get_or_create(name='cheezburger',
                                               enabled=True)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    perf_blob = {
        "performance_data": {
            "framework": {"name": "cheezburger"},
            "suites": [{
                "name": "cheezburger metrics",
                "value": 10.0,
                "subtests": [
                    {"name": "test1", "value": 20.0},
                    {"name": "test2", "value": 30.0}
                ]
            }]
        }
    }

    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': push_stored[0]['revision'],
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': perf_blob,
                'type': 'json',
                'name': 'performance_data',
                'job_guid': job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_repository.name, collection)

    # we'll just validate that we got the expected number of results
    # (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.all().count() == 3
    assert PerformanceDatum.objects.all().count() == 3
def test_post_perf_artifact_multiple(jobs_ds, test_repository,
                                     result_set_stored, mock_post_json):
    """A performance_data artifact containing several perf objects is ingested."""
    PerformanceFramework.objects.get_or_create(name='cheezburger')

    perfobj = {
        "framework": {"name": "cheezburger"},
        "suites": [{
            "name": "cheezburger metrics",
            "value": 10.0,
            "subtests": [
                {"name": "test1", "value": 20.0},
                {"name": "test2", "value": 30.0}
            ]
        }]
    }
    # second object is identical except for the suite name
    perfobj2 = copy.deepcopy(perfobj)
    perfobj2['suites'][0]['name'] = "cheezburger metrics 2"

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {"performance_data": [perfobj, perfobj2]},
                'type': 'json',
                'name': 'performance_data',
                'job_guid': job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_repository.name, collection)

    # we'll just validate that we got the expected number of results
    # (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.all().count() == 6
    assert PerformanceDatum.objects.all().count() == 6
def test_post_job_with_text_log_summary_artifact_pending(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        mock_error_summary,
        text_log_summary_dict,
        ):
    """
    Submitting a job whose log is marked pending but which already has a
    text_log_summary artifact should skip parsing, mark the log parsed,
    and generate bug suggestions.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    check_artifacts(test_project, job_guid, 'parsed', 2,
                    {'Bug suggestions', 'text_log_summary'},
                    mock_error_summary)

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def running_jobs_stored(jm, running_jobs, result_set_stored):
    """Store a list of buildapi running jobs."""
    running_jobs.update(result_set_stored[0])

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(running_jobs))
    test_utils.post_collection(jm.project, collection)
def completed_jobs_stored(
        jm, completed_jobs, result_set_stored, mock_send_request):
    """Store a list of buildapi completed jobs into the objectstore."""
    completed_jobs['revision_hash'] = result_set_stored[0]['revision_hash']

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(completed_jobs))
    test_utils.post_collection(jm.project, collection)
def running_jobs_stored(
        jm, running_jobs, result_set_stored):
    """Store a list of buildapi running jobs into the objectstore."""
    running_jobs.update(result_set_stored[0])

    collection = TreeherderJobCollection(job_type='update')
    collection.add(collection.get_job(running_jobs))
    test_utils.post_collection(jm.project, collection)
def completed_jobs_stored(jm, completed_jobs, result_set_stored,
                          mock_post_json):
    """Store a list of buildapi completed jobs."""
    completed_jobs['revision_hash'] = result_set_stored[0]['revision_hash']

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(completed_jobs))
    test_utils.post_collection(jm.project, collection)
def test_post_job_with_text_log_summary_artifact_pending(
        test_repository,
        failure_classifications,
        push_stored,
        monkeypatch,
        mock_post_json,
        text_log_summary_dict,
        ):
    """
    Submitting a job whose log is marked pending but which already has a
    text_log_summary artifact should skip parsing, mark the log parsed,
    and generate bug suggestions.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_repository.name, collection)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def pending_jobs_stored(jm, pending_jobs, result_set_stored):
    """
    Store a list of buildapi pending jobs into the jobs store using
    BuildApiTreeHerderAdapter.
    """
    pending_jobs.update(result_set_stored[0])

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(pending_jobs))
    test_utils.post_collection(jm.project, collection)
def running_jobs_stored(
        jm, running_jobs, result_set_stored, mock_post_json):
    """Store a list of buildapi running jobs."""
    running_jobs.update(result_set_stored[0])
    running_jobs.update({'project': jm.project})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(running_jobs))
    test_utils.post_collection(jm.project, collection)
def completed_jobs_stored(test_repository, failure_classifications,
                          completed_jobs, result_set_stored, mock_post_json):
    """Store a list of buildapi completed jobs."""
    completed_jobs['revision'] = result_set_stored[0]['revision']
    completed_jobs.update({'project': test_repository.name})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(completed_jobs))
    test_utils.post_collection(test_repository.name, collection)
def completed_jobs_stored(
        jm, completed_jobs, result_set_stored, mock_post_json):
    """Store a list of buildapi completed jobs."""
    completed_jobs['revision'] = result_set_stored[0]['revision']
    completed_jobs.update({'project': jm.project})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(completed_jobs))
    test_utils.post_collection(jm.project, collection)
def running_jobs_stored(test_repository, failure_classifications,
                        running_jobs, result_set_stored, mock_post_json):
    """Store a list of buildapi running jobs."""
    running_jobs.update(result_set_stored[0])
    running_jobs.update({'project': test_repository.name})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(running_jobs))
    test_utils.post_collection(test_repository.name, collection)
def completed_jobs_stored(
        test_repository, failure_classifications, completed_jobs,
        result_set_stored, mock_post_json):
    """Store a list of buildapi completed jobs."""
    completed_jobs['revision'] = result_set_stored[0]['revision']
    completed_jobs.update({'project': test_repository.name})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(completed_jobs))
    test_utils.post_collection(test_repository.name, collection)
def running_jobs_stored(
        test_repository, failure_classifications, running_jobs,
        result_set_stored, mock_post_json):
    """Store a list of buildapi running jobs."""
    running_jobs.update(result_set_stored[0])
    running_jobs.update({'project': test_repository.name})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(running_jobs))
    test_utils.post_collection(test_repository.name, collection)
def test_post_job_with_text_log_summary_artifact_parsed_dict_blob(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        mock_error_summary,
        text_log_summary_dict,
        ):
    """
    Submitting a job with a pre-parsed log (summary supplied as a raw
    dict blob) gets parse_status "parsed", skips log parsing, and still
    generates bug suggestions.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                # note: blob is a dict here, not a JSON string
                "blob": text_log_summary_dict,
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    check_artifacts(test_project, job_guid, JobLog.PARSED, 1,
                    {'Bug suggestions'}, mock_error_summary)

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def pending_jobs_stored(
        jm, pending_jobs, result_set_stored):
    """
    Store a list of buildapi pending jobs into the jobs store using
    BuildApiTreeHerderAdapter.
    """
    pending_jobs.update(result_set_stored[0])

    collection = TreeherderJobCollection(job_type='update')
    collection.add(collection.get_job(pending_jobs))
    test_utils.post_collection(jm.project, collection)
def test_post_job_with_text_log_summary_artifact_pending(
        test_repository,
        failure_classifications,
        result_set_stored,
        monkeypatch,
        mock_post_json,
        text_log_summary_dict,
        ):
    """
    Submitting a job whose log is marked pending but which already has a
    text_log_summary artifact should skip parsing, mark the log parsed,
    and generate bug suggestions.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_repository.name, collection)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def test_post_job_with_text_log_summary_artifact_parsed(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        text_log_summary_dict,
        ):
    """
    Submitting a job with a pre-parsed log gets parse_status "parsed" and
    skips parsing, yet the expected text log steps/errors and bug
    suggestions are still produced.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def pending_jobs_stored(test_repository, failure_classifications,
                        pending_jobs, result_set_stored, mock_post_json):
    """
    Store a list of buildapi pending jobs into the jobs store using
    BuildApiTreeHerderAdapter.
    """
    pending_jobs.update(result_set_stored[0])
    pending_jobs.update({'project': test_repository.name})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(pending_jobs))
    test_utils.post_collection(test_repository.name, collection)
def test_objectstore_create(job_sample, jm):
    """
    Test posting data to the objectstore via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 job stored in the objectstore
    """
    collection = TreeherderJobCollection()
    collection.add(collection.get_job(job_sample))

    resp = test_utils.post_collection(jm.project, collection)
    assert resp.status_int == 200
    assert resp.json['message'] == 'well-formed JSON stored'

    # the job should be retrievable from the objectstore by its guid
    rows = jm.get_os_dhub().execute(
        proc="objectstore_test.selects.row_by_guid",
        placeholders=[job_sample["job"]["job_guid"]]
    )
    assert len(rows) == 1
    assert rows[0]['job_guid'] == job_sample["job"]["job_guid"]

    jm.disconnect()
def test_resultset_create(sample_resultset, jm, initial_data):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    trsc = TreeherderResultSetCollection()
    for rs in sample_resultset:
        trsc.add(trsc.get_resultset(rs))

    resp = test_utils.post_collection(jm.project, trsc)
    assert resp.status_int == 200
    assert resp.json['message'] == 'well-formed JSON stored'

    # the first sample resultset should now be retrievable by its
    # revision hash
    rows = jm.get_dhub().execute(
        proc="jobs_test.selects.resultset_by_rev_hash",
        placeholders=[sample_resultset[0]['revision_hash']]
    )
    assert len(rows) == 1
    assert rows[0]['revision_hash'] == sample_resultset[0]['revision_hash']

    jm.disconnect()
def test_resultset_create(jm, test_repository, sample_resultset,
                          mock_post_json):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    # store the first two, so we submit all, but should properly not re-
    # add the others.
    jm.store_result_set_data(sample_resultset[:2])

    trsc = TreeherderResultSetCollection()
    exp_revision_hashes = set()
    for rs in sample_resultset:
        rs.update({'author': 'John Doe'})
        trsc.add(trsc.get_resultset(rs))
        exp_revision_hashes.add(rs["revision"])

    resp = test_utils.post_collection(jm.project, trsc)

    # response should echo back every submitted revision
    act_revision_hashes = {x["long_revision"] for x in resp.json["resultsets"]}
    assert exp_revision_hashes == act_revision_hashes

    rows = jm.get_dhub().execute(
        proc="jobs_test.selects.resultset_by_long_revision",
        placeholders=[sample_resultset[0]['revision']]
    )
    assert len(rows) == 1
    assert rows[0]['long_revision'] == sample_resultset[0]['revision']
def test_resultset_create(jm, test_repository, sample_resultset,
                          mock_post_json):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    # store the first two, so we submit all, but should properly not re-
    # add the others.
    jm.store_result_set_data(sample_resultset[:2])

    trsc = TreeherderResultSetCollection()
    exp_revision_hashes = set()
    for rs in sample_resultset:
        rs.update({'author': 'John Doe'})
        trsc.add(trsc.get_resultset(rs))
        exp_revision_hashes.add(rs["revision"])

    resp = test_utils.post_collection(jm.project, trsc)

    # response should echo back every submitted revision
    act_revision_hashes = {x["long_revision"] for x in resp.json["resultsets"]}
    assert exp_revision_hashes == act_revision_hashes

    rows = jm.get_dhub().execute(
        proc="jobs_test.selects.resultset_by_long_revision",
        placeholders=[sample_resultset[0]['revision']])
    assert len(rows) == 1
    assert rows[0]['long_revision'] == sample_resultset[0]['revision']
def test_resultset_create(sample_resultset, jm, initial_data):
    """
    Test posting data to the resultset endpoint via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 resultset stored in the jobs schema
    """
    trsc = TreeherderResultSetCollection()
    for rs in sample_resultset:
        trsc.add(trsc.get_resultset(rs))

    resp = test_utils.post_collection(jm.project, trsc)
    assert resp.status_int == 200
    assert resp.json['message'] == 'well-formed JSON stored'

    # the first sample resultset should now be retrievable by its
    # revision hash
    rows = jm.get_jobs_dhub().execute(
        proc="jobs_test.selects.resultset_by_rev_hash",
        placeholders=[sample_resultset[0]['revision_hash']])
    assert len(rows) == 1
    assert rows[0]['revision_hash'] == sample_resultset[0]['revision_hash']

    jm.disconnect()
def test_objectstore_create(job_sample, jm):
    """
    Test posting data to the objectstore via webtest.

    Expected results are:
    - return code 200
    - return message successful
    - 1 job stored in the objectstore
    """
    collection = TreeherderJobCollection()
    collection.add(collection.get_job(job_sample))

    resp = test_utils.post_collection(jm.project, collection)
    assert resp.status_int == 200
    assert resp.json['message'] == 'well-formed JSON stored'

    # the job should be retrievable from the objectstore by its guid
    rows = jm.get_os_dhub().execute(
        proc="objectstore_test.selects.row_by_guid",
        placeholders=[job_sample["job"]["job_guid"]]
    )
    assert len(rows) == 1
    assert rows[0]['job_guid'] == job_sample["job"]["job_guid"]

    jm.disconnect()
def pending_jobs_stored(
        test_repository, failure_classifications, pending_jobs,
        result_set_stored, mock_post_json):
    """
    Store a list of buildapi pending jobs into the jobs store using
    BuildApiTreeHerderAdapter.
    """
    pending_jobs.update(result_set_stored[0])
    pending_jobs.update({'project': test_repository.name})

    collection = TreeherderJobCollection()
    collection.add(collection.get_job(pending_jobs))
    test_utils.post_collection(test_repository.name, collection)
def test_post_job_pending_to_completed_with_unparsed_log(
        test_repository, push_stored, failure_classifications,
        activate_responses, mock_post_json):
    """
    A job first submitted as running (no logs) and later re-submitted as
    completed with an unparsed log should end up with parsed text log
    errors and bug suggestions only after the second submission.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # the first time, submit it as running (with no logs)
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    }))
    post_collection(test_repository.name, collection)

    # should have no text log errors or bug suggestions
    assert TextLogError.objects.count() == 0
    assert get_error_summary(Job.objects.get(guid=job_guid)) == []

    # the second time, post a log that will get parsed
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }))
    post_collection(test_repository.name, collection)

    # should have a full set of text log errors
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
def test_post_job_with_text_log_summary_artifact_parsed_dict_blob(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        mock_error_summary,
        text_log_summary_dict,
        ):
    """
    Submitting a job with a pre-parsed log (summary supplied as a raw
    dict blob) gets parse_status "parsed", skips log parsing, and still
    generates bug suggestions.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                # note: blob is a dict here, not a JSON string
                "blob": text_log_summary_dict,
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    check_artifacts(test_project, job_guid, 'parsed', 2,
                    {'Bug suggestions', 'text_log_summary'},
                    mock_error_summary)

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def test_post_perf_artifact_multiple(test_repository,
                                     failure_classifications, push_stored,
                                     mock_post_json):
    """A performance_data artifact containing several perf objects is ingested."""
    PerformanceFramework.objects.get_or_create(name='cheezburger',
                                               enabled=True)

    perfobj = {
        "framework": {"name": "cheezburger"},
        "suites": [{
            "name": "cheezburger metrics",
            "value": 10.0,
            "subtests": [
                {"name": "test1", "value": 20.0},
                {"name": "test2", "value": 30.0}
            ]
        }]
    }
    # second object is identical except for the suite name
    perfobj2 = copy.deepcopy(perfobj)
    perfobj2['suites'][0]['name'] = "cheezburger metrics 2"

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {"performance_data": [perfobj, perfobj2]},
                'type': 'json',
                'name': 'performance_data',
                'job_guid': job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_repository.name, collection)

    # we'll just validate that we got the expected number of results
    # (we have validation elsewhere for the actual data adapters)
    assert PerformanceSignature.objects.all().count() == 6
    assert PerformanceDatum.objects.all().count() == 6
def test_post_job_with_text_log_summary_artifact_parsed(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        text_log_summary_dict,
        ):
    """
    Submitting a job with a pre-parsed log gets parse_status "parsed" and
    skips parsing, yet the expected text log steps/errors and bug
    suggestions are still produced.
    """
    # stub out the parser so we can assert it never runs
    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    job = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }],
            'artifacts': [{
                "blob": json.dumps(text_log_summary_dict),
                "type": "json",
                "name": "text_log_summary",
                "job_guid": job_guid
            }]
        }
    })
    collection.add(job)
    post_collection(test_project, collection)

    # should have 4 error summary lines (aka bug suggestions)
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 4

    # ensure the parsing didn't happen
    assert mock_parse.called is False
def test_post_job_with_unparsed_log(test_repository,
                                    failure_classifications, push_stored,
                                    mock_post_json, monkeypatch,
                                    activate_responses):
    """
    Test submitting a job with an unparsed log parses the log, generates
    an appropriate set of text log steps, and calls get_error_summary
    (to warm the bug suggestions cache).
    """
    # create a wrapper around get_error_summary that records whether
    # it's been called
    mock_get_error_summary = MagicMock(name='get_error_summary',
                                       wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary',
                        mock_get_error_summary)
    log_url = add_log_response(
        "mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    tjc = client.TreeherderJobCollection()
    tj = client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    })
    tjc.add(tj)
    post_collection(test_repository.name, tjc)

    # should have 2 errors
    assert TextLogError.objects.count() == 2
    # verify that get_error_summary was called (to warm the bug suggestions
    # cache)
    assert mock_get_error_summary.called
    # should have 2 error summary lines (aka bug suggestions); look the
    # job up by its guid (consistent with the sibling tests) rather than
    # assuming it was assigned primary key 1
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
def test_post_job_pending_to_completed_with_unparsed_log(test_repository,
                                                         result_set_stored,
                                                         failure_classifications,
                                                         mock_post_json):
    """
    Submit the same job twice: first as running with no logs, then as
    completed with a pending (unparsed) log.

    The first submission must yield no text log errors or bug
    suggestions; the second must trigger parsing and produce both.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # First pass: running, no log references attached.
    running_collection = client.TreeherderJobCollection()
    running_collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'running'
        }
    }))
    post_collection(test_repository.name, running_collection)

    # Nothing should have been parsed yet.
    assert TextLogError.objects.count() == 0
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 0

    # Second pass: completed, with a local sample log that will be parsed.
    log_url = "file://{0}".format(
        SampleData().get_log_path("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz"))

    completed_collection = client.TreeherderJobCollection()
    completed_collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }))
    post_collection(test_repository.name, completed_collection)

    # Parsing should now have produced the full set of errors and
    # matching bug suggestions.
    assert TextLogError.objects.count() == 2
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 2
def test_post_job_with_parsed_log(
        test_project,
        result_set_stored,
        mock_post_json,
        monkeypatch,
        ):
    """
    Submit a job whose log already has parse_status "parsed" but with no
    text_log_summary artifact.

    The log must not be parsed — this covers the case where the client
    intends to submit the summary at a later time.

    NOTE(review): a function of this same name appears again later in
    this file (using 'revision' instead of 'revision_hash'); the later
    definition shadows this one under pytest collection.
    """
    # Spy on the parser so we can verify it is never invoked.
    parse_spy = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', parse_spy)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_project,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }]
        }
    }))

    post_collection(test_project, collection)

    # parse_status stays 'parsed' and no artifacts are generated.
    check_artifacts(test_project, job_guid, 'parsed', 0)

    # Parsing must not have happened.
    assert parse_spy.called is False
def test_post_perf_artifact_revision_hash(test_repository,
                                          result_set_stored,
                                          mock_post_json):
    """
    Submit a job carrying a ``performance_data`` artifact (addressed by
    revision hash) and verify the performance signatures and datums are
    ingested.
    """
    test_repository.save()
    PerformanceFramework.objects.get_or_create(name='cheezburger')

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    # One suite value plus two subtest values -> 3 signatures/datums.
    perf_blob = {
        "performance_data": {
            "framework": {"name": "cheezburger"},
            "suites": [{
                "name": "cheezburger metrics",
                "value": 10.0,
                "subtests": [
                    {"name": "test1", "value": 20.0},
                    {"name": "test2", "value": 30.0}
                ]
            }]
        }
    }

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision'],
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': perf_blob,
                'type': 'json',
                'name': 'performance_data',
                'job_guid': job_guid
            }]
        }
    }))

    post_collection(test_repository.name, collection)

    # We only validate the expected number of results for talos here;
    # the actual data adapters are validated elsewhere.
    assert PerformanceSignature.objects.all().count() == 3
    assert PerformanceDatum.objects.all().count() == 3
def test_post_job_with_unparsed_log(test_repository, failure_classifications,
                                    result_set_stored, mock_post_json,
                                    monkeypatch):
    """
    Submit a job with an unparsed log (served from a local sample file).

    The log should be parsed, an appropriate set of text log steps
    created, and ``get_error_summary`` called (to warm the bug
    suggestions cache).
    """
    # Wrap the real get_error_summary so we can detect the call while
    # keeping its behavior.
    summary_spy = MagicMock(name='get_error_summary',
                            wraps=get_error_summary)
    import treeherder.model.error_summary
    monkeypatch.setattr(treeherder.model.error_summary,
                        'get_error_summary', summary_spy)

    log_url = "file://{0}".format(
        SampleData().get_log_path("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz"))

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': log_url,
                'name': 'buildbot_text',
                'parse_status': 'pending'
            }]
        }
    }))

    post_collection(test_repository.name, collection)

    # Parsing should have yielded 2 errors.
    assert TextLogError.objects.count() == 2

    # The bug suggestions cache warmup must have happened.
    assert summary_spy.called

    # Matching 2 error summary lines (aka bug suggestions).
    assert len(get_error_summary(Job.objects.get(id=1))) == 2
def test_post_job_with_default_tier(test_repository, failure_classifications,
                                    push_stored, mock_post_json):
    """
    Submitting a job with no tier specified should get the default
    tier of 1.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
        }
    }))
    post_collection(test_repository.name, collection)

    stored_job = Job.objects.get(guid=job_guid)
    assert stored_job.tier == 1
def test_resultset_with_bad_key(sample_resultset, jm, initial_data):
    """
    Posting a resultset collection with a wrong OAuth consumer key must
    be rejected with HTTP 403 / access_denied.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    collection = TreeherderResultSetCollection()
    for resultset in sample_resultset:
        collection.add(collection.get_resultset(resultset))

    resp = test_utils.post_collection(jm.project, collection, status=403,
                                      consumer_key="horrible key")

    assert resp.status_int == 403
    assert resp.json["response"] == "access_denied"
    assert resp.json["message"] == "oauth_consumer_key does not match project, {0}, credentials".format(jm.project)
def test_resultset_with_bad_secret(sample_resultset, jm, initial_data):
    """
    Posting a resultset collection with a wrong OAuth consumer secret
    must be rejected with HTTP 403 / invalid_client.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    collection = TreeherderResultSetCollection()
    for resultset in sample_resultset:
        collection.add(collection.get_resultset(resultset))

    resp = test_utils.post_collection(jm.project, collection, status=403,
                                      consumer_secret="horrible secret")

    assert resp.status_int == 403
    assert resp.json["message"] == "Client authentication failed for project, {0}".format(jm.project)
    assert resp.json["response"] == "invalid_client"
def test_post_job_with_default_tier(test_repository, failure_classifications,
                                    result_set_stored, mock_post_json):
    """
    Submitting a job with no tier specified should get the default
    tier of 1.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
        }
    }))
    post_collection(test_repository.name, collection)

    stored_job = Job.objects.get(guid=job_guid)
    assert stored_job.tier == 1
def test_post_talos_artifact(test_project, test_repository,
                             result_set_stored, mock_post_json):
    """
    Submit a job carrying a minimal ``talos_data`` artifact and verify a
    performance signature and datum are ingested.
    """
    test_repository.save()

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [{
                'blob': {
                    'talos_data': SampleData.get_minimal_talos_perf_data()
                },
                'type': 'json',
                'name': 'talos_data',
                'job_guid': job_guid
            }]
        }
    }))

    post_collection(test_project, collection)

    # We only validate the expected number of results for talos here;
    # the actual data adapters are validated elsewhere.
    assert PerformanceSignature.objects.count() == 1
    assert PerformanceDatum.objects.count() == 1
def test_resultset_with_bad_key(sample_resultset, jm, initial_data):
    """
    Posting a resultset collection with a wrong OAuth consumer key must
    be rejected with HTTP 403 and a 'detail' message naming the project.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    collection = TreeherderResultSetCollection()
    for resultset in sample_resultset:
        collection.add(collection.get_resultset(resultset))

    resp = test_utils.post_collection(
        jm.project,
        collection,
        status=403,
        consumer_key="horrible-key"
    )

    assert resp.status_int == 403
    assert resp.json['detail'] == 'oauth_consumer_key does not match credentials for project {0}'.format(jm.project)
def test_post_job_with_default_tier(test_project, result_set_stored,
                                    mock_post_json):
    """
    Submitting a job with no tier specified should get the default
    tier of 1 (verified through the JobsModel job list).

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
        }
    }))
    post_collection(test_project, collection)

    with JobsModel(test_project) as jobs_model:
        matching = [x for x in jobs_model.get_job_list(0, 20)
                    if x['job_guid'] == job_guid]
        stored_job = matching[0]
        assert stored_job['tier'] == 1
def test_post_job_with_parsed_log(test_project, result_set_stored,
                                  mock_post_json,
                                  monkeypatch,
                                  ):
    """
    Submit a job whose log already has parse_status "parsed" but with no
    text_log_summary artifact.

    The log must not be parsed — this covers the case where the client
    intends to submit the summary at a later time.
    """
    # Spy on the parser so we can verify it is never invoked.
    parse_spy = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', parse_spy)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }]
        }
    }))

    post_collection(test_project, collection)

    # parse_status stays 'parsed' and no artifacts are generated.
    check_artifacts(test_project, job_guid, 'parsed', 0)

    # Parsing must not have happened.
    assert parse_spy.called is False
def test_resultset_with_bad_secret(sample_resultset, jm, initial_data):
    """
    Posting a resultset collection with a wrong OAuth consumer secret
    must be rejected with HTTP 403 / invalid_client, with the failure
    reported in the 'detail' field.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    collection = TreeherderResultSetCollection()
    for resultset in sample_resultset:
        collection.add(collection.get_resultset(resultset))

    resp = test_utils.post_collection(
        jm.project,
        collection,
        status=403,
        consumer_secret="horrible secret"
    )

    assert resp.status_int == 403
    assert resp.json['detail'] == "Client authentication failed for project, {0}".format(jm.project)
    assert resp.json['response'] == "invalid_client"
def test_resultset_with_bad_key(sample_resultset, jm, initial_data):
    """
    Posting a resultset collection with a wrong OAuth consumer key must
    be rejected with HTTP 403 / access_denied, with the failure reported
    in the 'detail' field.

    NOTE(review): this test name is defined more than once in this file;
    pytest will only collect the last definition.
    """
    collection = TreeherderResultSetCollection()
    for resultset in sample_resultset:
        collection.add(collection.get_resultset(resultset))

    resp = test_utils.post_collection(
        jm.project,
        collection,
        status=403,
        consumer_key="horrible key"
    )

    assert resp.status_int == 403
    assert resp.json['response'] == "access_denied"
    assert resp.json['detail'] == "oauth_consumer_key does not match project, {0}, credentials".format(jm.project)
def test_objectstore_with_bad_key(job_sample, jm):
    """
    Calling the objectstore with the wrong project key.

    Expected results:
    - return code 403
    - access_denied response with a detail message naming the project
    """
    collection = TreeherderJobCollection()
    collection.add(collection.get_job(job_sample))

    resp = test_utils.post_collection(
        jm.project,
        collection,
        status=403,
        consumer_key='wrong-key'
    )

    assert resp.status_int == 403
    assert resp.json['response'] == "access_denied"
    assert resp.json['detail'] == "oauth_consumer_key does not match project, {0}, credentials".format(jm.project)
def test_objectstore_with_bad_secret(job_sample, jm):
    """
    Calling the objectstore with the wrong project secret.

    Expected results:
    - return code 403
    - invalid_client response with a detail message naming the project

    NOTE(review): this test name is defined again immediately below with
    an identical body; pytest will only collect the later definition.
    """
    collection = TreeherderJobCollection()
    collection.add(collection.get_job(job_sample))

    resp = test_utils.post_collection(
        jm.project,
        collection,
        status=403,
        consumer_secret='not-so-secret'
    )

    assert resp.status_int == 403
    assert resp.json['detail'] == "Client authentication failed for project, {0}".format(jm.project)
    assert resp.json['response'] == "invalid_client"
def test_objectstore_with_bad_secret(job_sample, jm):
    """
    Calling the objectstore with the wrong project secret.

    Expected results:
    - return code 403
    - invalid_client response with a detail message naming the project
    """
    collection = TreeherderJobCollection()
    collection.add(collection.get_job(job_sample))

    resp = test_utils.post_collection(
        jm.project,
        collection,
        status=403,
        consumer_secret='not-so-secret'
    )

    assert resp.status_int == 403
    assert resp.json['detail'] == "Client authentication failed for project, {0}".format(jm.project)
    assert resp.json['response'] == "invalid_client"
def test_post_job_artifacts_by_add_artifact(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        mock_error_summary,
        ):
    """
    Submit a job whose artifacts were attached via ``add_artifact``.

    The logs are pre-parsed, so parse_status must be "parsed" and no
    parsing may happen; the submitted ``text_log_summary`` should still
    trigger generation of the bug suggestions.

    NOTE(review): this test name is defined again later in this file;
    pytest will only collect the later definition.
    """
    # Spy on the parser so we can verify it is never invoked.
    parse_spy = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', parse_spy)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    submitted = client.TreeherderJob({
        'project': test_project,
        'revision_hash': result_set_stored[0]['revision_hash'],
        "job": {
            "artifacts": [],
            "job_guid": job_guid,
            "log_references": [{
                "name": "autophone-nexus-one-1.log",
                "parse_status": "parsed",
                "url": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log"
            }],
            "state": "completed",
        },
    })

    # Pre-parsed text log summary artifact.
    tls_blob = json.dumps({
        "logurl": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log",
        "step_data": {
            "all_errors": [
                {
                    "line": "TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.",
                    "linenumber": 64435
                },
            ],
            "steps": [{
                "buncha": "info"
            }]
        }
    })
    ji_blob = json.dumps({"job_details": [{"foo": "fah"}]})
    bapi_blob = json.dumps({"buildername": "merd"})
    pb_blob = json.dumps({
        "build_url": "feh",
        "chunk": 1,
        "config_file": "mah"
    })

    submitted.add_artifact("text_log_summary", "json", tls_blob)
    submitted.add_artifact("Job Info", "json", ji_blob)
    submitted.add_artifact("buildapi", "json", bapi_blob)
    submitted.add_artifact("privatebuild", "json", pb_blob)
    collection.add(submitted)

    post_collection(test_project, collection)

    # The four submitted artifacts plus the generated bug suggestions.
    check_artifacts(test_project, job_guid, 'parsed', 5,
                    {'Bug suggestions', 'text_log_summary', 'Job Info',
                     'privatebuild', 'buildapi'},
                    mock_error_summary)

    # Parsing must not have happened.
    assert parse_spy.called is False
def test_post_job_artifacts_by_add_artifact(
        test_project,
        monkeypatch,
        result_set_stored,
        mock_post_json,
        ):
    """
    Submit a job whose artifacts were attached via ``add_artifact``.

    The logs are pre-parsed, so parse_status must be "parsed" and no
    parsing may happen; the submitted ``text_log_summary`` should still
    trigger generation of the bug suggestions. The ingested job detail,
    text log step and text log error rows are checked field by field.
    """
    # Spy on the parser so we can verify it is never invoked.
    parse_spy = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', parse_spy)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'

    collection = client.TreeherderJobCollection()
    submitted = client.TreeherderJob({
        'project': test_project,
        'revision': result_set_stored[0]['revision'],
        "job": {
            "artifacts": [],
            "job_guid": job_guid,
            "log_references": [{
                "name": "autophone-nexus-one-1.log",
                "parse_status": "parsed",
                "url": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log"
            }],
            "state": "completed",
        },
    })

    # Pre-parsed text log summary with one failed step and one error.
    tls_blob = json.dumps({
        "logurl": "https://autophone-dev.s3.amazonaws.com/pub/mozilla.org/mobile/tinderbox-builds/mozilla-inbound-android-api-9/1432676531/en-US/autophone-autophone-s1s2-s1s2-nytimes-local.ini-1-nexus-one-1.log",
        "step_data": {
            "steps": [{
                "name": "foobar",
                "result": "testfailed",
                "started_linenumber": 1,
                "finished_linenumber": 100000,
                "started": "2016-07-13 16:09:31",
                "finished": "2016-07-13 16:09:31",
                "errors": [{
                    "line": "TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.",
                    "linenumber": 64435
                }]
            }]
        }
    })
    ji_blob = json.dumps(
        {"job_details": [{
            "title": "mytitle",
            "value": "myvalue"
        }]})
    bapi_blob = json.dumps({"buildername": "merd"})
    pb_blob = json.dumps({
        "build_url": "feh",
        "chunk": 1,
        "config_file": "mah"
    })

    submitted.add_artifact("text_log_summary", "json", tls_blob)
    submitted.add_artifact("Job Info", "json", ji_blob)
    submitted.add_artifact("buildapi", "json", bapi_blob)
    submitted.add_artifact("privatebuild", "json", pb_blob)
    collection.add(submitted)

    post_collection(test_project, collection)

    # One job detail row from the Job Info artifact.
    assert JobDetail.objects.count() == 1
    assert model_to_dict(JobDetail.objects.get(job__guid=job_guid)) == {
        'id': 1,
        'job': 1,
        'title': 'mytitle',
        'value': 'myvalue',
        'url': None
    }

    # One text log step from the submitted summary.
    assert TextLogStep.objects.count() == 1
    assert model_to_dict(TextLogStep.objects.get(job__guid=job_guid)) == {
        'id': 1,
        'job': 1,
        'started': datetime.datetime(2016, 7, 13, 16, 9, 31),
        'finished': datetime.datetime(2016, 7, 13, 16, 9, 31),
        'name': 'foobar',
        'result': 1,
        'started_line_number': 1,
        'finished_line_number': 100000
    }

    # One text log error attached to that step.
    assert TextLogError.objects.count() == 1
    assert model_to_dict(
        TextLogError.objects.get(step__job__guid=job_guid)
    ) == {
        'id': 1,
        'line': 'TEST_UNEXPECTED_FAIL | /sdcard/tests/autophone/s1s2test/nytimes.com/index.html | Failed to get uncached measurement.',
        'line_number': 64435,
        'step': 1,
        'failure_line': None,
        'best_classification': None,
        'best_is_verified': False
    }

    # Some bug suggestions must have been generated.
    assert len(get_error_summary(Job.objects.get(guid=job_guid))) == 1

    check_job_log(test_project, job_guid, JobLog.PARSED)

    # Parsing must not have happened.
    assert parse_spy.called is False