def testUpdateTryJobMetadata(self):
  """Tests _UpdateTryJobMetadata populates metadata from a completed build.

  Also verifies that a subsequent timed-out update records an abandonment
  error with the TIMEOUT error code.
  """
  try_job_id = '1'
  url = 'url'
  build_data = {
      'id': try_job_id,
      'url': url,
      'status': 'COMPLETED',
      # Buildbucket timestamps are microseconds since epoch; these decode to
      # 2016-02-01 22:59:34 / 22:59:30 UTC, matched by the asserts below.
      'completed_ts': '1454367574000000',
      'created_ts': '1454367570000000',
  }
  report = {
      'result': {
          'rev1': 'passed',
          'rev2': 'failed'
      },
      'metadata': {
          'regression_range_size': 2
      }
  }
  build = buildbucket_client.BuildbucketBuild(build_data)
  expected_error_dict = {
      'message': 'Try job monitoring was abandoned.',
      'reason': ('Timeout after %s hours' %
                 waterfall_config.GetTryJobSettings().get('job_timeout_hours'))
  }

  try_job_data = WfTryJobData.Create(try_job_id)
  try_job_data.try_job_key = WfTryJob.Create('m', 'b', 123).key

  # Normal (non-timed-out) update: metadata copied from build and report.
  monitor_try_job_pipeline._UpdateTryJobMetadata(
      try_job_data, failure_type.COMPILE, build, None, False, report)
  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNone(try_job_data.error)
  self.assertEqual(try_job_data.regression_range_size, 2)
  self.assertEqual(try_job_data.number_of_commits_analyzed, 2)
  self.assertEqual(try_job_data.end_time, datetime(2016, 2, 1, 22, 59, 34))
  self.assertEqual(try_job_data.request_time,
                   datetime(2016, 2, 1, 22, 59, 30))
  self.assertEqual(try_job_data.try_job_url, url)

  # Timed-out update (timed_out=True): an abandonment error is recorded.
  monitor_try_job_pipeline._UpdateTryJobMetadata(
      try_job_data, failure_type.COMPILE, build, None, True)
  self.assertEqual(try_job_data.error, expected_error_dict)
  self.assertEqual(try_job_data.error_code, try_job_error.TIMEOUT)
def _CreateRunTestTryJobParameters(self):
  """Persists a running WfTryJob plus its data entity.

  Returns:
    RunTestTryJobParameters referencing the created try job.
  """
  master = 'm'
  builder = 'b'
  number = 1
  job_id = '1'
  build_url = 'https://build.chromium.org/p/m/builders/b/builds/1234'

  try_job = WfTryJob.Create(master, builder, number)
  try_job_data = WfTryJobData.Create(job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.try_job_url = build_url
  try_job_data.put()

  try_job.test_results = [{
      'report': None,
      'url': build_url,
      'try_job_id': '1',
  }]
  try_job.status = analysis_status.RUNNING
  try_job.put()

  return RunTestTryJobParameters(
      build_key=BuildKey(
          master_name=master, builder_name=builder, build_number=number),
      good_revision='rev1',
      bad_revision='rev2',
      suspected_revisions=['r5'],
      cache_name=None,
      dimensions=[],
      targeted_tests={},
      urlsafe_try_job_key=try_job.key.urlsafe())
def testGetWaterfallTryJobs(self):
  """The JSON dashboard lists a successfully completed waterfall try job."""
  try_job_completed = WfTryJobData.Create(3)
  try_job_completed.try_job_key = WfTryJob.Create('m', 'b', 3).key
  try_job_completed.try_job_type = 'compile'
  try_job_completed.start_time = datetime(2016, 5, 4, 0, 0, 1)
  try_job_completed.request_time = datetime(2016, 5, 4, 0, 0, 0)
  try_job_completed.created_time = datetime(2016, 5, 4, 0, 0, 0)
  try_job_completed.end_time = datetime(2016, 5, 4, 0, 0, 2)
  try_job_completed.try_job_url = 'url3'
  try_job_completed.culprits = {'compile': {'12345': 'failed'}}
  try_job_completed.last_buildbucket_response = {'status': 'COMPLETED'}
  try_job_completed.put()

  # The try job ended with a culprit, so it appears under the
  # 'successfully_completed_try_jobs' section with culprit_found=True.
  expected_try_job_completed_display_data = {
      'master_name': 'm',
      'builder_name': 'b',
      'build_number': 3,
      'try_job_type': 'compile',
      'request_time': '2016-05-04 00:00:00 UTC',
      'try_job_url': 'url3',
      'culprit_found': True,
      'last_buildbucket_response': '{"status": "COMPLETED"}'
  }

  response = self.test_app.get(
      ('/try-job-dashboard?format=json&start_date=2016-05-03&'
       'category=waterfall'))
  response_data = response.json_body
  successfully_completed_try_jobs = response_data.get(
      'successfully_completed_try_jobs')

  self.assertEqual(response.status_int, 200)
  self.validateTryJobDisplayData(
      [expected_try_job_completed_display_data],
      successfully_completed_try_jobs)
def testIdentifyCulpritForTestTryJobNoTryJobResultNoHeuristicResult(self):
  """No try-job result and no heuristic result means nothing is recorded."""
  master_name, builder_name, build_number = 'm', 'b', 1
  try_job_id = '1'

  self._CreateEntities(
      master_name, builder_name, build_number, try_job_id,
      try_job_status=analysis_status.RUNNING)
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  # With no result, the notify pipeline receives no culprits and no
  # heuristic CLs.
  self.MockPipeline(
      RevertAndNotifyCulpritPipeline,
      None,
      expected_args=[
          master_name, builder_name, build_number, None, [], failure_type.TEST
      ])
  culprit_pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.TEST, '1', None)
  culprit_pipeline.start()
  self.execute_queued_tasks()

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNone(try_job_data.culprits)
  self.assertIsNone(analysis.result_status)
  self.assertIsNone(analysis.suspected_cls)
def testIdentifyCulpritForCompileTryJobNoCulprit(self):
  """A compile try job without a result completes with no culprit."""
  master_name, builder_name, build_number = 'm', 'b', 1
  try_job_id = '1'

  self._CreateEntities(master_name, builder_name, build_number, try_job_id)
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  self.MockPipeline(
      RevertAndNotifyCulpritPipeline,
      None,
      expected_args=[
          master_name, builder_name, build_number, None, [],
          failure_type.COMPILE
      ])
  culprit_pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.COMPILE, '1',
      None)
  culprit_pipeline.start()
  self.execute_queued_tasks()

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  try_job_data = WfTryJobData.Get(try_job_id)
  # The try job is marked done even though nothing was found.
  self.assertEqual(analysis_status.COMPLETED, try_job.status)
  self.assertEqual([], try_job.compile_results)
  self.assertIsNone(try_job_data.culprits)
  self.assertIsNone(analysis.result_status)
  self.assertIsNone(analysis.suspected_cls)
def testIdentifyCulpritForFlakyCompile(self):
  """A compile failure that also fails at the good revision is flaky."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'

  # Both revisions failed and sub_ranges metadata indicates the failure
  # extends beyond the regression range, so no culprit should be blamed.
  compile_result = {
      'report': {
          'result': {
              'rev1': 'failed',
              'rev2': 'failed'
          },
          'metadata': {
              'sub_ranges': [
                  [
                      None,
                      'rev2'
                  ]
              ]
          }
      },
      'url': 'url',
      'try_job_id': try_job_id,
  }

  self._CreateEntities(master_name, builder_name, build_number, try_job_id)
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.result = {
      'failures': [
          {
              'step_name': 'compile',
              'suspected_cls': []
          }
      ]
  }
  analysis.put()

  self.MockPipeline(RevertAndNotifyCulpritPipeline,
                    None,
                    expected_args=[master_name, builder_name, build_number,
                                   {}, [], failure_type.COMPILE])
  pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.COMPILE, '1',
      compile_result)
  pipeline.start()
  self.execute_queued_tasks()

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(analysis_status.COMPLETED, try_job.status)
  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNone(try_job_data.culprits)

  # Flaky failures mark the analysis FLAKY with no suspected CLs.
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  self.assertEqual(result_status.FLAKY, analysis.result_status)
  self.assertEqual([], analysis.suspected_cls)
def _CreateTryJobData(self, build_id, try_job_key, has_heuristic_results):
  """Persists a WfTryJobData entity representing a test-type try job."""
  data = WfTryJobData.Create(build_id)
  data.try_job_key = try_job_key
  data.try_job_type = failure_type.GetDescriptionForFailureType(
      failure_type.TEST)
  data.created_time = time_util.GetUTCNow()
  # Test try jobs never carry compile targets.
  data.has_compile_targets = False
  data.has_heuristic_results = has_heuristic_results
  data.put()
def testUpdateTryJobMetadataForBuildError(self):
  """A buildbucket error is copied verbatim onto the try job data."""
  error_data = {'reason': 'BUILD_NOT_FOUND', 'message': 'message'}
  buildbucket_error = buildbucket_client.BuildbucketError(error_data)

  try_job_data = WfTryJobData.Create('1')
  try_job_data.try_job_key = WfTryJob.Create('m', 'b', 123).key

  monitor_try_job_pipeline._UpdateTryJobMetadata(
      try_job_data, failure_type.COMPILE, None, buildbucket_error, False)

  self.assertEqual(try_job_data.error, error_data)
def testSuccessfullyScheduleNewTryJobForCompileWithSuspectedRevisions(
    self, mock_module):
  """Scheduling a compile try job records complete WfTryJobData metadata."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  good_revision = 'rev1'
  bad_revision = 'rev2'
  build_id = '1'
  url = 'url'

  build = WfBuild.Create(master_name, builder_name, build_number)
  # Parent builder info is read from the build's properties.
  # NOTE(review): presumably used to pick the matching trybot — confirm
  # against ScheduleCompileTryJobPipeline.
  build.data = {
      'properties': {
          'parent_mastername': 'pm',
          'parent_buildername': 'pb'
      }
  }
  build.put()

  response = {
      'build': {
          'id': build_id,
          'url': url,
          'status': 'SCHEDULED',
      }
  }
  results = [(None, buildbucket_client.BuildbucketBuild(response['build']))]
  mock_module.TriggerTryJobs.return_value = results

  WfTryJob.Create(master_name, builder_name, build_number).put()

  try_job_pipeline = ScheduleCompileTryJobPipeline()
  try_job_id = try_job_pipeline.run(
      master_name, builder_name, build_number, good_revision, bad_revision,
      failure_type.COMPILE, None, ['r5'], None, None)

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  try_job_data = WfTryJobData.Get(build_id)

  expected_try_job_id = '1'
  self.assertEqual(expected_try_job_id, try_job_id)
  self.assertEqual(expected_try_job_id,
                   try_job.compile_results[-1]['try_job_id'])
  self.assertTrue(expected_try_job_id in try_job.try_job_ids)
  self.assertIsNotNone(try_job_data)
  self.assertEqual(try_job_data.master_name, master_name)
  self.assertEqual(try_job_data.builder_name, builder_name)
  self.assertEqual(try_job_data.build_number, build_number)
  self.assertEqual(
      try_job_data.try_job_type,
      failure_type.GetDescriptionForFailureType(failure_type.COMPILE))
  self.assertFalse(try_job_data.has_compile_targets)
  # Suspected revisions (['r5']) were passed, so heuristic results exist.
  self.assertTrue(try_job_data.has_heuristic_results)
def testProperties(self):
  """master/builder/build number are derived from the linked try job key."""
  expected_master = 'm'
  expected_builder = 'b'
  expected_build_number = 123

  try_job = WfTryJob.Create(expected_master, expected_builder,
                            expected_build_number)
  try_job_data = WfTryJobData.Create('try_job_id')
  try_job_data.try_job_key = try_job.key

  self.assertEqual(expected_master, try_job_data.master_name)
  self.assertEqual(expected_builder, try_job_data.builder_name)
  self.assertEqual(expected_build_number, try_job_data.build_number)
def IdentifyCompileTryJobCulprit(parameters):
  """Processes try job result and identifies culprit.

  Args:
    parameters: Pipeline input carrying a ``build_key`` (with GetParts())
        and the compile try job ``result``. Presumably an
        IdentifyCompileTryJobCulpritParameters — confirm against callers.

  Returns:
    A (culprits, heuristic_cls) pair: the culprit dict derived from the try
    job report (or None) and the CLs previously found by heuristic analysis.
  """
  culprits = None
  flaky_compile = False
  master_name, builder_name, build_number = parameters.build_key.GetParts()

  result = parameters.result
  try_job_id = result.try_job_id if result else None
  if try_job_id and result and result.report:
    failed_revision = result.report.culprit
    failed_revisions = [failed_revision] if failed_revision else []
    culprits = try_job_service.GetCulpritsWithoutNoBlameAccountsCLS(
        git.GetCommitsInfo(failed_revisions))

    # In theory there are 2 cases where compile failure could be flaky:
    # 1. All revisions passed in the try job (try job will not run at good
    #    revision in this case),
    # 2. The compile even failed at good revision.
    # We cannot guarantee in the first case the compile failure is flaky
    # because it's also possible the difference between buildbot and trybot
    # causes this.
    # So currently we'll only consider the second case.
    if not culprits and CompileFailureIsFlaky(result):
      flaky_compile = True

    if culprits:
      # Record the culprit on both the result and the try job data entity.
      result.culprit = {'compile': culprits[failed_revision]}
      try_job_data = WfTryJobData.Get(try_job_id)
      try_job_data.culprits = {'compile': failed_revision}
      try_job_data.put()

  # Store try-job results.
  UpdateTryJobResult(parameters, culprits)

  # Saves cls found by heuristic approach to determine a culprit is found
  # by both heuristic and try job when sending notifications.
  # This part must be before UpdateWfAnalysisWithTryJobResult().
  heuristic_cls = build_failure_analysis.GetHeuristicSuspectedCLs(
      master_name, builder_name, build_number)

  # Add try-job results to WfAnalysis.
  UpdateWfAnalysisWithTryJobResult(master_name, builder_name, build_number,
                                   result, culprits, flaky_compile)

  # TODO (chanli): Update suspected_cl for builds in the same group with
  # current build.
  # Updates suspected_cl.
  UpdateSuspectedCLs(master_name, builder_name, build_number, culprits)

  return culprits, heuristic_cls
def IdentifyTestTryJobCulprits(parameters):
  """Processes try job result and identifies culprit.

  Args:
    parameters (IdentifyTestTryJobCulpritParameters): Parameters to identify
        culprit from try job result.

  Returns:
    A (culprits, heuristic_cls, failure_to_culprit_map) tuple: culprits
    found by the try job (or None), CLs found by heuristic analysis, and a
    FailureToCulpritMap deserialized from the per-test culprit data.
  """
  culprits = None
  flaky_failures = {}
  master_name, builder_name, build_number = parameters.build_key.GetParts()

  result = parameters.result
  try_job_id = result.try_job_id if result else None
  failure_to_culprit_map = None
  if try_job_id and result and result.report:
    culprit_map, failed_revisions = FindCulpritForEachTestFailure(result)
    culprits = try_job_service.GetCulpritsWithoutNoBlameAccountsCLS(
        git.GetCommitsInfo(failed_revisions))
    if not culprits:
      # No blamable culprits; fall back to the flakes reported by the job.
      flaky_failures = result.report.flakes
    if culprits:
      try_job_data = WfTryJobData.Get(try_job_id)
      UpdateCulpritMapWithCulpritInfo(culprit_map, culprits)
      failure_to_culprit_map = GetCulpritDataForTest(culprit_map)
      try_job_data.culprits = failure_to_culprit_map
      try_job_data.put()
      result.culprit = culprit_map

  # Store try-job results.
  UpdateTryJobResult(parameters, culprits)

  # Saves cls found by heuristic approach for later use.
  # This part must be before UpdateWfAnalysisWithTryJobResult().
  heuristic_cls = build_failure_analysis.GetHeuristicSuspectedCLs(
      master_name, builder_name, build_number)

  # Add try-job results to WfAnalysis.
  UpdateWfAnalysisWithTryJobResult(master_name, builder_name, build_number,
                                   result, culprits, flaky_failures)

  # TODO (chanli): Update suspected_cl for builds in the same group with
  # current build.
  # Updates suspected_cl.
  UpdateSuspectedCLs(master_name, builder_name, build_number, culprits,
                     result)

  return culprits, heuristic_cls, FailureToCulpritMap.FromSerializable(
      failure_to_culprit_map)
def _CreateEntities(self, master_name, builder_name, build_number,
                    try_job_id, try_job_status=None, compile_results=None,
                    test_results=None):
  """Creates a WfTryJob (with any provided fields) and its WfTryJobData."""
  try_job = WfTryJob.Create(master_name, builder_name, build_number)
  # Only set the optional fields that were actually supplied (truthy).
  for attr, value in (('status', try_job_status),
                      ('compile_results', compile_results),
                      ('test_results', test_results)):
    if value:
      setattr(try_job, attr, value)
  try_job.put()

  data = WfTryJobData.Create(try_job_id)
  data.try_job_key = try_job.key
  data.put()
def CreateTryJobData(build_id, try_job_key, has_compile_targets,
                     has_heuristic_results, try_job_type, runner_id=None):
  """Creates and persists a WfTryJobData entity for a scheduled try job."""
  data = WfTryJobData.Create(build_id)
  data.try_job_key = try_job_key
  data.created_time = time_util.GetUTCNow()
  data.has_compile_targets = has_compile_targets
  data.has_heuristic_results = has_heuristic_results
  data.runner_id = runner_id
  # Store the human-readable description of the failure type.
  data.try_job_type = failure_type.GetDescriptionForFailureType(try_job_type)
  data.put()
def testGetBuildbotLink(self):
  """The HTML dashboard renders a 'buildbot link' for a completed job."""
  completed_data = WfTryJobData.Create(3)
  completed_data.try_job_key = WfTryJob.Create('m', 'b', 3).key
  completed_data.try_job_type = 'compile'
  completed_data.request_time = datetime(2016, 5, 4, 0, 0, 0)
  completed_data.created_time = datetime(2016, 5, 4, 0, 0, 0)
  completed_data.start_time = datetime(2016, 5, 4, 0, 0, 1)
  completed_data.end_time = datetime(2016, 5, 4, 0, 0, 2)
  completed_data.try_job_url = 'url3'
  completed_data.culprits = {'compile': {'12345': 'failed'}}
  completed_data.last_buildbucket_response = {'status': 'COMPLETED'}
  completed_data.put()

  response = self.test_app.get(
      ('/try-job-dashboard?start_date=2016-05-03&category=waterfall'))

  self.assertEqual(response.status_int, 200)
  self.assertIn('buildbot link', response.body)
def testSuccessfullyScheduleNewTryJobForTest(self, mock_module):
  """Scheduling a test try job records complete WfTryJobData metadata."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  good_revision = 'rev1'
  bad_revision = 'rev2'
  targeted_tests = ['a on platform', ['a', ['test1', 'test2']]]
  build_id = '1'

  build = WfBuild.Create(master_name, builder_name, build_number)
  build.data = {'properties': {'parent_mastername': 'pm',
                               'parent_buildername': 'pb'}}
  build.put()

  response = {
      'build': {
          'id': build_id,
          'url': 'url',
          'status': 'SCHEDULED',
      }
  }
  results = [(None, buildbucket_client.BuildbucketBuild(response['build']))]
  mock_module.TriggerTryJobs.return_value = results

  WfTryJob.Create(master_name, builder_name, build_number).put()

  try_job_pipeline = ScheduleTestTryJobPipeline()
  try_job_id = try_job_pipeline.run(
      master_name, builder_name, build_number, good_revision, bad_revision,
      failure_type.TEST, None, None, None, targeted_tests)

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(try_job_id, build_id)
  self.assertEqual(try_job.test_results[-1]['try_job_id'], build_id)

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNotNone(try_job_data)
  self.assertEqual(try_job_data.master_name, master_name)
  self.assertEqual(try_job_data.builder_name, builder_name)
  self.assertEqual(try_job_data.build_number, build_number)
  self.assertEqual(
      try_job_data.try_job_type,
      failure_type.GetDescriptionForFailureType(failure_type.TEST))
  self.assertFalse(try_job_data.has_compile_targets)
  # No heuristic results were provided to the pipeline run above.
  self.assertFalse(try_job_data.has_heuristic_results)
def testIdentifyCulpritForTestTryJobReturnNoneIfNoRevisionToCheck(self):
  """A report without a 'culprits' section yields no culprit."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'

  # The report records a failed test but provides no 'culprits' mapping,
  # so there is no revision to blame.
  test_result = {
      'report': {
          'result': {
              'rev1': {
                  'a_test': {
                      'status': 'failed',
                      'valid': True,
                      'failures': ['a_test1']
                  }
              }
          }
      },
      'url': 'url',
      'try_job_id': try_job_id
  }

  self._CreateEntities(master_name, builder_name, build_number, try_job_id,
                       try_job_status=analysis_status.RUNNING)
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  self.MockPipeline(RevertAndNotifyCulpritPipeline,
                    None,
                    expected_args=[master_name, builder_name, build_number,
                                   {}, [], failure_type.TEST])
  pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.TEST, '1',
      test_result)
  pipeline.start()
  self.execute_queued_tasks()

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNone(try_job_data.culprits)
  self.assertIsNone(analysis.result_status)
  self.assertIsNone(analysis.suspected_cls)
def GetCurrentTryJobID(urlsafe_try_job_key, runner_id):
  """Returns the ID of the newest try job scheduled by the given runner.

  Walks the try job's recorded try-job IDs from newest to oldest and returns
  the first one whose data entity was created by `runner_id`.

  Args:
    urlsafe_try_job_key: urlsafe ndb key of a WfTryJob or flake try job
        entity (may be None/empty).
    runner_id: Identifier of the runner that scheduled the try job.

  Returns:
    The matching try job ID string, or None if the entity does not exist,
    has no recorded try job IDs, or no recorded ID matches the runner.
  """
  try_job = (ndb.Key(
      urlsafe=urlsafe_try_job_key).get() if urlsafe_try_job_key else None)
  if not try_job or not try_job.try_job_ids:
    return None

  # Most recently appended try jobs are checked first. Using reversed()
  # instead of a Python-2-only xrange() index loop.
  for try_job_id in reversed(try_job.try_job_ids):
    # Waterfall and flake try jobs store metadata in different models.
    try_job_data = (WfTryJobData.Get(try_job_id) if isinstance(
        try_job, WfTryJob) else FlakeTryJobData.Get(try_job_id))
    if not try_job_data:
      continue
    if try_job_data.runner_id == runner_id:
      return try_job_id
  return None
def testTryJobPushMissingCallback(self, logging_mock):
  """A pubsub push for a job without a callback only logs (no 5xx retry)."""
  try_job_in_progress = WfTryJobData.Create(12345)
  try_job_in_progress.try_job_key = WfTryJob.Create('m', 'b', 1).key
  try_job_in_progress.try_job_type = 'compile'
  try_job_in_progress.start_time = datetime(2016, 5, 4, 0, 0, 1)
  try_job_in_progress.request_time = datetime(2016, 5, 4, 0, 0, 0)
  try_job_in_progress.try_job_url = 'url1'
  # NB That the try_job_data is not associated with a pipeline callback
  try_job_in_progress.last_buildbucket_response = {'status': 'STARTED'}
  try_job_in_progress.put()

  # This should not break, so that pubsub does not keep retrying. We'll only
  # log a message.
  self.test_app.post(
      '/pubsub/tryjobpush',
      params={
          'data': json.dumps({
              'message': {
                  'attributes': {
                      'auth_token': pubsub_callback.GetVerificationToken(),
                      'build_id': 12345,
                  },
                  'data': base64.b64encode(
                      json.dumps({
                          'build_id': 12345,
                          'user_data': json.dumps({
                              'Message-Type': 'BuildbucketStatusChange',
                          }),
                      })),
              },
          }),
          'format': 'json',
      })
  self.assertTrue(logging_mock.called)
def testIdentifyCulpritForCompileReturnNoneIfAllPassed(self):
  """All revisions passing in the try job means no culprit is identified."""
  master_name, builder_name, build_number = 'm', 'b', 1
  try_job_id = '1'

  compile_result = {
      'report': {
          'result': {
              'rev1': 'passed',
              'rev2': 'passed'
          }
      },
      'url': 'url',
      'try_job_id': try_job_id,
  }

  self._CreateEntities(master_name, builder_name, build_number, try_job_id)
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  self.MockPipeline(
      RevertAndNotifyCulpritPipeline,
      None,
      expected_args=[
          master_name, builder_name, build_number, {}, [],
          failure_type.COMPILE
      ])
  culprit_pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.COMPILE, '1',
      compile_result)
  culprit_pipeline.start()
  self.execute_queued_tasks()

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(analysis_status.COMPLETED, try_job.status)
  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNone(try_job_data.culprits)
  self.assertIsNone(analysis.result_status)
  self.assertIsNone(analysis.suspected_cls)
def testTryJobPush(self):
  """A pubsub push for a job with a callback enqueues the callback task."""
  try_job_in_progress = WfTryJobData.Create(12345)
  try_job_in_progress.try_job_key = WfTryJob.Create('m', 'b', 1).key
  try_job_in_progress.try_job_type = 'compile'
  try_job_in_progress.start_time = datetime(2016, 5, 4, 0, 0, 1)
  try_job_in_progress.request_time = datetime(2016, 5, 4, 0, 0, 0)
  try_job_in_progress.try_job_url = 'url1'
  # Unlike the missing-callback case, a pipeline callback URL is attached.
  try_job_in_progress.callback_url = '/callback?pipeline_id=f9f89162ef32c7fb7'
  try_job_in_progress.last_buildbucket_response = {'status': 'STARTED'}
  try_job_in_progress.put()

  with mock.patch('google.appengine.api.taskqueue.add') as mock_queue:
    self.test_app.post(
        '/pubsub/tryjobpush',
        params={
            'data': json.dumps({
                'message': {
                    'attributes': {
                        'auth_token': pubsub_callback.GetVerificationToken(),
                        'build_id': 12345,
                    },
                    'data': base64.b64encode(
                        json.dumps({
                            'build_id': 12345,
                            'user_data': json.dumps({
                                'Message-Type': 'BuildbucketStatusChange',
                            }),
                        })),
                },
            }),
            'format': 'json',
        })
    # The handler should enqueue exactly one task for the pipeline callback.
    mock_queue.assert_called_once()
def testIdentifyCulpritForTestTryJobNoTryJobResultWithHeuristicResult(self):
  """Heuristic results are preserved when the try job finds no culprit."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'

  suspected_cl = {
      'revision': 'rev1',
      'commit_position': 1,
      'url': 'url_1',
      'repo_name': 'chromium'
  }

  self._CreateEntities(master_name, builder_name, build_number, try_job_id,
                       try_job_status=analysis_status.RUNNING)

  # Heuristic analysis already provided some results.
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.result_status = result_status.FOUND_UNTRIAGED
  analysis.suspected_cls = [suspected_cl]
  analysis.put()

  # The heuristic CL is still forwarded to the notify pipeline even though
  # the try job itself produced no culprits.
  self.MockPipeline(RevertAndNotifyCulpritPipeline,
                    None,
                    expected_args=[master_name, builder_name, build_number,
                                   None, [['chromium', 'rev1']],
                                   failure_type.TEST])
  pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.TEST, '1', None)
  pipeline.start()
  self.execute_queued_tasks()

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertIsNone(try_job_data.culprits)

  # Ensure analysis results are not updated since no culprit from try job.
  self.assertEqual(analysis.result_status, result_status.FOUND_UNTRIAGED)
  self.assertEqual(analysis.suspected_cls, [suspected_cl])
html += '</tr>' html += '</table>' return html if __name__ == '__main__': # Set up the Remote API to use services on the live App Engine. remote_api.EnableRemoteApi(app_id='findit-for-me') START_DATE = datetime.datetime(2016, 2, 1) END_DATE = datetime.datetime(2016, 3, 8) wf_analysis_query = WfTryJobData.query( WfTryJobData.request_time >= START_DATE, WfTryJobData.request_time < END_DATE) data_list = wf_analysis_query.fetch() masters_to_builders = FinditConfig.Get().builders_to_trybots data = _CategorizeTryJobData(data_list) full_report_list = _GetReportListForMastersAndBuilders( masters_to_builders, data, START_DATE, END_DATE) findit_tmp_dir = os.environ.get('TMP_DIR') if not findit_tmp_dir: findit_tmp_dir = os.getcwd() report_path = os.path.join(findit_tmp_dir, 'try_job_data_report.html') with open(report_path, 'w') as f:
def testGetTryJobsForCompileSuccessSwarming(self, mock_buildbucket,
                                            mock_report):
  """Monitors a swarming-based compile try job through to completion."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'
  try_job_url = 'https://luci-milo.appspot.com/swarming/task/3595be5002f4bc10'
  regression_range_size = 2

  try_job = WfTryJob.Create(master_name, builder_name, build_number)
  try_job_data = WfTryJobData.Create(try_job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.try_job_url = try_job_url
  try_job_data.put()
  # Initially the try job has no report; monitoring should fill it in.
  try_job.compile_results = [{
      'report': None,
      'url': try_job_url,
      'try_job_id': '1',
  }]
  try_job.status = analysis_status.RUNNING
  try_job.put()

  build_response = {
      'id': '1',
      'url': try_job_url,
      'status': 'COMPLETED',
  }
  report = {
      'result': {
          'rev1': 'passed',
          'rev2': 'failed'
      },
      'metadata': {
          'regression_range_size': 2
      }
  }
  mock_buildbucket.GetTryJobs.return_value = [
      (None, buildbucket_client.BuildbucketBuild(build_response))
  ]
  mock_report.return_value = json.dumps(report)

  pipeline = MonitorTryJobPipeline()
  pipeline.start_test()
  pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
  pipeline.callback(callback_params=pipeline.last_params)

  # Reload from ID to get all internal properties in sync.
  pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
  pipeline.finalized()
  compile_result = pipeline.outputs.default.value

  expected_compile_result = {
      'report': {
          'result': {
              'rev1': 'passed',
              'rev2': 'failed'
          },
          'metadata': {
              'regression_range_size': regression_range_size
          }
      },
      'url': try_job_url,
      'try_job_id': '1',
  }
  self.assertEqual(expected_compile_result, compile_result)

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(expected_compile_result, try_job.compile_results[-1])
  # Monitoring alone does not complete the try job; it remains RUNNING.
  self.assertEqual(analysis_status.RUNNING, try_job.status)

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertEqual(try_job_data.regression_range_size, regression_range_size)
# Preserve order from original command. ordered_args = [] for original_arg in command_line_args: parsed_arg = original_arg.lstrip('-') if args_dict[parsed_arg]: ordered_args.append(parsed_arg) return ordered_args if __name__ == '__main__': # Set up the Remote API to use services on the live App Engine. remote_api.EnableRemoteApi(app_id='findit-for-me') START_DATE = datetime.datetime(2016, 4, 17) END_DATE = datetime.datetime(2016, 7, 15) try_job_data_query = WfTryJobData.query( WfTryJobData.request_time >= START_DATE, WfTryJobData.request_time < END_DATE) categorized_data = try_job_data_query.fetch() args = GetArgsInOrder() for arg in args: categorized_data = SplitStructByOption(categorized_data, arg) # TODO(lijeffrey): Display data in an html page instead of printing. PrettyPrint(categorized_data, START_DATE, END_DATE)
def HandleGet(self):
  """Shows a list of Findit try job results and statuses in an HTML page.

  Query parameters:
    category: 'flake', 'waterfall', or anything else for both.
    start_date/end_date: date range filtering by entity creation time.
  """
  category = self.request.get('category')
  start = self.request.get('start_date')
  end = self.request.get('end_date')
  start_date, end_date = _GetStartEndDates(start, end)

  if category.lower() == 'flake':
    try_job_data_list = FlakeTryJobData.query(
        FlakeTryJobData.created_time >= start_date,
        FlakeTryJobData.created_time < end_date).fetch()
  elif category.lower() == 'waterfall':
    try_job_data_list = WfTryJobData.query(
        WfTryJobData.created_time >= start_date,
        WfTryJobData.created_time < end_date).fetch()
  else:
    # No recognized category: include both waterfall and flake try jobs.
    wf_try_job_query = WfTryJobData.query(
        WfTryJobData.created_time >= start_date,
        WfTryJobData.created_time < end_date)
    flake_try_job_query = FlakeTryJobData.query(
        FlakeTryJobData.created_time >= start_date,
        FlakeTryJobData.created_time < end_date)
    try_job_data_list = wf_try_job_query.fetch(
    ) + flake_try_job_query.fetch()

  # Sort try job data list by most recent first.
  try_job_data_list.sort(key=lambda x: x.created_time, reverse=True)

  try_jobs_in_progress = []
  try_jobs_with_error = []
  successfully_completed_try_jobs = []

  for try_job_data in try_job_data_list:
    display_data = _FormatDisplayData(try_job_data)

    if not try_job_data.end_time and not try_job_data.error:
      # Still in progress: show elapsed time since request (or creation).
      start_time = try_job_data.request_time or try_job_data.created_time
      now = time_util.GetUTCNow()
      display_data['elapsed_time'] = (_FormatDuration(
          start_time, now) if start_time else None)
      display_data['status'] = ('running' if try_job_data.start_time
                                else 'pending')
      try_jobs_in_progress.append(display_data)
    elif try_job_data.error:
      display_data['error'] = try_job_data.error['message']
      # It is possible end_time is not available if the error was timeout.
      display_data['execution_time'] = _FormatDuration(
          try_job_data.start_time, try_job_data.end_time)
      try_jobs_with_error.append(display_data)
    else:
      # Completed successfully. Only waterfall try jobs track culprits;
      # flake try jobs show 'N/A'.
      display_data['culprit_found'] = (bool(
          try_job_data.culprits) if isinstance(
              try_job_data, WfTryJobData) else 'N/A')
      display_data['execution_time'] = _FormatDuration(
          try_job_data.start_time, try_job_data.end_time)
      successfully_completed_try_jobs.append(display_data)

  data = {
      'start_date': time_util.FormatDatetime(start_date),
      'end_date': time_util.FormatDatetime(end_date),
      'category': category,
      'try_jobs_in_progress': try_jobs_in_progress,
      'try_jobs_with_error': try_jobs_with_error,
      'successfully_completed_try_jobs': successfully_completed_try_jobs
  }

  return {'template': 'try_job_dashboard.html', 'data': data}
def testIdentifyCulpritForTestTryJobSuccess(self):
  """End-to-end: a test try job yields per-test culprits plus a flake.

  rev1 broke a_test1 and rev2 broke a_test2; b_test1 failed at every
  revision including the good one (rev0) and is reported as a flake.
  """
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'

  test_result = {
      'report': {
          'result': {
              'rev0': {
                  'a_test': {'status': 'passed', 'valid': True,},
                  'b_test': {'status': 'failed', 'valid': True,
                             'failures': ['b_test1']}
              },
              'rev1': {
                  'a_test': {'status': 'failed', 'valid': True,
                             'failures': ['a_test1']},
                  'b_test': {'status': 'failed', 'valid': True,
                             'failures': ['b_test1']}
              },
              'rev2': {
                  'a_test': {'status': 'failed', 'valid': True,
                             'failures': ['a_test1', 'a_test2']},
                  'b_test': {'status': 'failed', 'valid': True,
                             'failures': ['b_test1']}
              }
          },
          'culprits': {
              'a_test': {'a_test1': 'rev1', 'a_test2': 'rev2'},
          },
          'flakes': {'b_test': ['b_test1']}
      },
      'url': 'url',
      'try_job_id': try_job_id
  }
  self._CreateEntities(master_name, builder_name, build_number, try_job_id,
                       try_job_status=analysis_status.RUNNING,
                       test_results=[test_result])
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  a_test1_suspected_cl = {
      'revision': 'rev1',
      'commit_position': 1,
      'url': 'url_1',
      'repo_name': 'chromium'
  }
  a_test2_suspected_cl = {
      'revision': 'rev2',
      'commit_position': 2,
      'url': 'url_2',
      'repo_name': 'chromium'
  }
  # The stored result is the input result plus a 'culprit' mapping for the
  # identified test failures (the flaky b_test gets no culprit).
  expected_test_result = {
      'report': {
          'result': {
              'rev0': {
                  'a_test': {'status': 'passed', 'valid': True,},
                  'b_test': {'status': 'failed', 'valid': True,
                             'failures': ['b_test1']}
              },
              'rev1': {
                  'a_test': {'status': 'failed', 'valid': True,
                             'failures': ['a_test1']},
                  'b_test': {'status': 'failed', 'valid': True,
                             'failures': ['b_test1']}
              },
              'rev2': {
                  'a_test': {'status': 'failed', 'valid': True,
                             'failures': ['a_test1', 'a_test2']},
                  'b_test': {'status': 'failed', 'valid': True,
                             'failures': ['b_test1']}
              }
          },
          'culprits': {
              'a_test': {'a_test1': 'rev1', 'a_test2': 'rev2'},
          },
          'flakes': {'b_test': ['b_test1']}
      },
      'url': 'url',
      'try_job_id': try_job_id,
      'culprit': {
          'a_test': {
              'tests': {
                  'a_test1': a_test1_suspected_cl,
                  'a_test2': a_test2_suspected_cl
              }
          }
      }
  }

  expected_culprits = {
      'rev1': a_test1_suspected_cl,
      'rev2': a_test2_suspected_cl
  }
  self.MockPipeline(RevertAndNotifyCulpritPipeline,
                    None,
                    expected_args=[master_name, builder_name, build_number,
                                   expected_culprits, [],
                                   failure_type.TEST])
  pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.TEST, '1',
      test_result)
  pipeline.start()
  self.execute_queued_tasks()

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(expected_test_result, try_job.test_results[-1])
  self.assertEqual(analysis_status.COMPLETED, try_job.status)

  try_job_data = WfTryJobData.Get(try_job_id)
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  expected_culprit_data = {
      'a_test': {
          'a_test1': 'rev1',
          'a_test2': 'rev2',
      }
  }
  # Suspected CLs on the analysis carry the failures each revision caused.
  expected_cls = [
      {
          'revision': 'rev1',
          'commit_position': 1,
          'url': 'url_1',
          'repo_name': 'chromium',
          'failures': {
              'a_test': ['a_test1'],
              'b_test': ['b_test1'],
          },
          'top_score': None
      },
      {
          'revision': 'rev2',
          'commit_position': 2,
          'url': 'url_2',
          'repo_name': 'chromium',
          'failures': {
              'a_test': ['a_test1', 'a_test2'],
              'b_test': ['b_test1'],
          },
          'top_score': None
      }
  ]
  self.assertEqual(expected_culprit_data, try_job_data.culprits)
  self.assertEqual(analysis.result_status, result_status.FOUND_UNTRIAGED)
  self.assertEqual(analysis.suspected_cls, expected_cls)
def testIdentifyCulpritForTestTryJobReturnRevisionIfNoCulpritInfo(self):
  """Falls back to revision + repo_name when commit info is unavailable."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'

  test_result = {
      'report': {
          'result': {
              'rev3': {
                  'a_test': {
                      'status': 'failed',
                      'valid': True,
                      'failures': ['a_test1']
                  }
              }
          },
          'culprits': {
              'a_test': {
                  'a_test1': 'rev3'
              }
          }
      },
      'url': 'url',
      'try_job_id': try_job_id
  }

  self._CreateEntities(master_name, builder_name, build_number, try_job_id,
                       try_job_status=analysis_status.RUNNING)
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  # 'rev3' has no commit info available, so only revision and repo_name
  # are expected in the suspected CL (no commit_position or url).
  expected_suspected_cl = {
      'revision': 'rev3',
      'repo_name': 'chromium'
  }
  expected_analysis_suspected_cls = [
      {
          'revision': 'rev3',
          'repo_name': 'chromium',
          'failures': {'a_test': ['a_test1']},
          'top_score': None
      }
  ]

  self.MockPipeline(RevertAndNotifyCulpritPipeline,
                    None,
                    expected_args=[master_name, builder_name, build_number,
                                   {'rev3': expected_suspected_cl}, [],
                                   failure_type.TEST])
  pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number, failure_type.TEST, '1',
      test_result)
  pipeline.start()
  self.execute_queued_tasks()

  try_job_data = WfTryJobData.Get(try_job_id)
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  expected_culprit_data = {
      'a_test': {
          'a_test1': 'rev3'
      }
  }
  self.assertEqual(expected_culprit_data, try_job_data.culprits)
  self.assertEqual(analysis.result_status, result_status.FOUND_UNTRIAGED)
  self.assertEqual(analysis.suspected_cls, expected_analysis_suspected_cls)
def testIdentifyCulpritForCompileTryJobSuccess(self):
  """A completed compile try job naming a culprit updates every entity.

  Feeds IdentifyTryJobCulpritPipeline a compile try-job result whose
  report blames 'rev2', then verifies the culprit is attached to the try
  job result, recorded on WfTryJobData, and surfaced on the WfAnalysis.
  """
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'

  # Compile try job report: rev1 passed, rev2 failed and is the culprit.
  compile_result = {
      'report': {
          'result': {
              'rev1': 'passed',
              'rev2': 'failed'
          },
          'culprit': 'rev2'
      },
      'try_job_id': try_job_id,
  }

  self._CreateEntities(
      master_name, builder_name, build_number, try_job_id,
      try_job_status=analysis_status.RUNNING,
      compile_results=[compile_result])
  analysis = WfAnalysis.Create(master_name, builder_name, build_number)
  analysis.put()

  culprit_revision = 'rev2'
  culprit_cl = {
      'revision': 'rev2',
      'commit_position': 2,
      'url': 'url_2',
      'repo_name': 'chromium'
  }
  # The stored compile result gains a 'culprit' entry for the compile step.
  expected_compile_result = {
      'report': {
          'result': {
              'rev1': 'passed',
              'rev2': 'failed'
          },
          'culprit': 'rev2'
      },
      'try_job_id': try_job_id,
      'culprit': {
          'compile': culprit_cl
      }
  }
  expected_suspected_cls = [{
      'revision': 'rev2',
      'commit_position': 2,
      'url': 'url_2',
      'repo_name': 'chromium',
      'failures': {'compile': []},
      'top_score': None
  }]

  # The notification pipeline must be kicked off with exactly this
  # revision -> suspected-CL map.
  self.MockPipeline(
      RevertAndNotifyCulpritPipeline, None,
      expected_args=[
          master_name, builder_name, build_number,
          {culprit_revision: culprit_cl}, [], failure_type.COMPILE
      ])

  pipeline = IdentifyTryJobCulpritPipeline(
      master_name, builder_name, build_number,
      failure_type.COMPILE, '1', compile_result)
  pipeline.start()
  self.execute_queued_tasks()

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(expected_compile_result, try_job.compile_results[-1])
  self.assertEqual(analysis_status.COMPLETED, try_job.status)

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertEqual({'compile': culprit_revision}, try_job_data.culprits)

  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
  self.assertEqual(expected_suspected_cls, analysis.suspected_cls)
def testGetTryJobsForCompileSuccessBackwardCompatibleCallback(
    self, mock_buildbucket, mock_report):
  """Completed compile try job is consumed via the keyword-args callback.

  Exercises the backward-compatible callback signature: after run(), the
  pipeline's callback is invoked with ``**last_params`` (expanded keyword
  arguments) rather than a single ``callback_params`` dict, and the
  pipeline must still produce the compile result and update the try job
  entities.
  """
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '1'
  regression_range_size = 2

  # Set up a running try job with a placeholder (report-less) result.
  try_job = WfTryJob.Create(master_name, builder_name, build_number)
  try_job_data = WfTryJobData.Create(try_job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.try_job_url = (
      'https://build.chromium.org/p/m/builders/b/builds/1234')
  try_job_data.put()
  try_job.compile_results = [{
      'report': None,
      'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
      'try_job_id': '1',
  }]
  try_job.status = analysis_status.RUNNING
  try_job.put()

  # Report the buildbucket build as COMPLETED so a single poll suffices.
  report = {
      'result': {
          'rev1': 'passed',
          'rev2': 'failed'
      },
      'metadata': {
          'regression_range_size': 2
      }
  }
  build_response = {
      'id': '1',
      'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
      'status': 'COMPLETED',
      # Timestamps are microseconds since epoch, as strings.
      'completed_ts': '1454367574000000',
      'created_ts': '1454367570000000',
      'updated_ts': '1454367574000000',
  }
  mock_buildbucket.GetTryJobs.return_value = [
      (None, buildbucket_client.BuildbucketBuild(build_response))
  ]
  mock_report.return_value = json.dumps(report)

  pipeline = MonitorTryJobPipeline(
      try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
  pipeline.start_test()
  pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
  # Legacy callback style: parameters expanded as keyword arguments.
  pipeline.callback(**pipeline.last_params)
  # Reload from ID to get all internal properties in sync.
  pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
  pipeline.finalized()
  compile_result = pipeline.outputs.default.value

  expected_compile_result = {
      'report': {
          'result': {
              'rev1': 'passed',
              'rev2': 'failed'
          },
          'metadata': {
              'regression_range_size': regression_range_size
          }
      },
      'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
      'try_job_id': '1',
  }
  self.assertEqual(expected_compile_result, compile_result)

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(expected_compile_result, try_job.compile_results[-1])
  # Monitoring only records the result; the try job stays RUNNING here.
  self.assertEqual(analysis_status.RUNNING, try_job.status)

  try_job_data = WfTryJobData.Get(try_job_id)
  self.assertEqual(try_job_data.regression_range_size, regression_range_size)
  self.assertIsInstance(try_job_data.start_time, datetime)
def testGetTryJobsForTestSuccess(self, mock_buildbucket, mock_report):
  """Test try job monitoring survives transient buildbucket errors.

  Buildbucket responses alternate between STARTED builds and
  BUILD_NOT_FOUND errors before finally reporting COMPLETED; the pipeline
  must keep polling through the errors and produce the final test result.
  """
  master_name = 'm'
  builder_name = 'b'
  build_number = 1
  try_job_id = '3'

  # Running try job with a placeholder (report-less) test result.
  try_job = WfTryJob.Create(master_name, builder_name, build_number)
  try_job.test_results = [{
      'report': None,
      'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
      'try_job_id': try_job_id,
  }]
  try_job.status = analysis_status.RUNNING
  try_job.put()
  try_job_data = WfTryJobData.Create(try_job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.try_job_url = (
      'https://build.chromium.org/p/m/builders/b/builds/1234')
  try_job_data.put()

  # Poll sequence: STARTED, error, STARTED, error, then COMPLETED.
  data = [{
      'build': {
          'id': '3',
          'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
          'status': 'STARTED'
      }
  }, {
      'error': {
          'reason': 'BUILD_NOT_FOUND',
          'message': 'message',
      }
  }, {
      'build': {
          'id': '3',
          'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
          'status': 'STARTED'
      }
  }, {
      'error': {
          'reason': 'BUILD_NOT_FOUND',
          'message': 'message',
      }
  }, {
      'build': {
          'id': '3',
          'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
          'status': 'COMPLETED',
      }
  }]
  report = {
      'result': {
          'rev1': {
              'a_test': {
                  'status': 'passed',
                  'valid': True
              }
          },
          'rev2': {
              'a_test': {
                  'status': 'failed',
                  'valid': True,
                  'failures': ['test1', 'test2']
              }
          }
      }
  }
  get_tryjobs_responses = [
      [(None, buildbucket_client.BuildbucketBuild(data[0]['build']))],
      [(buildbucket_client.BuildbucketError(data[1]['error']), None)],
      [(None, buildbucket_client.BuildbucketBuild(data[2]['build']))],
      [(buildbucket_client.BuildbucketError(data[3]['error']), None)],
      [(None, buildbucket_client.BuildbucketBuild(data[4]['build']))],
  ]
  mock_buildbucket.GetTryJobs.side_effect = get_tryjobs_responses
  mock_report.return_value = json.dumps(report)

  pipeline = MonitorTryJobPipeline()
  pipeline.start_test()
  # run() is invoked twice; presumably this exercises idempotent re-entry
  # of the pipeline — TODO confirm against MonitorTryJobPipeline.run().
  pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
  pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
  # Since run() calls callback() immediately, we use -1.
  for _ in range(len(get_tryjobs_responses) - 1):
    pipeline.callback(callback_params=pipeline.last_params)
  # Reload from ID to get all internal properties in sync.
  pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
  pipeline.finalized()
  test_result = pipeline.outputs.default.value

  expected_test_result = {
      'report': {
          'result': {
              'rev1': {
                  'a_test': {
                      'status': 'passed',
                      'valid': True
                  }
              },
              'rev2': {
                  'a_test': {
                      'status': 'failed',
                      'valid': True,
                      'failures': ['test1', 'test2']
                  }
              }
          }
      },
      'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
      'try_job_id': '3',
  }
  self.assertEqual(expected_test_result, test_result)

  try_job = WfTryJob.Get(master_name, builder_name, build_number)
  self.assertEqual(expected_test_result, try_job.test_results[-1])
  # Monitoring only records the result; the try job stays RUNNING here.
  self.assertEqual(analysis_status.RUNNING, try_job.status)