def _GetAllSwarmingTasks(self, failure_result_map):
  """Returns all swarming tasks related to one build.

  Args:
    failure_result_map: A dict mapping each step/test to the build key of
      the build where it failed the first time.
      {
          'step1': 'm/b/1',
          'step2': {
              'test1': 'm/b/1',
              'test2': 'm/b/2'
          }
      }

  Returns:
    A dict of swarming tasks like below:
    {
        'step1': {
            'm/b/1': WfSwarmingTask(
                key=Key('WfBuild', 'm/b/1', 'WfSwarmingTask', 'step1'),...)
        },
        ...
    }
  """
  if not failure_result_map:
    return {}

  swarming_tasks = defaultdict(dict)
  for step_name, step_map in failure_result_map.iteritems():
    if isinstance(step_map, basestring):
      # Step-level failure: the value is the build key itself.
      swarming_tasks[step_name][step_map] = WfSwarmingTask.Get(
          *BaseBuildModel.GetBuildInfoFromBuildKey(step_map),
          step_name=step_name)
    else:
      # Test-level failures: values are build keys, possibly repeated
      # across tests. Use a membership check (not truthiness of the
      # cached value) so that a lookup which returned None is cached
      # too, instead of triggering a redundant datastore fetch for
      # every test that shares the same build key.
      for task_key in step_map.values():
        if task_key not in swarming_tasks[step_name]:
          swarming_tasks[step_name][task_key] = WfSwarmingTask.Get(
              *BaseBuildModel.GetBuildInfoFromBuildKey(task_key),
              step_name=step_name)
  return swarming_tasks
def GetListOfTryJobBuilds(builds):
  """Returns the earliest try-job-analyzed build per (master, builder).

  Args:
    builds: A dict mapping build keys ('m/b/n') to build info dicts; only
      builds whose 'approaches' include TRY_JOB are considered.

  Returns:
    A list of (master_name, builder_name, build_number) tuples, one per
    builder, keeping the smallest build number seen.
  """
  earliest_builds = {}
  for build_key, build in builds.iteritems():
    approaches = build.get('approaches', [])
    if analysis_approach_type.TRY_JOB not in approaches:
      continue

    master_name, builder_name, build_number = (
        BaseBuildModel.GetBuildInfoFromBuildKey(build_key))
    builder_id = (master_name, builder_name)

    # NOTE(review): build_number comes straight from the build key and may
    # be a string, so '>' compares lexicographically ('10' < '9') —
    # confirm build numbers are normalized upstream.
    recorded = earliest_builds.get(builder_id)
    if recorded is None or recorded[0] > build_number:
      earliest_builds[builder_id] = (build_number,)

  return [
      builder_id + build_tuple
      for builder_id, build_tuple in earliest_builds.iteritems()
  ]
def _PrepareTryJobDataForCompileFailure(analysis):
  """Collects display data about the compile try job of an analysis.

  Returns:
    A dict with status/url/completed/failed/culprit of the latest compile
    try job result, or an empty dict when no compile failure or try job
    is recorded.
  """
  try_job_data = {}

  failure_map = analysis.failure_result_map
  if not (failure_map and  # pragma: no branch.
          constants.COMPILE_STEP_NAME in failure_map):
    return try_job_data  # pragma: no cover.

  build_keys = BaseBuildModel.GetBuildInfoFromBuildKey(
      failure_map[constants.COMPILE_STEP_NAME])
  try_job = WfTryJob.Get(*build_keys)
  if not try_job or not try_job.compile_results:
    return try_job_data  # pragma: no cover.

  # Only the most recent compile result is surfaced.
  latest_result = try_job.compile_results[-1]
  status_text = analysis_status.STATUS_TO_DESCRIPTION.get(
      try_job.status, 'unknown')

  try_job_data['status'] = status_text.lower()
  try_job_data['url'] = latest_result.get('url')
  try_job_data['completed'] = try_job.completed
  try_job_data['failed'] = try_job.failed
  try_job_data['culprit'] = latest_result.get('culprit', {}).get(
      constants.COMPILE_STEP_NAME)
  return try_job_data
def _GetAdditionalInformationForCL(self, repo_name, revision, confidences,
                                   build, reference_build_key):
  """Gets additional information for a cl.

  Currently additional information contains:
      confidence of the result;
      approaches that found this cl: HEURISTIC, TRY_JOB or both;
      revert_cl_url if the cl has been reverted by Findit;
      if the revert has been committed.
  """
  additional_info = {}

  suspected_cl = WfSuspectedCL.Get(repo_name, revision)
  if not suspected_cl:
    return additional_info

  master_name = buildbot.GetMasterNameFromUrl(build.master_url)
  build_info = suspected_cl.GetBuildInfo(
      master_name, build.builder_name, build.build_number)

  # If the CL is found by a try job, only the first failure will be recorded.
  # So we might need to go to the first failure to get CL information.
  first_build_info = None
  if reference_build_key:
    first_build_info = suspected_cl.GetBuildInfo(
        *BaseBuildModel.GetBuildInfoFromBuildKey(reference_build_key))

  additional_info['confidence'], additional_info['cl_approach'] = (
      suspected_cl_util.GetSuspectedCLConfidenceScoreAndApproach(
          confidences, build_info, first_build_info))

  # Gets the revert_cl_url for the CL if there is one.
  # NOTE(review): revert_committed is only reported alongside an existing
  # revert CL — confirm against the original indentation.
  if suspected_cl.revert_cl_url:
    additional_info['revert_cl_url'] = suspected_cl.revert_cl_url
    additional_info['revert_committed'] = (
        suspected_cl.revert_submission_status == analysis_status.COMPLETED)

  return additional_info
def _GetTryJobResultForCompile(failure_result_map):
  """Builds culprit info for the compile try job of a failed build.

  Returns:
    A nested defaultdict; on success culprit_info['compile']['try_jobs']
    holds one entry with the try job key, status, and (when available)
    url, build number and culprit.
  """
  try_job_key = failure_result_map['compile']
  build_keys = BaseBuildModel.GetBuildInfoFromBuildKey(try_job_key)
  culprit_info = defaultdict(lambda: defaultdict(list))

  try_job = WfTryJob.Get(*build_keys)
  # Skip when there is no try job, or when the entity carries test results
  # (i.e. it is a test try job, not a compile one).
  if not try_job or try_job.test_results:
    return culprit_info

  latest_result = None
  if try_job.compile_results:
    latest_result = try_job.compile_results[-1]

  compile_try_job = {'try_job_key': try_job_key, 'status': try_job.status}
  if latest_result:
    url = latest_result.get('url')
    if url:
      compile_try_job['try_job_url'] = url
      compile_try_job['try_job_build_number'] = _GetTryJobBuildNumber(
          latest_result)
    culprit = latest_result.get('culprit', {}).get('compile')
    if culprit:
      compile_try_job['culprit'] = culprit

  culprit_info['compile']['try_jobs'].append(compile_try_job)
  return culprit_info
def _GenerateSwarmingTasksData(failure_result_map):
  """Collects info for all related swarming tasks.

  Args:
    failure_result_map: A dict mapping each failed step either to a build
      key string (non-swarming step failure) or to a dict of
      test name -> build key (swarming test failures).

  Returns:
    A dict as below:
    {
        'step1': {
            'swarming_tasks': {
                'm/b/121': {
                    'task_info': {
                        'status': 'Completed',
                        'task_id': 'task1',
                        'task_url': ('https://chromium-swarm.appspot.com/user'
                                     '/task/task1')
                    },
                    'all_tests': ['test2', 'test3', 'test4'],
                    'reliable_tests': ['test2'],
                    'flaky_tests': ['test3', 'test4']
                }
            }
        },
        'step2': {
            'swarming_tasks': {
                'm/b/121': {
                    'task_info': {
                        'status': 'Pending'
                    },
                    'all_tests': ['test1']
                }
            }
        },
        'step3': {
            'swarming_tasks': {
                'm/b/121': {
                    'task_info': {
                        'status': 'No swarming rerun found'
                    },
                    'all_tests': ['test1']
                }
            }
        }
    }
  """
  tasks_info = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
  swarming_server = waterfall_config.GetSwarmingSettings()['server_host']

  for step_name, failure in failure_result_map.iteritems():
    step_tasks_info = tasks_info[step_name]['swarming_tasks']

    if isinstance(failure, dict):
      # Only swarming test failures have swarming re-runs.
      # Multiple tests may share one rerun task, so dedupe the build keys.
      swarming_task_keys = set(failure.values())
      for key in swarming_task_keys:
        task_dict = step_tasks_info[key]
        referred_build_keys = BaseBuildModel.GetBuildInfoFromBuildKey(key)
        task = WfSwarmingTask.Get(*referred_build_keys, step_name=step_name)
        all_tests = _GetAllTestsForASwarmingTask(key, failure)
        task_dict['all_tests'] = all_tests
        if not task:
          # In case task got manually removed from data store.
          task_info = {'status': result_status.NO_SWARMING_TASK_FOUND}
        else:
          task_info = {'status': task.status}

          # Get the step name without platform.
          # This value should have been saved in task.parameters;
          # in case of no such value saved, split the step_name.
          task_dict['ref_name'] = (
              step_name.split()[0]
              if not task.parameters or not task.parameters.get('ref_name')
              else task.parameters['ref_name'])

          if task.task_id:
            # Swarming rerun has started.
            task_info['task_id'] = task.task_id
            task_info['task_url'] = 'https://%s/user/task/%s' % (
                swarming_server, task.task_id)

          if task.classified_tests:
            # Swarming rerun has completed.
            # Use its result to get reliable and flaky tests.
            # If task has not completed, there will be no try job yet,
            # the result will be grouped in unclassified failures temporarily.
            # Only keep tests that actually belong to this failure map entry.
            reliable_tests = task.classified_tests.get('reliable_tests', [])
            task_dict['reliable_tests'] = [
                test for test in reliable_tests if test in all_tests
            ]
            flaky_tests = task.classified_tests.get('flaky_tests', [])
            task_dict['flaky_tests'] = [
                test for test in flaky_tests if test in all_tests
            ]

        task_dict['task_info'] = task_info
    else:
      # Non-swarming step failure: no rerun task exists for it.
      step_tasks_info[failure] = {
          'task_info': {
              'status': result_status.NON_SWARMING_NO_RERUN
          }
      }
  return tasks_info
def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info):
  """Merges test try job results for try_job_key into culprits_info in place.

  For each step entry in culprits_info whose try job matches try_job_key and
  has no status yet, records the try job status/url and, when the try job
  found culprits, splits the step's tests into one entry per culprit.
  """
  referred_build_keys = BaseBuildModel.GetBuildInfoFromBuildKey(try_job_key)
  try_job = WfTryJob.Get(*referred_build_keys)
  # A try job with compile results is a compile try job — not relevant here.
  if not try_job or try_job.compile_results:
    return

  try_job_result = try_job.test_results[-1] if try_job.test_results else None

  for step_try_jobs in culprits_info.values():
    # If try job found different culprits for each test, split tests by
    # culprit.
    additional_tests_culprit_info = []
    for try_job_info in step_try_jobs['try_jobs']:
      if (try_job_key != try_job_info['try_job_key'] or
          try_job_info.get('status')):
        # Conditions that try_job_info has status are:
        # If there is no swarming task, there won't be try job;
        # If the swarming task is not completed yet, there won't be try job
        # yet;
        # If there are flaky tests found, those tests will be marked as flaky,
        # and no try job for them will be triggered.
        continue

      try_job_info['status'] = try_job.status

      if try_job_result:
        # Needs to use ref_name to match step_name in try job.
        ref_name = try_job_info['ref_name']

        # Saves try job information.
        if try_job_result.get('url'):  # pragma: no cover
          try_job_info['try_job_url'] = try_job_result['url']
          try_job_info['try_job_build_number'] = (
              _GetTryJobBuildNumber(try_job_result))
        if (try_job_result.get('culprit') and
            try_job_result['culprit'].get(ref_name)):
          # Saves try job culprits information.
          # Uses culprits to group tests.
          culprit_tests_map = _OrganizeTryJobResultByCulprits(
              try_job_result['culprit'][ref_name])

          ungrouped_tests = try_job_info.get('tests', [])
          list_of_culprits = []
          for culprit_info in culprit_tests_map.values():
            failed_tests = culprit_info['failed_tests']
            list_of_culprits.append(culprit_info)
            # Gets tests that haven't been grouped.
            # NOTE(review): '^' is symmetric difference — a failed test that
            # is NOT in ungrouped_tests would be added back in; presumably
            # failed_tests is always a subset of ungrouped_tests, in which
            # case this equals set subtraction. TODO: confirm.
            ungrouped_tests = list(set(ungrouped_tests) ^ set(failed_tests))
            if not ungrouped_tests:
              # All tests have been grouped.
              break

          # index_start decides which culprit groups get *new* entries in
          # additional_tests_culprit_info: when ungrouped tests remain they
          # occupy the current try_job_info, so every culprit (index 0 on)
          # is appended; otherwise culprit 0 reuses the current entry.
          index_start = 1
          if ungrouped_tests:
            # There are tests don't have try job culprits.
            # Group these tests together.
            # Save them in current try_job_info.
            try_job_info['tests'] = ungrouped_tests
            try_job_info['culprit'] = {}
            # Saves all the tests that have culprits later.
            index_start = 0
          else:
            # Saves the first culprit in current try_job_info.
            # Saves all the other culprits later.
            try_job_info['culprit'] = {
                'revision': list_of_culprits[0]['revision'],
                'commit_position': list_of_culprits[0]['commit_position'],
                'review_url': list_of_culprits[0].get(
                    'url', list_of_culprits[0].get('review_url', None))
            }
            try_job_info['tests'] = list_of_culprits[0]['failed_tests']

          for n in xrange(index_start, len(list_of_culprits)):
            # Appends the rest of test groups to step_try_jobs['try_jobs'].
            iterate_culprit = list_of_culprits[n]
            # Deep copy so each culprit group carries the shared try job
            # fields without aliasing the entry mutated above.
            tmp_try_job_info = copy.deepcopy(try_job_info)
            tmp_try_job_info['culprit'] = {
                'revision': iterate_culprit['revision'],
                'commit_position': iterate_culprit['commit_position'],
                'review_url': iterate_culprit.get(
                    'url', iterate_culprit.get('review_url', None))
            }
            tmp_try_job_info['tests'] = iterate_culprit['failed_tests']
            additional_tests_culprit_info.append(tmp_try_job_info)

    if additional_tests_culprit_info:
      step_try_jobs['try_jobs'].extend(additional_tests_culprit_info)
test_results = [] compile_results = [] while more: analyses, cursor, more = WfAnalysis.query( ndb.AND(WfAnalysis.build_start_time >= start, WfAnalysis.build_start_time < end)).fetch_page( 100, start_cursor=cursor) for analysis in analyses: if not analysis.completed or not analysis.result: continue build_key = analysis.key.pairs()[0][1] master, builder_name, build_number = ( BaseBuildModel.GetBuildInfoFromBuildKey(build_key)) build_number = int(build_number) try_job = WfTryJob.Get(master, builder_name, build_number) for failure in analysis.result.get('failures', {}): result = None step_name = failure['step_name'] culprits = _GetTestTryJobCulprit(try_job, step_name) if failure.get('tests'): for test in failure['tests']: if test['first_failure'] != build_number: # Not first time failure. continue