def testToFromDict(self):
  """Round-trips a FailureSignal with files, tests and keywords."""
  original = {
      'files': {
          'a.cc': [2],
          'd.cc': []
      },
      'tests': ['suite.name'],
      'keywords': {
          'k1': 3
      }
  }
  # Deserializing and re-serializing must preserve every field.
  self.assertEqual(original, FailureSignal.FromDict(original).ToDict())
def testToFromDict(self):
  """Round-trips a FailureSignal that carries failed_output_nodes."""
  serialized = {
      'files': {
          'a.cc': [2],
          'd.cc': []
      },
      'keywords': {
          'k1': 3
      },
      'failed_output_nodes': ['obj/path/to/file.o'],
  }
  round_tripped = FailureSignal.FromDict(serialized).ToDict()
  self.assertEqual(serialized, round_tripped)
def testToFromDictWithFailedTargets(self):
  """Round-trip must preserve the failed_targets field."""
  expected = {
      'files': {
          'a.cc': [2],
          'd.cc': []
      },
      'keywords': {
          'k1': 3
      },
      'failed_targets': [{
          'target': 'a.o',
          'source': 'b/a.cc'
      }],
  }
  restored = FailureSignal.FromDict(expected)
  self.assertEqual(expected, restored.ToDict())
def testToFromDict(self):
  """Round-trip keeps failed_output_nodes and failed_edges intact."""
  payload = {
      'files': {
          'a.cc': [2],
          'd.cc': []
      },
      'keywords': {
          'k1': 3
      },
      'failed_output_nodes': ['obj/path/to/file.o'],
      'failed_edges': [{
          'rule': 'CXX',
          'output_nodes': ['a.o', 'aa.o'],
          'dependencies': ['a.h', 'a.c']
      }],
  }
  self.assertEqual(payload, FailureSignal.FromDict(payload).ToDict())
def testCheckFilesAgainstDEPSRollWithUnrelatedLinesChanged(self):
  """A DEPS roll of a dep whose file is in the failure signal scores 1."""
  signal_dict = {
      'files': {
          'src/third_party/dep1/f.cc': [123],
      }
  }
  change_log_dict = {
      'revision': '12',
      'touched_files': [
          {
              'change_type': ChangeType.MODIFY,
              'old_path': 'DEPS',
              'new_path': 'DEPS'
          },
      ]
  }
  deps_info = {
      'deps_rolls': {
          '12': [
              {
                  'path': 'src/third_party/dep1',
                  'repo_url': 'https://url_dep1',
                  'old_revision': '7',
                  'new_revision': '9',
              },
          ]
      }
  }

  # Stub out all gitiles access so the check runs on canned data only.
  self.mock(CachedGitilesRepository, 'GetChangeLog', self._MockGetChangeLog)
  self.mock(CachedGitilesRepository, 'GetChangeLogs', self._MockGetChangeLogs)
  self.mock(CachedGitilesRepository, 'GetBlame', self._MockGetBlame)

  justification = build_failure_analysis.CheckFiles(
      FailureSignal.FromDict(signal_dict),
      ChangeLogFromDict(change_log_dict), deps_info)
  self.assertIsNotNone(justification)
  # The score is 1 because:
  # +1 rolled third_party/dep1/ and src/third_party/dep1/f.cc was in log.
  self.assertEqual(1, justification['score'])
def testCheckFilesAgainstUnrelatedCL(self):
  """A CL that touches only unrelated files yields no justification."""
  signal = FailureSignal.FromDict({
      'files': {
          'src/a/b/f.cc': [],
      }
  })
  unrelated_change = {
      'revision': 'rev',
      'touched_files': [
          {
              'change_type': ChangeType.ADD,
              'old_path': '/dev/null',
              'new_path': 'a/d/f1.cc'
          },
      ]
  }

  # No DEPS info involved, so an empty dict suffices.
  justification = build_failure_analysis._CheckFiles(
      signal, unrelated_change, {})
  self.assertIsNone(justification)
def testCheckFilesAgainstSuspectedCL(self):
  """Every kind of file touch contributes its expected per-file score."""
  signal_dict = {
      'files': {
          'src/a/b/f1.cc': [],
          'd/e/a2_test.cc': [],
          'b/c/f2.cc': [10, 20],
          'd/e/f3.h': [],
          'x/y/f4.py': [],
          'f5_impl.cc': []
      }
  }
  change_log_dict = {
      'revision': '12',
      'touched_files': [
          {
              'change_type': ChangeType.ADD,
              'old_path': '/dev/null',
              'new_path': 'a/b/f1.cc'
          },
          {
              'change_type': ChangeType.ADD,
              'old_path': '/dev/null',
              'new_path': 'd/e/a2.cc'
          },
          {
              'change_type': ChangeType.MODIFY,
              'old_path': 'a/b/c/f2.h',
              'new_path': 'a/b/c/f2.h'
          },
          {
              'change_type': ChangeType.MODIFY,
              'old_path': 'd/e/f3.h',
              'new_path': 'd/e/f3.h'
          },
          {
              'change_type': ChangeType.DELETE,
              'old_path': 'x/y/f4.py',
              'new_path': '/dev/null'
          },
          {
              'change_type': ChangeType.DELETE,
              'old_path': 'h/f5.h',
              'new_path': '/dev/null'
          },
          {
              'change_type': ChangeType.RENAME,
              'old_path': 't/y/x.cc',
              'new_path': 's/z/x.cc'
          },
      ]
  }

  justification = build_failure_analysis.CheckFiles(
      FailureSignal.FromDict(signal_dict),
      ChangeLogFromDict(change_log_dict), {})
  self.assertIsNotNone(justification)
  # The score is 15 because:
  # +5 added a/b/f1.cc (same file src/a/b/f1.cc in failure_signal log)
  # +1 added d/e/a2.cc (related file a2_test.cc in failure_signal log)
  # +1 modified b/c/f2.h (related file a/b/c/f2.cc in failure_signal log)
  # +2 modified d/e/f3.h (same file d/e/f3.h in failure_signal log)
  # +5 deleted x/y/f4.py (same file x/y/f4.py in failure_signal log)
  # +1 deleted h/f5.h (related file f5_impl.cc in failure_signal log)
  # +0 renamed t/y/x.cc -> s/z/x.cc (no related file in failure_signal log)
  self.assertEqual(15, justification['score'])
def AnalyzeTestFailure(failure_info, change_logs, deps_info, failure_signals):
  """Analyzes given failure signals, and figure out culprits of test failure.

  Args:
    failure_info (TestFailureInfo): Output of pipeline
      DetectFirstFailurePipeline.
    change_logs (dict): Output of pipeline PullChangelogPipeline.
    deps_info (dict): Output of pipeline ExtractDEPSInfoPipeline.
    failure_signals (dict): Output of pipeline ExtractSignalPipeline.

  Returns:
    A dict with the following form:
    {
      'failures': [
        {
          'step_name': 'compile',
          'supported': True
          'first_failure': 230,
          'last_pass': 229,
          'suspected_cls': [
            {
              'build_number': 230,
              'repo_name': 'chromium',
              'revision': 'a_git_hash',
              'commit_position': 56789,
              'score': 11,
              'hints': {
                'add a/b/x.cc': 5,
                'delete a/b/y.cc': 5,
                'modify e/f/z.cc': 1,
                ...
              }
            },
            ...
          ],
        },
        ...
      ]
    }

    And a list of suspected_cls format as below:
    [
        {
            'repo_name': 'chromium',
            'revision': 'r98_1',
            'commit_position': None,
            'url': None,
            'failures': {
                'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
            },
            'top_score': 4
        },
        ...
    ]
  """
  analysis_result = {'failures': []}

  # Without signals there is nothing to match CLs against.
  if not failure_signals:
    logging.debug('No failure signals when analyzing a test failure.')
    return analysis_result, []

  failed_steps = failure_info.failed_steps
  builds = failure_info.builds

  # Accumulates per-CL failure info across all steps/tests below.
  # NOTE: Python 2 code (iteritems).
  cl_failure_map = defaultdict(build_failure_analysis.CLInfo)

  for step_name, step_failure_info in failed_steps.iteritems():
    # A step with per-test breakdown gets additional test-level analysis.
    is_test_level = step_failure_info.tests is not None

    failed_build_number = step_failure_info.current_failure
    start_build_number = (
        build_failure_analysis.GetLowerBoundForAnalysis(step_failure_info))
    step_analysis_result = (
        build_failure_analysis.InitializeStepLevelResult(
            step_name, step_failure_info))

    if is_test_level:
      # Seed an empty result entry per failed test; filled in below.
      step_analysis_result['tests'] = []
      tests = step_failure_info.tests or {}
      for test_name, test_failure in tests.iteritems():
        test_analysis_result = {
            'test_name': test_name,
            'first_failure': test_failure.first_failure,
            'last_pass': test_failure.last_pass,
            'suspected_cls': [],
        }
        step_analysis_result['tests'].append(test_analysis_result)

    if step_analysis_result['supported']:
      step_failure_signal = FailureSignal.FromDict(failure_signals[step_name])
      for build_number, build in builds.iteritems():
        # Only builds in [start_build_number, failed_build_number] matter.
        if (build_number > failed_build_number or
            build_number < start_build_number):
          continue

        for revision in build.blame_list:
          # TODO(crbug/842980): Deprecate blame_list in builds.
          # Skip revisions with no change log available.
          if not change_logs.get(revision):
            continue

          if is_test_level:
            # Checks files at test level.
            for test_analysis_result in step_analysis_result['tests']:
              test_name = test_analysis_result['test_name']
              # Missing per-test signal falls back to an empty signal.
              test_signal = FailureSignal.FromDict(
                  failure_signals[step_name]['tests'].get(test_name) or {})

              _AnalyzeTestFailureOnOneBuild(build_number, step_name, test_name,
                                            test_signal, change_logs[revision],
                                            deps_info, test_analysis_result,
                                            cl_failure_map)

          # Checks Files on step level using step level signals
          # regardless of test level signals so we can make sure
          # no duplicate justifications added to the step result.
          _AnalyzeTestFailureOnOneBuild(
              build_number,
              step_name,
              None,
              step_failure_signal,
              change_logs[revision],
              deps_info,
              step_analysis_result,
              cl_failure_map,
              has_lower_level_info=is_test_level)

    # TODO(stgao): sort CLs by score.
    analysis_result['failures'].append(step_analysis_result)

  suspected_cls = build_failure_analysis.ConvertCLFailureMapToList(
      cl_failure_map)

  return analysis_result, suspected_cls
def AnalyzeCompileFailure(failure_info, change_logs, deps_info,
                          failure_signals):
  """Analyzes given failure signals, and figure out culprits of compile failure.

  Args:
    failure_info (CompileFailureInfo): Output of pipeline
      DetectFirstFailurePipeline.
    change_logs (dict): Output of pipeline PullChangelogPipeline.
    deps_info (dict): Output of pipeline ExtractDEPSInfoPipeline.
    failure_signals (dict): Output of pipeline ExtractSignalPipeline.

  Returns:
    A dict with the following form:
    {
      'failures': [
        {
          'step_name': 'compile',
          'supported': True
          'first_failure': 230,
          'last_pass': 229,
          'suspected_cls': [
            {
              'build_number': 230,
              'repo_name': 'chromium',
              'revision': 'a_git_hash',
              'commit_position': 56789,
              'score': 11,
              'hints': {
                'add a/b/x.cc': 5,
                'delete a/b/y.cc': 5,
                'modify e/f/z.cc': 1,
                ...
              }
            },
            ...
          ],
        },
        ...
      ]
    }

    And a list of suspected_cls format as below:
    [
        {
            'repo_name': 'chromium',
            'revision': 'r98_1',
            'commit_position': None,
            'url': None,
            'failures': {
                'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
            },
            'top_score': 4
        },
        ...
    ]
  """
  analysis_result = {'failures': []}
  cl_failure_map = defaultdict(build_failure_analysis.CLInfo)
  step_name = constants.COMPILE_STEP_NAME

  # Bail out early when there is nothing to analyze.
  if not failure_signals:
    logging.debug('No failure signals when analyzing a compile failure.')
    return analysis_result, []

  if step_name not in failure_info.failed_steps:
    logging.debug('No failed compile step when analyzing a compile failure.')
    return analysis_result, []

  builds = failure_info.builds
  compile_failure_info = failure_info.failed_steps[step_name]

  failed_build_number = compile_failure_info.current_failure
  start_build_number = build_failure_analysis.GetLowerBoundForAnalysis(
      compile_failure_info)
  step_analysis_result = build_failure_analysis.InitializeStepLevelResult(
      step_name, compile_failure_info)

  if not step_analysis_result['supported']:
    return analysis_result, []

  failure_signal = FailureSignal.FromDict(failure_signals[step_name])
  # First pass: standard heuristic over the compile failure signal.
  _Analyze(start_build_number, failed_build_number, builds, step_name,
           failure_signal, change_logs, deps_info, step_analysis_result,
           cl_failure_map)

  if waterfall_config.GetDownloadBuildDataSettings().get(
      'use_ninja_output_log'):
    # Second pass: re-run the analysis using ninja output dependencies.
    step_analysis_result['new_compile_suspected_cls'] = []
    _Analyze(
        start_build_number,
        failed_build_number,
        builds,
        step_name,
        failure_signal,
        change_logs,
        deps_info,
        step_analysis_result,
        cl_failure_map,
        use_ninja_output=True)

    # Only fall back to the ninja-based suspects when the standard
    # heuristic found nothing.
    if (not step_analysis_result['suspected_cls'] and
        step_analysis_result.get('new_compile_suspected_cls')):
      step_analysis_result['use_ninja_dependencies'] = True
      step_analysis_result['suspected_cls'] = step_analysis_result[
          'new_compile_suspected_cls']
      for new_suspected_cl_dict in step_analysis_result['suspected_cls']:
        # Top score for new heuristic is always 2.
        build_failure_analysis.SaveFailureToMap(
            cl_failure_map, new_suspected_cl_dict, step_name, None, 2)

  # TODO(stgao): sort CLs by score.
  analysis_result['failures'].append(step_analysis_result)

  suspected_cls = build_failure_analysis.ConvertCLFailureMapToList(
      cl_failure_map)

  return analysis_result, suspected_cls
def AnalyzeBuildFailure(
    failure_info, change_logs, deps_info, failure_signals):
  """Analyze the given failure signals, and figure out culprit CLs.

  Args:
    failure_info (dict): Output of pipeline DetectFirstFailurePipeline.
    change_logs (dict): Output of pipeline PullChangelogPipeline.
    deps_info (dict): Output of pipeline ExtractDEPSInfoPipeline.
    failure_signals (dict): Output of pipeline ExtractSignalPipeline.

  Returns:
    A dict with the following form:
    {
      'failures': [
        {
          'step_name': 'compile',
          'first_failure': 230,
          'last_pass': 229,
          'suspected_cls': [
            {
              'build_number': 230,
              'repo_name': 'chromium',
              'revision': 'a_git_hash',
              'commit_position': 56789,
              'score': 11,
              'hints': {
                'add a/b/x.cc': 5,
                'delete a/b/y.cc': 5,
                'modify e/f/z.cc': 1,
                ...
              }
            },
            ...
          ],
        },
        ...
      ]
    }
  """
  analysis_result = {
      'failures': []
  }

  if not failure_info['failed'] or not failure_info['chromium_revision']:
    # Bail out if no failed step or no chromium revision.
    return analysis_result

  def CreateCLInfoDict(justification_dict, build_number, change_log):
    # Builds the per-CL suspect record appended to 'suspected_cls'.
    # TODO(stgao): remove hard-coded 'chromium' when DEPS file parsing is
    # supported.
    cl_info = {
        'build_number': build_number,
        'repo_name': 'chromium',
        'revision': change_log['revision'],
        'commit_position': change_log.get('commit_position'),
        'url':
            change_log.get('code_review_url') or change_log.get('commit_url'),
    }

    cl_info.update(justification_dict)
    return cl_info

  failed_steps = failure_info['failed_steps']
  builds = failure_info['builds']
  # NOTE: Python 2 code (iteritems).
  for step_name, step_failure_info in failed_steps.iteritems():
    failure_signal = FailureSignal.FromDict(failure_signals[step_name])
    failed_build_number = step_failure_info['current_failure']

    # Start from the build right after the last pass when known; otherwise
    # from the first observed failure.
    if step_failure_info.get('last_pass') is not None:
      build_number = step_failure_info.get('last_pass') + 1
    else:
      build_number = step_failure_info['first_failure']

    step_analysis_result = {
        'step_name': step_name,
        'first_failure': step_failure_info['first_failure'],
        'last_pass': step_failure_info.get('last_pass'),
        'suspected_cls': [],
    }

    while build_number <= failed_build_number:
      # Check every blamed revision of each build in the failure range.
      for revision in builds[str(build_number)]['blame_list']:
        justification_dict = _CheckFiles(
            failure_signal, change_logs[revision], deps_info)

        if not justification_dict:
          continue

        step_analysis_result['suspected_cls'].append(
            CreateCLInfoDict(justification_dict, build_number,
                             change_logs[revision]))

      build_number += 1

    # TODO(stgao): sort CLs by score.
    analysis_result['failures'].append(step_analysis_result)

  return analysis_result
def AnalyzeBuildFailure(failure_info, change_logs, deps_info, failure_signals):
  """Analyzes the given failure signals, and figure out culprit CLs.

  Args:
    failure_info (dict): Output of pipeline DetectFirstFailurePipeline.
    change_logs (dict): Output of pipeline PullChangelogPipeline.
    deps_info (dict): Output of pipeline ExtractDEPSInfoPipeline.
    failure_signals (dict): Output of pipeline ExtractSignalPipeline.

  Returns:
    A dict with the following form:
    {
      'failures': [
        {
          'step_name': 'compile',
          'supported': True
          'first_failure': 230,
          'last_pass': 229,
          'suspected_cls': [
            {
              'build_number': 230,
              'repo_name': 'chromium',
              'revision': 'a_git_hash',
              'commit_position': 56789,
              'score': 11,
              'hints': {
                'add a/b/x.cc': 5,
                'delete a/b/y.cc': 5,
                'modify e/f/z.cc': 1,
                ...
              }
            },
            ...
          ],
        },
        ...
      ]
    }

    And a list of suspected_cls format as below:
    [
        {
            'repo_name': 'chromium',
            'revision': 'r98_1',
            'commit_position': None,
            'url': None,
            'failures': {
                'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
            },
            'top_score': 4
        },
        ...
    ]
  """
  analysis_result = {'failures': []}

  if not failure_info['failed'] or not failure_info['chromium_revision']:
    # Bail out if no failed step or no chromium revision.
    return analysis_result, []

  # Bail out on infra failure
  if failure_info.get('failure_type') == failure_type.INFRA:
    return analysis_result, []

  def CreateCLInfoDict(justification_dict, build_number, change_log):
    # Builds the per-CL suspect record appended to 'suspected_cls'.
    # TODO(stgao): remove hard-coded 'chromium' when DEPS file parsing is
    # supported.
    cl_info = {
        'build_number': build_number,
        'repo_name': 'chromium',
        'revision': change_log['revision'],
        'commit_position': change_log.get('commit_position'),
        'url':
            change_log.get('code_review_url') or change_log.get('commit_url'),
    }

    cl_info.update(justification_dict)
    return cl_info

  failed_steps = failure_info['failed_steps']
  builds = failure_info['builds']
  master_name = failure_info['master_name']

  # Accumulates per-CL failure info across all steps/tests below.
  cl_failure_map = defaultdict(_CLInfo)

  # NOTE: Python 2 code (iteritems).
  for step_name, step_failure_info in failed_steps.iteritems():
    # A step with per-test breakdown gets additional test-level analysis.
    is_test_level = step_failure_info.get('tests') is not None

    failed_build_number = step_failure_info['current_failure']
    # Start from the build right after the last pass when known; otherwise
    # from the first observed failure.
    if step_failure_info.get('last_pass') is not None:
      start_build_number = step_failure_info.get('last_pass') + 1
    else:
      start_build_number = step_failure_info['first_failure']

    step_analysis_result = {
        'step_name': step_name,
        'first_failure': step_failure_info['first_failure'],
        'last_pass': step_failure_info.get('last_pass'),
        'suspected_cls': [],
        'supported':
            waterfall_config.StepIsSupportedForMaster(step_name, master_name)
    }

    if is_test_level:
      # Seed an empty result entry per failed test; filled in below.
      step_analysis_result['tests'] = []
      for test_name, test_failure in step_failure_info['tests'].iteritems():
        test_analysis_result = {
            'test_name': test_name,
            'first_failure': test_failure['first_failure'],
            'last_pass': test_failure.get('last_pass'),
            'suspected_cls': [],
        }
        step_analysis_result['tests'].append(test_analysis_result)

    if step_analysis_result['supported']:
      for build_number in range(start_build_number, failed_build_number + 1):
        for revision in builds[str(build_number)]['blame_list']:
          if is_test_level:
            # Checks files at test level.
            for test_analysis_result in step_analysis_result['tests']:
              test_name = test_analysis_result['test_name']
              # Missing per-test signal falls back to an empty signal.
              test_signal = FailureSignal.FromDict(
                  failure_signals[step_name]['tests'].get(test_name, {}))

              justification_dict = _CheckFiles(test_signal,
                                               change_logs[revision],
                                               deps_info)

              if not justification_dict:
                continue

              new_suspected_cl_dict = CreateCLInfoDict(
                  justification_dict, build_number, change_logs[revision])
              test_analysis_result['suspected_cls'].append(
                  new_suspected_cl_dict)
              _SaveFailureToMap(cl_failure_map, new_suspected_cl_dict,
                                step_name, test_name,
                                max(justification_dict['hints'].values()))

          # Checks Files on step level using step level signals
          # regardless of test level signals so we can make sure
          # no duplicate justifications added to the step result.
          failure_signal = FailureSignal.FromDict(failure_signals[step_name])
          justification_dict = _CheckFiles(failure_signal,
                                           change_logs[revision], deps_info)

          if not justification_dict:
            continue

          new_suspected_cl_dict = CreateCLInfoDict(
              justification_dict, build_number, change_logs[revision])
          step_analysis_result['suspected_cls'].append(new_suspected_cl_dict)

          # At test level the CL was already saved per test above, so only
          # save at step level when there is no test breakdown.
          if not is_test_level:
            _SaveFailureToMap(cl_failure_map, new_suspected_cl_dict, step_name,
                              None, max(justification_dict['hints'].values()))

    # TODO(stgao): sort CLs by score.
    analysis_result['failures'].append(step_analysis_result)

  suspected_cls = _ConvertCLFailureMapToList(cl_failure_map)

  return analysis_result, suspected_cls