def testGetDownloadBuildDataSettings(self):
  self.assertEqual(
      {
          'download_interval_seconds': 10,
          'memcache_master_download_expiration_seconds': 3600,
          'use_chrome_build_extract': True
      }, waterfall_config.GetDownloadBuildDataSettings())
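Tests that need different settings can stub the config out. Below is a minimal sketch, assuming the standard mock library and that waterfall_config.GetDownloadBuildDataSettings is a plain module-level function; the helper name is hypothetical, not part of the project:

import mock

def _PatchDownloadSettings(test_case, **overrides):
  # Hypothetical test helper: replace GetDownloadBuildDataSettings with a stub
  # that returns the default settings merged with any per-test overrides.
  settings = {
      'download_interval_seconds': 10,
      'memcache_master_download_expiration_seconds': 3600,
      'use_chrome_build_extract': True,
  }
  settings.update(overrides)
  patcher = mock.patch.object(
      waterfall_config, 'GetDownloadBuildDataSettings', return_value=settings)
  patcher.start()
  test_case.addCleanup(patcher.stop)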
def DownloadBuildData(master_name, builder_name, build_number):
  """Downloads build data and returns a WfBuild instance."""
  build = WfBuild.Get(master_name, builder_name, build_number)
  if not build:
    build = WfBuild.Create(master_name, builder_name, build_number)

  # Cache the data to avoid pulling from the master again.
  if _BuildDataNeedUpdating(build):
    use_cbe = waterfall_config.GetDownloadBuildDataSettings().get(
        'use_chrome_build_extract')
    if use_cbe:
      # Retrieve build data from the build archive first.
      build.data = buildbot.GetBuildDataFromArchive(
          master_name, builder_name, build_number, HTTP_CLIENT_NO_404_ERROR)
      if build.data:
        build.data_source = CHROME_BUILD_EXTRACT
      elif not lock_util.WaitUntilDownloadAllowed(
          master_name):  # pragma: no cover
        # The archive had no data and the per-master rate limit could not be
        # acquired in time, so give up instead of hammering the master.
        return None

    if not build.data or not use_cbe:
      # Retrieve build data from the build master.
      build.data = buildbot.GetBuildDataFromBuildMaster(
          master_name, builder_name, build_number, HTTP_CLIENT_LOGGING_ERRORS)
      build.data_source = BUILDBOT_MASTER

    build.last_crawled_time = time_util.GetUTCNow()
    build.put()

  return build
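A minimal caller sketch, assuming an App Engine datastore context is available; the master and builder names below are made up for illustration:

# Hypothetical caller: download (or reuse cached) data for one waterfall build.
build = DownloadBuildData('chromium.linux', 'Linux Builder', 230)
if build is None:
  logging.warning('Per-master download rate limit was not acquired in time.')
else:
  logging.info('Build data source: %s', build.data_source)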
def WaitUntilDownloadAllowed(
    master_name, timeout_seconds=90):  # pragma: no cover
  """Waits until the next download from the specified master is allowed.

  Returns:
    True if the download is allowed to proceed.
    False if the download is still not allowed when the given timeout occurs.
  """
  client = memcache.Client()
  key = _MEMCACHE_MASTER_DOWNLOAD_LOCK % master_name
  deadline = time.time() + timeout_seconds
  download_interval_seconds = (
      waterfall_config.GetDownloadBuildDataSettings().get(
          'download_interval_seconds'))
  memcache_master_download_expiration_seconds = (
      waterfall_config.GetDownloadBuildDataSettings().get(
          'memcache_master_download_expiration_seconds'))

  while True:
    info = client.gets(key)
    if not info or time.time() - info['time'] >= download_interval_seconds:
      new_info = {'time': time.time()}
      if not info:
        # No lock entry yet: try to create it atomically.
        success = client.add(
            key, new_info, time=memcache_master_download_expiration_seconds)
      else:
        # The entry exists and the interval has elapsed: use compare-and-swap
        # so that only one concurrent caller wins this download slot.
        success = client.cas(
            key, new_info, time=memcache_master_download_expiration_seconds)

      if success:
        logging.info('Download from %s is allowed. Waited %s seconds.',
                     master_name, (time.time() + timeout_seconds - deadline))
        return True

    if time.time() > deadline:
      logging.info('Download from %s is not allowed. Waited %s seconds.',
                   master_name, timeout_seconds)
      return False

    logging.info('Waiting to download from %s.', master_name)
    time.sleep(download_interval_seconds + random.random())
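A usage sketch, assuming this function is exposed as lock_util.WaitUntilDownloadAllowed (as the caller above suggests); the wrapper function is hypothetical:

def _DownloadWithRateLimit(master_name, builder_name, build_number):
  # Hypothetical wrapper: block until the per-master rate limit allows another
  # request, then fetch the build data from the buildbot master.
  if not lock_util.WaitUntilDownloadAllowed(master_name, timeout_seconds=60):
    return None  # The limit was still held when the timeout expired.
  return buildbot.GetBuildDataFromBuildMaster(
      master_name, builder_name, build_number, HTTP_CLIENT_LOGGING_ERRORS)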
def ExtractSignalsForCompileFailure(failure_info, http_client):
  signals = {}

  master_name = failure_info.master_name
  builder_name = failure_info.builder_name
  build_number = failure_info.build_number
  step_name = 'compile'

  if step_name not in (failure_info.failed_steps or {}):
    logging.debug(
        'No compile failure found when extracting signals for failed '
        'build %s/%s/%d', master_name, builder_name, build_number)
    return signals

  if not failure_info.failed_steps[step_name].supported:
    # Bail out if the step is not supported.
    logging.info('Findit could not analyze compile failure for master %s.',
                 master_name)
    return signals

  failure_log = None

  # 1. Tries to get stored failure log from step.
  step = (
      WfStep.Get(master_name, builder_name, build_number, step_name) or
      WfStep.Create(master_name, builder_name, build_number, step_name))
  if step.log_data:
    failure_log = step.log_data
  else:
    # 2. Tries to get ninja_output as failure log.
    from_ninja_output = False
    use_ninja_output_log = (
        waterfall_config.GetDownloadBuildDataSettings().get(
            'use_ninja_output_log'))
    if use_ninja_output_log:
      failure_log = step_util.GetWaterfallBuildStepLog(
          master_name, builder_name, build_number, step_name, http_client,
          'json.output[ninja_info]')
      from_ninja_output = True

    if not failure_log:
      # 3. Tries to get stdout log for compile step.
      from_ninja_output = False
      failure_log = extract_signal.GetStdoutLog(
          master_name, builder_name, build_number, step_name, http_client)

    try:
      if not failure_log:
        raise extract_signal.FailedToGetFailureLogError(
            'Failed to pull failure log (stdio or ninja output) of step %s of'
            ' %s/%s/%d' %
            (step_name, master_name, builder_name, build_number))
    except extract_signal.FailedToGetFailureLogError:
      return {}

    # Save step log in datastore and avoid downloading again during retry.
    step.log_data = extract_signal.ExtractStorablePortionOfLog(
        failure_log, from_ninja_output)

    try:
      step.put()
    except Exception as e:  # pragma: no cover
      # Sometimes, the step log is too large to save in datastore.
      logging.exception(e)

  signals[step_name] = extractors.ExtractSignal(
      master_name,
      builder_name,
      step_name,
      test_name=None,
      failure_log=failure_log).ToDict()

  extract_signal.SaveSignalInAnalysis(master_name, builder_name, build_number,
                                      signals)

  return signals
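A brief usage sketch; failure_info and http_client are assumed to come from the surrounding analysis pipeline:

# Hypothetical caller: extract the compile failure signal, if one is available.
signals = ExtractSignalsForCompileFailure(failure_info, http_client)
if 'compile' in signals:
  logging.info('Extracted compile failure signal: %r', signals['compile'])
else:
  logging.info('No compile failure signal could be extracted.')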
def AnalyzeCompileFailure(failure_info, change_logs, deps_info,
                          failure_signals):
  """Analyzes given failure signals and figures out culprits of the compile failure.

  Args:
    failure_info (CompileFailureInfo): Output of pipeline
      DetectFirstFailurePipeline.
    change_logs (dict): Output of pipeline PullChangelogPipeline.
    deps_info (dict): Output of pipeline ExtractDEPSInfoPipeline.
    failure_signals (dict): Output of pipeline ExtractSignalPipeline.

  Returns:
    A dict with the following form:
    {
      'failures': [
        {
          'step_name': 'compile',
          'supported': True,
          'first_failure': 230,
          'last_pass': 229,
          'suspected_cls': [
            {
              'build_number': 230,
              'repo_name': 'chromium',
              'revision': 'a_git_hash',
              'commit_position': 56789,
              'score': 11,
              'hints': {
                'add a/b/x.cc': 5,
                'delete a/b/y.cc': 5,
                'modify e/f/z.cc': 1,
                ...
              }
            },
            ...
          ],
        },
        ...
      ]
    }

    And a list of suspected_cls in the format below:
    [
      {
        'repo_name': 'chromium',
        'revision': 'r98_1',
        'commit_position': None,
        'url': None,
        'failures': {
          'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
        },
        'top_score': 4
      },
      ...
    ]
  """
  analysis_result = {'failures': []}
  cl_failure_map = defaultdict(build_failure_analysis.CLInfo)
  step_name = constants.COMPILE_STEP_NAME

  if not failure_signals:
    logging.debug('No failure signals when analyzing a compile failure.')
    return analysis_result, []

  if step_name not in failure_info.failed_steps:
    logging.debug('No failed compile step when analyzing a compile failure.')
    return analysis_result, []

  builds = failure_info.builds
  compile_failure_info = failure_info.failed_steps[step_name]

  failed_build_number = compile_failure_info.current_failure
  start_build_number = build_failure_analysis.GetLowerBoundForAnalysis(
      compile_failure_info)
  step_analysis_result = build_failure_analysis.InitializeStepLevelResult(
      step_name, compile_failure_info)

  if not step_analysis_result['supported']:
    return analysis_result, []

  failure_signal = FailureSignal.FromDict(failure_signals[step_name])

  _Analyze(start_build_number, failed_build_number, builds, step_name,
           failure_signal, change_logs, deps_info, step_analysis_result,
           cl_failure_map)

  if waterfall_config.GetDownloadBuildDataSettings().get(
      'use_ninja_output_log'):
    step_analysis_result['new_compile_suspected_cls'] = []
    _Analyze(
        start_build_number,
        failed_build_number,
        builds,
        step_name,
        failure_signal,
        change_logs,
        deps_info,
        step_analysis_result,
        cl_failure_map,
        use_ninja_output=True)

    if (not step_analysis_result['suspected_cls'] and
        step_analysis_result.get('new_compile_suspected_cls')):
      step_analysis_result['use_ninja_dependencies'] = True
      step_analysis_result['suspected_cls'] = step_analysis_result[
          'new_compile_suspected_cls']
      for new_suspected_cl_dict in step_analysis_result['suspected_cls']:
        # Top score for new heuristic is always 2.
        build_failure_analysis.SaveFailureToMap(
            cl_failure_map, new_suspected_cl_dict, step_name, None, 2)

  # TODO(stgao): sort CLs by score.
  analysis_result['failures'].append(step_analysis_result)

  suspected_cls = build_failure_analysis.ConvertCLFailureMapToList(
      cl_failure_map)

  return analysis_result, suspected_cls
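A sketch of consuming the result, based only on the documented return format; failure_info, change_logs, deps_info, and failure_signals are assumed to come from the upstream pipelines named in the docstring:

# Hypothetical consumer: run the heuristic analysis and log the top suspect
# for the compile step, if any suspected CL was found.
analysis_result, suspected_cls = AnalyzeCompileFailure(
    failure_info, change_logs, deps_info, failure_signals)
for failure in analysis_result['failures']:
  if failure['step_name'] == 'compile' and failure.get('suspected_cls'):
    top = max(failure['suspected_cls'], key=lambda cl: cl['score'])
    logging.info('Top suspect for compile: %s@%s (score %d)',
                 top['repo_name'], top['revision'], top['score'])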