def test_reasons_for_failure(self):
  """Verifies reasons_for_failure() invokes the splitter and caches the result."""
  cache = buildbot.DiskCache(self.cache_path)
  build = AlertBuilderTest.k_example_failing_build
  step = build['steps'][0]
  builder_name = build['builderName']
  master_url = 'https://build.chromium.org/p/chromium.lkgr'

  old_splitter_for_step = reasons_splitter.splitter_for_step
  split_step_invoked = [False]

  def mock_splitter_for_step(step):
    class MockSplitter(object):
      @classmethod
      def split_step(cls, step, build, builder_name, master_url):
        split_step_invoked[0] = True
        return {}
    return MockSplitter()

  try:
    reasons_splitter.splitter_for_step = mock_splitter_for_step

    # First call is a cache miss, so the splitter should be invoked.
    alert_builder.reasons_for_failure(
        cache, step, build, builder_name, master_url)
    self.assertTrue(split_step_invoked[0])

    # Second call should hit the cache and not re-invoke the splitter.
    split_step_invoked[0] = False
    alert_builder.reasons_for_failure(
        cache, step, build, builder_name, master_url)
    self.assertFalse(split_step_invoked[0])
  finally:
    reasons_splitter.splitter_for_step = old_splitter_for_step

def main(args):  # pragma: no cover
  logging.basicConfig(level=logging.DEBUG)

  parser = argparse.ArgumentParser()
  parser.add_argument('builder_url', action='store')
  args = parser.parse_args(args)

  # e.g. https://build.chromium.org/p/chromium.win/builders/XP%20Tests%20(1)
  url_regexp = re.compile('(?P<master_url>.*)/builders/(?P<builder_name>.*)/?')
  match = url_regexp.match(args.builder_url)

  # FIXME: HACK
  CACHE_PATH = 'build_cache'
  cache = buildbot.DiskCache(CACHE_PATH)

  master_url = match.group('master_url')
  builder_name = urllib.unquote_plus(match.group('builder_name'))
  master_json = buildbot.fetch_master_json(master_url)
  # This is kind of a hack, but it reuses more of our existing code this way:
  alerts = alerts_for_master(cache, master_url, master_json, builder_name)
  print json.dumps(alerts[0], indent=1)

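# A minimal sketch of a command-line entry point for main() above; the
# original module may wire this up differently, so treat this block as an
# assumption rather than the file's actual entry point.
if __name__ == '__main__':
  import sys
  main(sys.argv[1:])
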
def test_reasons_for_failure_no_splitter(self):
  """Verifies reasons_for_failure() returns nothing when no splitter matches."""
  cache = buildbot.DiskCache(self.cache_path)
  build = AlertBuilderTest.k_example_failing_build
  step = build['steps'][0]
  builder_name = build['builderName']
  master_url = 'https://build.chromium.org/p/chromium.lkgr'

  old_splitter_for_step = reasons_splitter.splitter_for_step

  def mock_splitter_for_step(step):
    return None

  try:
    reasons_splitter.splitter_for_step = mock_splitter_for_step
    reasons = alert_builder.reasons_for_failure(
        cache, step, build, builder_name, master_url)
    self.assertFalse(reasons)
  finally:
    reasons_splitter.splitter_for_step = old_splitter_for_step

def inner_loop(args):
  if not args.data_url:
    logging.warn('No /data url passed, will write to builder_alerts.json')

  if args.use_cache:
    requests_cache.install_cache('failure_stats')
  else:
    requests_cache.install_cache(backend='memory')

  # FIXME: gatekeeper_config should find gatekeeper.json for us.
  gatekeeper_path = os.path.abspath(args.gatekeeper)
  logging.debug('Processing gatekeeper json: %s', gatekeeper_path)
  gatekeeper = gatekeeper_ng_config.load_gatekeeper_config(gatekeeper_path)

  gatekeeper_trees_path = os.path.abspath(args.gatekeeper_trees)
  logging.debug('Processing gatekeeper trees json: %s', gatekeeper_trees_path)
  gatekeeper_trees = gatekeeper_ng_config.load_gatekeeper_tree_config(
      gatekeeper_trees_path)

  master_urls = gatekeeper_extras.fetch_master_urls(gatekeeper, args)
  start_time = datetime.datetime.utcnow()

  cache = buildbot.DiskCache(CACHE_PATH)

  # Index the alerts from the previous run by (master, builder, step, reason).
  old_alerts = {}
  if args.data_url:
    try:
      old_alerts_raw = requests.get(args.data_url[0]).json()
    except ValueError:
      logging.debug('No old alerts found.')
    else:
      # internal-alerts will have a redirect instead of alerts if you're
      # signed in.
      if 'alerts' in old_alerts_raw:
        for alert in old_alerts_raw['alerts']:
          master = alert['master_url']
          builder = alert['builder_name']
          step = alert['step_name']
          reason = alert['reason']
          alert_key = alert_builder.generate_alert_key(
              master, builder, step, reason)

          if alert_key in old_alerts:
            logging.critical(
                'Incorrectly overwriting an alert reason from the old alert'
                ' data. master: %s, builder: %s, step: %s, reason: %s',
                master, builder, step, reason)

          old_alerts[alert_key] = alert

  latest_builder_info = {}
  stale_builder_alerts = []
  missing_masters = []
  alerts = []
  suspected_cls = []

  # Fetch alerts for each master in parallel.
  pool = multiprocessing.Pool(processes=args.processes)
  master_datas = pool.map(
      SubProcess(cache, old_alerts, args.builder_filter, args.jobs),
      master_urls)
  pool.close()
  pool.join()

  for data in master_datas:
    # TODO(ojan): We should put an alert in the JSON for this master so
    # we can show that the master is down in the sheriff-o-matic UI.
    if not data[0]:
      missing_masters.append(data[3])
      continue
    alerts.extend(data[0])
    latest_builder_info.update(data[1])
    stale_builder_alerts.extend(data[2])

  logging.info('Fetch took: %s seconds.',
               (datetime.datetime.utcnow() - start_time).total_seconds())

  alerts = gatekeeper_extras.apply_gatekeeper_rules(
      alerts, gatekeeper, gatekeeper_trees)
  stale_builder_alerts = gatekeeper_extras.apply_gatekeeper_rules(
      stale_builder_alerts, gatekeeper, gatekeeper_trees)

  alerts = analysis.assign_keys(alerts)
  reason_groups = analysis.group_by_reason(alerts)
  range_groups = analysis.merge_by_range(reason_groups)

  if args.findit_api_url and alerts:
    suspected_cls = query_findit(args.findit_api_url, alerts)

  data = {
      'alerts': alerts,
      'suspected_cls': suspected_cls,
      'reason_groups': reason_groups,
      'range_groups': range_groups,
      'latest_builder_info': latest_builder_info,
      'stale_builder_alerts': stale_builder_alerts,
      'missing_masters': missing_masters,
  }

  if not args.data_url:
    with open('builder_alerts.json', 'w') as f:
      f.write(json.dumps(data, indent=1))

  ret = True

  # gzip the payload before POSTing; the alert feed can be large.
  json_data = json.dumps(data)
  logging.info('Alerts json is %s bytes uncompressed.', len(json_data))
  s = cStringIO.StringIO()
  with contextlib.closing(gzip.GzipFile(fileobj=s, mode='w')) as g:
    g.write(json_data)
  gzipped_data = s.getvalue()

  for url in args.data_url:
    logging.info('POST %s alerts (%s bytes compressed) to %s',
                 len(alerts), len(gzipped_data), url)
    resp = requests.post(url, data=gzipped_data,
                         headers={'content-encoding': 'gzip'})
    try:
      resp.raise_for_status()
    except requests.HTTPError as e:
      logging.error('POST to %s failed! %d %s, %s, %s',
                    url, resp.status_code, resp.reason, resp.content, e)
      ret = False

  return ret

def setUp(self):
  self.cache_path = tempfile.mkdtemp()
  self.cache = buildbot.DiskCache(self.cache_path)
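
# A matching tearDown is sketched here on the assumption that the temporary
# cache directory created in setUp should not leak between test runs; the
# original snippet does not show one, and it assumes shutil is imported.
def tearDown(self):
  shutil.rmtree(self.cache_path, ignore_errors=True)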