def testMultipleSuccess(self):
  """Candidate is the newest revision that is green on every builder.

  Rev 5 is the only revision green on both b1 and b2 simultaneously.
  """
  build_history = {
      'm1': {'b1': [
          lkgr_lib.Build(1, self.fail, 1),
          lkgr_lib.Build(2, self.good, 2),
          lkgr_lib.Build(3, self.fail, 3),
          lkgr_lib.Build(4, self.good, 4),
          lkgr_lib.Build(5, self.good, 5),
      ]},
      'm2': {'b2': [
          lkgr_lib.Build(1, self.fail, 1),
          lkgr_lib.Build(2, self.fail, 2),
          lkgr_lib.Build(3, self.good, 3),
          lkgr_lib.Build(4, self.good, 4),
          lkgr_lib.Build(5, self.good, 5),
      ]},
  }
  revisions = [1, 2, 3, 4, 5]
  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, self.keyfunc, self.status_stub)
  # assertEqual: the assertEquals spelling is a deprecated alias.
  self.assertEqual(candidate, 5)
def testModerateFailsTwo(self):
  """No candidate when the only revision fails on one of two builders."""
  build_history = {
      'm1': {'b1': [lkgr_lib.Build(1, self.fail, 1)]},
      'm2': {'b2': [lkgr_lib.Build(1, self.good, 1)]},
  }
  revisions = [1]
  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, self.keyfunc, self.status_stub)
  # assertIsNone replaces the deprecated assertEquals(candidate, None).
  self.assertIsNone(candidate)
def testModerateSuccess(self):
  """A revision green on every builder is a valid candidate."""
  # Use lkgr_lib.Build(...) like the sibling tests instead of bare tuples,
  # so the fixture matches what FindLKGRCandidate receives in production.
  build_history = {
      'm1': {'b1': [lkgr_lib.Build(1, self.good, 1)]},
      'm2': {'b2': [lkgr_lib.Build(1, self.good, 1)]},
  }
  revisions = [1]
  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, self.keyfunc, self.status_stub)
  # assertEqual: the assertEquals spelling is a deprecated alias.
  self.assertEqual(candidate, 1)
def testMissingFails(self):
  """No candidate when no single revision is green everywhere.

  Each builder is missing a build (b1 lacks rev 4, b2 lacks rev 5), and
  no revision is green on both, so there is no valid LKGR.
  """
  # Use lkgr_lib.Build(...) like the sibling tests instead of bare tuples,
  # so the fixture matches what FindLKGRCandidate receives in production.
  build_history = {
      'm1': {'b1': [
          lkgr_lib.Build(1, self.fail, 1),
          lkgr_lib.Build(2, self.good, 2),
          lkgr_lib.Build(3, self.fail, 3),
          lkgr_lib.Build(5, self.good, 5),
      ]},
      'm2': {'b2': [
          lkgr_lib.Build(1, self.fail, 1),
          lkgr_lib.Build(2, self.fail, 2),
          lkgr_lib.Build(3, self.good, 3),
          lkgr_lib.Build(4, self.good, 4),
      ]},
  }
  revisions = [1, 2, 3, 4, 5]
  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, self.keyfunc, self.status_stub)
  # assertIsNone replaces the deprecated assertEquals(candidate, None).
  self.assertIsNone(candidate)
def testNoBuilderHistory(self):
  """No candidate when a builder has no build history at all."""
  build_history = {
      'm1': {'b1': []},
  }
  revisions = [1]

  def allow_norev_keyfunc(val):
    # Map NOREV below every real revision so the empty builder can never
    # contribute a winning revision.
    if val is lkgr_lib.NOREV:
      return -1
    return int(val)

  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, allow_norev_keyfunc, self.status_stub)
  # assertIsNone replaces the deprecated assertEquals(candidate, None).
  self.assertIsNone(candidate)
def main(argv):
  """Compute a new LKGR candidate for the configured project.

  Returns 1 on fatal errors (bad manual revision, build-fetch failures).

  NOTE(review): a second `main` defined later in this file rebinds this
  name; only the later definition is reachable at runtime — confirm this
  older copy is intentionally kept.
  """
  # TODO(agable): Refactor this into multiple sequential helper functions.
  args, config_arg_names = ParseArgs(argv)

  global LOGGER
  logging.basicConfig(
      # %(levelname)s is formatted to min-width 8 since CRITICAL is 8 letters.
      format='%(asctime)s | %(levelname)8s | %(name)s | %(message)s',
      level=args.loglevel)
  LOGGER = logging.getLogger(__name__)
  LOGGER.addFilter(lkgr_lib.RunLogger())

  config = lkgr_lib.GetProjectConfig(args.project)
  # Command-line flags override values from the project config.
  for name in config_arg_names:
    cmd_line_config = getattr(args, name, NOTSET)
    if cmd_line_config is not NOTSET:
      config[name] = cmd_line_config

  # Calculate new candidate LKGR.
  LOGGER.info('Calculating LKGR for project %s', args.project)

  repo = lkgr_lib.GitWrapper(
      config['source_url'],
      os.path.join(os.path.dirname(os.path.abspath(__file__)),
                   'workdir', args.project))

  monkeypatch_rev_map = config.get('monkeypatch_rev_map')
  if monkeypatch_rev_map:
    repo._position_cache.update(monkeypatch_rev_map)

  if args.manual:
    candidate = args.manual
    LOGGER.info('Using manually specified candidate %s', args.manual)
    if not repo.check_rev(candidate):
      # Lazy %-style logger args instead of eager string formatting.
      LOGGER.fatal(
          'Manually specified revision %s is not a valid revision for'
          ' project %s', args.manual, args.project)
      return 1
  else:
    lkgr_builders = config['masters']
    if args.build_data:
      builds = lkgr_lib.ReadBuildData(args.build_data)
    else:
      builds, failures = lkgr_lib.FetchBuildData(
          lkgr_builders, args.max_threads, args.service_account)
      if failures > 0:
        return 1

    if args.dump_build_data:
      try:
        with open(args.dump_build_data, 'w') as fh:
          json.dump(builds, fh, indent=2)
      # `except ... as e` replaces the Python-2-only `except IOError, e`
      # syntax, and warning() replaces the deprecated warn() alias.
      except IOError as e:
        LOGGER.warning('Could not dump to %s:\n%s\n',
                       args.dump_build_data, repr(e))

    (build_history, revisions) = lkgr_lib.CollateRevisionHistory(
        builds, lkgr_builders, repo)

    status_gen = status_generator.StatusGeneratorStub()
    if args.html:
      viewvc = config.get('viewvc_url', config['source_url'] + '/+/%s')
      status_gen = status_generator.HTMLStatusGenerator(viewvc=viewvc)

    candidate = lkgr_lib.FindLKGRCandidate(
        build_history, revisions, repo.keyfunc, status_gen)

    # Kept inside the non-manual branch: status_gen is only bound here.
    if args.html:
      lkgr_lib.WriteHTML(status_gen, args.html, args.dry_run)
def testSimpleSucceeds(self):
  """A single green build makes its revision the candidate."""
  build_history = {'m1': {'b1': [lkgr_lib.Build(1, self.good, 1)]}}
  revisions = [1]
  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, self.keyfunc, self.status_stub)
  # assertEqual: the assertEquals spelling is a deprecated alias.
  self.assertEqual(candidate, 1)
def testSimpleFails(self):
  """A single failing build yields no candidate."""
  # Use lkgr_lib.Build(...) like the sibling tests instead of a bare tuple,
  # so the fixture matches what FindLKGRCandidate receives in production.
  build_history = {'m1': {'b1': [lkgr_lib.Build(1, self.fail, 1)]}}
  revisions = [1]
  candidate = lkgr_lib.FindLKGRCandidate(
      build_history, revisions, self.keyfunc, self.status_stub)
  # assertIsNone replaces the deprecated assertEquals(candidate, None).
  self.assertIsNone(candidate)
def main(argv):
  """Compute a new LKGR for the project and publish it if it advanced.

  Returns 0 on success, 1 on fatal errors (bad manual revision,
  build-fetch failures, unreadable current LKGR), and 2 when the LKGR
  falls too far behind (gap or lag threshold exceeded).
  """
  # TODO(agable): Refactor this into multiple sequential helper functions.
  args, config_arg_names = ParseArgs(argv)

  global LOGGER
  logging.basicConfig(
      # %(levelname)s is formatted to min-width 8 since CRITICAL is 8 letters.
      format='%(asctime)s | %(levelname)8s | %(name)s | %(message)s',
      level=args.loglevel)
  LOGGER = logging.getLogger(__name__)
  LOGGER.addFilter(lkgr_lib.RunLogger())

  if args.project_config_file:
    # literal_eval only parses Python literals; it cannot execute code.
    with open(args.project_config_file) as f:
      config = ast.literal_eval(f.read())
  else:
    config = lkgr_lib.GetProjectConfig(args.project)

  # Command-line flags override values from the project config.
  for name in config_arg_names:
    cmd_line_config = getattr(args, name, NOTSET)
    if cmd_line_config is not NOTSET:
      config[name] = cmd_line_config

  # Calculate new candidate LKGR.
  LOGGER.info('Calculating LKGR for project %s', args.project)

  repo = lkgr_lib.GitWrapper(
      config['source_url'], os.path.join(args.workdir, args.project))

  monkeypatch_rev_map = config.get('monkeypatch_rev_map')
  if monkeypatch_rev_map:
    repo._position_cache.update(monkeypatch_rev_map)

  if args.manual:
    candidate = args.manual
    LOGGER.info('Using manually specified candidate %s', args.manual)
    if not repo.check_rev(candidate):
      # Lazy %-style logger args instead of eager string formatting.
      LOGGER.fatal('Manually specified revision %s is not a valid revision for'
                   ' project %s', args.manual, args.project)
      return 1
  else:
    builds = None
    if args.build_data:
      try:
        builds = lkgr_lib.LoadBuilds(args.build_data)
      except IOError as e:
        LOGGER.error('Could not read build data from %s:\n%s\n',
                     args.build_data, repr(e))
        raise
    if builds is None:
      builds = {}
      buildbucket_builders = config.get('buckets', [])
      if buildbucket_builders:
        buildbucket_builds, failures = lkgr_lib.FetchBuildbucketBuilds(
            buildbucket_builders, args.max_threads, args.service_account)
        if failures > 0:
          return 1
        builds.update(buildbucket_builds)

    if args.dump_build_data:
      try:
        lkgr_lib.DumpBuilds(builds, args.dump_build_data)
      except IOError as e:
        # warning() replaces the deprecated warn() alias; dumping is
        # best-effort, so do not abort on failure.
        LOGGER.warning('Could not dump to %s:\n%s\n',
                       args.dump_build_data, repr(e))

    (build_history, revisions) = lkgr_lib.CollateRevisionHistory(builds, repo)

    status_gen = status_generator.StatusGeneratorStub()
    if args.html:
      viewvc = config.get('viewvc_url', config['source_url'] + '/+/%s')
      status_gen = status_generator.HTMLStatusGenerator(
          viewvc=viewvc, config=config)

    candidate = lkgr_lib.FindLKGRCandidate(
        build_history, revisions, repo.keyfunc, status_gen)

    # Kept inside the non-manual branch: status_gen is only bound here.
    if args.html:
      lkgr_lib.WriteHTML(status_gen, args.html, args.dry_run)

  LOGGER.info('Candidate LKGR is %s', candidate)

  lkgr = None
  if not args.force:
    # Get old/current LKGR.
    lkgr = '0' * 40
    if args.read_from_file:
      lkgr = lkgr_lib.ReadLKGR(args.read_from_file)
      if lkgr is None:
        if args.email_errors and 'error_recipients' in config:
          lkgr_lib.SendMail(config['error_recipients'],
                            'Failed to read %s LKGR. Please seed an initial '
                            'LKGR in file %s' % (args.project,
                                                 args.read_from_file),
                            '\n'.join(lkgr_lib.RunLogger.log), args.dry_run)
        LOGGER.fatal('Failed to read current %s LKGR. Please seed an initial '
                     'LKGR in file %s', args.project, args.read_from_file)
        return 1

    if not repo.check_rev(lkgr):
      if args.email_errors and 'error_recipients' in config:
        lkgr_lib.SendMail(config['error_recipients'],
                          'Fetched bad current %s LKGR' % args.project,
                          '\n'.join(lkgr_lib.RunLogger.log), args.dry_run)
      LOGGER.fatal('Fetched bad current %s LKGR: %s', args.project, lkgr)
      return 1

    LOGGER.info('Current LKGR is %s', lkgr)

  if candidate and (args.force or
                    repo.keyfunc(candidate) > repo.keyfunc(lkgr)):
    # We found a new LKGR!
    LOGGER.info('Candidate is%snewer than current %s LKGR!',
                ' (forcefully) ' if args.force else ' ', args.project)

    if args.write_to_file:
      lkgr_lib.WriteLKGR(candidate, args.write_to_file, args.dry_run)

    if args.tag:
      # TODO(machenbach): Currently the wrapping recipe updates the refs. We
      # should instead use this method here.
      lkgr_lib.UpdateTag(candidate, config['source_url'], args.dry_run)

  else:
    # No new LKGR found.
    LOGGER.info('Candidate is not newer than current %s LKGR.', args.project)

    if not args.manual and lkgr:
      rev_behind = repo.get_gap(revisions, lkgr)
      LOGGER.info('LKGR is %d revisions behind', rev_behind)

      if rev_behind > config['allowed_gap']:
        if args.email_errors and 'error_recipients' in config:
          lkgr_lib.SendMail(
              config['error_recipients'],
              '%s LKGR (%s) > %s revisions behind' % (
                  args.project, lkgr, config['allowed_gap']),
              '\n'.join(lkgr_lib.RunLogger.log), args.dry_run)
        LOGGER.fatal('LKGR exceeds allowed gap (%s > %s)',
                     rev_behind, config['allowed_gap'])
        return 2

      time_behind = repo.get_lag(lkgr)
      LOGGER.info('LKGR is %s behind', time_behind)

      if not lkgr_lib.CheckLKGRLag(time_behind, rev_behind,
                                   config['allowed_lag'],
                                   config['allowed_gap']):
        if args.email_errors and 'error_recipients' in config:
          lkgr_lib.SendMail(
              config['error_recipients'],
              '%s LKGR (%s) exceeds lag threshold' % (args.project, lkgr),
              '\n'.join(lkgr_lib.RunLogger.log), args.dry_run)
        LOGGER.fatal('LKGR exceeds lag threshold (%s > %s)',
                     time_behind, config['allowed_lag'])
        return 2

  return 0