def write_version_migrator_status(migrator, mctx):
    """write the status of the version migrator"""
    out = {
        "queued": [],
        "errored": [],
        "errors": {},
    }

    mmctx = MigratorContext(session=mctx, migrator=migrator)
    migrator.bind_to_ctx(mmctx)

    for node in mmctx.effective_graph.nodes:
        attrs = mmctx.effective_graph.nodes[node]["payload"]
        new_version = attrs.get("new_version", None)
        if new_version is None:
            continue

        attempts = attrs.get("new_version_attempts", {}).get(new_version, 0)
        if attempts == 0:
            out["queued"].append(node)
        else:
            out["errored"].append(node)
            out["errors"][node] = attrs.get("new_version_errors", {}).get(
                new_version,
                "No error information available for version '%s'." % new_version,
            )

    with open("./status/version_status.json", "w") as f:
        json.dump(out, f, sort_keys=True, indent=2)
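# A minimal sketch of how the status file written above could be inspected.
# The path and the "queued"/"errored"/"errors" keys mirror
# write_version_migrator_status; the helper name itself is hypothetical and
# not part of the bot.
def summarize_version_status(path="./status/version_status.json"):
    """Print a one-line summary of queued/errored version migrations."""
    with open(path) as f:
        status = json.load(f)
    print(
        "%d queued, %d errored"
        % (len(status["queued"]), len(status["errored"]))
    )
    # show the first recorded error, if any
    for node, err in status["errors"].items():
        print("first error (%s): %s" % (node, err))
        break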
def _compute_time_per_migrator(mctx):
    # we weight each migrator by the number of available nodes to migrate
    num_nodes = []
    for migrator in MIGRATORS:
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)
        num_nodes.append(len(mmctx.effective_graph.nodes))

    num_nodes_tot = sum(num_nodes)
    # guard against division by zero when nothing can be migrated
    time_per_node = float(env.get("TIMEOUT", 600)) / max(num_nodes_tot, 1)

    # also enforce a minimum of 300 seconds if any nodes can be migrated
    time_per_migrator = []
    for i, migrator in enumerate(MIGRATORS):
        _time_per = num_nodes[i] * time_per_node
        if num_nodes[i] > 0 and _time_per < 300:
            _time_per = 300
        time_per_migrator.append(_time_per)

    # finally rescale to fit in the time we have
    tot_time_per_migrator = sum(time_per_migrator)
    if tot_time_per_migrator > 0:
        time_fac = float(env.get("TIMEOUT", 600)) / tot_time_per_migrator
    else:
        time_fac = 1.0
    for i in range(len(time_per_migrator)):
        time_per_migrator[i] = time_per_migrator[i] * time_fac

    # recompute the total here
    tot_time_per_migrator = sum(time_per_migrator)

    return num_nodes, time_per_migrator, tot_time_per_migrator
def _compute_time_per_migrator(mctx, migrators):
    # we weight each migrator by the number of available nodes to migrate
    num_nodes = []
    for migrator in tqdm.tqdm(migrators):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        if isinstance(migrator, Version):
            # for version migrations, only count nodes that have not
            # already exhausted their attempts
            _num_nodes = 0
            for node_name in mmctx.effective_graph.nodes:
                with mmctx.effective_graph.nodes[node_name]["payload"] as attrs:
                    _attempts = attrs.get("new_version_attempts", {}).get(
                        attrs.get("new_version", ""),
                        0,
                    )
                    if _attempts < 3:
                        _num_nodes += 1
            _num_nodes = max(
                _num_nodes,
                min(PR_LIMIT * 4, len(mmctx.effective_graph.nodes)),
            )
            num_nodes.append(_num_nodes)
        else:
            num_nodes.append(len(mmctx.effective_graph.nodes))

    num_nodes_tot = sum(num_nodes)
    # do not divide by zero
    time_per_node = float(env.get("TIMEOUT", 600)) / max(num_nodes_tot, 1)

    # also enforce a minimum of 300 seconds if any nodes can be migrated
    time_per_migrator = []
    for i, migrator in enumerate(migrators):
        _time_per = num_nodes[i] * time_per_node
        if num_nodes[i] > 0 and _time_per < 300:
            _time_per = 300
        time_per_migrator.append(_time_per)

    # finally rescale to fit in the time we have
    tot_time_per_migrator = sum(time_per_migrator)
    if tot_time_per_migrator > 0:
        time_fac = float(env.get("TIMEOUT", 600)) / tot_time_per_migrator
    else:
        time_fac = 1.0
    for i in range(len(time_per_migrator)):
        time_per_migrator[i] = time_per_migrator[i] * time_fac

    # recompute the total here
    tot_time_per_migrator = sum(time_per_migrator)

    return num_nodes, time_per_migrator, tot_time_per_migrator
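# A worked example of the budgeting math above (illustration only; the
# function name and the sample numbers are hypothetical, not bot code):
# three migrators with 10, 0, and 2 migratable nodes and a 600-second
# TIMEOUT, with the 300-second floor applied before rescaling.
def _example_time_budget(num_nodes=(10, 0, 2), timeout=600.0, floor=300.0):
    time_per_node = timeout / max(sum(num_nodes), 1)  # 600 / 12 = 50
    per = [
        max(n * time_per_node, floor) if n > 0 else 0.0
        for n in num_nodes
    ]
    # per == [500.0, 0.0, 300.0]; the floor kicked in for the third migrator
    tot = sum(per)                       # 800.0, now over budget
    fac = timeout / tot if tot > 0 else 1.0  # 0.75
    per = [t * fac for t in per]         # [375.0, 0.0, 225.0]
    return per, sum(per)                 # total rescales back to 600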
def main(args: "CLIArgs") -> None:
    # logging
    from .xonsh_utils import env

    debug = env.get("CONDA_FORGE_TICK_DEBUG", False)
    if debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
    )
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    for mg_ind, migrator in enumerate(MIGRATORS):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (
                            attrs.get("new_version_attempts", {}).get(
                                attrs.get("new_version", ""),
                                0,
                            )
                        ),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make
                # certain to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if (
                    (
                        _now - int(env.get("START_TIME", time.time()))
                        > int(env.get("TIMEOUT", 600))
                    )
                    or good_prs >= migrator.pr_limit
                    or (_now - _mg_start) > time_per
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break

                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=migrator.rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"]
                            for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if not pr_json:
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_cmd(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
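# A toy illustration (not bot code; the helper name is hypothetical) of the
# "skip if PR already recorded" check used in the loops above: a migration
# UID is compared on its "data" payload against the feedstock's PRed history.
def already_pred(d, attrs):
    return d["data"] in [pr["data"] for pr in attrs.get("PRed", [])]

_attrs = {"PRed": [{"data": {"migrator": "Version", "version": "1.2"}}]}
print(already_pred({"data": {"migrator": "Version", "version": "1.2"}}, _attrs))  # True
print(already_pred({"data": {"migrator": "Version", "version": "1.3"}}, _attrs))  # False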
def run_test_migration(
    m,
    inp,
    output,
    kwargs,
    prb,
    mr_out,
    should_filter=False,
    tmpdir=None,
):
    mm_ctx = MigratorSessionContext(
        graph=G,
        smithy_version="",
        pinning_version="",
        github_username="",
        github_password="",
        circle_build_url=env["CIRCLE_BUILD_URL"],
    )
    m_ctx = MigratorContext(mm_ctx, m)
    m.bind_to_ctx(m_ctx)

    if mr_out:
        mr_out.update(bot_rerun=False)

    with open(os.path.join(tmpdir, "meta.yaml"), "w") as f:
        f.write(inp)

    # read the conda-forge.yml
    if os.path.exists(os.path.join(tmpdir, "..", "conda-forge.yml")):
        with open(os.path.join(tmpdir, "..", "conda-forge.yml")) as fp:
            cf_yml = fp.read()
    else:
        cf_yml = "{}"

    # Load the meta.yaml (this is done in the graph)
    try:
        name = parse_meta_yaml(inp)["package"]["name"]
    except Exception:
        name = "blah"

    pmy = populate_feedstock_attributes(name, {}, inp, cf_yml)

    # these are here for legacy migrators
    pmy["version"] = pmy["meta_yaml"]["package"]["version"]
    pmy["req"] = set()
    for k in ["build", "host", "run"]:
        req = pmy["meta_yaml"].get("requirements", {}) or {}
        _set = req.get(k) or set()
        pmy["req"] |= set(_set)
    pmy["raw_meta_yaml"] = inp
    pmy.update(kwargs)

    assert m.filter(pmy) is should_filter
    if should_filter:
        return

    m.run_pre_piggyback_migrations(
        tmpdir,
        pmy,
        hash_type=pmy.get("hash_type", "sha256"),
    )
    mr = m.migrate(tmpdir, pmy, hash_type=pmy.get("hash_type", "sha256"))
    m.run_post_piggyback_migrations(
        tmpdir,
        pmy,
        hash_type=pmy.get("hash_type", "sha256"),
    )
    assert mr_out == mr
    if not mr:
        return

    pmy.update(PRed=[frozen_to_json_friendly(mr)])
    with open(os.path.join(tmpdir, "meta.yaml")) as f:
        actual_output = f.read()

    # strip jinja comments
    pat = re.compile(r"{#.*#}")
    actual_output = pat.sub("", actual_output)
    output = pat.sub("", output)
    assert actual_output == output
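# A small demonstration of the jinja-comment stripping used in the test above
# (illustration only; the sample string is made up). Note that the pattern
# r"{#.*#}" is greedy within a line, so two comments on one line are removed
# together with any text between them.
_pat = re.compile(r"{#.*#}")
_line = "package: {# a comment #} name {# another #}"
print(_pat.sub("", _line))  # -> 'package: ' (the word "name" is gone too)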
def main(args: "CLIArgs") -> None:
    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    for migrator in MIGRATORS:
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        effective_graph = mmctx.effective_graph

        logger.info(
            "Total migrations for %s: %d",
            migrator.__class__.__name__,
            len(effective_graph.nodes),
        )

        for node_name in migrator.order(effective_graph, mctx.graph):
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make
                # certain to write to the repo
                # TODO: convert these env vars
                if (
                    time.time() - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                    or good_prs >= migrator.pr_limit
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                logger.info(
                    "%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break

                    # FIXME: this causes the bot to not-rerender things when it
                    # should. For instance, if the bot rerenders but the PR is
                    # left open, then we don't rerender again even though we
                    # should. This needs logic to check if the rerender has
                    # been merged.
                    rerender = (
                        attrs.get("smithy_version") != mctx.smithy_version
                        or attrs.get("pinning_version") != mctx.pinning_version
                        or migrator.rerender
                    )
                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"]
                            for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if not pr_json:
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    dump_graph(mctx.graph)

                    eval_xonsh(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_xonsh(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
def main(args: "CLIArgs") -> None:
    # logging
    if args.debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    mctx, temp, migrators = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
        migrators,
    )
    for i, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "    %s%s: %d - gets %f seconds (%f percent)"
            % (
                migrator.__class__.__name__,
                extra_name,
                num_nodes[i],
                time_per_migrator[i],
                time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
            ),
            flush=True,
        )

    for mg_ind, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            assert isinstance(migrator.name, str)
            migrator_name = migrator.name.lower().replace(" ", "")
        else:
            migrator_name = migrator.__class__.__name__.lower()

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )
        print(
            "Running migrations for %s%s: %d\n"
            % (
                migrator.__class__.__name__,
                extra_name,
                len(effective_graph.nodes),
            ),
            flush=True,
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            LOGGER.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    LOGGER.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (
                            attrs.get("new_version_attempts", {}).get(
                                attrs.get("new_version", ""),
                                0,
                            )
                        ),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                base_branches = migrator.get_possible_feedstock_branches(attrs)
                orig_branch = attrs.get("branch", "master")

                # Don't let CI timeout, break ahead of the timeout so we make
                # certain to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if (
                    (
                        _now - int(env.get("START_TIME", time.time()))
                        > int(env.get("TIMEOUT", 600))
                    )
                    or good_prs >= migrator.pr_limit
                    or (_now - _mg_start) > time_per
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                try:
                    for base_branch in base_branches:
                        attrs["branch"] = base_branch
                        if migrator.filter(attrs):
                            continue

                        print("\n", flush=True, end="")
                        LOGGER.info(
                            "%s%s IS MIGRATING %s:%s",
                            migrator.__class__.__name__.upper(),
                            extra_name,
                            fctx.package_name,
                            base_branch,
                        )
                        try:
                            # Don't bother running if we are at zero
                            if mctx.gh_api_requests_left == 0:
                                break

                            migrator_uid, pr_json = run(
                                feedstock_ctx=fctx,
                                migrator=migrator,
                                rerender=migrator.rerender,
                                protocol="https",
                                hash_type=attrs.get("hash_type", "sha256"),
                                base_branch=base_branch,
                            )
                            # if migration successful
                            if migrator_uid:
                                d = frozen_to_json_friendly(migrator_uid)
                                # if we have the PR already do nothing
                                if d["data"] in [
                                    existing_pr["data"]
                                    for existing_pr in attrs.get("PRed", [])
                                ]:
                                    pass
                                else:
                                    if not pr_json:
                                        pr_json = {
                                            "state": "closed",
                                            "head": {"ref": "<this_is_not_a_branch>"},
                                        }
                                    d["PR"] = pr_json
                                    attrs.setdefault("PRed", []).append(d)
                                attrs.update(
                                    {
                                        "smithy_version": mctx.smithy_version,
                                        "pinning_version": mctx.pinning_version,
                                    },
                                )

                        except github3.GitHubError as e:
                            if e.msg == "Repository was archived so is read-only.":
                                attrs["archived"] = True
                            else:
                                LOGGER.critical(
                                    "GITHUB ERROR ON FEEDSTOCK: %s",
                                    fctx.feedstock_name,
                                )
                                if is_github_api_limit_reached(e, mctx.gh):
                                    break
                        except URLError as e:
                            LOGGER.exception("URLError ERROR")
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split("\n"),
                                "code": getattr(e, "code"),
                                "url": getattr(e, "url"),
                            }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        except Exception as e:
                            LOGGER.exception("NON GITHUB ERROR")
                            # we don't set bad for rerendering errors
                            if (
                                "conda smithy rerender -c auto --no-check-uptodate"
                                not in str(e)
                            ):
                                attrs["bad"] = {
                                    "exception": str(e),
                                    "traceback": str(traceback.format_exc()).split(
                                        "\n",
                                    ),
                                }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        else:
                            if migrator_uid:
                                # On successful PR add to our counter
                                good_prs += 1
                finally:
                    # reset branch
                    attrs["branch"] = orig_branch

                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    LOGGER.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            try:
                                eval_cmd(f"rm -rf {f}")
                            except Exception:
                                pass

        if mctx.gh_api_requests_left == 0:
            break

    print("\n", flush=True)
    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")
def auto_tick(dry_run=False, debug=False, fork=False, organization="nsls-ii-forge"):
    """
    Automatically update package versions and submit pull requests to
    associated feedstocks

    Parameters
    ----------
    dry_run: bool, optional
        Generate version migration yamls but do not run them
    debug: bool, optional
        Set up logging in debug mode
    fork: bool, optional
        Create a fork of the repo from the organization to $GITHUB_USERNAME
    organization: str, optional
        GitHub organization that manages feedstock repositories
    """
    from conda_forge_tick.xonsh_utils import env

    if debug:
        setup_logger(logger, level="debug")
    else:
        setup_logger(logger)

    # set Version.pr_body to custom pr_body function
    Version.pr_body = bot_pr_body

    # TODO: use ~/.netrc instead
    github_username = env.get("GITHUB_USERNAME", "")
    github_password = env.get("GITHUB_TOKEN", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    print("Initializing migrators...")
    mctx, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("Computing time per migrator")
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
    )
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    print("Performing migrations...")
    for mg_ind, migrator in enumerate(MIGRATORS):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (
                            attrs.get("new_version_attempts", {}).get(
                                attrs.get("new_version", ""),
                                0,
                            )
                        ),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make
                # certain to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if (
                    (
                        _now - int(env.get("START_TIME", time.time()))
                        > int(env.get("TIMEOUT", 600))
                    )
                    or good_prs >= migrator.pr_limit
                    or (_now - _mg_start) > time_per
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break

                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=migrator.rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                        fork=fork,
                        organization=organization,
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"]
                            for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if pr_json is None:
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())

    if not dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
def _run_migrator(migrator, mctx, temp, time_per, dry_run):
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    mmctx = MigratorContext(session=mctx, migrator=migrator)
    migrator.bind_to_ctx(mmctx)

    good_prs = 0
    _mg_start = time.time()
    effective_graph = mmctx.effective_graph

    if hasattr(migrator, "name"):
        extra_name = "-%s" % migrator.name
    else:
        extra_name = ""

    print(
        "Running migrations for %s%s: %d\n"
        % (
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        ),
        flush=True,
    )

    possible_nodes = list(migrator.order(effective_graph, mctx.graph))

    # version debugging info
    if isinstance(migrator, Version):
        LOGGER.info("possible version migrations:")
        for node_name in possible_nodes:
            with effective_graph.nodes[node_name]["payload"] as attrs:
                LOGGER.info(
                    "    node|curr|new|attempts: %s|%s|%s|%f",
                    node_name,
                    attrs.get("version"),
                    attrs.get("new_version"),
                    (
                        attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )
                    ),
                )

    for node_name in possible_nodes:
        with mctx.graph.nodes[node_name]["payload"] as attrs:
            # Don't let CI timeout, break ahead of the timeout so we make
            # certain to write to the repo
            # TODO: convert these env vars
            _now = time.time()
            if (
                (
                    _now - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                )
                or good_prs >= migrator.pr_limit
                or (_now - _mg_start) > time_per
            ):
                break

            base_branches = migrator.get_possible_feedstock_branches(attrs)
            if "branch" in attrs:
                has_attrs_branch = True
                orig_branch = attrs.get("branch")
            else:
                has_attrs_branch = False
                orig_branch = None

            fctx = FeedstockContext(
                package_name=node_name,
                feedstock_name=attrs["feedstock_name"],
                attrs=attrs,
            )

            # map main to current default branch
            base_branches = [
                br if br != "main" else fctx.default_branch for br in base_branches
            ]

            try:
                for base_branch in base_branches:
                    attrs["branch"] = base_branch
                    if migrator.filter(attrs):
                        continue

                    print("\n", flush=True, end="")
                    sys.stderr.flush()
                    sys.stdout.flush()
                    LOGGER.info(
                        "%s%s IS MIGRATING %s:%s",
                        migrator.__class__.__name__.upper(),
                        extra_name,
                        fctx.package_name,
                        base_branch,
                    )
                    try:
                        # Don't bother running if we are at zero
                        if mctx.gh_api_requests_left == 0:
                            break

                        migrator_uid, pr_json = run(
                            feedstock_ctx=fctx,
                            migrator=migrator,
                            rerender=migrator.rerender,
                            protocol="https",
                            hash_type=attrs.get("hash_type", "sha256"),
                            base_branch=base_branch,
                        )
                        # if migration successful
                        if migrator_uid:
                            d = frozen_to_json_friendly(migrator_uid)
                            # if we have the PR already do nothing
                            if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                            ]:
                                pass
                            else:
                                if not pr_json:
                                    pr_json = {
                                        "state": "closed",
                                        "head": {"ref": "<this_is_not_a_branch>"},
                                    }
                                d["PR"] = pr_json
                                attrs.setdefault("PRed", []).append(d)
                            attrs.update(
                                {
                                    "smithy_version": mctx.smithy_version,
                                    "pinning_version": mctx.pinning_version,
                                },
                            )

                    except github3.GitHubError as e:
                        if e.msg == "Repository was archived so is read-only.":
                            attrs["archived"] = True
                        else:
                            LOGGER.critical(
                                "GITHUB ERROR ON FEEDSTOCK: %s",
                                fctx.feedstock_name,
                            )
                            if is_github_api_limit_reached(e, mctx.gh):
                                break
                    except URLError as e:
                        LOGGER.exception("URLError ERROR")
                        attrs["bad"] = {
                            "exception": str(e),
                            "traceback": str(traceback.format_exc()).split("\n"),
                            "code": getattr(e, "code"),
                            "url": getattr(e, "url"),
                        }
                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    except Exception as e:
                        LOGGER.exception("NON GITHUB ERROR")
                        # we don't set bad for rerendering errors
                        if (
                            "conda smithy rerender -c auto --no-check-uptodate"
                            not in str(e)
                        ):
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split(
                                    "\n",
                                ),
                            }
                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    else:
                        if migrator_uid:
                            # On successful PR add to our counter
                            good_prs += 1
            finally:
                # reset branch
                if has_attrs_branch:
                    attrs["branch"] = orig_branch

                # do this but it is crazy
                gc.collect()

                # Write graph partially through
                if not dry_run:
                    dump_graph(mctx.graph)

                eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                LOGGER.info(os.getcwd())
                for f in glob.glob("/tmp/*"):
                    if f not in temp:
                        try:
                            eval_cmd(f"rm -rf {f}")
                        except Exception:
                            pass

        if mctx.gh_api_requests_left == 0:
            break

    return good_prs