def main(args):
    """Entry point for the audit bot: run every registered audit over all
    feedstocks that (transitively) depend on python, writing one result file
    per (audit, feedstock, version) under ``audits/<audit-name>/``.

    Parameters
    ----------
    args
        Parsed CLI arguments (unused here; kept for the CLI dispatch contract).

    Side effects: creates the ``audits`` directory tree, writes audit result
    files, and prints each audited node name.
    """
    gx = load_graph()
    ctx = MigratorSessionContext("", "", "")
    start_time = time.time()
    # one output subdirectory per registered audit
    os.makedirs("audits", exist_ok=True)
    for k in AUDIT_REGISTRY:
        os.makedirs(os.path.join("audits", k), exist_ok=True)
    # TODO: generalize for cran skeleton
    # limit graph to things that depend on python
    # NOTE(review): root node is "pypy-meta" here — confirm this is the
    # intended python root (other revisions use "python").
    python_des = nx.descendants(gx, "pypy-meta")
    # audit the most-depended-upon feedstocks first
    for node in sorted(
        python_des,
        key=lambda x: (len(nx.descendants(gx, x)), x),
        reverse=True,
    ):
        # stop before the CI job's hard timeout (default 30 minutes)
        if time.time() - int(env.get("START_TIME", start_time)) > int(
            env.get("TIMEOUT", 60 * 30),
        ):
            break
        # depfinder only work on python at the moment so only work on things
        # with python as runtime dep
        payload = gx.nodes[node]["payload"]
        # each registry entry supplies "ext", "run", "writer" and optionally
        # "dumper" (keys read below)
        for k, v in AUDIT_REGISTRY.items():
            version = payload.get("version", None)
            ext = v["ext"]
            # skip archived feedstocks, unknown versions, non-python
            # runtimes, and audits that already have an output file
            if (
                not payload.get("archived", False)
                and version
                and "python" in payload["requirements"]["run"]
                and f"{node}_{version}.{ext}" not in os.listdir(f"audits/{k}")
            ):
                print(node)
                fctx = FeedstockContext(
                    package_name=node,
                    feedstock_name=payload["name"],
                    attrs=payload,
                )
                try:
                    deps = v["run"](fctx, ctx)
                except Exception as e:
                    # record the failure as the audit result so it is not retried
                    deps = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                    if "dumper" in v:
                        deps = v["dumper"](deps)
                finally:
                    # always persist whatever we got (result or error record)
                    with open(f"audits/{k}/{node}_{version}.{ext}", "w") as f:
                        v["writer"](deps, f)
    compare_grayskull_audits(gx)
    compare_depfinder_audits(gx)
def test_depfinder_audit_feedstock():
    """Depfinder audit on the stored depfinder attrs yields the known dep sets."""
    from conda_forge_tick.audit import depfinder_audit_feedstock

    session_ctx = MigratorSessionContext(
        graph=G,
        smithy_version="",
        pinning_version="",
        github_username="",
        github_password="",
        circle_build_url="",
    )

    fixture_path = os.path.join(
        os.path.dirname(__file__), "test_yaml", "depfinder.json",
    )
    with open(fixture_path) as fixture:
        node_attrs = load(fixture)

    feedstock_ctx = FeedstockContext("depfinder", "depfinder", node_attrs)
    result = depfinder_audit_feedstock(feedstock_ctx, session_ctx)

    expected = {
        "builtin": {
            "ConfigParser",
            "__future__",
            "argparse",
            "ast",
            "collections",
            "configparser",
            "copy",
            "distutils.command.build_py",
            "distutils.command.sdist",
            "distutils.core",
            "errno",
            "fnmatch",
            "io",
            "itertools",
            "json",
            "logging",
            "os",
            "pdb",
            "pkgutil",
            "pprint",
            "re",
            "subprocess",
            "sys",
        },
        "questionable": {"setuptools", "ipython", "cx_freeze"},
        "required": {"pyyaml", "stdlib-list", "setuptools", "versioneer"},
    }
    assert result == expected
def test_grayskull_audit_feedstock():
    """Grayskull audit on the stored depfinder attrs reproduces the reference recipe."""
    session_ctx = MigratorSessionContext(
        graph=G,
        smithy_version="",
        pinning_version="",
        github_username="",
        github_password="",
        circle_build_url="",
    )

    fixture_path = os.path.join(
        os.path.dirname(__file__), "test_yaml", "depfinder.json",
    )
    with open(fixture_path, "r") as fixture:
        node_attrs = load(fixture)

    feedstock_ctx = FeedstockContext("depfinder", "depfinder", node_attrs)
    result = grayskull_audit_feedstock(feedstock_ctx, session_ctx)
    assert result == DEPFINDER_RECIPE
def main(args):
    """Entry point for the (single-audit) audit bot: run ``audit_feedstock``
    over all feedstocks that depend on python and dump one JSON result per
    (feedstock, version) into ``audits/``.

    Parameters
    ----------
    args
        Parsed CLI arguments (unused here; kept for the CLI dispatch contract).
    """
    gx = load_graph()
    ctx = MigratorSessionContext("", "", "")
    start_time = time.time()
    # limit graph to things that depend on python
    # NOTE(review): root node is "pypy-meta" — confirm this is the intended
    # python root (other revisions use "python").
    python_des = nx.descendants(gx, "pypy-meta")
    # audit the most-depended-upon feedstocks first
    for node in sorted(
        python_des,
        key=lambda x: (len(nx.descendants(gx, x)), x),
        reverse=True,
    ):
        # stop before the CI job's hard timeout (default 30 minutes)
        if time.time() - int(env.get("START_TIME", start_time)) > int(
                env.get("TIMEOUT", 60 * 30)):
            break
        # depfinder only work on python at the moment so only work on things
        # with python as runtime dep
        os.makedirs("audits", exist_ok=True)
        # payload behaves as a context manager here — presumably a LazyJson
        # that persists on exit; confirm against the graph loader
        with gx.nodes[node]["payload"] as payload:
            version = payload.get('version', None)
            # skip archived feedstocks, unknown versions, non-python
            # runtimes, and nodes already audited at this version
            if (not payload.get("archived", False) and version
                    and "python" in payload["requirements"]["run"]
                    and f'{node}_{version}.json' not in os.listdir("audits")):
                print(node)
                fctx = FeedstockContext(package_name=node,
                                        feedstock_name=payload["name"],
                                        attrs=payload)
                try:
                    deps = audit_feedstock(fctx, ctx)
                except Exception as e:
                    # record the failure as the audit result so it is not retried
                    deps = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                finally:
                    # always persist whatever we got (result or error record)
                    with open(f"audits/{node}_{version}.json", "w") as f:
                        dump(deps, f)
def main(args):
    """Entry point for the audit bot (versioned-registry variant): run every
    registered audit over python-dependent feedstocks, invalidating stale
    audit data whenever the audit code version changes, then compute the
    depfinder accuracy report.

    Parameters
    ----------
    args
        Parsed CLI arguments (unused here; kept for the CLI dispatch contract).
    """
    gx = load_graph()
    ctx = MigratorSessionContext("", "", "")
    start_time = time.time()

    os.makedirs("audits", exist_ok=True)
    for k, v in AUDIT_REGISTRY.items():
        audit_dir = os.path.join("audits", k)
        version_path = os.path.join(audit_dir, "_version.json")
        # composite version: audit tool version + audit code version
        audit_version = "_".join([v["version"], v["creation_version"]])
        if os.path.exists(version_path):
            version = load(open(version_path))
            # if the version of the code generating the audits is different from our current audit data
            # clear out the audit data so we always use the latest version
            if version != audit_version:
                shutil.rmtree(audit_dir)
        os.makedirs(audit_dir, exist_ok=True)
        dump(audit_version, open(version_path, "w"))

    # TODO: generalize for cran skeleton
    # limit graph to things that depend on python
    python_des = nx.descendants(gx, "python")
    # audit the most-depended-upon feedstocks first
    for node in sorted(
        python_des,
        key=lambda x: (len(nx.descendants(gx, x)), x),
        reverse=True,
    ):
        # stop before the CI job's hard timeout
        if time.time() - int(env.get("START_TIME", start_time)) > int(
            env.get("TIMEOUT", 60 * RUNTIME_MINUTES),
        ):
            break
        # depfinder only work on python at the moment so only work on things
        # with python as runtime dep
        payload = gx.nodes[node]["payload"]
        # each registry entry supplies "ext", "run", "writer" and optionally
        # "dumper" (keys read below)
        for k, v in AUDIT_REGISTRY.items():
            version = payload.get("version", None)
            ext = v["ext"]
            # skip archived/bad feedstocks, unknown versions, non-python
            # runtimes, and audits that already have an output file
            if (
                not payload.get("archived", False)
                and not payload.get("bad", False)
                and version
                and "python" in payload["requirements"]["run"]
                and f"{node}_{version}.{ext}" not in os.listdir(f"audits/{k}")
            ):
                fctx = FeedstockContext(
                    package_name=node,
                    feedstock_name=payload["feedstock_name"],
                    attrs=payload,
                )
                try:
                    deps = v["run"](fctx, ctx)
                except Exception as e:
                    # record the failure as the audit result so it is not retried
                    deps = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                    if "dumper" in v:
                        deps = v["dumper"](deps)
                finally:
                    # only persist non-empty results
                    if deps:
                        with open(f"audits/{k}/{node}_{version}.{ext}", "w") as f:
                            v["writer"](deps, f)

    # grayskull_audit_outcome = compare_grayskull_audits(gx)
    # compute_grayskull_accuracy(grayskull_audit_outcome)

    depfinder_audit_outcome = compare_depfinder_audits(gx)
    compute_depfinder_accuracy(depfinder_audit_outcome)
def main(args: "CLIArgs") -> None:
    """Entry point for the auto-tick bot: initialize all migrators, budget
    wall-clock time per migrator, then walk each migrator's effective graph
    issuing migration PRs until the PR limit or time budget is exhausted.

    Parameters
    ----------
    args : CLIArgs
        Parsed CLI arguments; ``args.dry_run`` suppresses GitHub calls and
        graph dumps.

    Side effects: opens PRs, mutates and dumps the feedstock graph, deletes
    temporary directories, and logs progress.
    """
    # logging
    from .xonsh_utils import env

    debug = env.get("CONDA_FORGE_TICK_DEBUG", False)
    if debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
    )
    # report the per-migrator time budget
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    for mg_ind, migrator in enumerate(MIGRATORS):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        " node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (
                            attrs.get("new_version_attempts", {}).get(
                                attrs.get("new_version", ""),
                                0,
                            )
                        ),
                    )

        for node_name in possible_nodes:
            # payload behaves as a context manager — presumably a LazyJson
            # that persists mutations on exit; confirm against the graph loader
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if (
                    (
                        _now - int(env.get("START_TIME", time.time()))
                        > int(env.get("TIMEOUT", 600))
                    )
                    or good_prs >= migrator.pr_limit
                    or (_now - _mg_start) > time_per
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break

                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=migrator.rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"] for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if not pr_json:
                                # placeholder PR record for migrations that
                                # finished without opening a real PR
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        # stop entirely once the API quota is exhausted
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    # clean up rever scratch space and stray /tmp entries
                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_cmd(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
def run(
    feedstock_ctx: FeedstockContext,
    migrator: Migrator,
    protocol: str = "ssh",
    pull_request: bool = True,
    rerender: bool = True,
    fork: bool = True,
    **kwargs: typing.Any,
) -> Tuple["MigrationUidTypedDict", dict]:
    """For a given feedstock and migration run the migration

    Parameters
    ----------
    feedstock_ctx: FeedstockContext
        The node attributes
    migrator: Migrator instance
        The migrator to run on the feedstock
    protocol : str, optional
        The git protocol to use, defaults to ``ssh``
    pull_request : bool, optional
        If true issue pull request, defaults to true
    rerender : bool
        Whether to rerender
    fork : bool
        If true create a fork, defaults to true
    gh : github3.GitHub instance, optional
        Object for communicating with GitHub, if None build from $USERNAME
        and $PASSWORD, defaults to None
    kwargs: dict
        The key word arguments to pass to the migrator

    Returns
    -------
    migrate_return: MigrationUidTypedDict
        The migration return dict used for tracking finished migrations
    pr_json: dict
        The PR json object for recreating the PR as needed

    Notes
    -----
    Returns ``(False, False)`` on any failure path (failed migration, failed
    rerender, unsolvable recipe) — callers must treat a falsy first element
    as "no migration happened".
    """
    # get the repo
    # TODO: stop doing this.
    migrator.attrs = feedstock_ctx.attrs  # type: ignore

    # unique working branch per attempt
    branch_name = migrator.remote_branch(feedstock_ctx) + "_h" + uuid4().hex[0:6]

    # TODO: run this in parallel
    feedstock_dir, repo = get_repo(
        ctx=migrator.ctx.session,
        fctx=feedstock_ctx,
        branch=branch_name,
        feedstock=feedstock_ctx.feedstock_name,
        protocol=protocol,
        pull_request=pull_request,
        fork=fork,
    )

    recipe_dir = os.path.join(feedstock_dir, "recipe")

    # migrate the feedstock
    migrator.run_pre_piggyback_migrations(recipe_dir, feedstock_ctx.attrs, **kwargs)

    # TODO - make a commit here if the repo changed

    migrate_return = migrator.migrate(recipe_dir, feedstock_ctx.attrs, **kwargs)

    if not migrate_return:
        logger.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO - commit main migration here

    migrator.run_post_piggyback_migrations(recipe_dir, feedstock_ctx.attrs, **kwargs)

    # TODO commit post migration here

    # rerender, maybe
    diffed_files: typing.List[str] = []
    # subprocess failures inside this context are tolerated (RAISE_SUBPROC_ERROR off)
    with indir(feedstock_dir), env.swap(RAISE_SUBPROC_ERROR=False):
        msg = migrator.commit_message(feedstock_ctx)  # noqa
        eval_cmd("git add --all .")
        eval_cmd(f"git commit -am '{msg}'")
        if rerender:
            head_ref = eval_cmd("git rev-parse HEAD")  # noqa
            logger.info("Rerendering the feedstock")

            # In the event we can't rerender, try to update the pinnings,
            # then bail if it does not work again
            try:
                eval_cmd(
                    "conda smithy rerender -c auto --no-check-uptodate",
                    timeout=300,
                )
            except SubprocessError:
                return False, False

            # If we tried to run the MigrationYaml and rerender did nothing (we only
            # bumped the build number and dropped a yaml file in migrations) bail
            # for instance platform specific migrations
            gdiff = eval_cmd(f"git diff --name-only {head_ref}...HEAD")

            diffed_files = [
                _
                for _ in gdiff.split()
                if not (
                    _.startswith("recipe")
                    or _.startswith("migrators")
                    or _.startswith("README")
                )
            ]

    # bail when the migrated recipe is not solvable (checked when the migrator
    # or the feedstock's conda-forge.yml bot config asks for it)
    if (
        migrator.check_solvable
        or feedstock_ctx.attrs["conda-forge.yml"]
        .get("bot", {})
        .get("check_solvable", False)
    ) and not is_recipe_solvable(feedstock_dir):
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO: Better annotation here
    pr_json: typing.Union[MutableMapping, None, bool]
    if (
        isinstance(migrator, MigrationYaml)
        and not diffed_files
        and feedstock_ctx.attrs["name"] != "conda-forge-pinning"
    ):
        # spoof this so it looks like the package is done
        pr_json = {
            "state": "closed",
            "merged_at": "never issued",
            "id": str(uuid4()),
        }
    else:
        # push up
        try:
            pr_json = push_repo(
                session_ctx=migrator.ctx.session,
                fctx=feedstock_ctx,
                feedstock_dir=feedstock_dir,
                body=migrator.pr_body(feedstock_ctx),
                repo=repo,
                title=migrator.pr_title(feedstock_ctx),
                head=f"{migrator.ctx.github_username}:{branch_name}",
                branch=branch_name,
            )

        # This shouldn't happen too often any more since we won't double PR
        except github3.GitHubError as e:
            if e.msg != "Validation Failed":
                raise
            else:
                print(f"Error during push {e}")
                # If we just push to the existing PR then do nothing to the json
                pr_json = False

    ljpr = False
    if pr_json:
        # persist the PR json on disk, keyed by PR id
        ljpr = LazyJson(
            os.path.join(migrator.ctx.session.prjson_dir, str(pr_json["id"]) + ".json"),
        )
        ljpr.update(**pr_json)

        # from .dynamo_models import PRJson
        # PRJson.dump(pr_json)

    # If we've gotten this far then the node is good
    feedstock_ctx.attrs["bad"] = False

    logger.info("Removing feedstock dir")
    eval_cmd(f"rm -rf {feedstock_dir}")
    return migrate_return, ljpr
def test_grayskull_audit_feedstock():
    """Grayskull audit on the stored depfinder attrs reproduces the expected recipe.

    NOTE(review): the expected recipe literal below was reformatted from a
    whitespace-mangled source — confirm its exact line breaks / blank lines
    against real grayskull output before relying on this test.
    """
    mm_ctx = MigratorSessionContext(
        graph=G,
        smithy_version="",
        pinning_version="",
        github_username="",
        github_password="",
        circle_build_url="",
    )
    with open(
        os.path.join(os.path.dirname(__file__), "test_yaml", "depfinder.json"),
        "r",
    ) as f:
        attrs = load(f)
    fctx = FeedstockContext("depfinder", "depfinder", attrs)
    recipe = grayskull_audit_feedstock(fctx, mm_ctx)
    assert (recipe == """{% set name = "depfinder" %}
{% set version = 2.3.0 %}

package:
  name: {{ name|lower }}
  version: {{ version }}

source:
  url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/{{ name }}-{{ version }}.tar.gz
  sha256: 2694acbc8f7d94ca9bae55b8dc5b4860d5bc253c6a377b3b8ce63fb5bffa4000

build:
  number: 0
  noarch: python
  entry_points:
    - depfinder = depfinder.cli:cli
  script: {{ PYTHON }} -m pip install . -vv

requirements:
  host:
    - pip
    - python
  run:
    - python
    - pyyaml
    - stdlib-list

test:
  imports:
    - depfinder
  commands:
    - pip check
    - depfinder --help
  requires:
    - pip

about:
  home: http://github.com/ericdill/depfinder
  summary: Find all the imports in your library
  doc_url: https://pythonhosted.org/depfinder/
  license: BSD-3-Clause
  license_file: LICENSE

extra:
  recipe-maintainers:
    - ericdill
    - mariusvniekerk
    - tonyfast
    - ocefpaf
""")
def graph_migrator_status(
    migrator: Migrator,
    gx: nx.DiGraph,
) -> Tuple[dict, list, nx.DiGraph]:
    """Gets the migrator progress for a given migrator

    Parameters
    ----------
    migrator : Migrator
        The migrator whose progress to summarize.
    gx : nx.DiGraph
        The full feedstock graph (used when the migrator has no graph of its own).

    Returns
    -------
    out2 : dict
        Status buckets (done / in-pr / awaiting-pr / not-solvable /
        awaiting-parents / bot-error) mapped to build-order-sorted node lists,
        plus ``_feedstock_status`` metadata and ``_num_viz``.
    build_sequence : list
        Topologically sorted build order of the migrator graph.
    gv : graphviz.Digraph
        A colored visualization of the not-yet-done portion of the graph.
    """
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    num_viz = 0

    out: Dict[str, Set[str]] = {
        "done": set(),
        "in-pr": set(),
        "awaiting-pr": set(),
        "not-solvable": set(),
        "awaiting-parents": set(),
        "bot-error": set(),
    }

    # work on a copy so status computation never mutates the live graph
    gx2 = copy.deepcopy(getattr(migrator, "graph", gx))

    top_level = {node for node in gx2 if not list(gx2.predecessors(node))}
    build_sequence = list(cyclic_topological_sort(gx2, top_level))

    feedstock_metadata = dict()

    import graphviz
    from streamz.graph import _clean_text

    gv = graphviz.Digraph(graph_attr={"packmode": "array_3"})

    # pinning isn't actually in the migration
    if "conda-forge-pinning" in gx2.nodes():
        gx2.remove_node("conda-forge-pinning")

    for node, node_attrs in gx2.nodes.items():
        attrs = node_attrs["payload"]

        # remove archived from status
        if attrs.get("archived", False):
            continue

        node_metadata: Dict = {}
        feedstock_metadata[node] = node_metadata

        nuid = migrator.migrator_uid(attrs)
        # deep-copy the PR records so the hack below can mutate them safely
        all_pr_jsons = []
        for pr_json in attrs.get("PRed", []):
            all_pr_jsons.append(copy.deepcopy(pr_json))

        feedstock_ctx = FeedstockContext(
            package_name=node,
            feedstock_name=attrs.get("feedstock_name", node),
            attrs=attrs,
        )

        # hack around bug in migrator vs graph data for this one
        if isinstance(migrator, MatplotlibBase):
            if "name" in nuid:
                del nuid["name"]
            for i in range(len(all_pr_jsons)):
                if (
                    all_pr_jsons[i]
                    and "name" in all_pr_jsons[i]["data"]
                    and all_pr_jsons[i]["data"]["migrator_name"] == "MatplotlibBase"
                ):
                    del all_pr_jsons[i]["data"]["name"]

        # find the PR record matching this migrator's uid, if any
        for pr_json in all_pr_jsons:
            if pr_json and pr_json["data"] == frozen_to_json_friendly(
                    nuid)["data"]:
                break
        else:
            pr_json = None

        # No PR was ever issued but the migration was performed.
        # This is only the case when the migration was done manually
        # before the bot could issue any PR.
        manually_done = pr_json is None and frozen_to_json_friendly(
            nuid)["data"] in (z["data"] for z in all_pr_jsons)

        buildable = not migrator.filter(attrs)
        fntc = "black"
        status_icon = ""
        # bucket the node and pick its fill/font colors for the graph viz
        if manually_done:
            out["done"].add(node)
            fc = "#440154"
            fntc = "white"
        elif pr_json is None:
            if buildable:
                if "not solvable" in (
                        attrs.get("pre_pr_migrator_status",
                                  {}).get(migrator_name, "")):
                    out["not-solvable"].add(node)
                    fc = "#ff8c00"
                elif "bot error" in (
                        attrs.get("pre_pr_migrator_status",
                                  {}).get(migrator_name, "")):
                    out["bot-error"].add(node)
                    fc = "#000000"
                    fntc = "white"
                else:
                    out["awaiting-pr"].add(node)
                    fc = "#35b779"
            elif not isinstance(migrator, Replacement):
                if "bot error" in (
                        attrs.get("pre_pr_migrator_status",
                                  {}).get(migrator_name, "")):
                    out["bot-error"].add(node)
                    fc = "#000000"
                    fntc = "white"
                else:
                    out["awaiting-parents"].add(node)
                    fc = "#fde725"
        elif "PR" not in pr_json:
            out["bot-error"].add(node)
            fc = "#000000"
            fntc = "white"
        elif pr_json["PR"]["state"] == "closed":
            out["done"].add(node)
            fc = "#440154"
            fntc = "white"
        else:
            out["in-pr"].add(node)
            fc = "#31688e"
            fntc = "white"
            pr_status = pr_json["PR"]["mergeable_state"]
            if pr_status == "clean":
                status_icon = " ✓"
            else:
                status_icon = " ❎"
        # only draw nodes that still need work
        if node not in out["done"]:
            num_viz += 1
            gv.node(
                node,
                label=_clean_text(node) + status_icon,
                fillcolor=fc,
                style="filled",
                fontcolor=fntc,
                URL=(pr_json or {}).get("PR", {}).get(
                    "html_url",
                    feedstock_url(fctx=feedstock_ctx, protocol="https").strip(".git"),
                ),
            )

        # additional metadata for reporting
        node_metadata["num_descendants"] = len(nx.descendants(gx2, node))
        node_metadata["immediate_children"] = [
            k
            for k in sorted(gx2.successors(node))
            if not gx2[k].get("payload", {}).get("archived", False)
        ]
        if node in out["not-solvable"] or node in out["bot-error"]:
            node_metadata["pre_pr_migrator_status"] = attrs.get(
                "pre_pr_migrator_status",
                {},
            ).get(migrator_name, "")
        else:
            node_metadata["pre_pr_migrator_status"] = ""

        if pr_json and "PR" in pr_json:
            # I needed to fake some PRs they don't have html_urls though
            node_metadata["pr_url"] = pr_json["PR"].get(
                "html_url",
                feedstock_url(fctx=feedstock_ctx, protocol="https").strip(".git"),
            )
            node_metadata["pr_status"] = pr_json["PR"].get("mergeable_state")

    # order each bucket by build sequence (unknown nodes sort first)
    out2: Dict = {}
    for k in out.keys():
        out2[k] = list(
            sorted(
                out[k],
                key=lambda x: build_sequence.index(x) if x in build_sequence else -1,
            ),
        )

    out2["_feedstock_status"] = feedstock_metadata
    # only draw edges between nodes that are both still pending and live
    for (e0, e1), edge_attrs in gx2.edges.items():
        if (
            e0 not in out["done"]
            and e1 not in out["done"]
            and not gx2.nodes[e0]["payload"].get("archived", False)
            and not gx2.nodes[e1]["payload"].get("archived", False)
        ):
            gv.edge(e0, e1)

    print(" len(gv):", num_viz, flush=True)
    out2["_num_viz"] = num_viz
    return out2, build_sequence, gv
def main(args: "CLIArgs") -> None:
    """Entry point for the auto-tick bot (earlier revision): initialize all
    migrators and walk each migrator's effective graph issuing migration PRs
    until the PR limit or the CI timeout is hit.

    Parameters
    ----------
    args : CLIArgs
        Parsed CLI arguments; ``args.dry_run`` suppresses GitHub calls.

    Side effects: opens PRs, mutates and dumps the feedstock graph, deletes
    temporary directories, and logs progress.
    """
    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    for migrator in MIGRATORS:
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        effective_graph = mmctx.effective_graph

        logger.info(
            "Total migrations for %s: %d",
            migrator.__class__.__name__,
            len(effective_graph.nodes),
        )

        for node_name in migrator.order(effective_graph, mctx.graph):
            # payload behaves as a context manager — presumably a LazyJson
            # that persists mutations on exit; confirm against the graph loader
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                if (
                    time.time() - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                    or good_prs >= migrator.pr_limit
                ):
                    break
                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )
                logger.info(
                    "%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break
                    # FIXME: this causes the bot to not-rerender things when it
                    # should. For instance, if the bot rerenders but the PR is
                    # left open then we don't rerender again even though we should.
                    # This need logic to check if the rerender has been merged.
                    rerender = (
                        attrs.get("smithy_version") != mctx.smithy_version
                        or attrs.get("pinning_version") != mctx.pinning_version
                        or migrator.rerender
                    )
                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"] for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if not pr_json:
                                # placeholder PR record for migrations that
                                # finished without opening a real PR
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        # stop entirely once the API quota is exhausted
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    dump_graph(mctx.graph)

                    # clean up rever scratch space and stray /tmp entries
                    eval_xonsh(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_xonsh(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
def main(args: "CLIArgs") -> None:
    """Entry point for the auto-tick bot (base-branch-aware revision):
    initialize all migrators, budget wall-clock time per migrator, then walk
    each migrator's effective graph issuing migration PRs against every
    possible feedstock branch until the PR limit, per-migrator time budget,
    or GitHub API quota is exhausted.

    Parameters
    ----------
    args : CLIArgs
        Parsed CLI arguments; ``args.debug`` enables debug logging and
        ``args.dry_run`` suppresses graph dumps.

    Side effects: opens PRs, mutates and dumps the feedstock graph, records
    per-migrator error status on nodes, deletes temporary directories, and
    prints/logs progress.
    """
    # logging
    if args.debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    mctx, temp, migrators = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
        migrators,
    )
    # report the per-migrator time budget
    for i, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            " %s%s: %d - gets %f seconds (%f percent)"
            % (
                migrator.__class__.__name__,
                extra_name,
                num_nodes[i],
                time_per_migrator[i],
                time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
            ),
            flush=True,
        )

    for mg_ind, migrator in enumerate(migrators):
        # normalized migrator name used as the key for per-node error status
        if hasattr(migrator, "name"):
            assert isinstance(migrator.name, str)
            migrator_name = migrator.name.lower().replace(" ", "")
        else:
            migrator_name = migrator.__class__.__name__.lower()

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )
        print(
            "Running migrations for %s%s: %d\n"
            % (
                migrator.__class__.__name__,
                extra_name,
                len(effective_graph.nodes),
            ),
            flush=True,
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            LOGGER.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    LOGGER.info(
                        " node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            # payload behaves as a context manager — presumably a LazyJson
            # that persists mutations on exit; confirm against the graph loader
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # a migration may target several branches of one feedstock
                base_branches = migrator.get_possible_feedstock_branches(attrs)
                orig_branch = attrs.get("branch", "master")

                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time())) > int(
                        env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                try:
                    for base_branch in base_branches:
                        attrs["branch"] = base_branch
                        if migrator.filter(attrs):
                            continue

                        print("\n", flush=True, end="")
                        LOGGER.info(
                            "%s%s IS MIGRATING %s:%s",
                            migrator.__class__.__name__.upper(),
                            extra_name,
                            fctx.package_name,
                            base_branch,
                        )
                        try:
                            # Don't bother running if we are at zero
                            if mctx.gh_api_requests_left == 0:
                                break

                            migrator_uid, pr_json = run(
                                feedstock_ctx=fctx,
                                migrator=migrator,
                                rerender=migrator.rerender,
                                protocol="https",
                                hash_type=attrs.get("hash_type", "sha256"),
                                base_branch=base_branch,
                            )
                            # if migration successful
                            if migrator_uid:
                                d = frozen_to_json_friendly(migrator_uid)
                                # if we have the PR already do nothing
                                if d["data"] in [
                                    existing_pr["data"]
                                    for existing_pr in attrs.get(
                                        "PRed", [])
                                ]:
                                    pass
                                else:
                                    if not pr_json:
                                        # placeholder PR record for migrations
                                        # that finished without a real PR
                                        pr_json = {
                                            "state": "closed",
                                            "head": {
                                                "ref": "<this_is_not_a_branch>"
                                            },
                                        }
                                    d["PR"] = pr_json
                                    attrs.setdefault("PRed", []).append(d)
                                attrs.update(
                                    {
                                        "smithy_version": mctx.smithy_version,
                                        "pinning_version": mctx.pinning_version,
                                    },
                                )

                        except github3.GitHubError as e:
                            if e.msg == "Repository was archived so is read-only.":
                                attrs["archived"] = True
                            else:
                                LOGGER.critical(
                                    "GITHUB ERROR ON FEEDSTOCK: %s",
                                    fctx.feedstock_name,
                                )
                                # stop once the API quota is exhausted
                                if is_github_api_limit_reached(e, mctx.gh):
                                    break
                        except URLError as e:
                            LOGGER.exception("URLError ERROR")
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split("\n"),
                                "code": getattr(e, "code"),
                                "url": getattr(e, "url"),
                            }

                            # record a linkified error status for the status page
                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        except Exception as e:
                            LOGGER.exception("NON GITHUB ERROR")
                            # we don't set bad for rerendering errors
                            if ("conda smithy rerender -c auto --no-check-uptodate"
                                    not in str(e)):
                                attrs["bad"] = {
                                    "exception": str(e),
                                    "traceback": str(traceback.format_exc()).split(
                                        "\n",
                                    ),
                                }

                            # record a linkified error status for the status page
                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        else:
                            if migrator_uid:
                                # On successful PR add to our counter
                                good_prs += 1
                finally:
                    # reset branch
                    attrs["branch"] = orig_branch

                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    # clean up rever scratch space and stray /tmp entries
                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    LOGGER.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            try:
                                eval_cmd(f"rm -rf {f}")
                            except Exception:
                                pass

        if mctx.gh_api_requests_left == 0:
            break

    print("\n", flush=True)
    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")
def run(
    feedstock_ctx: FeedstockContext,
    migrator: Migrator,
    protocol: str = "ssh",
    pull_request: bool = True,
    rerender: bool = True,
    fork: bool = True,
    base_branch: str = "master",
    **kwargs: typing.Any,
) -> Tuple["MigrationUidTypedDict", dict]:
    """For a given feedstock and migration run the migration

    Parameters
    ----------
    feedstock_ctx: FeedstockContext
        The node attributes
    migrator: Migrator instance
        The migrator to run on the feedstock
    protocol : str, optional
        The git protocol to use, defaults to ``ssh``
    pull_request : bool, optional
        If true issue pull request, defaults to true
    rerender : bool
        Whether to rerender
    fork : bool
        If true create a fork, defaults to true
    base_branch : str, optional
        The base branch to which the PR will be targeted. Defaults to "master".
    kwargs: dict
        The key word arguments to pass to the migrator

    Returns
    -------
    migrate_return: MigrationUidTypedDict
        The migration return dict used for tracking finished migrations
    pr_json: dict
        The PR json object for recreating the PR as needed

    Notes
    -----
    On any failure path this returns ``(False, False)`` rather than the
    annotated tuple types, so callers must truthiness-test the results.
    """
    # get the repo
    # TODO: stop doing this.
    # NOTE(review): the migrator is mutated here so downstream migrator methods
    # can see the node attrs — confirm no other code relies on the old value.
    migrator.attrs = feedstock_ctx.attrs  # type: ignore

    # Unique working branch per run; the uuid suffix avoids collisions when the
    # same migration is retried for the same feedstock.
    branch_name = migrator.remote_branch(
        feedstock_ctx) + "_h" + uuid4().hex[0:6]

    # Normalized migrator key used for per-migrator status bookkeeping below.
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    # TODO: run this in parallel
    feedstock_dir, repo = get_repo(
        ctx=migrator.ctx.session,
        fctx=feedstock_ctx,
        branch=branch_name,
        feedstock=feedstock_ctx.feedstock_name,
        protocol=protocol,
        pull_request=pull_request,
        fork=fork,
        base_branch=base_branch,
    )
    if not feedstock_dir or not repo:
        LOGGER.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        return False, False

    recipe_dir = os.path.join(feedstock_dir, "recipe")

    # migrate the feedstock
    migrator.run_pre_piggyback_migrations(recipe_dir, feedstock_ctx.attrs,
                                          **kwargs)

    # TODO - make a commit here if the repo changed

    migrate_return = migrator.migrate(recipe_dir, feedstock_ctx.attrs,
                                      **kwargs)

    if not migrate_return:
        LOGGER.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        # clean up the checkout before bailing
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO - commit main migration here

    migrator.run_post_piggyback_migrations(recipe_dir, feedstock_ctx.attrs,
                                           **kwargs)

    # TODO commit post migration here

    # rerender, maybe
    diffed_files: typing.List[str] = []
    # subprocess errors are handled manually in this section, so suppress the
    # raise-on-error behavior while we are inside the feedstock checkout
    with indir(feedstock_dir), env.swap(RAISE_SUBPROC_ERROR=False):
        msg = migrator.commit_message(feedstock_ctx)  # noqa
        try:
            eval_cmd("git add --all .")
            eval_cmd(f"git commit -am '{msg}'")
        except CalledProcessError as e:
            LOGGER.info(
                "could not commit to feedstock - "
                "likely no changes - error is '%s'" % (repr(e)),
            )
        if rerender:
            # remember the pre-rerender commit so we can diff what the
            # rerender itself changed
            head_ref = eval_cmd("git rev-parse HEAD").strip()
            LOGGER.info("Rerendering the feedstock")
            try:
                eval_cmd(
                    "conda smithy rerender -c auto --no-check-uptodate",
                    timeout=300,
                )
                make_rerender_comment = False
            except Exception as e:
                # I am trying this bit of code to force these errors
                # to be surfaced in the logs at the right time.
                print(f"RERENDER ERROR: {e}", flush=True)
                if not isinstance(migrator, Version):
                    raise
                else:
                    # for check solvable or automerge, we always raise rerender errors
                    if feedstock_ctx.attrs["conda-forge.yml"].get(
                            "bot", {}).get(
                        "check_solvable",
                        False,
                    ) or (feedstock_ctx.attrs["conda-forge.yml"].get(
                            "bot", {}).get(
                        "automerge",
                        False,
                    )):
                        raise
                    else:
                        # otherwise push what we have and ask for a manual
                        # rerender via a PR comment later on
                        make_rerender_comment = True

            # If we tried to run the MigrationYaml and rerender did nothing (we only
            # bumped the build number and dropped a yaml file in migrations) bail
            # for instance platform specific migrations
            gdiff = eval_cmd(f"git diff --name-only {head_ref.strip()}...HEAD")
            diffed_files = [
                _ for _ in gdiff.split()
                if not (_.startswith("recipe") or _.startswith("migrators")
                        or _.startswith("README"))
            ]
        else:
            make_rerender_comment = False

    # Solvability gate: only on the default branch and never for the pinning
    # feedstock itself.
    if (feedstock_ctx.feedstock_name != "conda-forge-pinning"
            and base_branch == "master" and ((
                migrator.check_solvable
                # we always let stuff in cycles go
                and feedstock_ctx.attrs["name"] not in getattr(
                    migrator, "cycles", set())
                # we always let stuff at the top go
                and feedstock_ctx.attrs["name"] not in getattr(
                    migrator, "top_level", set())
                # for solveability always assume automerge is on.
                and (feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get(
                    "automerge", True)))
                or feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get(
                    "check_solvable",
                    False,
            ))):
        solvable, errors, _ = is_recipe_solvable(
            feedstock_dir,
            build_platform=feedstock_ctx.attrs["conda-forge.yml"].get(
                "build_platform",
                None,
            ),
        )
        if not solvable:
            _solver_err_str = "not solvable ({}): {}: {}".format(
                ('<a href="' + os.getenv("CIRCLE_BUILD_URL", "") +
                 '">bot CI job</a>'),
                base_branch,
                sorted(set(errors)),
            )
            if isinstance(migrator, Version):
                # record (and accumulate) the solver error against the version
                # being attempted so it shows up in the status page
                _new_ver = feedstock_ctx.attrs["new_version"]
                if _new_ver in feedstock_ctx.attrs["new_version_errors"]:
                    feedstock_ctx.attrs["new_version_errors"][
                        _new_ver] += "\n\nsolver error - {}".format(
                            _solver_err_str,
                        )
                else:
                    feedstock_ctx.attrs["new_version_errors"][
                        _new_ver] = _solver_err_str
                feedstock_ctx.attrs["new_version_errors"][
                    _new_ver] = sanitize_string(
                        feedstock_ctx.attrs["new_version_errors"][_new_ver],
                    )
                # remove part of a try for solver errors to make those slightly
                # higher priority
                feedstock_ctx.attrs["new_version_attempts"][_new_ver] -= 0.8

            pre_key = "pre_pr_migrator_status"
            if pre_key not in feedstock_ctx.attrs:
                feedstock_ctx.attrs[pre_key] = {}
            feedstock_ctx.attrs[pre_key][migrator_name] = sanitize_string(
                _solver_err_str,
            )
            eval_cmd(f"rm -rf {feedstock_dir}")
            return False, False

    # TODO: Better annotation here
    pr_json: typing.Union[MutableMapping, None, bool]
    if (isinstance(migrator, MigrationYaml) and not diffed_files
            and feedstock_ctx.attrs["name"] != "conda-forge-pinning"):
        # spoof this so it looks like the package is done
        pr_json = {
            "state": "closed",
            "merged_at": "never issued",
            "id": str(uuid4()),
        }
    else:
        # push up
        try:
            # TODO: remove this hack, but for now this is the only way to get
            # the feedstock dir into pr_body
            feedstock_ctx.feedstock_dir = feedstock_dir
            pr_json = push_repo(
                session_ctx=migrator.ctx.session,
                fctx=feedstock_ctx,
                feedstock_dir=feedstock_dir,
                body=migrator.pr_body(feedstock_ctx),
                repo=repo,
                title=migrator.pr_title(feedstock_ctx),
                head=f"{migrator.ctx.github_username}:{branch_name}",
                branch=branch_name,
                base_branch=base_branch,
            )

        # This shouldn't happen too often any more since we won't double PR
        except github3.GitHubError as e:
            if e.msg != "Validation Failed":
                raise
            else:
                print(f"Error during push {e}")
                # If we just push to the existing PR then do nothing to the json
                pr_json = False
                ljpr = False

    if pr_json and pr_json["state"] != "closed" and make_rerender_comment:
        comment_on_pr(
            pr_json,
            """\
Hi! This feedstock was not able to be rerendered after the version update changes. I
have pushed the version update changes anyways and am trying to rerender again with this
comment. Hopefully you all can fix this!

@conda-forge-admin rerender""",
            repo,
        )

    if pr_json:
        # persist the PR metadata on disk as a LazyJson blob keyed by PR id
        ljpr = LazyJson(
            os.path.join(migrator.ctx.session.prjson_dir,
                         str(pr_json["id"]) + ".json"),
        )
        ljpr.update(**pr_json)

        # from .dynamo_models import PRJson
        # PRJson.dump(pr_json)
    else:
        ljpr = False

    # If we've gotten this far then the node is good
    feedstock_ctx.attrs["bad"] = False

    LOGGER.info("Removing feedstock dir")
    eval_cmd(f"rm -rf {feedstock_dir}")
    return migrate_return, ljpr
def auto_tick(dry_run=False, debug=False, fork=False,
              organization='nsls-ii-forge'):
    '''
    Automatically update package versions and submit pull requests to
    associated feedstocks

    Parameters
    ----------
    dry_run: bool, optional
        Generate version migration yamls but do not run them
    debug: bool, optional
        Setup logging to be in debug mode
    fork: bool, optional
        Create a fork of the repo from the organization to $GITHUB_USERNAME
    organization: str, optional
        GitHub organization that manages feedstock repositories
    '''
    from conda_forge_tick.xonsh_utils import env

    if debug:
        setup_logger(logger, level="debug")
    else:
        setup_logger(logger)

    # set Version.pr_body to custom pr_body function
    Version.pr_body = bot_pr_body

    # TODO: use ~/.netrc instead
    # NOTE(review): the token is used as both password and token here —
    # presumably token-based auth is intended; confirm this is deliberate.
    github_username = env.get("GITHUB_USERNAME", "")
    github_password = env.get("GITHUB_TOKEN", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    print('Initializing migrators...')
    mctx, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print('Computing time per migrator')
    (num_nodes, time_per_migrator,
     tot_time_per_migrator) = _compute_time_per_migrator(mctx, )

    # log the per-migrator time budget before starting
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    print('Performing migrations...')
    for mg_ind, migrator in enumerate(MIGRATORS):

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            # the payload context manager writes any attr mutations back on exit
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time())) > int(
                        env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (dry_run or mctx.gh.rate_limit()["resources"]["core"]
                            ["remaining"] == 0):
                        break
                    migrator_uid, pr_json = run(feedstock_ctx=fctx,
                                                migrator=migrator,
                                                rerender=migrator.rerender,
                                                protocol="https",
                                                hash_type=attrs.get(
                                                    "hash_type", "sha256"),
                                                fork=fork,
                                                organization=organization)
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if pr_json is None:
                                # placeholder PR record for runs that did not
                                # open a real PR
                                pr_json = {
                                    "state": "closed",
                                    "head": {
                                        "ref": "<this_is_not_a_branch>"
                                    },
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())

    if not dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
def _run_migrator(migrator, mctx, temp, time_per, dry_run):
    """Run a single migrator over its ordered feedstock nodes.

    Iterates the migrator's effective graph in priority order, attempting the
    migration on each possible base branch of each feedstock, recording PR
    metadata and errors on the node payload, and writing the graph back to
    disk after every node.

    Parameters
    ----------
    migrator : Migrator
        The migrator instance to run.
    mctx : MigratorSessionContext
        The session context (graph, GitHub handles, rever dir, versions).
    temp : collection of str
        Pre-existing /tmp entries that must be left in place during cleanup.
    time_per : float
        Time budget (seconds) allotted to this migrator.
    dry_run : bool
        If true, skip writing the graph back to disk.

    Returns
    -------
    int
        Number of PRs successfully issued by this migrator.
    """
    # Normalized migrator key used for per-migrator error bookkeeping.
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    mmctx = MigratorContext(session=mctx, migrator=migrator)
    migrator.bind_to_ctx(mmctx)

    good_prs = 0
    _mg_start = time.time()

    effective_graph = mmctx.effective_graph

    if hasattr(migrator, "name"):
        extra_name = "-%s" % migrator.name
    else:
        extra_name = ""

    print(
        "Running migrations for %s%s: %d\n"
        % (
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        ),
        flush=True,
    )

    possible_nodes = list(migrator.order(effective_graph, mctx.graph))

    # version debugging info
    if isinstance(migrator, Version):
        LOGGER.info("possible version migrations:")
        for node_name in possible_nodes:
            with effective_graph.nodes[node_name]["payload"] as attrs:
                LOGGER.info(
                    "    node|curr|new|attempts: %s|%s|%s|%f",
                    node_name,
                    attrs.get("version"),
                    attrs.get("new_version"),
                    (
                        attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )
                    ),
                )

    for node_name in possible_nodes:
        # the payload context manager writes any attr mutations back on exit
        with mctx.graph.nodes[node_name]["payload"] as attrs:
            # Don't let CI timeout, break ahead of the timeout so we make certain
            # to write to the repo
            # TODO: convert these env vars
            _now = time.time()
            if (
                (
                    _now - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                )
                or good_prs >= migrator.pr_limit
                or (_now - _mg_start) > time_per
            ):
                break

            base_branches = migrator.get_possible_feedstock_branches(attrs)
            # remember whether "branch" existed so the finally block can
            # restore it exactly (rather than inventing the key)
            if "branch" in attrs:
                has_attrs_branch = True
                orig_branch = attrs.get("branch")
            else:
                has_attrs_branch = False
                orig_branch = None

            fctx = FeedstockContext(
                package_name=node_name,
                feedstock_name=attrs["feedstock_name"],
                attrs=attrs,
            )

            # map main to current default branch
            base_branches = [
                br if br != "main" else fctx.default_branch
                for br in base_branches
            ]

            try:
                for base_branch in base_branches:
                    attrs["branch"] = base_branch
                    if migrator.filter(attrs):
                        continue

                    print("\n", flush=True, end="")
                    sys.stderr.flush()
                    sys.stdout.flush()
                    LOGGER.info(
                        "%s%s IS MIGRATING %s:%s",
                        migrator.__class__.__name__.upper(),
                        extra_name,
                        fctx.package_name,
                        base_branch,
                    )
                    try:
                        # Don't bother running if we are at zero
                        if mctx.gh_api_requests_left == 0:
                            break

                        migrator_uid, pr_json = run(
                            feedstock_ctx=fctx,
                            migrator=migrator,
                            rerender=migrator.rerender,
                            protocol="https",
                            hash_type=attrs.get("hash_type", "sha256"),
                            base_branch=base_branch,
                        )
                        # if migration successful
                        if migrator_uid:
                            d = frozen_to_json_friendly(migrator_uid)
                            # if we have the PR already do nothing
                            if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                            ]:
                                pass
                            else:
                                if not pr_json:
                                    # placeholder PR record for runs that did
                                    # not open a real PR
                                    pr_json = {
                                        "state": "closed",
                                        "head": {"ref": "<this_is_not_a_branch>"},
                                    }
                                d["PR"] = pr_json
                                attrs.setdefault("PRed", []).append(d)
                            attrs.update(
                                {
                                    "smithy_version": mctx.smithy_version,
                                    "pinning_version": mctx.pinning_version,
                                },
                            )

                    except github3.GitHubError as e:
                        if e.msg == "Repository was archived so is read-only.":
                            attrs["archived"] = True
                        else:
                            LOGGER.critical(
                                "GITHUB ERROR ON FEEDSTOCK: %s",
                                fctx.feedstock_name,
                            )
                            if is_github_api_limit_reached(e, mctx.gh):
                                break
                    except URLError as e:
                        LOGGER.exception("URLError ERROR")
                        attrs["bad"] = {
                            "exception": str(e),
                            "traceback": str(traceback.format_exc()).split("\n"),
                            "code": getattr(e, "code"),
                            "url": getattr(e, "url"),
                        }

                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    except Exception as e:
                        LOGGER.exception("NON GITHUB ERROR")
                        # we don't set bad for rerendering errors
                        if (
                            "conda smithy rerender -c auto --no-check-uptodate"
                            not in str(e)
                        ):
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split(
                                    "\n",
                                ),
                            }

                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    else:
                        if migrator_uid:
                            # On successful PR add to our counter
                            good_prs += 1
            finally:
                # reset branch
                if has_attrs_branch:
                    attrs["branch"] = orig_branch

                # do this but it is crazy
                gc.collect()

                # Write graph partially through
                if not dry_run:
                    dump_graph(mctx.graph)

                eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                LOGGER.info(os.getcwd())
                # scrub temp files created during this node, sparing anything
                # that was already present before the run
                for f in glob.glob("/tmp/*"):
                    if f not in temp:
                        try:
                            eval_cmd(f"rm -rf {f}")
                        except Exception:
                            pass

        if mctx.gh_api_requests_left == 0:
            break

    return good_prs