def add_rebuild_migration_yaml(
    migrators: MutableSequence[Migrator],
    gx: nx.DiGraph,
    package_names: Sequence[str],
    output_to_feedstock: Mapping[str, str],
    excluded_feedstocks: MutableSet[str],
    migration_yaml: str,
    config: Optional[dict] = None,
    migration_name: str = "",
    pr_limit: int = PR_LIMIT,
) -> None:
    """Add a rebuild migrator.

    Parameters
    ----------
    migrators : list of Migrator
        The list of migrators to run.
    gx : networkx.DiGraph
        The feedstock graph.
    package_names : list of str
        The packages whose pins were moved.
    output_to_feedstock : dict of str
        Mapping of output name to feedstock name.
    excluded_feedstocks : set of str
        Feedstock names that should never be included in the migration.
    migration_yaml : str
        The raw yaml for the migration variant dict.
    config : dict, optional
        The __migrator contents of the migration.
    migration_name : str, optional
        Name of the migration.
    pr_limit : int, optional
        The number of PRs per hour, defaults to 5.
    """
    # avoid sharing a mutable default argument across calls
    config = config or {}

    total_graph = create_rebuild_graph(
        gx,
        package_names,
        excluded_feedstocks,
        include_noarch=config.get("include_noarch", False),
    )

    # Note: at this point the graph is made of all packages that have a
    # dependency on the pinned package via host, run, or test.
    # Some packages don't have a host section, so we use their
    # build section in its place.

    package_names = {
        p if p in gx.nodes else output_to_feedstock[p] for p in package_names
    } - excluded_feedstocks

    # Top-level feedstocks depend directly on a pinned package and have no
    # predecessors in the rebuild graph, so the migration starts from them.
    top_level = {
        node
        for package_name in package_names
        for node in gx.successors(package_name)
        if node in total_graph and len(list(total_graph.predecessors(node))) == 0
    }
    cycles = list(nx.simple_cycles(total_graph))

    migrator = MigrationYaml(
        migration_yaml,
        graph=total_graph,
        pr_limit=pr_limit,
        name=migration_name,
        top_level=top_level,
        cycles=cycles,
        piggy_back_migrations=[
            Jinja2VarsCleanup(),
            PipMigrator(),
            LicenseMigrator(),
            CondaForgeYAMLCleanup(),
            ExtraJinja2KeysCleanup(),
        ],
        **config,
    )
    print(f"bump number is {migrator.bump_number}")
    migrators.append(migrator)
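# A minimal sketch of how the helper above is driven, for illustration only:
# the graph, feedstock names, and migration YAML are hypothetical, and the
# real loaded graph carries node payloads this toy graph omits. In
# production, `migration_factory` derives these arguments from the pinning
# feedstock rather than building them by hand.
def _example_add_rebuild_migration_yaml() -> None:
    gx = nx.DiGraph()
    gx.add_edge("libfoo", "bar")  # "bar" depends on the pinned "libfoo"
    migrators: MutableSequence[Migrator] = []
    add_rebuild_migration_yaml(
        migrators=migrators,
        gx=gx,
        package_names=["libfoo"],
        output_to_feedstock={"libfoo": "libfoo"},
        excluded_feedstocks=set(),
        migration_yaml="libfoo:\n- '2.0'\n",
        migration_name="libfoo20",
    )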
def initialize_migrators(
    github_username: str = "",
    github_password: str = "",
    github_token: Optional[str] = None,
    dry_run: bool = False,
) -> Tuple[MigratorSessionContext, list, MutableSequence[Migrator]]:
    temp = glob.glob("/tmp/*")
    gx = load_graph()
    smithy_version = eval_cmd("conda smithy --version").strip()
    pinning_version = json.loads(
        eval_cmd("conda list conda-forge-pinning --json"),
    )[0]["version"]

    migrators = []

    add_arch_migrate(migrators, gx)
    migration_factory(migrators, gx)
    add_replacement_migrator(
        migrators,
        gx,
        "matplotlib",
        "matplotlib-base",
        (
            "Unless you need `pyqt`, recipes should depend only on "
            "`matplotlib-base`."
        ),
        alt_migrator=MatplotlibBase,
    )
    create_migration_yaml_creator(migrators=migrators, gx=gx)

    print("rebuild migration graph sizes:", flush=True)
    for m in migrators:
        print(
            f'    {getattr(m, "name", m)} graph size: '
            f'{len(getattr(m, "graph", []))}',
            flush=True,
        )
    print(" ", flush=True)

    mctx = MigratorSessionContext(
        circle_build_url=os.getenv("CIRCLE_BUILD_URL", ""),
        graph=gx,
        smithy_version=smithy_version,
        pinning_version=pinning_version,
        github_username=github_username,
        github_password=github_password,
        github_token=github_token,
        dry_run=dry_run,
    )

    print("building package import maps and version migrator", flush=True)
    python_nodes = {
        n for n, v in mctx.graph.nodes("payload") if "python" in v.get("req", "")
    }
    python_nodes.update(
        [
            k
            for node_name, node in mctx.graph.nodes("payload")
            for k in node.get("outputs_names", [])
            if node_name in python_nodes
        ],
    )
    version_migrator = Version(
        python_nodes=python_nodes,
        pr_limit=PR_LIMIT * 4,
        piggy_back_migrations=[
            Jinja2VarsCleanup(),
            DuplicateLinesCleanup(),
            PipMigrator(),
            LicenseMigrator(),
            CondaForgeYAMLCleanup(),
            ExtraJinja2KeysCleanup(),
            Build2HostMigrator(),
            NoCondaInspectMigrator(),
            Cos7Config(),
        ],
    )

    migrators = [version_migrator] + migrators

    print(" ", flush=True)

    return mctx, temp, migrators
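# Illustrative only: how the entry point below consumes this function. The
# token is a placeholder; `main` reads the real credentials from the
# environment.
def _example_initialize_migrators() -> None:
    mctx, temp, migrators = initialize_migrators(
        github_token="<token>",  # placeholder, not a real credential
        dry_run=True,  # inspect the migrator stack without issuing PRs
    )
    for m in migrators:
        print(type(m).__name__, getattr(m, "name", ""), flush=True)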
from .migrators_types import (
    MetaYamlTypedDict,
    PackageName,
)

logger = logging.getLogger("conda_forge_tick.auto_tick")

PR_LIMIT = 5
MAX_PR_LIMIT = 50

MIGRATORS: MutableSequence[Migrator] = [
    Version(
        pr_limit=PR_LIMIT * 2,
        piggy_back_migrations=[
            Jinja2VarsCleanup(),
            PipMigrator(),
            LicenseMigrator(),
            CondaForgeYAMLCleanup(),
            ExtraJinja2KeysCleanup(),
        ],
    ),
]

BOT_RERUN_LABEL = {
    "name": "bot-rerun",
    "color": "#191970",
    "description": (
        "Apply this label if you want the bot to retry "
        "issuing a particular pull-request"
    ),
}
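# Illustrative only: attaching the label above to a bot PR with github3.py so
# maintainers can ask for a retry. The owner/repo lookup is hypothetical.
def _example_apply_bot_rerun_label(gh: "github3.GitHub", number: int) -> None:
    issue = gh.issue("conda-forge", "some-feedstock", number)  # hypothetical repo
    issue.add_labels(BOT_RERUN_LABEL["name"])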
def main(args: "CLIArgs") -> None:
    # start profiler
    profile_profiler = cProfile.Profile()
    profile_profiler.enable()

    # logging
    from .xonsh_utils import env

    debug = env.get("CONDA_FORGE_TICK_DEBUG", False)
    if debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    python_nodes = {
        n for n, v in mctx.graph.nodes("payload") if "python" in v.get("req", "")
    }
    python_nodes.update(
        [
            k
            for node_name, node in mctx.graph.nodes("payload")
            for k in node.get("outputs_names", [])
            if node_name in python_nodes
        ],
    )
    imports_by_package, packages_by_import = create_package_import_maps(
        python_nodes,
    )
    version_migrator = Version(
        python_nodes=python_nodes,
        imports_by_package=imports_by_package,
        packages_by_import=packages_by_import,
        pr_limit=PR_LIMIT * 2,
        piggy_back_migrations=[
            Jinja2VarsCleanup(),
            PipMigrator(),
            LicenseMigrator(),
            CondaForgeYAMLCleanup(),
            ExtraJinja2KeysCleanup(),
        ],
    )
    MIGRATORS = [version_migrator] + MIGRATORS

    # compute the time per migrator
    num_nodes, time_per_migrator, tot_time_per_migrator = _compute_time_per_migrator(
        mctx,
    )
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    for mg_ind, migrator in enumerate(MIGRATORS):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        ),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI time out: break ahead of the timeout so we are
                # certain to write to the repo.
                # TODO: convert these env vars
                _now = time.time()
                if (
                    _now - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                    or good_prs >= migrator.pr_limit
                    or (_now - _mg_start) > time_per
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero API calls
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break
                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=migrator.rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if the migration was successful, record the PR
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we already know about the PR, do nothing
                        if d["data"] not in [
                            existing_pr["data"]
                            for existing_pr in attrs.get("PRed", [])
                        ]:
                            if not pr_json:
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )
                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        # plain URLError may lack these attributes, so default
                        # to None instead of raising inside the handler
                        "code": getattr(e, "code", None),
                        "url": getattr(e, "url", None),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # on a successful PR, add to our counter
                        good_prs += 1
                finally:
                    # write the graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_cmd(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")

    # stop profiler
    profile_profiler.disable()

    # human readable
    s_stream = io.StringIO()

    # TODO: there are other ways to do this, with more freedom
    profile_stats = pstats.Stats(profile_profiler, stream=s_stream).sort_stats(
        "tottime",
    )
    profile_stats.print_stats()

    # get the current time for the output filename
    now = datetime.now()
    current_time = now.strftime("%d-%m-%Y") + "_" + now.strftime("%H_%M_%S")

    # write the profile report to disk
    os.makedirs("profiler", exist_ok=True)
    with open(f"profiler/{current_time}.txt", "w+") as f:
        f.write(s_stream.getvalue())
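# The profiling scaffold used in `main`, shown in isolation as a reusable
# sketch: wrap any callable and dump a tottime-sorted report to a timestamped
# file. The helper name and the reuse of the `profiler/` output directory are
# our own choices, not part of the bot.
def _profile_to_file(func, *args, **kwargs):
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        return func(*args, **kwargs)
    finally:
        profiler.disable()
        stream = io.StringIO()
        pstats.Stats(profiler, stream=stream).sort_stats("tottime").print_stats()
        stamp = datetime.now().strftime("%d-%m-%Y_%H_%M_%S")
        os.makedirs("profiler", exist_ok=True)
        with open(f"profiler/{stamp}.txt", "w") as fh:
            fh.write(stream.getvalue())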