def main(args: Any = None) -> None:
    """Refresh upstream version info for the whole graph and persist it.

    Loads the feedstock graph from disk, runs the upstream-version update
    pass over it, and writes the mutated graph back out.

    Parameters
    ----------
    args
        Unused; accepted only for CLI-dispatch compatibility.
    """
    # Verbose logging only when the global debug flag is set; otherwise
    # let setup_logger pick its default level.
    logger_kwargs = {"level": "debug"} if CONDA_FORGE_TICK_DEBUG else {}
    setup_logger(logger, **logger_kwargs)

    logger.info("Reading graph")
    graph = load_graph()

    # Mutates node payloads in place with the latest upstream versions.
    update_upstream_versions(graph)

    logger.info("writing out file")
    dump_graph(graph)
def main(args: Any = None) -> None:
    """Compute pending upstream version updates and dump them to JSON.

    Loads the feedstock graph, asks ``new_update_upstream_versions`` for the
    set of packages needing an update, and writes that result to
    ``new_version.json`` in the current directory.

    Parameters
    ----------
    args
        Unused; accepted only for CLI-dispatch compatibility.
    """
    # Debug builds get verbose logging; otherwise use the default level.
    logger_kwargs = {"level": "debug"} if CONDA_FORGE_TICK_DEBUG else {}
    setup_logger(logger, **logger_kwargs)

    logger.info("Reading graph")
    # Graph enabled for inspection
    graph = load_graph()

    # call update
    pending = new_update_upstream_versions(graph)

    logger.info("writing out file")
    with open("new_version.json", "w") as sink:
        json.dump(pending, sink)
def main(args: Any = None) -> None:
    """Compute pending upstream version updates and dump them to JSON.

    Loads the feedstock graph, runs ``new_update_upstream_versions`` over it,
    and writes the result to ``new_version.json``.

    Parameters
    ----------
    args
        Unused; accepted only for CLI-dispatch compatibility.
    """
    # NOTE(review): `sources` is constructed but never passed anywhere in the
    # original code; kept in case the source constructors have registration
    # side effects -- TODO confirm whether new_update_upstream_versions
    # should receive these.
    sources = (PyPI(), CRAN(), NPM(), ROSDistro(), RawURL(), Github())
    if CONDA_FORGE_TICK_DEBUG:
        setup_logger(logger, level="debug")
    else:
        setup_logger(logger)
    logger.info("Reading graph")
    # Graph enabled for inspection
    gx = load_graph()
    # call update
    # BUG FIX: the original discarded the return value of
    # new_update_upstream_versions and then referenced an undefined
    # `to_update` below, raising NameError; bind the result here
    # (matching the earlier working revision of this function).
    to_update = new_update_upstream_versions(gx)
    logger.info("writing out file")
    with open("new_version.json", "w") as outfile:
        json.dump(to_update, outfile)
def main(args: "CLIArgs") -> None:
    """Run every registered migrator over the feedstock graph, opening PRs.

    Parameters
    ----------
    args : CLIArgs
        Parsed CLI options; only ``args.debug`` (logging verbosity) and
        ``args.dry_run`` (skip graph writes) are read here.

    Side effects (as visible below): calls ``run(...)`` per feedstock branch
    (presumably opening PRs -- defined elsewhere), mutates each node's
    payload (``PRed``, ``bad``, ``branch``, error status), periodically
    writes the graph via ``dump_graph``, and removes files under /tmp.
    """
    # logging
    if args.debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    # Credentials are read from the environment (set by the CI job).
    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    mctx, temp, migrators = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
        migrators,
    )
    # Report each migrator's share of the time budget;
    # max(..., 1) guards against division by a zero total.
    for i, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""
        print(
            " %s%s: %d - gets %f seconds (%f percent)"
            % (
                migrator.__class__.__name__,
                extra_name,
                num_nodes[i],
                time_per_migrator[i],
                time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
            ),
            flush=True,
        )

    for mg_ind, migrator in enumerate(migrators):
        # Key used for per-migrator error bookkeeping in node payloads.
        if hasattr(migrator, "name"):
            assert isinstance(migrator.name, str)
            migrator_name = migrator.name.lower().replace(" ", "")
        else:
            migrator_name = migrator.__class__.__name__.lower()

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()  # wall-clock start for this migrator's budget
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        # Visual separator between migrators in the CI log.
        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )
        print(
            "Running migrations for %s%s: %d\n"
            % (
                migrator.__class__.__name__,
                extra_name,
                len(effective_graph.nodes),
            ),
            flush=True,
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            LOGGER.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    LOGGER.info(
                        " node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            # The payload acts as a context manager; mutations inside the
            # `with` are presumably persisted on exit -- TODO confirm.
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                base_branches = migrator.get_possible_feedstock_branches(attrs)
                orig_branch = attrs.get("branch", "master")

                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time()))
                     > int(env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                try:
                    # Attempt the migration on every candidate branch.
                    for base_branch in base_branches:
                        attrs["branch"] = base_branch
                        if migrator.filter(attrs):
                            continue

                        print("\n", flush=True, end="")
                        LOGGER.info(
                            "%s%s IS MIGRATING %s:%s",
                            migrator.__class__.__name__.upper(),
                            extra_name,
                            fctx.package_name,
                            base_branch,
                        )
                        try:
                            # Don't bother running if we are at zero
                            if mctx.gh_api_requests_left == 0:
                                break

                            migrator_uid, pr_json = run(
                                feedstock_ctx=fctx,
                                migrator=migrator,
                                rerender=migrator.rerender,
                                protocol="https",
                                hash_type=attrs.get("hash_type", "sha256"),
                                base_branch=base_branch,
                            )
                            # if migration successful
                            if migrator_uid:
                                d = frozen_to_json_friendly(migrator_uid)
                                # if we have the PR already do nothing
                                if d["data"] in [
                                    existing_pr["data"]
                                    for existing_pr in attrs.get("PRed", [])
                                ]:
                                    pass
                                else:
                                    # Synthesize a placeholder PR record when
                                    # the API returned nothing for this run.
                                    if not pr_json:
                                        pr_json = {
                                            "state": "closed",
                                            "head": {
                                                "ref": "<this_is_not_a_branch>"
                                            },
                                        }
                                    d["PR"] = pr_json
                                    attrs.setdefault("PRed", []).append(d)
                                attrs.update(
                                    {
                                        "smithy_version": mctx.smithy_version,
                                        "pinning_version": mctx.pinning_version,
                                    },
                                )
                        except github3.GitHubError as e:
                            if e.msg == "Repository was archived so is read-only.":
                                attrs["archived"] = True
                            else:
                                LOGGER.critical(
                                    "GITHUB ERROR ON FEEDSTOCK: %s",
                                    fctx.feedstock_name,
                                )
                                # Stop this node once the API quota is gone.
                                if is_github_api_limit_reached(e, mctx.gh):
                                    break
                        except URLError as e:
                            LOGGER.exception("URLError ERROR")
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split("\n"),
                                "code": getattr(e, "code"),
                                "url": getattr(e, "url"),
                            }

                            # Record a human-readable pre-PR error status for
                            # this migrator on the node payload.
                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        except Exception as e:
                            LOGGER.exception("NON GITHUB ERROR")
                            # we don't set bad for rerendering errors
                            if ("conda smithy rerender -c auto --no-check-uptodate"
                                    not in str(e)):
                                attrs["bad"] = {
                                    "exception": str(e),
                                    "traceback": str(traceback.format_exc()).split(
                                        "\n",
                                    ),
                                }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        else:
                            if migrator_uid:
                                # On successful PR add to our counter
                                good_prs += 1
                finally:
                    # reset branch
                    attrs["branch"] = orig_branch

                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    LOGGER.info(os.getcwd())
                    # Clean our droppings under /tmp; `temp` holds the
                    # entries that pre-existed this run.
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            try:
                                eval_cmd(f"rm -rf {f}")
                            except Exception:
                                pass

            # NOTE(review): indentation reconstructed from collapsed source --
            # stop the node loop once the GitHub API quota is exhausted.
            if mctx.gh_api_requests_left == 0:
                break

    print("\n", flush=True)
    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")
def auto_tick(dry_run=False, debug=False, fork=False, organization='nsls-ii-forge'):
    '''
    Automatically update package versions and submit pull requests to associated feedstocks

    Parameters
    ----------
    dry_run: bool, optional
        Generate version migration yamls but do not run them
    debug: bool, optional
        Setup logging to be in debug mode
    fork: bool, optional
        Create a fork of the repo from the organization to $GITHUB_USERNAME
    organization: str, optional
        GitHub organization that manages feedstock repositories
    '''
    from conda_forge_tick.xonsh_utils import env

    if debug:
        setup_logger(logger, level="debug")
    else:
        setup_logger(logger)

    # set Version.pr_body to custom pr_body function
    Version.pr_body = bot_pr_body

    # TODO: use ~/.netrc instead
    github_username = env.get("GITHUB_USERNAME", "")
    # NOTE(review): the "password" slot is filled with the token here --
    # confirm this is intentional.
    github_password = env.get("GITHUB_TOKEN", "")
    github_token = env.get("GITHUB_TOKEN")

    # MIGRATORS is module-level state; rebound here for other consumers.
    global MIGRATORS
    print('Initializing migrators...')
    mctx, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print('Computing time per migrator')
    (num_nodes, time_per_migrator,
     tot_time_per_migrator) = _compute_time_per_migrator(mctx, )

    # Log each migrator's share of the total time budget.
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""
        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    print('Performing migrations...')
    for mg_ind, migrator in enumerate(MIGRATORS):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()  # wall-clock start for this migrator's budget
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        " node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            # The payload acts as a context manager; mutations inside the
            # `with` are presumably persisted on exit -- TODO confirm.
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time()))
                     > int(env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )
                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (dry_run or mctx.gh.rate_limit()["resources"]["core"]
                            ["remaining"] == 0):
                        break

                    migrator_uid, pr_json = run(feedstock_ctx=fctx,
                                                migrator=migrator,
                                                rerender=migrator.rerender,
                                                protocol="https",
                                                hash_type=attrs.get(
                                                    "hash_type", "sha256"),
                                                fork=fork,
                                                organization=organization)
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            # Synthesize a placeholder PR record when the API
                            # returned nothing for this run.
                            if pr_json is None:
                                pr_json = {
                                    "state": "closed",
                                    "head": {
                                        "ref": "<this_is_not_a_branch>"
                                    },
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )
                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        # Stop once the API quota is exhausted.
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())

    if not dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
def main(args: "CLIArgs") -> None:
    """Entry point: configure the bot and run every migrator in sequence.

    Sets resource limits and logging, builds the migration context and
    migrator list, reports each migrator's time budget, then delegates the
    actual per-migrator work to ``_run_migrator``.

    Parameters
    ----------
    args : CLIArgs
        Parsed CLI options; ``args.debug`` and ``args.dry_run`` are read.
    """
    _setup_limits()

    # logging
    tick_logger = logging.getLogger("conda_forge_tick")
    if args.debug:
        setup_logger(tick_logger, level="debug")
    else:
        setup_logger(tick_logger)

    # Credentials come from the environment (set by CI).
    mctx, temp, migrators = initialize_migrators(
        github_username=env.get("USERNAME", ""),
        github_password=env.get("PASSWORD", ""),
        dry_run=args.dry_run,
        github_token=env.get("GITHUB_TOKEN"),
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    num_nodes, time_per_migrator, tot_time_per_migrator = _compute_time_per_migrator(
        mctx,
        migrators,
    )
    for idx, mgr in enumerate(migrators):
        suffix = "-%s" % mgr.name if hasattr(mgr, "name") else ""
        # max(..., 1) guards against a zero total time.
        share = time_per_migrator[idx] / max(tot_time_per_migrator, 1) * 100
        print(
            " %s%s: %d - gets %f seconds (%f percent)"
            % (
                mgr.__class__.__name__,
                suffix,
                num_nodes[idx],
                time_per_migrator[idx],
                share,
            ),
            flush=True,
        )

    for idx, mgr in enumerate(migrators):
        # Visual separator between migrators in the CI log.
        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )
        good_prs = _run_migrator(
            mgr,
            mctx,
            temp,
            time_per_migrator[idx],
            args.dry_run,
        )
        if good_prs > 0:
            # Deploy step intentionally disabled: it has been causing
            # issues with bad deploys.
            pass
            # deploy(dry_run=args.dry_run)

    print("\n", flush=True)
    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")