def run(
    feedstock_ctx: FeedstockContext,
    migrator: Migrator,
    protocol: str = "ssh",
    pull_request: bool = True,
    rerender: bool = True,
    fork: bool = True,
    **kwargs: typing.Any,
) -> Tuple["MigrationUidTypedDict", dict]:
    """For a given feedstock and migration run the migration

    .. note::
        NOTE(review): a later definition of ``run`` in this module shadows
        this one at import time — confirm this older copy is still needed.

    Parameters
    ----------
    feedstock_ctx: FeedstockContext
        The node attributes
    migrator: Migrator instance
        The migrator to run on the feedstock
    protocol : str, optional
        The git protocol to use, defaults to ``ssh``
    pull_request : bool, optional
        If true issue pull request, defaults to true
    rerender : bool
        Whether to rerender
    fork : bool
        If true create a fork, defaults to true
    kwargs: dict
        The key word arguments to pass to the migrator

    Returns
    -------
    migrate_return: MigrationUidTypedDict
        The migration return dict used for tracking finished migrations.
        ``False`` on any failure path (despite the annotated return type).
    pr_json: dict
        The PR json object for recreating the PR as needed. ``False`` when
        no PR was issued or the push failed validation.
    """
    # get the repo
    # TODO: stop doing this.
    migrator.attrs = feedstock_ctx.attrs  # type: ignore

    # unique work branch per invocation so repeated runs never collide
    branch_name = migrator.remote_branch(feedstock_ctx) + "_h" + uuid4().hex[0:6]

    # TODO: run this in parallel
    feedstock_dir, repo = get_repo(
        ctx=migrator.ctx.session,
        fctx=feedstock_ctx,
        branch=branch_name,
        feedstock=feedstock_ctx.feedstock_name,
        protocol=protocol,
        pull_request=pull_request,
        fork=fork,
    )

    recipe_dir = os.path.join(feedstock_dir, "recipe")

    # migrate the feedstock
    migrator.run_pre_piggyback_migrations(recipe_dir, feedstock_ctx.attrs, **kwargs)

    # TODO - make a commit here if the repo changed

    migrate_return = migrator.migrate(recipe_dir, feedstock_ctx.attrs, **kwargs)

    if not migrate_return:
        logger.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        # remove the local clone before bailing
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO - commit main migration here

    migrator.run_post_piggyback_migrations(recipe_dir, feedstock_ctx.attrs, **kwargs)

    # TODO commit post migration here

    # rerender, maybe
    diffed_files: typing.List[str] = []
    # RAISE_SUBPROC_ERROR=False: subprocess failures inside this block are
    # handled locally instead of raising from eval_cmd
    with indir(feedstock_dir), env.swap(RAISE_SUBPROC_ERROR=False):
        msg = migrator.commit_message(feedstock_ctx)  # noqa
        eval_cmd("git add --all .")
        # NOTE(review): msg is interpolated into a single-quoted shell string;
        # a commit message containing "'" would break this command — confirm
        # migrator.commit_message never produces one
        eval_cmd(f"git commit -am '{msg}'")
        if rerender:
            head_ref = eval_cmd("git rev-parse HEAD")  # noqa
            logger.info("Rerendering the feedstock")

            # In the event we can't rerender, try to update the pinnings,
            # then bail if it does not work again
            try:
                eval_cmd(
                    "conda smithy rerender -c auto --no-check-uptodate",
                    timeout=300,
                )
            except SubprocessError:
                # NOTE(review): unlike the other early returns, this failure
                # path does NOT remove feedstock_dir — confirm intentional
                return False, False

            # If we tried to run the MigrationYaml and rerender did nothing (we only
            # bumped the build number and dropped a yaml file in migrations) bail
            # for instance platform specific migrations
            gdiff = eval_cmd(f"git diff --name-only {head_ref}...HEAD")

            diffed_files = [
                _
                for _ in gdiff.split()
                if not (
                    _.startswith("recipe")
                    or _.startswith("migrators")
                    or _.startswith("README")
                )
            ]

    # bail (and clean up the clone) when solvability checking is requested
    # and the migrated recipe does not solve
    if (
        migrator.check_solvable
        or feedstock_ctx.attrs["conda-forge.yml"]
        .get("bot", {})
        .get("check_solvable", False)
    ) and not is_recipe_solvable(feedstock_dir):
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO: Better annotation here
    pr_json: typing.Union[MutableMapping, None, bool]

    if (
        isinstance(migrator, MigrationYaml)
        and not diffed_files
        and feedstock_ctx.attrs["name"] != "conda-forge-pinning"
    ):
        # spoof this so it looks like the package is done
        pr_json = {
            "state": "closed",
            "merged_at": "never issued",
            "id": str(uuid4()),
        }
    else:
        # push up
        try:
            pr_json = push_repo(
                session_ctx=migrator.ctx.session,
                fctx=feedstock_ctx,
                feedstock_dir=feedstock_dir,
                body=migrator.pr_body(feedstock_ctx),
                repo=repo,
                title=migrator.pr_title(feedstock_ctx),
                head=f"{migrator.ctx.github_username}:{branch_name}",
                branch=branch_name,
            )

        # This shouldn't happen too often any more since we won't double PR
        except github3.GitHubError as e:
            if e.msg != "Validation Failed":
                raise
            else:
                print(f"Error during push {e}")
                # If we just push to the existing PR then do nothing to the json
                pr_json = False

    # default when no PR json was produced (spoof path always produces one)
    ljpr = False
    if pr_json:
        # persist the PR payload so the bot can track/recreate this PR later
        ljpr = LazyJson(
            os.path.join(migrator.ctx.session.prjson_dir, str(pr_json["id"]) + ".json"),
        )
        ljpr.update(**pr_json)

        # from .dynamo_models import PRJson
        # PRJson.dump(pr_json)

    # If we've gotten this far then the node is good
    feedstock_ctx.attrs["bad"] = False
    logger.info("Removing feedstock dir")
    eval_cmd(f"rm -rf {feedstock_dir}")
    return migrate_return, ljpr
def run(
    feedstock_ctx: FeedstockContext,
    migrator: Migrator,
    protocol: str = "ssh",
    pull_request: bool = True,
    rerender: bool = True,
    fork: bool = True,
    base_branch: str = "master",
    **kwargs: typing.Any,
) -> Tuple["MigrationUidTypedDict", dict]:
    """For a given feedstock and migration run the migration

    .. note::
        NOTE(review): this is the second definition of ``run`` in this
        module; it shadows the earlier one at import time.

    Parameters
    ----------
    feedstock_ctx: FeedstockContext
        The node attributes
    migrator: Migrator instance
        The migrator to run on the feedstock
    protocol : str, optional
        The git protocol to use, defaults to ``ssh``
    pull_request : bool, optional
        If true issue pull request, defaults to true
    rerender : bool
        Whether to rerender
    fork : bool
        If true create a fork, defaults to true
    base_branch : str, optional
        The base branch to which the PR will be targeted. Defaults to "master".
    kwargs: dict
        The key word arguments to pass to the migrator

    Returns
    -------
    migrate_return: MigrationUidTypedDict
        The migration return dict used for tracking finished migrations.
        ``False`` on any failure path (despite the annotated return type).
    pr_json: dict
        The PR json object for recreating the PR as needed. ``False`` when
        no PR was issued or the push failed validation.
    """
    # get the repo
    # TODO: stop doing this.
    migrator.attrs = feedstock_ctx.attrs  # type: ignore

    # unique work branch per invocation so repeated runs never collide
    branch_name = migrator.remote_branch(feedstock_ctx) + "_h" + uuid4().hex[0:6]

    # normalized key under which this migrator's status is recorded in the
    # node attrs (see pre_pr_migrator_status below)
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    # TODO: run this in parallel
    feedstock_dir, repo = get_repo(
        ctx=migrator.ctx.session,
        fctx=feedstock_ctx,
        branch=branch_name,
        feedstock=feedstock_ctx.feedstock_name,
        protocol=protocol,
        pull_request=pull_request,
        fork=fork,
        base_branch=base_branch,
    )
    # get_repo signals failure by returning falsy values here
    if not feedstock_dir or not repo:
        LOGGER.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        return False, False

    recipe_dir = os.path.join(feedstock_dir, "recipe")

    # migrate the feedstock
    migrator.run_pre_piggyback_migrations(recipe_dir, feedstock_ctx.attrs, **kwargs)

    # TODO - make a commit here if the repo changed

    migrate_return = migrator.migrate(recipe_dir, feedstock_ctx.attrs, **kwargs)

    if not migrate_return:
        LOGGER.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        # remove the local clone before bailing
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO - commit main migration here

    migrator.run_post_piggyback_migrations(recipe_dir, feedstock_ctx.attrs, **kwargs)

    # TODO commit post migration here

    # rerender, maybe
    diffed_files: typing.List[str] = []
    # RAISE_SUBPROC_ERROR=False: subprocess failures inside this block are
    # handled locally instead of raising from eval_cmd
    with indir(feedstock_dir), env.swap(RAISE_SUBPROC_ERROR=False):
        msg = migrator.commit_message(feedstock_ctx)  # noqa
        try:
            eval_cmd("git add --all .")
            # NOTE(review): msg is interpolated into a single-quoted shell
            # string; a commit message containing "'" would break this
            # command — confirm migrator.commit_message never produces one
            eval_cmd(f"git commit -am '{msg}'")
        except CalledProcessError as e:
            # best-effort: a failed commit usually just means no changes
            LOGGER.info(
                "could not commit to feedstock - "
                "likely no changes - error is '%s'" % (repr(e)),
            )
        if rerender:
            head_ref = eval_cmd("git rev-parse HEAD").strip()
            LOGGER.info("Rerendering the feedstock")

            try:
                eval_cmd(
                    "conda smithy rerender -c auto --no-check-uptodate",
                    timeout=300,
                )
                make_rerender_comment = False
            except Exception as e:
                # I am trying this bit of code to force these errors
                # to be surfaced in the logs at the right time.
                print(f"RERENDER ERROR: {e}", flush=True)
                if not isinstance(migrator, Version):
                    raise
                else:
                    # for check solvable or automerge, we always raise rerender errors
                    if feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get(
                        "check_solvable", False,
                    ) or (feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get(
                        "automerge", False,
                    )):
                        raise
                    else:
                        # version PR without solvability/automerge: push the
                        # update anyway and request a rerender via PR comment
                        # (see comment_on_pr below)
                        make_rerender_comment = True

            # If we tried to run the MigrationYaml and rerender did nothing (we only
            # bumped the build number and dropped a yaml file in migrations) bail
            # for instance platform specific migrations
            gdiff = eval_cmd(f"git diff --name-only {head_ref.strip()}...HEAD")

            diffed_files = [
                _
                for _ in gdiff.split()
                if not (
                    _.startswith("recipe")
                    or _.startswith("migrators")
                    or _.startswith("README")
                )
            ]
        else:
            make_rerender_comment = False

    # run the solvability check only for non-pinning feedstocks targeting
    # master, when the migrator (or the feedstock's bot config) asks for it;
    # cycles and top-level nodes are always let through
    if (
        feedstock_ctx.feedstock_name != "conda-forge-pinning"
        and base_branch == "master"
        and (
            (
                migrator.check_solvable
                # we always let stuff in cycles go
                and feedstock_ctx.attrs["name"]
                not in getattr(migrator, "cycles", set())
                # we always let stuff at the top go
                and feedstock_ctx.attrs["name"]
                not in getattr(migrator, "top_level", set())
                # for solveability always assume automerge is on.
                and (
                    feedstock_ctx.attrs["conda-forge.yml"]
                    .get("bot", {})
                    .get("automerge", True)
                )
            )
            or feedstock_ctx.attrs["conda-forge.yml"]
            .get("bot", {})
            .get(
                "check_solvable", False,
            )
        )
    ):
        solvable, errors, _ = is_recipe_solvable(
            feedstock_dir,
            build_platform=feedstock_ctx.attrs["conda-forge.yml"].get(
                "build_platform", None,
            ),
        )
        if not solvable:
            # human-readable error string linking back to the CI job
            _solver_err_str = "not solvable ({}): {}: {}".format(
                (
                    '<a href="'
                    + os.getenv("CIRCLE_BUILD_URL", "")
                    + '">bot CI job</a>'
                ),
                base_branch,
                sorted(set(errors)),
            )

            if isinstance(migrator, Version):
                # record the solver failure against the attempted version so
                # it surfaces in the version-migration error reporting
                _new_ver = feedstock_ctx.attrs["new_version"]
                if _new_ver in feedstock_ctx.attrs["new_version_errors"]:
                    feedstock_ctx.attrs["new_version_errors"][
                        _new_ver
                    ] += "\n\nsolver error - {}".format(
                        _solver_err_str,
                    )
                else:
                    feedstock_ctx.attrs["new_version_errors"][
                        _new_ver
                    ] = _solver_err_str
                feedstock_ctx.attrs["new_version_errors"][
                    _new_ver
                ] = sanitize_string(
                    feedstock_ctx.attrs["new_version_errors"][_new_ver],
                )
                # remove part of a try for solver errors to make those slightly
                # higher priority
                feedstock_ctx.attrs["new_version_attempts"][_new_ver] -= 0.8

            pre_key = "pre_pr_migrator_status"
            if pre_key not in feedstock_ctx.attrs:
                feedstock_ctx.attrs[pre_key] = {}
            feedstock_ctx.attrs[pre_key][migrator_name] = sanitize_string(
                _solver_err_str,
            )
            eval_cmd(f"rm -rf {feedstock_dir}")
            return False, False

    # TODO: Better annotation here
    pr_json: typing.Union[MutableMapping, None, bool]
    if (
        isinstance(migrator, MigrationYaml)
        and not diffed_files
        and feedstock_ctx.attrs["name"] != "conda-forge-pinning"
    ):
        # spoof this so it looks like the package is done
        pr_json = {
            "state": "closed",
            "merged_at": "never issued",
            "id": str(uuid4()),
        }
    else:
        # push up
        try:
            # TODO: remove this hack, but for now this is the only way to get
            # the feedstock dir into pr_body
            feedstock_ctx.feedstock_dir = feedstock_dir
            pr_json = push_repo(
                session_ctx=migrator.ctx.session,
                fctx=feedstock_ctx,
                feedstock_dir=feedstock_dir,
                body=migrator.pr_body(feedstock_ctx),
                repo=repo,
                title=migrator.pr_title(feedstock_ctx),
                head=f"{migrator.ctx.github_username}:{branch_name}",
                branch=branch_name,
                base_branch=base_branch,
            )

        # This shouldn't happen too often any more since we won't double PR
        except github3.GitHubError as e:
            if e.msg != "Validation Failed":
                raise
            else:
                print(f"Error during push {e}")
                # If we just push to the existing PR then do nothing to the json
                pr_json = False
                ljpr = False

    # the rerender failed above but the version update was pushed anyway:
    # ask the admin bot to rerender via a PR comment
    if pr_json and pr_json["state"] != "closed" and make_rerender_comment:
        comment_on_pr(
            pr_json,
            """\
Hi! This feedstock was not able to be rerendered after the version update changes. I
have pushed the version update changes anyways and am trying to rerender again with this
comment. Hopefully you all can fix this!

@conda-forge-admin rerender""",
            repo,
        )

    if pr_json:
        # persist the PR payload so the bot can track/recreate this PR later
        ljpr = LazyJson(
            os.path.join(migrator.ctx.session.prjson_dir, str(pr_json["id"]) + ".json"),
        )
        ljpr.update(**pr_json)

        # from .dynamo_models import PRJson
        # PRJson.dump(pr_json)
    else:
        ljpr = False

    # If we've gotten this far then the node is good
    feedstock_ctx.attrs["bad"] = False
    LOGGER.info("Removing feedstock dir")
    eval_cmd(f"rm -rf {feedstock_dir}")
    return migrate_return, ljpr