Code example #1
File: git_utils.py Project: viniciusdc/cf-scripts
def close_out_labels(
    ctx: GithubContext,
    pr_json: LazyJson,
    gh: Optional[github3.GitHub] = None,
    dry_run: bool = False,
) -> Optional[dict]:
    gh = ensure_gh(ctx, gh)
    # run this twice so we always have the latest info (e.g., a thing was already closed)
    if pr_json["state"] != "closed" and "bot-rerun" in [
            l["name"] for l in pr_json.get("labels", [])
    ]:
        # update
        if dry_run:
            print("dry run: checking pr %s" % pr_json["id"])
        else:
            pr_obj = github3.pulls.PullRequest(dict(pr_json), gh)
            pr_obj.refresh(True)
            pr_json = pr_obj.as_dict()

    if pr_json["state"] != "closed" and "bot-rerun" in [
            l["name"] for l in pr_json.get("labels", [])
    ]:
        if dry_run:
            print("dry run: comment and close pr %s" % pr_json["id"])
            # in a dry run there is no pr_obj to refresh; hand back the
            # current JSON so callers still get a dict
            return dict(pr_json)
        pr_obj.create_comment(
            "Due to the `bot-rerun` label I'm closing "
            "this PR. I will make another one as"
            " appropriate. This was generated by {}".format(
                ctx.circle_build_url),
        )
        pr_obj.close()
        delete_branch(ctx=ctx, pr_json=pr_json, dry_run=dry_run)
        pr_obj.refresh(True)
        return pr_obj.as_dict()
    return None
Code example #2
def get_attrs(name, organization):
    '''
    Generates node attributes for feedstocks from their recipe files

    Parameters
    ----------
    name: str
        Feedstock repo name to fetch recipe files from
    organization: str
        Name of GitHub organization containing feedstock repos.

    Returns
    -------
    lzj: LazyJson
        Dictionary containing feedstock attributes with ability to dump
        to a JSON file
    '''
    # These fetches could be done via async/multiprocessing
    from conda_forge_tick.make_graph import populate_feedstock_attributes
    from conda_forge_tick.utils import LazyJson
    meta_yaml = _fetch_file(organization, name, "recipe/meta.yaml")
    conda_forge_yaml = _fetch_file(organization, name, "conda-forge.yml")

    lzj = LazyJson(f"node_attrs/{name}.json")
    with lzj as sub_graph:
        populate_feedstock_attributes(name,
                                      sub_graph,
                                      meta_yaml=meta_yaml,
                                      conda_forge_yaml=conda_forge_yaml)
    return lzj
Code example #3
def test_lazy_json(tmpdir):
    f = os.path.join(tmpdir, "hi.json")
    assert not os.path.exists(f)
    lj = LazyJson(f)
    assert os.path.exists(lj.file_name)
    with open(f, "r") as ff:
        assert ff.read() == json.dumps({})
    lj["hi"] = "world"
    assert lj["hi"] == "world"
    assert os.path.exists(lj.file_name)
    with open(f, "r") as ff:
        assert ff.read() == dumps({"hi": "world"})
    lj.update({"hi": "globe"})
    with open(f, "r") as ff:
        assert ff.read() == dumps({"hi": "globe"})
    p = pickle.dumps(lj)
    lj2 = pickle.loads(p)
    assert not getattr(lj2, "data", None)
    assert lj2["hi"] == "globe"
Code example #4
def test_latest_version_npm(
    name,
    inp,
    curr_ver,
    ver,
    source,
    urls,
    requests_mock,
    tmpdir,
):
    pmy = LazyJson(tmpdir.join("cf-scripts-test.json"))
    pmy.update(parse_meta_yaml(inp)["source"])
    pmy.update(
        {
            "feedstock_name": name,
            "version": curr_ver,
            "raw_meta_yaml": inp,
            "meta_yaml": parse_meta_yaml(inp),
        },
    )
    for url, text in urls.items():
        requests_mock.get(url, text=text)
    attempt = get_latest_version(name, pmy, [source])
    if ver is None:
        assert attempt["new_version"] is not False
        assert attempt["new_version"] != curr_ver
        assert VersionOrder(attempt["new_version"]) > VersionOrder(curr_ver)
    elif ver is False:
        assert attempt["new_version"] is ver
    else:
        assert ver == attempt["new_version"]
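The name, inp, curr_ver, ver, source, and urls arguments come from pytest.mark.parametrize decorators that this listing omits. A minimal sketch of the same table-driven wiring, with made-up values standing in for the real cf-scripts fixtures:

import pytest

# hypothetical rows; the real tests build these from recipe fixtures and
# an NPM version source
@pytest.mark.parametrize(
    "name,curr_ver,ver",
    [
        ("mypkg", "1.0.0", "1.1.0"),   # an update is available
        ("otherpkg", "2.0.0", False),  # no update found
    ],
)
def test_version_table(name, curr_ver, ver):
    # stand-in for get_latest_version(name, pmy, [source])
    fake_results = {"mypkg": "1.1.0", "otherpkg": False}
    assert fake_results[name] == ver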
Code example #5
def close_out_dirty_prs(
    ctx: GithubContext,
    pr_json: LazyJson,
    gh: Optional[github3.GitHub] = None,
    dry_run: bool = False,
) -> Optional[dict]:
    gh = ensure_gh(ctx, gh)
    # run this twice so we always have the latest info (e.g., a thing was already closed)
    if pr_json["state"] != "closed" and pr_json["mergeable_state"] == "dirty":
        # update
        if dry_run:
            print("dry run: checking pr %s" % pr_json["id"])
        else:
            pr_json = lazy_update_pr_json(pr_json, ctx)

    if (pr_json["state"] != "closed" and pr_json["mergeable_state"] == "dirty"
            and not pr_json.get("draft", False)):
        d = dict(pr_json)

        if dry_run:
            print("dry run: comment and close pr %s" % pr_json["id"])
        else:
            pr_obj = get_pr_obj_from_pr_json(pr_json, gh)

            if all(c.as_dict()["commit"]["author"]["name"] in CF_BOT_NAMES
                   for c in pr_obj.commits()):
                pr_obj.create_comment(
                    "I see that this PR has conflicts, and I'm the only committer. "
                    "I'm going to close this PR and will make another one as"
                    " appropriate. This was generated by {}".format(
                        ctx.circle_build_url, ), )
                pr_obj.close()

                delete_branch(ctx=ctx, pr_json=pr_json, dry_run=dry_run)

                pr_json = lazy_update_pr_json(pr_json, ctx)
                d = dict(pr_json)

                # This will cause _update_nodes_with_bot_rerun to trigger
                # properly and shouldn't be overridden, since this is the
                # last function to run. The long-term solution is to add
                # the bot to conda-forge so it has label-adding permissions
                # and can simply add the label itself.
                d["labels"].append(DUMMY_BOT_RERUN_METADATA)

        return d

    return None
Code example #6
File: test_utils.py Project: ryanvolz/cf-scripts
def test_lazy_json(tmpdir):
    f = os.path.join(tmpdir, "hi.json")
    assert not os.path.exists(f)
    lj = LazyJson(f)
    assert os.path.exists(lj.file_name)
    with open(f) as ff:
        assert ff.read() == json.dumps({})
    lj["hi"] = "world"
    assert lj["hi"] == "world"
    assert os.path.exists(lj.file_name)
    with open(f) as ff:
        assert ff.read() == dumps({"hi": "world"})
    lj.update({"hi": "globe"})
    with open(f) as ff:
        assert ff.read() == dumps({"hi": "globe"})
    p = pickle.dumps(lj)
    lj2 = pickle.loads(p)
    assert not getattr(lj2, "_data", None)
    assert lj2["hi"] == "globe"

    with lj as attrs:
        attrs.setdefault("lst", []).append("universe")
    with open(f) as ff:
        assert ff.read() == dumps({"hi": "globe", "lst": ["universe"]})

    with lj as attrs:
        attrs.setdefault("lst", []).append("universe")
        with lj as attrs_again:
            attrs_again.setdefault("lst", []).append("universe")
            attrs.setdefault("lst", []).append("universe")
    with open(f) as ff:
        assert ff.read() == dumps({"hi": "globe", "lst": ["universe"] * 4})

    with lj as attrs:
        with lj as attrs_again:
            attrs_again.setdefault("lst2", []).append("universe")
            attrs.setdefault("lst2", []).append("universe")
    with open(f) as ff:
        assert ff.read() == dumps(
            {
                "hi": "globe",
                "lst": ["universe"] * 4,
                "lst2": ["universe"] * 2
            }, )
    lj.clear()
    with open(f) as ff:
        assert ff.read() == dumps({})
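Taken together, these tests pin down the LazyJson contract: the backing file is created eagerly with {}, every mutation writes through to disk, pickling drops the cached data, and the context manager flushes on exit (nested blocks share one dict). A minimal sketch of a class satisfying that contract, assuming plain json formatting rather than the project's custom dumps and skipping the full MutableMapping interface:

import json
import os


class LazyJsonSketch:
    """A lazily loaded, write-through JSON mapping (illustrative only)."""

    def __init__(self, file_name):
        self.file_name = file_name
        self._data = None
        # the backing file is created eagerly, as the tests assert
        if not os.path.exists(self.file_name):
            with open(self.file_name, "w") as f:
                json.dump({}, f)

    def _load(self):
        if self._data is None:
            with open(self.file_name) as f:
                self._data = json.load(f)

    def _dump(self):
        with open(self.file_name, "w") as f:
            json.dump(self._data, f)

    def __getitem__(self, key):
        self._load()
        return self._data[key]

    def __setitem__(self, key, value):
        self._load()
        self._data[key] = value
        self._dump()

    def update(self, *args, **kwargs):
        self._load()
        self._data.update(*args, **kwargs)
        self._dump()

    def clear(self):
        self._data = {}
        self._dump()

    def __enter__(self):
        # hand out the underlying dict; nested `with` blocks share it
        self._load()
        return self._data

    def __exit__(self, exc_type, exc, tb):
        self._dump()

    def __getstate__(self):
        # pickling drops the cache so unpickled copies re-read the file
        state = self.__dict__.copy()
        state["_data"] = None
        return state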
Code example #7
def refresh_pr(
    ctx: GithubContext,
    pr_json: LazyJson,
    gh: Optional[github3.GitHub] = None,
    dry_run: bool = False,
) -> Optional[dict]:
    if not pr_json["state"] == "closed":
        if dry_run:
            print("dry run: refresh pr %s" % pr_json["id"])
            pr_dict = dict(pr_json)
        else:
            pr_json = lazy_update_pr_json(copy.deepcopy(pr_json), ctx)

            # if the PR is now closed and was merged, delete the branch
            if pr_json["state"] == "closed" and pr_json.get(
                    "merged_at", False):
                delete_branch(ctx=ctx, pr_json=pr_json, dry_run=dry_run)
            pr_dict = dict(pr_json)

        return pr_dict

    return None
Code example #8
def test_latest_version_rawurl(name, inp, curr_ver, ver, source, urls, tmpdir):
    pmy = LazyJson(tmpdir.join("cf-scripts-test.json"))
    pmy.update(parse_meta_yaml(inp)["source"])
    pmy.update(
        {
            "feedstock_name": name,
            "version": curr_ver,
            "raw_meta_yaml": inp,
            "meta_yaml": parse_meta_yaml(inp),
        }, )
    attempt = get_latest_version(name, pmy, [source])
    if ver is None:
        assert attempt["new_version"] is not False
        assert attempt["new_version"] != curr_ver
        assert VersionOrder(attempt["new_version"]) > VersionOrder(curr_ver)
    elif ver is False:
        assert attempt["new_version"] is ver
    else:
        assert ver == attempt["new_version"]
Code example #9
def test_latest_version(inp, ver, source, urls, requests_mock, tmpdir):
    pmy = LazyJson(tmpdir.join("cf-scripts-test.json"))
    pmy.update(parse_meta_yaml(inp)["source"])
    for url, text in urls.items():
        requests_mock.get(url, text=text)
    attempt = get_latest_version("configurable-http-proxy", pmy, [source])
    assert ver == attempt["new_version"]
Code example #10
File: migrate_to_lzjson.py Project: tadeu/cf-scripts
import networkx as nx

from conda_forge_tick.utils import LazyJson

# note: nx.read_gpickle/write_gpickle only exist in networkx < 3.0
gx = nx.read_gpickle('graph.pkl')
for k in gx.nodes:
    lzj = LazyJson(f'node_attrs/{k}.json')
    lzj.update(**gx.nodes[k])
    # a NodeView does not support item assignment (gx.nodes[k] = lzj would
    # raise TypeError), so replace the attribute dict's contents in place,
    # storing the LazyJson under the "payload" key read elsewhere
    gx.nodes[k].clear()
    gx.nodes[k]["payload"] = lzj
nx.write_gpickle(gx, 'graph.pkl')
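A quick sanity check after the migration, assuming the payload layout written above (the attribute key is illustrative):

import networkx as nx

gx = nx.read_gpickle('graph.pkl')  # networkx < 3.0
name = next(iter(gx.nodes))
# the LazyJson loads node_attrs/<name>.json only on first access
print(gx.nodes[name]["payload"]["feedstock_name"])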
Code example #11
def test_latest_version(inp, ver, source, urls, requests_mock, tmpdir):
    pmy = LazyJson(tmpdir.join("cf-scripts-test.json"))
    pmy.update(parse_meta_yaml(inp)["source"])
    for url, text in urls.items():
        requests_mock.get(url, text=text)
    assert ver == get_latest_version(pmy, [source])
Code example #12
File: auto_tick.py Project: ryanvolz/cf-scripts
def run(
    feedstock_ctx: FeedstockContext,
    migrator: Migrator,
    protocol: str = "ssh",
    pull_request: bool = True,
    rerender: bool = True,
    fork: bool = True,
    base_branch: str = "master",
    **kwargs: typing.Any,
) -> Tuple["MigrationUidTypedDict", dict]:
    """For a given feedstock and migration run the migration

    Parameters
    ----------
    feedstock_ctx: FeedstockContext
        The node attributes
    migrator: Migrator instance
        The migrator to run on the feedstock
    protocol : str, optional
        The git protocol to use, defaults to ``ssh``
    pull_request : bool, optional
        If true issue pull request, defaults to true
    rerender : bool
        Whether to rerender
    fork : bool
        If true create a fork, defaults to true
    base_branch : str, optional
        The base branch to which the PR will be targeted. Defaults to "master".
    kwargs: dict
        The key word arguments to pass to the migrator

    Returns
    -------
    migrate_return: MigrationUidTypedDict
        The migration return dict used for tracking finished migrations
    pr_json: dict
        The PR json object for recreating the PR as needed
    """
    # get the repo
    # TODO: stop doing this.
    migrator.attrs = feedstock_ctx.attrs  # type: ignore

    branch_name = migrator.remote_branch(
        feedstock_ctx) + "_h" + uuid4().hex[0:6]

    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    # TODO: run this in parallel
    feedstock_dir, repo = get_repo(
        ctx=migrator.ctx.session,
        fctx=feedstock_ctx,
        branch=branch_name,
        feedstock=feedstock_ctx.feedstock_name,
        protocol=protocol,
        pull_request=pull_request,
        fork=fork,
        base_branch=base_branch,
    )
    if not feedstock_dir or not repo:
        LOGGER.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        return False, False

    recipe_dir = os.path.join(feedstock_dir, "recipe")

    # migrate the feedstock
    migrator.run_pre_piggyback_migrations(recipe_dir, feedstock_ctx.attrs,
                                          **kwargs)

    # TODO - make a commit here if the repo changed

    migrate_return = migrator.migrate(recipe_dir, feedstock_ctx.attrs,
                                      **kwargs)

    if not migrate_return:
        LOGGER.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO - commit main migration here

    migrator.run_post_piggyback_migrations(recipe_dir, feedstock_ctx.attrs,
                                           **kwargs)

    # TODO commit post migration here

    # rerender, maybe
    diffed_files: typing.List[str] = []
    with indir(feedstock_dir), env.swap(RAISE_SUBPROC_ERROR=False):
        msg = migrator.commit_message(feedstock_ctx)  # noqa
        try:
            eval_cmd("git add --all .")
            eval_cmd(f"git commit -am '{msg}'")
        except CalledProcessError as e:
            LOGGER.info(
                "could not commit to feedstock - "
                "likely no changes - error is '%s'" % (repr(e)), )
        if rerender:
            head_ref = eval_cmd("git rev-parse HEAD").strip()
            LOGGER.info("Rerendering the feedstock")

            try:
                eval_cmd(
                    "conda smithy rerender -c auto --no-check-uptodate",
                    timeout=300,
                )
                make_rerender_comment = False
            except Exception as e:
                # I am trying this bit of code to force these errors
                # to be surfaced in the logs at the right time.
                print(f"RERENDER ERROR: {e}", flush=True)
                if not isinstance(migrator, Version):
                    raise
                bot_cfg = feedstock_ctx.attrs["conda-forge.yml"].get("bot", {})
                # for check_solvable or automerge, we always raise rerender errors
                if bot_cfg.get("check_solvable", False) or bot_cfg.get(
                        "automerge", False):
                    raise
                make_rerender_comment = True

            # If we tried to run the MigrationYaml and rerender did nothing (we only
            # bumped the build number and dropped a yaml file in migrations) bail
            # for instance platform specific migrations
            gdiff = eval_cmd(f"git diff --name-only {head_ref.strip()}...HEAD")

            diffed_files = [
                _ for _ in gdiff.split()
                if not (_.startswith("recipe") or _.startswith("migrators")
                        or _.startswith("README"))
            ]
        else:
            make_rerender_comment = False

    # decide whether to check that the recipe is solvable
    should_check_solvable = (
        migrator.check_solvable
        # we always let stuff in cycles go
        and feedstock_ctx.attrs["name"] not in getattr(migrator, "cycles", set())
        # we always let stuff at the top go
        and feedstock_ctx.attrs["name"] not in getattr(migrator, "top_level", set())
        # for solvability, always assume automerge is on
        and feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get("automerge", True)
    ) or feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get(
        "check_solvable", False)
    if (feedstock_ctx.feedstock_name != "conda-forge-pinning"
            and base_branch == "master" and should_check_solvable):
        solvable, errors, _ = is_recipe_solvable(
            feedstock_dir,
            build_platform=feedstock_ctx.attrs["conda-forge.yml"].get(
                "build_platform",
                None,
            ),
        )
        if not solvable:
            _solver_err_str = "not solvable ({}): {}: {}".format(
                ('<a href="' + os.getenv("CIRCLE_BUILD_URL", "") +
                 '">bot CI job</a>'),
                base_branch,
                sorted(set(errors)),
            )

            if isinstance(migrator, Version):
                _new_ver = feedstock_ctx.attrs["new_version"]
                if _new_ver in feedstock_ctx.attrs["new_version_errors"]:
                    feedstock_ctx.attrs["new_version_errors"][
                        _new_ver] += "\n\nsolver error - {}".format(
                            _solver_err_str, )
                else:
                    feedstock_ctx.attrs["new_version_errors"][
                        _new_ver] = _solver_err_str
                feedstock_ctx.attrs["new_version_errors"][
                    _new_ver] = sanitize_string(
                        feedstock_ctx.attrs["new_version_errors"][_new_ver], )
                # give back part of an attempt for solver errors so they
                # are retried with slightly higher priority
                feedstock_ctx.attrs["new_version_attempts"][_new_ver] -= 0.8

            pre_key = "pre_pr_migrator_status"
            if pre_key not in feedstock_ctx.attrs:
                feedstock_ctx.attrs[pre_key] = {}
            feedstock_ctx.attrs[pre_key][migrator_name] = sanitize_string(
                _solver_err_str, )
            eval_cmd(f"rm -rf {feedstock_dir}")
            return False, False

    # TODO: Better annotation here
    pr_json: typing.Union[MutableMapping, None, bool]
    if (isinstance(migrator, MigrationYaml) and not diffed_files
            and feedstock_ctx.attrs["name"] != "conda-forge-pinning"):
        # spoof this so it looks like the package is done
        pr_json = {
            "state": "closed",
            "merged_at": "never issued",
            "id": str(uuid4()),
        }
    else:
        # push up
        try:
            # TODO: remove this hack, but for now this is the only way to get
            # the feedstock dir into pr_body
            feedstock_ctx.feedstock_dir = feedstock_dir
            pr_json = push_repo(
                session_ctx=migrator.ctx.session,
                fctx=feedstock_ctx,
                feedstock_dir=feedstock_dir,
                body=migrator.pr_body(feedstock_ctx),
                repo=repo,
                title=migrator.pr_title(feedstock_ctx),
                head=f"{migrator.ctx.github_username}:{branch_name}",
                branch=branch_name,
                base_branch=base_branch,
            )

        # This shouldn't happen too often any more since we won't double PR
        except github3.GitHubError as e:
            if e.msg != "Validation Failed":
                raise
            else:
                print(f"Error during push {e}")
                # If we just push to the existing PR then do nothing to the json
                pr_json = False
                ljpr = False

    if pr_json and pr_json["state"] != "closed" and make_rerender_comment:
        comment_on_pr(
            pr_json,
            """\
Hi! This feedstock was not able to be rerendered after the version update changes. I
have pushed the version update changes anyways and am trying to rerender again with this
comment. Hopefully you all can fix this!

@conda-forge-admin rerender""",
            repo,
        )

    if pr_json:
        ljpr = LazyJson(
            os.path.join(migrator.ctx.session.prjson_dir,
                         str(pr_json["id"]) + ".json"), )
        ljpr.update(**pr_json)

        # from .dynamo_models import PRJson
        # PRJson.dump(pr_json)
    else:
        ljpr = False

    # If we've gotten this far then the node is good
    feedstock_ctx.attrs["bad"] = False
    LOGGER.info("Removing feedstock dir")
    eval_cmd(f"rm -rf {feedstock_dir}")
    return migrate_return, ljpr
Code example #13
def run(feedstock_ctx,
        migrator,
        protocol='ssh',
        pull_request=True,
        rerender=True,
        fork=False,
        organization='nsls-ii-forge',
        **kwargs):
    """
    For a given feedstock and migration, run the migration and possibly
    submit a pull request.

    Parameters
    ----------
    feedstock_ctx: FeedstockContext
        The node attributes of the feedstock
    migrator: Migrator
        The migrator to run on the feedstock
    protocol: str, optional
        The git protocol to use, defaults to ``ssh``
    pull_request: bool, optional
        If true issue pull request, defaults to true
    rerender: bool
        Whether to rerender, defaults to true
    fork: bool
        If true create a fork, defaults to false
    organization: str, optional
        GitHub organization to get repo from
    kwargs: dict
        The key word arguments to pass to the migrator

    Returns
    -------
    migrate_return: MigrationUidTypedDict
        The migration return dict used for tracking finished migrations
    pr_json: dict
        The PR json object for recreating the PR as needed
    """
    # get the repo
    migrator.attrs = feedstock_ctx.attrs

    branch_name = migrator.remote_branch(
        feedstock_ctx) + "_h" + uuid4().hex[:6]

    # TODO: run this in parallel
    feedstock_dir, repo = get_repo(
        ctx=migrator.ctx.session,
        fctx=feedstock_ctx,
        branch=branch_name,
        organization=organization,
        feedstock=feedstock_ctx.feedstock_name,
        protocol=protocol,
        pull_request=pull_request,
        fork=fork,
    )

    recipe_dir = os.path.join(feedstock_dir, "recipe")

    # migrate the feedstock
    migrator.run_pre_piggyback_migrations(recipe_dir, feedstock_ctx.attrs,
                                          **kwargs)

    # TODO - make a commit here if the repo changed

    migrate_return = migrator.migrate(recipe_dir, feedstock_ctx.attrs,
                                      **kwargs)

    if not migrate_return:
        logger.critical(
            "Failed to migrate %s, %s",
            feedstock_ctx.package_name,
            feedstock_ctx.attrs.get("bad"),
        )
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    # TODO - commit main migration here

    migrator.run_post_piggyback_migrations(recipe_dir, feedstock_ctx.attrs,
                                           **kwargs)

    # TODO commit post migration here

    # rerender, maybe
    diffed_files = []
    with indir(feedstock_dir), env.swap(RAISE_SUBPROC_ERROR=False):
        msg = migrator.commit_message(feedstock_ctx)  # noqa
        try:
            eval_cmd("git add --all .")
            eval_cmd(f"git commit -am '{msg}'")
        except CalledProcessError as e:
            logger.info(
                "could not commit to feedstock - "
                "likely no changes - error is '%s'" % (repr(e)), )
        if rerender:
            head_ref = eval_cmd("git rev-parse HEAD").strip()
            logger.info("Rerendering the feedstock")

            # In the event we can't rerender, bail
            try:
                eval_cmd(
                    "conda smithy rerender -c auto --no-check-uptodate",
                    timeout=300,
                )
            except SubprocessError:
                # clean up the working directory like the other failure paths
                eval_cmd(f"rm -rf {feedstock_dir}")
                return False, False

            # If we tried to run the MigrationYaml and rerender did nothing (we only
            # bumped the build number and dropped a yaml file in migrations) bail
            # for instance platform specific migrations
            gdiff = eval_cmd(f"git diff --name-only {head_ref.strip()}...HEAD")

            diffed_files = [
                _ for _ in gdiff.split()
                if not (_.startswith("recipe") or _.startswith("migrators")
                        or _.startswith("README"))
            ]

    if ((migrator.check_solvable
         and feedstock_ctx.attrs["conda-forge.yml"].get("bot",
                                                        {}).get("automerge"))
            or feedstock_ctx.attrs["conda-forge.yml"].get("bot", {}).get(
                "check_solvable",
                False)) and not is_recipe_solvable(feedstock_dir):
        eval_cmd(f"rm -rf {feedstock_dir}")
        return False, False

    if (isinstance(migrator, MigrationYaml) and not diffed_files
            and feedstock_ctx.attrs["name"] != "conda-forge-pinning"):
        # spoof this so it looks like the package is done
        pr_json = {
            "state": "closed",
            "merged_at": "never issued",
            "id": str(uuid4()),
        }
    else:
        # push up
        try:
            if fork:
                head = f"{migrator.ctx.github_username}:{branch_name}"
            else:
                head = f"{organization}:{branch_name}"
            pr_json = push_repo(session_ctx=migrator.ctx.session,
                                fctx=feedstock_ctx,
                                feedstock_dir=feedstock_dir,
                                body=migrator.pr_body(feedstock_ctx),
                                repo=repo,
                                title=migrator.pr_title(feedstock_ctx),
                                head=head,
                                branch=branch_name,
                                fork=fork,
                                organization=organization)

        # This shouldn't happen too often any more since we won't double PR
        except github3.GitHubError as e:
            if e.msg != "Validation Failed":
                raise
            else:
                print(f"Error during push {e}")
                print(f'Errors: {e.errors}')
                # If we just push to the existing PR then do nothing to the json
                pr_json = None
                ljpr = None
    if pr_json is not None:
        ljpr = LazyJson(
            os.path.join(migrator.ctx.session.prjson_dir,
                         str(pr_json["id"]) + ".json"), )
        ljpr.update(**pr_json)
    else:
        ljpr = None
    # If we've gotten this far then the node is good
    feedstock_ctx.attrs["bad"] = False
    logger.info("Removing feedstock dir")
    eval_cmd(f"rm -rf {feedstock_dir}")
    return migrate_return, ljpr
Code example #14
def make_graph(names, organization, gx=None):
    '''
    Creates/Updates a dependency graph based on names of packages.
    The dependency graph is used to decide which packages
    need to be upgraded before others.

    Parameters
    ----------
    names: list
        List of package names for placement into the graph.
    organization: str
        Name of GitHub organization containing feedstock repos.
    gx: nx.DiGraph, optional
        Dependency graph to be updated.

    Returns
    -------
    gx: nx.DiGraph
        New/Updated dependency graph displaying the relationships
        between packages listed in names.
    '''
    from conda_forge_tick.utils import LazyJson
    logger.info("reading graph")
    if gx is None:
        print('Creating graph from scratch...')
        gx = nx.DiGraph()
    else:
        print('Updating graph with new packages...')
    new_names = [name for name in names if name not in gx.nodes]
    old_names = [name for name in names if name in gx.nodes]
    assert gx is not None
    old_names = sorted(old_names, key=lambda n: gx.nodes[n].get("time", 0))
    total_names = new_names + old_names
    logger.info("start feedstock fetch loop")
    print('Fetching feedstock attributes...')

    builder = _build_graph_sequential if DEBUG else _build_graph_process_pool
    builder(gx, total_names, new_names, organization)
    logger.info("feedstock fetch loop completed")
    print('Finished fetching feedstock attributes')

    gx2 = deepcopy(gx)
    logger.info("inferring nodes and edges")
    print('Creating nodes and edges...')
    # make the outputs look up table so we can link properly
    outputs_lut = {
        k: node_name
        for node_name, node in gx.nodes.items()
        for k in node.get("payload", {}).get("outputs_names", [])
    }
    # add this as an attr so we can use later
    gx.graph["outputs_lut"] = outputs_lut
    strong_exports = {
        node_name
        for node_name, node in gx.nodes.items()
        if node.get("payload").get("strong_exports", False)
    }
    # This drops all the edge data and only keeps the node data
    gx = nx.create_empty_copy(gx)
    # TODO: label these edges with the kind of dep they are and their platform
    for node, node_attrs in gx2.nodes.items():
        with node_attrs["payload"] as attrs:
            # replace output package names with feedstock names via LUT
            deps = set(
                map(
                    lambda x: outputs_lut.get(x, x),
                    set().union(*attrs.get("requirements", {}).values()),
                ))

            # handle strong run exports
            overlap = deps & strong_exports
            requirements = attrs.get("requirements")
            if requirements:
                requirements["host"].update(overlap)
                requirements["run"].update(overlap)

        for dep in deps:
            if dep not in gx.nodes:
                # for packages which aren't feedstocks and aren't outputs
                # usually these are stubs
                lzj = LazyJson(f"node_attrs/{dep}.json")
                lzj.update(feedstock_name=dep, bad=False, archived=True)
                gx.add_node(dep, payload=lzj)
            gx.add_edge(dep, node)
    logger.info("new nodes and edges infered")
    print('Dependency graph complete')
    return gx
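The outputs look-up table is the piece of make_graph that lets edges point at feedstocks rather than at the package names they publish. The same pattern in isolation, with toy data:

# toy nodes: one feedstock publishes two output packages
nodes = {
    "pyqt-feedstock": {"payload": {"outputs_names": ["pyqt", "pyqt5-sip"]}},
    "sip-feedstock": {"payload": {"outputs_names": ["sip"]}},
}

# map each output package name back to the feedstock node that builds it
outputs_lut = {
    k: node_name
    for node_name, node in nodes.items()
    for k in node.get("payload", {}).get("outputs_names", [])
}

# a dependency on "pyqt5-sip" becomes an edge from "pyqt-feedstock";
# names with no matching feedstock (e.g. "numpy" here) pass through as-is
deps = {"pyqt5-sip", "numpy"}
resolved = {outputs_lut.get(d, d) for d in deps}
assert resolved == {"pyqt-feedstock", "numpy"}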