Example #1
def migrate(d: dict):
    pred = d.get('PRed', None)
    if pred:
        l = []
        for element in d['PRed']:
            l.append(frozen_to_json_friendly(element))
        d['PRed'] = l

    pred_json = d.get('PRed_json', None)
    if pred_json:
        l = []
        for element, pr in d['PRed_json'].items():
            l.append(frozen_to_json_friendly(element, pr))
        d['PRed_json'] = l
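All of these examples revolve around the record shape produced by frozen_to_json_friendly. As a quick orientation, here is a minimal, self-contained sketch of that shape together with a run of the migrate() function above; the stand-in implementation of frozen_to_json_friendly is an assumption for illustration only, not the actual conda_forge_tick.utils code, and it assumes the migrate() definition above is in scope.

# Hypothetical stand-in for conda_forge_tick.utils.frozen_to_json_friendly,
# included only so this sketch runs on its own.
def frozen_to_json_friendly(fz, pr=None):
    if fz is None:
        return None
    d = {"keys": sorted(fz), "data": dict(fz)}
    if pr is not None:
        d["PR"] = pr
    return d

# Run the migrate() function from Example #1 on a toy legacy payload.
payload = {"PRed": [{"migrator_name": "Version", "version": "1.2.3"}]}
migrate(payload)
print(payload["PRed"])
# -> [{'keys': ['migrator_name', 'version'],
#      'data': {'migrator_name': 'Version', 'version': '1.2.3'}}]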
Example #2
    def predecessors_not_yet_built(self, attrs: "AttrsTypedDict") -> bool:
        # Check if all upstreams have been built
        for node, payload in _gen_active_feedstocks_payloads(
                self.graph.predecessors(attrs["feedstock_name"]),
                self.graph,
        ):
            muid = frozen_to_json_friendly(self.migrator_uid(payload))

            if muid not in _sanitized_muids(payload.get("PRed", [])):
                LOGGER.debug("not yet built: %s" % node)
                return True

            # This handles PRed_json data lost during a bad graph deploy outage
            for m_pred_json in payload.get("PRed", []):
                if m_pred_json["data"] == muid["data"]:
                    break
            else:
                m_pred_json = None

            # note that if the bot is missing the PR we assume it is open
            # so that errors halt the migration and can be fixed
            if (
                m_pred_json
                and m_pred_json.get("PR", {"state": "open"}).get("state", "") == "open"
            ):
                LOGGER.debug("not yet built dataloss: %s" % node)
                return True

        return False
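Examples #2 above and #3/#11 below all use the same two-step check: a membership test against _sanitized_muids() and a for/else lookup of the matching PRed record. The sketch below replays that logic on synthetic data; the _sanitized_muids stand-in (dropping the "PR" payload so records compare by "keys"/"data" alone) is an assumption about what the real helper does.

# Assumed behaviour of _sanitized_muids: strip the "PR" payload so each
# record can be compared against frozen_to_json_friendly() output.
def _sanitized_muids(pred):
    return [{k: v for k, v in pr.items() if k != "PR"} for pr in pred]

muid = {"keys": ["migrator_name"], "data": {"migrator_name": "Rebuild"}}
payload = {
    "PRed": [
        {
            "keys": ["migrator_name"],
            "data": {"migrator_name": "Rebuild"},
            "PR": {"state": "closed"},
        },
    ],
}

# Same for/else lookup as predecessors_not_yet_built(): find the PRed
# record whose "data" matches the current migrator uid, else None.
for m_pred_json in payload.get("PRed", []):
    if m_pred_json["data"] == muid["data"]:
        break
else:
    m_pred_json = None

print(muid in _sanitized_muids(payload["PRed"]))  # True -> a PR was issued
print(m_pred_json["PR"]["state"])                 # 'closed' -> treated as built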
Example #3
 def predecessors_not_yet_built(self, attrs: "AttrsTypedDict") -> bool:
     # Check if all upstreams have been built
     for node in self.graph.predecessors(attrs["feedstock_name"]):
         try:
             payload = self.graph.nodes[node]["payload"]
         except KeyError as e:
             print(node)
             raise e
         muid = frozen_to_json_friendly(self.migrator_uid(payload))
         if muid not in _sanitized_muids(
             payload.get("PRed", []),
         ) and not payload.get("archived", False):
             return True
          # This handles PRed_json data lost during a bad graph deploy outage
         for m_pred_json in payload.get("PRed", []):
             if m_pred_json["data"] == muid["data"]:
                 break
         else:
             m_pred_json = None
         # note that if the bot is missing the PR we assume it is open
         # so that errors halt the migration and can be fixed
         if (
             m_pred_json
             and m_pred_json.get("PR", {"state": "open"}).get("state", "") == "open"
         ):
             return True
     return False
Example #4
        def parse_already_pred() -> bool:
            pr_data = frozen_to_json_friendly(self.migrator_uid(attrs))
            migrator_uid: "MigrationUidTypedDict" = typing.cast(
                "MigrationUidTypedDict",
                pr_data["data"],
            )
            already_migrated_uids: typing.Iterable[
                "MigrationUidTypedDict"] = list(z["data"]
                                                for z in attrs.get("PRed", []))
            already_pred = migrator_uid in already_migrated_uids
            if already_pred:
                ind = already_migrated_uids.index(migrator_uid)
                LOGGER.debug(f"{__name}: already PRed: uid: {migrator_uid}")
                if "PR" in attrs.get("PRed", [])[ind]:
                    if isinstance(attrs.get("PRed", [])[ind]["PR"], LazyJson):
                        with attrs.get("PRed", [])[ind]["PR"] as mg_attrs:

                            LOGGER.debug(
                                "%s: already PRed: PR file: %s" %
                                (__name, mg_attrs.file_name), )

                            html_url = mg_attrs.get("html_url", "no url")

                            LOGGER.debug(
                                f"{__name}: already PRed: url: {html_url}")

            return already_pred
Example #5
 def parse_already_pred() -> bool:
     migrator_uid: "MigrationUidTypedDict" = typing.cast(
         "MigrationUidTypedDict",
         frozen_to_json_friendly(self.migrator_uid(attrs))["data"],
     )
     already_migrated_uids: typing.Iterable["MigrationUidTypedDict"] = (
         z["data"] for z in attrs.get("PRed", []))
     return migrator_uid in already_migrated_uids
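Worth noting about the shorter variant above: already_migrated_uids is a generator, which is enough for a single `in` membership test but would not support the .index() call used in Example #4 (hence the list(...) there). A toy run with made-up data:

attrs = {
    "PRed": [
        {"data": {"migrator_name": "Version", "version": "2.0"}},
        {"data": {"migrator_name": "Rebuild", "name": "openssl"}},
    ],
}
migrator_uid = {"migrator_name": "Version", "version": "2.0"}
already_migrated_uids = (z["data"] for z in attrs.get("PRed", []))
print(migrator_uid in already_migrated_uids)  # True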
Example #6
    def all_predecessors_issued_and_stale(self,
                                          attrs: "AttrsTypedDict") -> bool:
        # Check if all upstreams have been issued and are stale
        for node, payload in _gen_active_feedstocks_payloads(
                self.graph.predecessors(attrs["feedstock_name"]),
                self.graph,
        ):
            if node in self.ignored_deps_per_node.get(
                    attrs.get("feedstock_name", None),
                [],
            ):
                continue

            muid = frozen_to_json_friendly(self.migrator_uid(payload))
            pr_muids = _sanitized_muids(payload.get("PRed", []))
            if muid not in pr_muids:
                LOGGER.debug(
                    "node %s PR %s not yet issued!",
                    node,
                    muid.get("data", {}).get("name", None),
                )
                # not yet issued
                return False
            else:
                # issued so check timestamp
                pr_index = pr_muids.index(muid)
                ts = payload.get("PRed", [])[pr_index].get("PR", {}).get("created_at", None)
                state = (
                    payload.get("PRed", [])[pr_index]
                    .get("PR", {"state": "open"})
                    .get("state", "")
                )
                if state == "open":
                    if ts is not None:
                        now = datetime.datetime.now(datetime.timezone.utc)
                        ts = dateutil.parser.parse(ts)
                        if now - ts < datetime.timedelta(days=14):
                            LOGGER.debug(
                                "node %s has PR %s open for %s",
                                node,
                                muid.get("data", {}).get("name", None),
                                now - ts,
                            )
                            return False
                    else:
                        # no timestamp so keep things open
                        LOGGER.debug(
                            "node %s has PR %s:%s with no timestamp",
                            node,
                            muid.get("data", {}).get("name", None),
                            payload.get("PRed", [])[pr_index]["PR"].file_name,
                        )
                        return False

        return True
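The staleness branch above reduces to a timestamp comparison. A standalone sketch of just that comparison (the timestamp is made up; the 14-day threshold mirrors the code above):

import datetime

import dateutil.parser

created_at = "2020-01-01T12:00:00Z"  # illustrative value of PR["created_at"]
ts = dateutil.parser.parse(created_at)
now = datetime.datetime.now(datetime.timezone.utc)

# An open PR younger than 14 days keeps the migration waiting.
still_fresh = now - ts < datetime.timedelta(days=14)
print(still_fresh)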
Example #7
 def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
     if super().filter(attrs):
         return True
     muid = frozen_to_json_friendly(self.migrator_uid(attrs))
     for arch in self.arches:
         configured_arch = (
             attrs.get("conda-forge.yml", {}).get("provider", {}).get(arch)
         )
         if configured_arch:
             return muid in _sanitized_muids(attrs.get("PRed", []))
     else:
         return False
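The else here belongs to the for loop, not the if: `return False` runs only when no arch in self.arches has a provider configured (including when self.arches is empty). A tiny parallel example of that for/else control flow, with made-up names:

def first_configured(arches, providers):
    for arch in arches:
        if providers.get(arch):
            return arch
    else:
        # reached only when the loop finishes without returning
        return None

print(first_configured(["linux_aarch64", "osx_arm64"], {"osx_arm64": "default"}))
print(first_configured([], {"osx_arm64": "default"}))  # None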
Example #8
def run_test_yaml_migration(m,
                            *,
                            inp,
                            output,
                            kwargs,
                            prb,
                            mr_out,
                            tmpdir,
                            should_filter=False):
    os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
    with open(os.path.join(tmpdir, "recipe", "meta.yaml"), "w") as f:
        f.write(inp)

    with indir(tmpdir):
        subprocess.run(["git", "init"])
    # Load the meta.yaml (this is done in the graph)
    try:
        pmy = parse_meta_yaml(inp)
    except Exception:
        pmy = {}
    if pmy:
        pmy["version"] = pmy["package"]["version"]
        pmy["req"] = set()
        for k in ["build", "host", "run"]:
            pmy["req"] |= set(pmy.get("requirements", {}).get(k, set()))
        try:
            pmy["meta_yaml"] = parse_meta_yaml(inp)
        except Exception:
            pmy["meta_yaml"] = {}
    pmy["raw_meta_yaml"] = inp
    pmy.update(kwargs)

    assert m.filter(pmy) is should_filter
    if should_filter:
        return

    mr = m.migrate(os.path.join(tmpdir, "recipe"), pmy)
    assert mr_out == mr

    pmy.update(PRed=[frozen_to_json_friendly(mr)])
    with open(os.path.join(tmpdir, "recipe/meta.yaml")) as f:
        actual_output = f.read()
    assert actual_output == output
    assert os.path.exists(
        os.path.join(tmpdir, ".ci_support/migrations/hi.yaml"))
    with open(os.path.join(tmpdir, ".ci_support/migrations/hi.yaml")) as f:
        saved_migration = f.read()
    assert saved_migration == m.yaml_contents
Example #9
        def parse_already_pred() -> bool:
            migrator_uid: "MigrationUidTypedDict" = typing.cast(
                "MigrationUidTypedDict",
                frozen_to_json_friendly(self.migrator_uid(attrs))["data"],
            )
            already_migrated_uids: typing.Iterable[
                "MigrationUidTypedDict"] = list(
                    copy.deepcopy(z["data"]) for z in attrs.get("PRed", []))

            # we shipped a bug, so fixing this here -
            # need to ignore name in uuid
            for uid in already_migrated_uids:
                if uid["migrator_name"] == "MatplotlibBase" and "name" in uid:
                    del uid["name"]
            del migrator_uid["name"]

            return migrator_uid in already_migrated_uids
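The MatplotlibBase workaround above just deletes the "name" key from both sides before the membership test, so UIDs written by the buggy version of the migrator still match. A synthetic replay with made-up data:

import copy

attrs = {
    "PRed": [
        {"data": {"migrator_name": "MatplotlibBase", "name": "matplotlib-base"}},
    ],
}
migrator_uid = {"migrator_name": "MatplotlibBase", "name": "matplotlib"}

already_migrated_uids = [copy.deepcopy(z["data"]) for z in attrs.get("PRed", [])]
for uid in already_migrated_uids:
    if uid["migrator_name"] == "MatplotlibBase" and "name" in uid:
        del uid["name"]
del migrator_uid["name"]

print(migrator_uid in already_migrated_uids)  # True once "name" is ignored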
Example #10
def test_migration(m, inp, output, kwargs, prb, mr_out, should_filter, tmpdir):
    mr_out.update(bot_rerun=False)
    with open(os.path.join(tmpdir, "meta.yaml"), "w") as f:
        f.write(inp)
    # Load the meta.yaml (this is done in the graph)
    try:
        pmy = parse_meta_yaml(inp)
    except Exception:
        pmy = {}
    if pmy:
        pmy["version"] = pmy["package"]["version"]
        pmy["req"] = set()
        for k in ["build", "host", "run"]:
            pmy["req"] |= set(pmy.get("requirements", {}).get(k, set()))
        try:
            pmy["meta_yaml"] = parse_meta_yaml(inp)
        except Exception:
            pmy["meta_yaml"] = {}
    pmy["raw_meta_yaml"] = inp
    pmy.update(kwargs)

    assert m.filter(pmy) is should_filter
    if should_filter:
        return

    mr = m.migrate(tmpdir, pmy)
    assert mr_out == mr

    pmy.update(PRed=[frozen_to_json_friendly(mr)])
    with open(os.path.join(tmpdir, "meta.yaml"), "r") as f:
        actual_output = f.read()
    assert actual_output == output
    if isinstance(m, Compiler):
        assert m.messages in m.pr_body()
    # TODO: fix subgraph here (need this to be xsh file)
    elif isinstance(m, Version):
        pass
    elif isinstance(m, Rebuild):
        return
    else:
        assert prb in m.pr_body()
    assert m.filter(pmy) is True
Example #11
        def _test_node_built(node):
            # check to see if node has been built
            payload = self.graph.nodes[node]["payload"]
            muid = frozen_to_json_friendly(self.migrator_uid(payload))
            if muid not in _sanitized_muids(
                payload.get("PRed", []),
            ) and not payload.get("archived", False):
                return False
            # This handles PRed_json data lost during a bad graph deploy outage
            for m_pred_json in payload.get("PRed", []):
                if m_pred_json["data"] == muid["data"]:
                    break
            else:
                m_pred_json = None
            # note that if the bot is missing the PR we assume it is open
            # so that errors halt the migration and can be fixed
            if (
                m_pred_json
                and m_pred_json.get("PR", {"state": "open"}).get("state", "") == "open"
            ):
                return False

            return True
Example #12
def main(args: "CLIArgs") -> None:
    # logging
    from .xonsh_utils import env

    debug = env.get("CONDA_FORGE_TICK_DEBUG", False)
    if debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")
    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
    )
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    for mg_ind, migrator in enumerate(MIGRATORS):

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (
                            attrs.get("new_version_attempts", {}).get(
                                attrs.get("new_version", ""), 0,
                            )
                        ),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if (
                    (
                        _now - int(env.get("START_TIME", time.time()))
                        > int(env.get("TIMEOUT", 600))
                    )
                    or good_prs >= migrator.pr_limit
                    or (_now - _mg_start) > time_per
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break
                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=migrator.rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"] for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if not pr_json:
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s", fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_cmd(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
Example #13
def _run_migrator(migrator, mctx, temp, time_per, dry_run):
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    mmctx = MigratorContext(session=mctx, migrator=migrator)
    migrator.bind_to_ctx(mmctx)

    good_prs = 0
    _mg_start = time.time()
    effective_graph = mmctx.effective_graph

    if hasattr(migrator, "name"):
        extra_name = "-%s" % migrator.name
    else:
        extra_name = ""

    print(
        "Running migrations for %s%s: %d\n"
        % (
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        ),
        flush=True,
    )

    possible_nodes = list(migrator.order(effective_graph, mctx.graph))

    # version debugging info
    if isinstance(migrator, Version):
        LOGGER.info("possible version migrations:")
        for node_name in possible_nodes:
            with effective_graph.nodes[node_name]["payload"] as attrs:
                LOGGER.info(
                    "    node|curr|new|attempts: %s|%s|%s|%f",
                    node_name,
                    attrs.get("version"),
                    attrs.get("new_version"),
                    (
                        attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )
                    ),
                )

    for node_name in possible_nodes:
        with mctx.graph.nodes[node_name]["payload"] as attrs:
            # Don't let CI timeout, break ahead of the timeout so we make certain
            # to write to the repo
            # TODO: convert these env vars
            _now = time.time()
            if (
                (
                    _now - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                )
                or good_prs >= migrator.pr_limit
                or (_now - _mg_start) > time_per
            ):
                break

            base_branches = migrator.get_possible_feedstock_branches(attrs)
            if "branch" in attrs:
                has_attrs_branch = True
                orig_branch = attrs.get("branch")
            else:
                has_attrs_branch = False
                orig_branch = None

            fctx = FeedstockContext(
                package_name=node_name,
                feedstock_name=attrs["feedstock_name"],
                attrs=attrs,
            )

            # map main to current default branch
            base_branches = [
                br if br != "main" else fctx.default_branch for br in base_branches
            ]

            try:
                for base_branch in base_branches:
                    attrs["branch"] = base_branch
                    if migrator.filter(attrs):
                        continue

                    print("\n", flush=True, end="")
                    sys.stderr.flush()
                    sys.stdout.flush()
                    LOGGER.info(
                        "%s%s IS MIGRATING %s:%s",
                        migrator.__class__.__name__.upper(),
                        extra_name,
                        fctx.package_name,
                        base_branch,
                    )
                    try:
                        # Don't bother running if we are at zero
                        if mctx.gh_api_requests_left == 0:
                            break
                        migrator_uid, pr_json = run(
                            feedstock_ctx=fctx,
                            migrator=migrator,
                            rerender=migrator.rerender,
                            protocol="https",
                            hash_type=attrs.get("hash_type", "sha256"),
                            base_branch=base_branch,
                        )
                        # if migration successful
                        if migrator_uid:
                            d = frozen_to_json_friendly(migrator_uid)
                            # if we have the PR already do nothing
                            if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                            ]:
                                pass
                            else:
                                if not pr_json:
                                    pr_json = {
                                        "state": "closed",
                                        "head": {"ref": "<this_is_not_a_branch>"},
                                    }
                                d["PR"] = pr_json
                                attrs.setdefault("PRed", []).append(d)
                            attrs.update(
                                {
                                    "smithy_version": mctx.smithy_version,
                                    "pinning_version": mctx.pinning_version,
                                },
                            )

                    except github3.GitHubError as e:
                        if e.msg == "Repository was archived so is read-only.":
                            attrs["archived"] = True
                        else:
                            LOGGER.critical(
                                "GITHUB ERROR ON FEEDSTOCK: %s",
                                fctx.feedstock_name,
                            )
                            if is_github_api_limit_reached(e, mctx.gh):
                                break
                    except URLError as e:
                        LOGGER.exception("URLError ERROR")
                        attrs["bad"] = {
                            "exception": str(e),
                            "traceback": str(traceback.format_exc()).split("\n"),
                            "code": getattr(e, "code"),
                            "url": getattr(e, "url"),
                        }

                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    except Exception as e:
                        LOGGER.exception("NON GITHUB ERROR")
                        # we don't set bad for rerendering errors
                        if (
                            "conda smithy rerender -c auto --no-check-uptodate"
                            not in str(e)
                        ):
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split(
                                    "\n",
                                ),
                            }

                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    else:
                        if migrator_uid:
                            # On successful PR add to our counter
                            good_prs += 1
            finally:
                # reset branch
                if has_attrs_branch:
                    attrs["branch"] = orig_branch

                # do this but it is crazy
                gc.collect()

                # Write graph partially through
                if not dry_run:
                    dump_graph(mctx.graph)

                eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                LOGGER.info(os.getcwd())
                for f in glob.glob("/tmp/*"):
                    if f not in temp:
                        try:
                            eval_cmd(f"rm -rf {f}")
                        except Exception:
                            pass

            if mctx.gh_api_requests_left == 0:
                break

    return good_prs
Example #14
def run_test_migration(
    m,
    inp,
    output,
    kwargs,
    prb,
    mr_out,
    should_filter=False,
    tmpdir=None,
):
    mm_ctx = MigratorSessionContext(
        graph=G,
        smithy_version="",
        pinning_version="",
        github_username="",
        github_password="",
        circle_build_url=env["CIRCLE_BUILD_URL"],
    )
    m_ctx = MigratorContext(mm_ctx, m)
    m.bind_to_ctx(m_ctx)

    if mr_out:
        mr_out.update(bot_rerun=False)
    with open(os.path.join(tmpdir, "meta.yaml"), "w") as f:
        f.write(inp)

    # read the conda-forge.yml
    if os.path.exists(os.path.join(tmpdir, "..", "conda-forge.yml")):
        with open(os.path.join(tmpdir, "..", "conda-forge.yml")) as fp:
            cf_yml = fp.read()
    else:
        cf_yml = "{}"

    # Load the meta.yaml (this is done in the graph)
    try:
        name = parse_meta_yaml(inp)["package"]["name"]
    except Exception:
        name = "blah"

    pmy = populate_feedstock_attributes(name, {}, inp, cf_yml)

    # these are here for legacy migrators
    pmy["version"] = pmy["meta_yaml"]["package"]["version"]
    pmy["req"] = set()
    for k in ["build", "host", "run"]:
        req = pmy["meta_yaml"].get("requirements", {}) or {}
        _set = req.get(k) or set()
        pmy["req"] |= set(_set)
    pmy["raw_meta_yaml"] = inp
    pmy.update(kwargs)

    assert m.filter(pmy) is should_filter
    if should_filter:
        return

    m.run_pre_piggyback_migrations(
        tmpdir,
        pmy,
        hash_type=pmy.get("hash_type", "sha256"),
    )
    mr = m.migrate(tmpdir, pmy, hash_type=pmy.get("hash_type", "sha256"))
    m.run_post_piggyback_migrations(
        tmpdir,
        pmy,
        hash_type=pmy.get("hash_type", "sha256"),
    )

    assert mr_out == mr
    if not mr:
        return

    pmy.update(PRed=[frozen_to_json_friendly(mr)])
    with open(os.path.join(tmpdir, "meta.yaml")) as f:
        actual_output = f.read()
    # strip jinja comments
    pat = re.compile(r"{#.*#}")
    actual_output = pat.sub("", actual_output)
    output = pat.sub("", output)
    assert actual_output == output
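The final comparison in run_test_migration strips jinja comments from both sides so that cosmetic {# ... #} blocks cannot fail the assertion. The same regex run on its own with a toy string:

import re

pat = re.compile(r"{#.*#}")
text = "{% set version = '1.0' %}  {# keep recipe tidy #}\npackage:\n  name: foo"
print(pat.sub("", text))
# the {# ... #} block is removed; the jinja set line is untouched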
Example #15
def auto_tick(dry_run=False,
              debug=False,
              fork=False,
              organization='nsls-ii-forge'):
    '''
    Automatically update package versions and submit pull requests to
    associated feedstocks

    Parameters
    ----------
    dry_run: bool, optional
        Generate version migration yamls but do not run them
    debug: bool, optional
        Setup logging to be in debug mode
    fork: bool, optional
        Create a fork of the repo from the organization to $GITHUB_USERNAME
    organization: str, optional
        GitHub organization that manages feedstock repositories
    '''
    from conda_forge_tick.xonsh_utils import env

    if debug:
        setup_logger(logger, level="debug")
    else:
        setup_logger(logger)

    # set Version.pr_body to custom pr_body function
    Version.pr_body = bot_pr_body

    # TODO: use ~/.netrc instead
    github_username = env.get("GITHUB_USERNAME", "")
    github_password = env.get("GITHUB_TOKEN", "")
    github_token = env.get("GITHUB_TOKEN")
    global MIGRATORS

    print('Initializing migrators...')
    mctx, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print('Computing time per migrator')
    (num_nodes, time_per_migrator,
     tot_time_per_migrator) = _compute_time_per_migrator(mctx, )
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / tot_time_per_migrator * 100,
        )

    print('Performing migrations...')
    for mg_ind, migrator in enumerate(MIGRATORS):

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time())) > int(
                        env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break
                    migrator_uid, pr_json = run(feedstock_ctx=fctx,
                                                migrator=migrator,
                                                rerender=migrator.rerender,
                                                protocol="https",
                                                hash_type=attrs.get(
                                                    "hash_type", "sha256"),
                                                fork=fork,
                                                organization=organization)
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if pr_json is None:
                                pr_json = {
                                    "state": "closed",
                                    "head": {
                                        "ref": "<this_is_not_a_branch>"
                                    },
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            }, )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())

    if not dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
Example #16
def main(args: "CLIArgs") -> None:

    # logging
    if args.debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    mctx, temp, migrators = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    (num_nodes, time_per_migrator,
     tot_time_per_migrator) = _compute_time_per_migrator(
         mctx,
         migrators,
     )
    for i, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "    %s%s: %d - gets %f seconds (%f percent)" % (
                migrator.__class__.__name__,
                extra_name,
                num_nodes[i],
                time_per_migrator[i],
                time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
            ),
            flush=True,
        )

    for mg_ind, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            assert isinstance(migrator.name, str)
            migrator_name = migrator.name.lower().replace(" ", "")
        else:
            migrator_name = migrator.__class__.__name__.lower()

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )
        print(
            "Running migrations for %s%s: %d\n" % (
                migrator.__class__.__name__,
                extra_name,
                len(effective_graph.nodes),
            ),
            flush=True,
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            LOGGER.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    LOGGER.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                base_branches = migrator.get_possible_feedstock_branches(attrs)
                orig_branch = attrs.get("branch", "master")

                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time())) > int(
                        env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                try:
                    for base_branch in base_branches:
                        attrs["branch"] = base_branch
                        if migrator.filter(attrs):
                            continue

                        print("\n", flush=True, end="")
                        LOGGER.info(
                            "%s%s IS MIGRATING %s:%s",
                            migrator.__class__.__name__.upper(),
                            extra_name,
                            fctx.package_name,
                            base_branch,
                        )
                        try:
                            # Don't bother running if we are at zero
                            if mctx.gh_api_requests_left == 0:
                                break
                            migrator_uid, pr_json = run(
                                feedstock_ctx=fctx,
                                migrator=migrator,
                                rerender=migrator.rerender,
                                protocol="https",
                                hash_type=attrs.get("hash_type", "sha256"),
                                base_branch=base_branch,
                            )
                            # if migration successful
                            if migrator_uid:
                                d = frozen_to_json_friendly(migrator_uid)
                                # if we have the PR already do nothing
                                if d["data"] in [
                                        existing_pr["data"]
                                        for existing_pr in attrs.get(
                                            "PRed", [])
                                ]:
                                    pass
                                else:
                                    if not pr_json:
                                        pr_json = {
                                            "state": "closed",
                                            "head": {
                                                "ref": "<this_is_not_a_branch>"
                                            },
                                        }
                                    d["PR"] = pr_json
                                    attrs.setdefault("PRed", []).append(d)
                                attrs.update(
                                    {
                                        "smithy_version": mctx.smithy_version,
                                        "pinning_version":
                                        mctx.pinning_version,
                                    }, )

                        except github3.GitHubError as e:
                            if e.msg == "Repository was archived so is read-only.":
                                attrs["archived"] = True
                            else:
                                LOGGER.critical(
                                    "GITHUB ERROR ON FEEDSTOCK: %s",
                                    fctx.feedstock_name,
                                )
                                if is_github_api_limit_reached(e, mctx.gh):
                                    break
                        except URLError as e:
                            LOGGER.exception("URLError ERROR")
                            attrs["bad"] = {
                                "exception":
                                str(e),
                                "traceback":
                                str(traceback.format_exc()).split("\n"),
                                "code":
                                getattr(e, "code"),
                                "url":
                                getattr(e, "url"),
                            }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s" % (
                                    '<a href="' +
                                    os.getenv("CIRCLE_BUILD_URL", "") +
                                    '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ), )
                        except Exception as e:
                            LOGGER.exception("NON GITHUB ERROR")
                            # we don't set bad for rerendering errors
                            if ("conda smithy rerender -c auto --no-check-uptodate"
                                    not in str(e)):
                                attrs["bad"] = {
                                    "exception":
                                    str(e),
                                    "traceback":
                                    str(traceback.format_exc()).split("\n", ),
                                }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s" % (
                                    '<a href="' +
                                    os.getenv("CIRCLE_BUILD_URL", "") +
                                    '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ), )
                        else:
                            if migrator_uid:
                                # On successful PR add to our counter
                                good_prs += 1
                finally:
                    # reset branch
                    attrs["branch"] = orig_branch

                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    LOGGER.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            try:
                                eval_cmd(f"rm -rf {f}")
                            except Exception:
                                pass

                if mctx.gh_api_requests_left == 0:
                    break

        print("\n", flush=True)

    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")
Example #17
def main(args: "CLIArgs") -> None:
    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")
    global MIGRATORS
    mctx, temp, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    for migrator in MIGRATORS:

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        effective_graph = mmctx.effective_graph

        logger.info(
            "Total migrations for %s: %d",
            migrator.__class__.__name__,
            len(effective_graph.nodes),
        )

        for node_name in migrator.order(effective_graph, mctx.graph):
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI timeout, break ahead of the timeout so we make certain
                # to write to the repo
                # TODO: convert these env vars
                if (
                    time.time() - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                    or good_prs >= migrator.pr_limit
                ):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                logger.info(
                    "%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (
                        args.dry_run
                        or mctx.gh.rate_limit()["resources"]["core"]["remaining"] == 0
                    ):
                        break
                    # FIXME: this causes the bot to not-rerender things when it
                    #  should. For instance, if the bot rerenders but the PR is
                    #  left open then we don't rerender again even though we should.
                    #  This need logic to check if the rerender has been merged.
                    rerender = (
                        attrs.get("smithy_version") != mctx.smithy_version
                        or attrs.get("pinning_version") != mctx.pinning_version
                        or migrator.rerender
                    )
                    migrator_uid, pr_json = run(
                        feedstock_ctx=fctx,
                        migrator=migrator,
                        rerender=rerender,
                        protocol="https",
                        hash_type=attrs.get("hash_type", "sha256"),
                    )
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we have the PR already do nothing
                        if d["data"] in [
                            existing_pr["data"] for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if not pr_json:
                                pr_json = {
                                    "state": "closed",
                                    "head": {"ref": "<this_is_not_a_branch>"},
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s", fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    dump_graph(mctx.graph)

                    eval_xonsh(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            eval_xonsh(f"rm -rf {f}")

    if not args.dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
Example #18
def graph_migrator_status(
    migrator: Migrator,
    gx: nx.DiGraph,
) -> Tuple[dict, list, nx.DiGraph]:
    """Gets the migrator progress for a given migrator"""

    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    num_viz = 0

    out: Dict[str, Set[str]] = {
        "done": set(),
        "in-pr": set(),
        "awaiting-pr": set(),
        "not-solvable": set(),
        "awaiting-parents": set(),
        "bot-error": set(),
    }

    gx2 = copy.deepcopy(getattr(migrator, "graph", gx))

    top_level = {node for node in gx2 if not list(gx2.predecessors(node))}
    build_sequence = list(cyclic_topological_sort(gx2, top_level))

    feedstock_metadata = dict()

    import graphviz
    from streamz.graph import _clean_text

    gv = graphviz.Digraph(graph_attr={"packmode": "array_3"})

    # pinning isn't actually in the migration
    if "conda-forge-pinning" in gx2.nodes():
        gx2.remove_node("conda-forge-pinning")

    for node, node_attrs in gx2.nodes.items():
        attrs = node_attrs["payload"]
        # remove archived from status
        if attrs.get("archived", False):
            continue
        node_metadata: Dict = {}
        feedstock_metadata[node] = node_metadata
        nuid = migrator.migrator_uid(attrs)
        all_pr_jsons = []
        for pr_json in attrs.get("PRed", []):
            all_pr_jsons.append(copy.deepcopy(pr_json))

        feedstock_ctx = FeedstockContext(
            package_name=node,
            feedstock_name=attrs.get("feedstock_name", node),
            attrs=attrs,
        )

        # hack around bug in migrator vs graph data for this one
        if isinstance(migrator, MatplotlibBase):
            if "name" in nuid:
                del nuid["name"]
            for i in range(len(all_pr_jsons)):
                if (all_pr_jsons[i] and "name" in all_pr_jsons[i]["data"]
                        and all_pr_jsons[i]["data"]["migrator_name"]
                        == "MatplotlibBase"):
                    del all_pr_jsons[i]["data"]["name"]

        for pr_json in all_pr_jsons:
            if pr_json and pr_json["data"] == frozen_to_json_friendly(
                    nuid)["data"]:
                break
        else:
            pr_json = None

        # No PR was ever issued but the migration was performed.
        # This is only the case when the migration was done manually
        # before the bot could issue any PR.
        manually_done = pr_json is None and frozen_to_json_friendly(
            nuid)["data"] in (z["data"] for z in all_pr_jsons)

        buildable = not migrator.filter(attrs)
        fntc = "black"
        status_icon = ""
        if manually_done:
            out["done"].add(node)
            fc = "#440154"
            fntc = "white"
        elif pr_json is None:
            if buildable:
                if "not solvable" in (attrs.get("pre_pr_migrator_status",
                                                {}).get(migrator_name, "")):
                    out["not-solvable"].add(node)
                    fc = "#ff8c00"
                elif "bot error" in (attrs.get("pre_pr_migrator_status",
                                               {}).get(migrator_name, "")):
                    out["bot-error"].add(node)
                    fc = "#000000"
                    fntc = "white"
                else:
                    out["awaiting-pr"].add(node)
                    fc = "#35b779"
            elif not isinstance(migrator, Replacement):
                if "bot error" in (attrs.get("pre_pr_migrator_status",
                                             {}).get(migrator_name, "")):
                    out["bot-error"].add(node)
                    fc = "#000000"
                    fntc = "white"
                else:
                    out["awaiting-parents"].add(node)
                    fc = "#fde725"
        elif "PR" not in pr_json:
            out["bot-error"].add(node)
            fc = "#000000"
            fntc = "white"
        elif pr_json["PR"]["state"] == "closed":
            out["done"].add(node)
            fc = "#440154"
            fntc = "white"
        else:
            out["in-pr"].add(node)
            fc = "#31688e"
            fntc = "white"
            pr_status = pr_json["PR"]["mergeable_state"]
            if pr_status == "clean":
                status_icon = " ✓"
            else:
                status_icon = " ❎"
        if node not in out["done"]:
            num_viz += 1
            gv.node(
                node,
                label=_clean_text(node) + status_icon,
                fillcolor=fc,
                style="filled",
                fontcolor=fntc,
                URL=(pr_json or {}).get("PR", {}).get(
                    "html_url",
                    feedstock_url(fctx=feedstock_ctx,
                                  protocol="https").strip(".git"),
                ),
            )

        # additional metadata for reporting
        node_metadata["num_descendants"] = len(nx.descendants(gx2, node))
        node_metadata["immediate_children"] = [
            k for k in sorted(gx2.successors(node))
            if not gx2.nodes[k].get("payload", {}).get("archived", False)
        ]
        if node in out["not-solvable"] or node in out["bot-error"]:
            node_metadata["pre_pr_migrator_status"] = attrs.get(
                "pre_pr_migrator_status",
                {},
            ).get(migrator_name, "")
        else:
            node_metadata["pre_pr_migrator_status"] = ""

        if pr_json and "PR" in pr_json:
            # I needed to fake some PRs; they don't have html_urls though
            node_metadata["pr_url"] = pr_json["PR"].get(
                "html_url",
                feedstock_url(fctx=feedstock_ctx,
                              protocol="https").strip(".git"),
            )
            node_metadata["pr_status"] = pr_json["PR"].get("mergeable_state")

    out2: Dict = {}
    for k in out.keys():
        out2[k] = list(
            sorted(
                out[k],
                key=lambda x: build_sequence.index(x)
                if x in build_sequence else -1,
            ), )

    out2["_feedstock_status"] = feedstock_metadata
    for (e0, e1), edge_attrs in gx2.edges.items():
        if (e0 not in out["done"] and e1 not in out["done"]
                and not gx2.nodes[e0]["payload"].get("archived", False)
                and not gx2.nodes[e1]["payload"].get("archived", False)):
            gv.edge(e0, e1)

    print("    len(gv):", num_viz, flush=True)
    out2["_num_viz"] = num_viz

    return out2, build_sequence, gv
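A minimal usage sketch for the function above, assuming a Migrator instance and a loaded graph are already available; writing out the Digraph source avoids needing the graphviz binaries at this stage:

import json

from conda_forge_tick.utils import load_graph

gx = load_graph()
# `migrator` is assumed to be an already-constructed Migrator instance
status, build_order, gv = graph_migrator_status(migrator, gx)

# persist the per-feedstock status buckets (already JSON-friendly lists/dicts)
with open("migration_status.json", "w") as f:
    json.dump(status, f, indent=2)

# record the build order computed by cyclic_topological_sort
with open("build_order.txt", "w") as f:
    f.write("\n".join(build_order))

# dump the DOT source; rendering to SVG/PNG needs the graphviz binaries
with open("migration_graph.dot", "w") as f:
    f.write(gv.source)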
Example #19
from conda_forge_tick.utils import load_graph, dump_graph, frozen_to_json_friendly

gx = load_graph()
for k, node_attrs in gx.nodes.items():
    prs = node_attrs.get('PRed', [])
    for i, pr in enumerate(prs):
        pr['data']['bot_rerun'] = False
        pr.update(frozen_to_json_friendly(pr['data']))
dump_graph(gx)
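The script above mutates each entry of "PRed" in place; judging by how the other examples on this page index these entries, each one appears to be a dict holding the migration uid under "data" and, once a PR exists, the PR record under "PR". A purely illustrative sketch of that shape (all values hypothetical):

# illustrative only: keys inferred from the examples on this page,
# values are made up for the sketch
pr_entry = {
    "data": {
        "migrator_name": "MigrationYaml",  # hypothetical migrator name
        "bot_rerun": False,  # the flag reset by the script above
    },
    "PR": {  # present only after the bot has opened a pull request
        "state": "open",
        "mergeable_state": "clean",
        "html_url": "https://github.com/conda-forge/example-feedstock/pull/1",
    },
}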
Example #20
def migrator_status(
    migrator: Migrator, gx: nx.DiGraph,
) -> Tuple[dict, list, nx.DiGraph]:
    """Gets the migrator progress for a given migrator

    Returns
    -------
    out : dict
        Dictionary of statuses with the feedstocks in them
    order :
        Build order for this migrator
    gv :
        Graphviz digraph of the migration graph
    """
    out: Dict[str, Set[str]] = {
        "done": set(),
        "in-pr": set(),
        "awaiting-pr": set(),
        "awaiting-parents": set(),
        "bot-error": set(),
    }

    gx2 = copy.deepcopy(getattr(migrator, "graph", gx))

    top_level = {node for node in gx2 if not list(gx2.predecessors(node))}
    build_sequence = list(cyclic_topological_sort(gx2, top_level))

    feedstock_metadata = dict()

    import graphviz
    from streamz.graph import _clean_text

    gv = graphviz.Digraph(graph_attr={"packmode": "array_3"})
    for node, node_attrs in gx2.nodes.items():
        attrs = node_attrs["payload"]
        # remove archived from status
        if attrs.get("archived", False):
            continue
        node_metadata: Dict = {}
        feedstock_metadata[node] = node_metadata
        nuid = migrator.migrator_uid(attrs)
        all_pr_jsons = []
        for pr_json in attrs.get("PRed", []):
            all_pr_jsons.append(copy.deepcopy(pr_json))

        # hack around bug in migrator vs graph data for this one
        if isinstance(migrator, MatplotlibBase):
            if "name" in nuid:
                del nuid["name"]
            for i in range(len(all_pr_jsons)):
                if (
                    all_pr_jsons[i]
                    and "name" in all_pr_jsons[i]["data"]
                    and all_pr_jsons[i]["data"]["migrator_name"] == "MatplotlibBase"
                ):
                    del all_pr_jsons[i]["data"]["name"]

        for pr_json in all_pr_jsons:
            if pr_json and pr_json["data"] == frozen_to_json_friendly(nuid)["data"]:
                break
        else:
            pr_json = None

        # No PR was ever issued but the migration was performed.
        # This is only the case when the migration was done manually
        # before the bot could issue any PR.
        manually_done = pr_json is None and frozen_to_json_friendly(nuid)["data"] in (
            z["data"] for z in all_pr_jsons
        )

        buildable = not migrator.filter(attrs)
        fntc = "black"
        if manually_done:
            out["done"].add(node)
            fc = "#440154"
            fntc = "white"
        elif pr_json is None:
            if buildable:
                out["awaiting-pr"].add(node)
                fc = "#35b779"
            elif not isinstance(migrator, Replacement):
                out["awaiting-parents"].add(node)
                fc = "#fde725"
        elif "PR" not in pr_json:
            out["bot-error"].add(node)
            fc = "#000000"
            fntc = "white"
        elif pr_json["PR"]["state"] == "closed":
            out["done"].add(node)
            fc = "#440154"
            fntc = "white"
        else:
            out["in-pr"].add(node)
            fc = "#31688e"
            fntc = "white"
        if node not in out["done"]:
            gv.node(
                node,
                label=_clean_text(node),
                fillcolor=fc,
                style="filled",
                fontcolor=fntc,
                URL=(pr_json or {}).get("PR", {}).get("html_url", ""),
            )

        # additional metadata for reporting
        node_metadata["num_descendants"] = len(nx.descendants(gx2, node))
        node_metadata["immediate_children"] = [
            k
            for k in sorted(gx2.successors(node))
            if not gx2.nodes[k].get("payload", {}).get("archived", False)
        ]
        if pr_json and "PR" in pr_json:
            # I needed to fake some PRs; they don't have html_urls though
            node_metadata["pr_url"] = pr_json["PR"].get("html_url", "")

    out2: Dict = {}
    for k in out.keys():
        out2[k] = list(
            sorted(
                out[k],
                key=lambda x: build_sequence.index(x) if x in build_sequence else -1,
            ),
        )

    out2["_feedstock_status"] = feedstock_metadata
    for (e0, e1), edge_attrs in gx2.edges.items():
        if (
            e0 not in out["done"]
            and e1 not in out["done"]
            and not gx2.nodes[e0]["payload"].get("archived", False)
            and not gx2.nodes[e1]["payload"].get("archived", False)
        ):
            gv.edge(e0, e1)

    return out2, build_sequence, gv
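As with the fuller variant earlier, the buckets returned here are plain collections of feedstock names, so summarizing progress takes one line per bucket; a minimal sketch, again assuming a Migrator instance and a loaded graph:

from conda_forge_tick.utils import load_graph

gx = load_graph()
# `migrator` is assumed to be an already-constructed Migrator instance
status, order, gv = migrator_status(migrator, gx)

# the non-underscore keys are the status buckets built in the function above
for bucket in ("done", "in-pr", "awaiting-pr", "awaiting-parents", "bot-error"):
    print(f"{bucket}: {len(status[bucket])} feedstocks")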
Example #21
from conda_forge_tick.utils import load_graph, dump_graph, frozen_to_json_friendly

gx = load_graph()
for k, node_attrs in gx.nodes.items():
    prs = node_attrs.get("PRed", [])
    for i, pr in enumerate(prs):
        pr["data"]["bot_rerun"] = False
        pr.update(frozen_to_json_friendly(pr["data"]))
dump_graph(gx)