Example #1
def _compute_time_per_migrator(mctx, migrators):
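    """Split the CI time budget across migrators, weighted by how many
    nodes each migrator can actually migrate."""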
    # we weight each migrator by the number of available nodes to migrate
    num_nodes = []
    for migrator in tqdm.tqdm(migrators):
        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        if isinstance(migrator, Version):
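            # count only nodes whose current new_version has had fewer
            # than 3 failed attempts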
            _num_nodes = 0
            for node_name in mmctx.effective_graph.nodes:
                with mmctx.effective_graph.nodes[node_name]["payload"] as attrs:
                    _attempts = attrs.get("new_version_attempts", {}).get(
                        attrs.get("new_version", ""),
                        0,
                    )
                    if _attempts < 3:
                        _num_nodes += 1
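            # enforce a floor of min(PR_LIMIT * 4, total nodes) so version
            # migrations keep a baseline share of the time budget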
            _num_nodes = max(
                _num_nodes,
                min(PR_LIMIT * 4, len(mmctx.effective_graph.nodes)),
            )
            num_nodes.append(_num_nodes)
        else:
            num_nodes.append(len(mmctx.effective_graph.nodes))

    num_nodes_tot = sum(num_nodes)
    # do not divide by zero
    time_per_node = float(env.get("TIMEOUT", 600)) / max(num_nodes_tot, 1)

    # also enforce a minimum of 300 seconds if any nodes can be migrated
    time_per_migrator = []
    for i, migrator in enumerate(migrators):
        _time_per = num_nodes[i] * time_per_node

        if num_nodes[i] > 0 and _time_per < 300:
            _time_per = 300

        time_per_migrator.append(_time_per)

    # finally rescale to fit in the time we have
    tot_time_per_migrator = sum(time_per_migrator)
    if tot_time_per_migrator > 0:
        time_fac = float(env.get("TIMEOUT", 600)) / tot_time_per_migrator
    else:
        time_fac = 1.0
    for i in range(len(time_per_migrator)):
        time_per_migrator[i] = time_per_migrator[i] * time_fac

    # recompute the total after rescaling
    tot_time_per_migrator = sum(time_per_migrator)

    return num_nodes, time_per_migrator, tot_time_per_migrator
Example #2
def main(args):
    gx = load_graph()
    ctx = MigratorSessionContext("", "", "")
    start_time = time.time()

    os.makedirs("audits", exist_ok=True)
    for k in AUDIT_REGISTRY:
        os.makedirs(os.path.join("audits", k), exist_ok=True)

    # TODO: generalize for cran skeleton
    # limit graph to things that depend on python
    python_des = nx.descendants(gx, "pypy-meta")
    for node in sorted(
            python_des,
            key=lambda x: (len(nx.descendants(gx, x)), x),
            reverse=True,
    ):
        if time.time() - int(env.get("START_TIME", start_time)) > int(
                env.get("TIMEOUT", 60 * 30)):
            break
        # depfinder only works on Python at the moment, so only audit things
        # with python as a runtime dep
        payload = gx.nodes[node]["payload"]
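        # run every registered audit tool on this node, skipping any whose
        # output file is already on disk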
        for k, v in AUDIT_REGISTRY.items():
            version = payload.get("version", None)
            ext = v["ext"]
            if (not payload.get("archived", False) and version
                    and "python" in payload["requirements"]["run"]
                    and f"{node}_{version}.{ext}"
                    not in os.listdir(f"audits/{k}")):
                print(node)
                fctx = FeedstockContext(
                    package_name=node,
                    feedstock_name=payload["name"],
                    attrs=payload,
                )
                try:
                    deps = v["run"](fctx, ctx)
                except Exception as e:
                    deps = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                    if "dumper" in v:
                        deps = v["dumper"](deps)
                finally:
                    with open(f"audits/{k}/{node}_{version}.{ext}", "w") as f:
                        v["writer"](deps, f)

    compare_grayskull_audits(gx)
    compare_depfinder_audits(gx)
Example #3
def main(args):
    gx = load_graph()
    ctx = MigratorSessionContext("", "", "")
    start_time = time.time()
    # limit graph to things that depend on python
    python_des = nx.descendants(gx, "pypy-meta")
    for node in sorted(
            python_des,
            key=lambda x: (len(nx.descendants(gx, x)), x),
            reverse=True,
    ):
        if time.time() - int(env.get("START_TIME", start_time)) > int(
                env.get("TIMEOUT", 60 * 30)):
            break
        # depfinder only works on Python at the moment, so only audit things
        # with python as a runtime dep
        os.makedirs("audits", exist_ok=True)
        with gx.nodes[node]["payload"] as payload:
            version = payload.get("version", None)
            if (not payload.get("archived", False) and version
                    and "python" in payload["requirements"]["run"]
                    and f"{node}_{version}.json" not in os.listdir("audits")):
                print(node)
                fctx = FeedstockContext(package_name=node,
                                        feedstock_name=payload["name"],
                                        attrs=payload)
                try:
                    deps = audit_feedstock(fctx, ctx)
                except Exception as e:
                    deps = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                finally:
                    with open(f"audits/{node}_{version}.json", "w") as f:
                        dump(deps, f)
Example #4
def main(args):
    gx = load_graph()
    ctx = MigratorSessionContext("", "", "")
    start_time = time.time()

    os.makedirs("audits", exist_ok=True)
    for k, v in AUDIT_REGISTRY.items():
        audit_dir = os.path.join("audits", k)
        version_path = os.path.join(audit_dir, "_version.json")
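        # the stamp pairs the audit tool's own version with the version of
        # the code that creates the audits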
        audit_version = "_".join([v["version"], v["creation_version"]])
        if os.path.exists(version_path):
            version = load(open(version_path))
            # if the code that generated the stored audits differs from the
            # current version, clear the audit data so we always use output
            # from the latest version
            if version != audit_version:
                shutil.rmtree(audit_dir)
        os.makedirs(audit_dir, exist_ok=True)
        dump(audit_version, open(version_path, "w"))

    # TODO: generalize for cran skeleton
    # limit graph to things that depend on python
    python_des = nx.descendants(gx, "python")
    for node in sorted(
        python_des,
        key=lambda x: (len(nx.descendants(gx, x)), x),
        reverse=True,
    ):
        if time.time() - int(env.get("START_TIME", start_time)) > int(
            env.get("TIMEOUT", 60 * RUNTIME_MINUTES),
        ):
            break
        # depfinder only works on Python at the moment, so only audit things
        # with python as a runtime dep
        payload = gx.nodes[node]["payload"]
        for k, v in AUDIT_REGISTRY.items():
            version = payload.get("version", None)
            ext = v["ext"]
            if (
                not payload.get("archived", False)
                and not payload.get("bad", False)
                and version
                and "python" in payload["requirements"]["run"]
                and f"{node}_{version}.{ext}" not in os.listdir(f"audits/{k}")
            ):
                fctx = FeedstockContext(
                    package_name=node,
                    feedstock_name=payload["feedstock_name"],
                    attrs=payload,
                )
                try:
                    deps = v["run"](fctx, ctx)
                except Exception as e:
                    deps = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                    if "dumper" in v:
                        deps = v["dumper"](deps)
                finally:
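                    # only write an audit file if we actually got a result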
                    if deps:
                        with open(f"audits/{k}/{node}_{version}.{ext}", "w") as f:
                            v["writer"](deps, f)

    # grayskull_audit_outcome = compare_grayskull_audits(gx)
    # compute_grayskull_accuracy(grayskull_audit_outcome)
    depfinder_audit_outcome = compare_depfinder_audits(gx)
    compute_depfinder_accuracy(depfinder_audit_outcome)
Example #5
def main(args: "CLIArgs") -> None:

    # logging
    if args.debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    mctx, temp, migrators = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    (num_nodes, time_per_migrator,
     tot_time_per_migrator) = _compute_time_per_migrator(
         mctx,
         migrators,
     )
    for i, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "    %s%s: %d - gets %f seconds (%f percent)" % (
                migrator.__class__.__name__,
                extra_name,
                num_nodes[i],
                time_per_migrator[i],
                time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
            ),
            flush=True,
        )

    for mg_ind, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            assert isinstance(migrator.name, str)
            migrator_name = migrator.name.lower().replace(" ", "")
        else:
            migrator_name = migrator.__class__.__name__.lower()

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )
        print(
            "Running migrations for %s%s: %d\n" % (
                migrator.__class__.__name__,
                extra_name,
                len(effective_graph.nodes),
            ),
            flush=True,
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            LOGGER.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    LOGGER.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
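                # the payload acts as a context manager, so changes made to
                # attrs here are persisted when the block exits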
                base_branches = migrator.get_possible_feedstock_branches(attrs)
                orig_branch = attrs.get("branch", "master")

                # Don't let CI time out; break ahead of the timeout so we are
                # certain to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time())) > int(
                        env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                try:
                    for base_branch in base_branches:
                        attrs["branch"] = base_branch
                        if migrator.filter(attrs):
                            continue

                        print("\n", flush=True, end="")
                        LOGGER.info(
                            "%s%s IS MIGRATING %s:%s",
                            migrator.__class__.__name__.upper(),
                            extra_name,
                            fctx.package_name,
                            base_branch,
                        )
                        try:
                            # Don't bother running if we are at zero
                            if mctx.gh_api_requests_left == 0:
                                break
                            migrator_uid, pr_json = run(
                                feedstock_ctx=fctx,
                                migrator=migrator,
                                rerender=migrator.rerender,
                                protocol="https",
                                hash_type=attrs.get("hash_type", "sha256"),
                                base_branch=base_branch,
                            )
                            # if migration successful
                            if migrator_uid:
                                d = frozen_to_json_friendly(migrator_uid)
                                # if we already have this PR, do nothing
                                if d["data"] in [
                                        existing_pr["data"]
                                        for existing_pr in attrs.get("PRed", [])
                                ]:
                                    pass
                                else:
                                    if not pr_json:
                                        pr_json = {
                                            "state": "closed",
                                            "head": {
                                                "ref": "<this_is_not_a_branch>"
                                            },
                                        }
                                    d["PR"] = pr_json
                                    attrs.setdefault("PRed", []).append(d)
                                attrs.update(
                                    {
                                        "smithy_version": mctx.smithy_version,
                                        "pinning_version": mctx.pinning_version,
                                    },
                                )

                        except github3.GitHubError as e:
                            if e.msg == "Repository was archived so is read-only.":
                                attrs["archived"] = True
                            else:
                                LOGGER.critical(
                                    "GITHUB ERROR ON FEEDSTOCK: %s",
                                    fctx.feedstock_name,
                                )
                                if is_github_api_limit_reached(e, mctx.gh):
                                    break
                        except URLError as e:
                            LOGGER.exception("URLError ERROR")
                            attrs["bad"] = {
                                "exception":
                                str(e),
                                "traceback":
                                str(traceback.format_exc()).split("\n"),
                                "code":
                                getattr(e, "code"),
                                "url":
                                getattr(e, "url"),
                            }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s" % (
                                    '<a href="' +
                                    os.getenv("CIRCLE_BUILD_URL", "") +
                                    '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        except Exception as e:
                            LOGGER.exception("NON GITHUB ERROR")
                            # we don't mark the node as "bad" for rerendering errors
                            if ("conda smithy rerender -c auto --no-check-uptodate"
                                    not in str(e)):
                                attrs["bad"] = {
                                    "exception": str(e),
                                    "traceback": str(traceback.format_exc()).split("\n"),
                                }

                            pre_key = "pre_pr_migrator_status"
                            if pre_key not in attrs:
                                attrs[pre_key] = {}
                            attrs[pre_key][migrator_name] = sanitize_string(
                                "bot error (%s): %s: %s" % (
                                    '<a href="' +
                                    os.getenv("CIRCLE_BUILD_URL", "") +
                                    '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            )
                        else:
                            if migrator_uid:
                                # On successful PR add to our counter
                                good_prs += 1
                finally:
                    # reset branch
                    attrs["branch"] = orig_branch

                    # Write graph partially through
                    if not args.dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    LOGGER.info(os.getcwd())
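                    # remove temp files created during the migration, sparing
                    # anything that existed before the run (tracked in temp)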
                    for f in glob.glob("/tmp/*"):
                        if f not in temp:
                            try:
                                eval_cmd(f"rm -rf {f}")
                            except Exception:
                                pass

                if mctx.gh_api_requests_left == 0:
                    break

        print("\n", flush=True)

    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")
Example #6
def auto_tick(dry_run=False,
              debug=False,
              fork=False,
              organization='nsls-ii-forge'):
    '''
    Automatically update package versions and submit pull requests to
    associated feedstocks

    Parameters
    ----------
    dry_run: bool, optional
        Generate version migration yamls but do not run them
    debug: bool, optional
        Setup logging to be in debug mode
    fork: bool, optional
        Create a fork of the repo from the organization to $GITHUB_USERNAME
    organization: str, optional
        GitHub organization that manages feedstock repositories
    '''
    from conda_forge_tick.xonsh_utils import env

    if debug:
        setup_logger(logger, level="debug")
    else:
        setup_logger(logger)

    # set Version.pr_body to custom pr_body function
    Version.pr_body = bot_pr_body

    # TODO: use ~/.netrc instead
    github_username = env.get("GITHUB_USERNAME", "")
    github_password = env.get("GITHUB_TOKEN", "")
    github_token = env.get("GITHUB_TOKEN")
    global MIGRATORS

    print('Initializing migrators...')
    mctx, MIGRATORS = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print('Computing time per migrator')
    (num_nodes, time_per_migrator,
     tot_time_per_migrator) = _compute_time_per_migrator(mctx)
    for i, migrator in enumerate(MIGRATORS):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Total migrations for %s%s: %d - gets %f seconds (%f percent)",
            migrator.__class__.__name__,
            extra_name,
            num_nodes[i],
            time_per_migrator[i],
            time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
        )

    print('Performing migrations...')
    for mg_ind, migrator in enumerate(MIGRATORS):

        mmctx = MigratorContext(session=mctx, migrator=migrator)
        migrator.bind_to_ctx(mmctx)

        good_prs = 0
        _mg_start = time.time()
        effective_graph = mmctx.effective_graph
        time_per = time_per_migrator[mg_ind]

        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        logger.info(
            "Running migrations for %s%s: %d",
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        )

        possible_nodes = list(migrator.order(effective_graph, mctx.graph))

        # version debugging info
        if isinstance(migrator, Version):
            logger.info("possible version migrations:")
            for node_name in possible_nodes:
                with effective_graph.nodes[node_name]["payload"] as attrs:
                    logger.info(
                        "    node|curr|new|attempts: %s|%s|%s|%d",
                        node_name,
                        attrs.get("version"),
                        attrs.get("new_version"),
                        (attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )),
                    )

        for node_name in possible_nodes:
            with mctx.graph.nodes[node_name]["payload"] as attrs:
                # Don't let CI time out; break ahead of the timeout so we are
                # certain to write to the repo
                # TODO: convert these env vars
                _now = time.time()
                if ((_now - int(env.get("START_TIME", time.time())) > int(
                        env.get("TIMEOUT", 600)))
                        or good_prs >= migrator.pr_limit
                        or (_now - _mg_start) > time_per):
                    break

                fctx = FeedstockContext(
                    package_name=node_name,
                    feedstock_name=attrs["feedstock_name"],
                    attrs=attrs,
                )

                print("\n", flush=True, end="")
                logger.info(
                    "%s%s IS MIGRATING %s",
                    migrator.__class__.__name__.upper(),
                    extra_name,
                    fctx.package_name,
                )
                try:
                    # Don't bother running if we are at zero
                    if (dry_run or mctx.gh.rate_limit()["resources"]["core"]
                        ["remaining"] == 0):
                        break
                    migrator_uid, pr_json = run(feedstock_ctx=fctx,
                                                migrator=migrator,
                                                rerender=migrator.rerender,
                                                protocol="https",
                                                hash_type=attrs.get(
                                                    "hash_type", "sha256"),
                                                fork=fork,
                                                organization=organization)
                    # if migration successful
                    if migrator_uid:
                        d = frozen_to_json_friendly(migrator_uid)
                        # if we already have this PR, do nothing
                        if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                        ]:
                            pass
                        else:
                            if pr_json is None:
                                pr_json = {
                                    "state": "closed",
                                    "head": {
                                        "ref": "<this_is_not_a_branch>"
                                    },
                                }
                            d["PR"] = pr_json
                            attrs.setdefault("PRed", []).append(d)
                        attrs.update(
                            {
                                "smithy_version": mctx.smithy_version,
                                "pinning_version": mctx.pinning_version,
                            },
                        )

                except github3.GitHubError as e:
                    if e.msg == "Repository was archived so is read-only.":
                        attrs["archived"] = True
                    else:
                        logger.critical(
                            "GITHUB ERROR ON FEEDSTOCK: %s",
                            fctx.feedstock_name,
                        )
                        if is_github_api_limit_reached(e, mctx.gh):
                            break
                except URLError as e:
                    logger.exception("URLError ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                        "code": getattr(e, "code"),
                        "url": getattr(e, "url"),
                    }
                except Exception as e:
                    logger.exception("NON GITHUB ERROR")
                    attrs["bad"] = {
                        "exception": str(e),
                        "traceback": str(traceback.format_exc()).split("\n"),
                    }
                else:
                    if migrator_uid:
                        # On successful PR add to our counter
                        good_prs += 1
                finally:
                    # Write graph partially through
                    if not dry_run:
                        dump_graph(mctx.graph)

                    eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                    logger.info(os.getcwd())

    if not dry_run:
        logger.info(
            "API Calls Remaining: %d",
            mctx.gh.rate_limit()["resources"]["core"]["remaining"],
        )
    logger.info("Done")
Example #7
def main(args: "CLIArgs") -> None:
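    # presumably installs process resource limits (e.g. a memory cap)
    # before any work starts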
    _setup_limits()

    # logging
    if args.debug:
        setup_logger(logging.getLogger("conda_forge_tick"), level="debug")
    else:
        setup_logger(logging.getLogger("conda_forge_tick"))

    github_username = env.get("USERNAME", "")
    github_password = env.get("PASSWORD", "")
    github_token = env.get("GITHUB_TOKEN")

    mctx, temp, migrators = initialize_migrators(
        github_username=github_username,
        github_password=github_password,
        dry_run=args.dry_run,
        github_token=github_token,
    )

    # compute the time per migrator
    print("computing time per migration", flush=True)
    (num_nodes, time_per_migrator, tot_time_per_migrator) = _compute_time_per_migrator(
        mctx,
        migrators,
    )
    for i, migrator in enumerate(migrators):
        if hasattr(migrator, "name"):
            extra_name = "-%s" % migrator.name
        else:
            extra_name = ""

        print(
            "    %s%s: %d - gets %f seconds (%f percent)"
            % (
                migrator.__class__.__name__,
                extra_name,
                num_nodes[i],
                time_per_migrator[i],
                time_per_migrator[i] / max(tot_time_per_migrator, 1) * 100,
            ),
            flush=True,
        )

    for mg_ind, migrator in enumerate(migrators):
        print(
            "\n========================================"
            "========================================"
            "\n"
            "========================================"
            "========================================",
            flush=True,
        )

        good_prs = _run_migrator(
            migrator,
            mctx,
            temp,
            time_per_migrator[mg_ind],
            args.dry_run,
        )
        if good_prs > 0:
            pass
            # this has been causing issues with bad deploys
            # turning off for now
            # deploy(dry_run=args.dry_run)

        print("\n", flush=True)

    LOGGER.info("API Calls Remaining: %d", mctx.gh_api_requests_left)
    LOGGER.info("Done")
Example #8
def _run_migrator(migrator, mctx, temp, time_per, dry_run):
    if hasattr(migrator, "name"):
        assert isinstance(migrator.name, str)
        migrator_name = migrator.name.lower().replace(" ", "")
    else:
        migrator_name = migrator.__class__.__name__.lower()

    mmctx = MigratorContext(session=mctx, migrator=migrator)
    migrator.bind_to_ctx(mmctx)

    good_prs = 0
    _mg_start = time.time()
    effective_graph = mmctx.effective_graph

    if hasattr(migrator, "name"):
        extra_name = "-%s" % migrator.name
    else:
        extra_name = ""

    print(
        "Running migrations for %s%s: %d\n"
        % (
            migrator.__class__.__name__,
            extra_name,
            len(effective_graph.nodes),
        ),
        flush=True,
    )

    possible_nodes = list(migrator.order(effective_graph, mctx.graph))

    # version debugging info
    if isinstance(migrator, Version):
        LOGGER.info("possible version migrations:")
        for node_name in possible_nodes:
            with effective_graph.nodes[node_name]["payload"] as attrs:
                LOGGER.info(
                    "    node|curr|new|attempts: %s|%s|%s|%f",
                    node_name,
                    attrs.get("version"),
                    attrs.get("new_version"),
                    (
                        attrs.get("new_version_attempts", {}).get(
                            attrs.get("new_version", ""),
                            0,
                        )
                    ),
                )

    for node_name in possible_nodes:
        with mctx.graph.nodes[node_name]["payload"] as attrs:
            # Don't let CI time out; break ahead of the timeout so we are
            # certain to write to the repo
            # TODO: convert these env vars
            _now = time.time()
            if (
                (
                    _now - int(env.get("START_TIME", time.time()))
                    > int(env.get("TIMEOUT", 600))
                )
                or good_prs >= migrator.pr_limit
                or (_now - _mg_start) > time_per
            ):
                break

            base_branches = migrator.get_possible_feedstock_branches(attrs)
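            # remember whether the payload already had a branch so the
            # original value can be restored in the finally block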
            if "branch" in attrs:
                has_attrs_branch = True
                orig_branch = attrs.get("branch")
            else:
                has_attrs_branch = False
                orig_branch = None

            fctx = FeedstockContext(
                package_name=node_name,
                feedstock_name=attrs["feedstock_name"],
                attrs=attrs,
            )

            # map main to current default branch
            base_branches = [
                br if br != "main" else fctx.default_branch for br in base_branches
            ]

            try:
                for base_branch in base_branches:
                    attrs["branch"] = base_branch
                    if migrator.filter(attrs):
                        continue

                    print("\n", flush=True, end="")
                    sys.stderr.flush()
                    sys.stdout.flush()
                    LOGGER.info(
                        "%s%s IS MIGRATING %s:%s",
                        migrator.__class__.__name__.upper(),
                        extra_name,
                        fctx.package_name,
                        base_branch,
                    )
                    try:
                        # Don't bother running if we are at zero
                        if mctx.gh_api_requests_left == 0:
                            break
                        migrator_uid, pr_json = run(
                            feedstock_ctx=fctx,
                            migrator=migrator,
                            rerender=migrator.rerender,
                            protocol="https",
                            hash_type=attrs.get("hash_type", "sha256"),
                            base_branch=base_branch,
                        )
                        # if migration successful
                        if migrator_uid:
                            d = frozen_to_json_friendly(migrator_uid)
                            # if we already have this PR, do nothing
                            if d["data"] in [
                                existing_pr["data"]
                                for existing_pr in attrs.get("PRed", [])
                            ]:
                                pass
                            else:
                                if not pr_json:
                                    pr_json = {
                                        "state": "closed",
                                        "head": {"ref": "<this_is_not_a_branch>"},
                                    }
                                d["PR"] = pr_json
                                attrs.setdefault("PRed", []).append(d)
                            attrs.update(
                                {
                                    "smithy_version": mctx.smithy_version,
                                    "pinning_version": mctx.pinning_version,
                                },
                            )

                    except github3.GitHubError as e:
                        if e.msg == "Repository was archived so is read-only.":
                            attrs["archived"] = True
                        else:
                            LOGGER.critical(
                                "GITHUB ERROR ON FEEDSTOCK: %s",
                                fctx.feedstock_name,
                            )
                            if is_github_api_limit_reached(e, mctx.gh):
                                break
                    except URLError as e:
                        LOGGER.exception("URLError ERROR")
                        attrs["bad"] = {
                            "exception": str(e),
                            "traceback": str(traceback.format_exc()).split("\n"),
                            "code": getattr(e, "code"),
                            "url": getattr(e, "url"),
                        }

                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    except Exception as e:
                        LOGGER.exception("NON GITHUB ERROR")
                        # we don't mark the node as "bad" for rerendering errors
                        if (
                            "conda smithy rerender -c auto --no-check-uptodate"
                            not in str(e)
                        ):
                            attrs["bad"] = {
                                "exception": str(e),
                                "traceback": str(traceback.format_exc()).split(
                                    "\n",
                                ),
                            }

                        _set_pre_pr_migrator_fields(
                            attrs,
                            migrator_name,
                            sanitize_string(
                                "bot error (%s): %s: %s"
                                % (
                                    '<a href="'
                                    + os.getenv("CIRCLE_BUILD_URL", "")
                                    + '">bot CI job</a>',
                                    base_branch,
                                    str(traceback.format_exc()),
                                ),
                            ),
                        )
                    else:
                        if migrator_uid:
                            # On successful PR add to our counter
                            good_prs += 1
            finally:
                # reset branch
                if has_attrs_branch:
                    attrs["branch"] = orig_branch

                # force a garbage-collection pass between nodes; heavy-handed,
                # but it keeps the long-running process's memory in check
                gc.collect()

                # Write graph partially through
                if not dry_run:
                    dump_graph(mctx.graph)

                eval_cmd(f"rm -rf {mctx.rever_dir}/*")
                LOGGER.info(os.getcwd())
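                # clear leftover files from /tmp, keeping those recorded in
                # temp before the run started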
                for f in glob.glob("/tmp/*"):
                    if f not in temp:
                        try:
                            eval_cmd(f"rm -rf {f}")
                        except Exception:
                            pass

            if mctx.gh_api_requests_left == 0:
                break

    return good_prs