Code Example #1
def main():
    # load all dependencies
    releases = load_releases(SOURCE_FILE)

    # make a dict of all issues that are in releases
    lookup = build_lookup(releases)

    # load all the issues
    issues = _utils.load_issues(DIR)

    # get the new issue number of every issue in a release
    for issue in issues:
        repo_id = issue.get("repo_id")
        issue_number = issue.get("number")
        key = f"{repo_id}${issue_number}"

        if key in lookup:
            new_issue_number = issue["migration"].get("new_issue_number")

            if not new_issue_number:
                # issue has apparently not been created in github
                logger.error(key)
                raise Exception(f"New {issue_number} does not exist in github!")

            lookup[key]["new_issue_number"] = new_issue_number

    # build new releases file
    releases = update_releases(releases, lookup)

    releases = drop_null_issues(releases)

    with open(DEST_FILE, "w") as fout:
        fout.write(json.dumps(releases))
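
The build_lookup helper is not included in this excerpt. Below is a minimal sketch of what it might look like; only the "{repo_id}${issue_number}" key format comes from main() above, and the "issues" field on each release is an assumption.

def build_lookup(releases):
    # Hypothetical helper: index every issue referenced by a release under the
    # same "{repo_id}${issue_number}" key format that main() uses above.
    lookup = {}
    for release in releases:
        for ref in release.get("issues", []):  # assumed field name
            key = f"{ref['repo_id']}${ref['issue_number']}"
            lookup[key] = {
                "repo_id": ref["repo_id"],
                "issue_number": ref["issue_number"],
                "new_issue_number": None,
            }
    return lookup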
Code Example #2
def main():
    issues = _utils.load_issues(DIR)

    issue_count = 0
    created_count = 0
    for issue in issues:
        # skip issues which have already been created
        if issue.get("migration").get("created_github"):
            continue

        if issue.get("repo_id") == 140626918:
            """
            We do not create issues from atd-data-tech,
            Because that's the repo we're migrating to,
            But we do need to reference these isssue
            To connect the dependencies and epics to new issues.
            """
            issue["migration"]["created_github"] = True
            issue["migration"]["new_issue_number"] = issue["number"]
            write_issue(issue, DIR)
            issue_count += 1
            continue
        res = create_issue(issue, DEST_REPO)
        issue["migration"]["created_github"] = True
        issue["migration"]["new_issue_number"] = res["number"]
        issue_count += 1
        created_count += 1
        write_issue(issue, DIR)

    logger.info(f"Issues Processed: {issue_count}")
    logger.info(f"Issues Created: {created_count}")
Code Example #3
def main():

    issues = _utils.load_issues(DIR)

    issue_count = 0
    error_count = 0

    for issue in issues:

        if issue["migration"].get("zenhub_downloaded"):
            continue

        time.sleep(.6)  # zenhub rate limit is 100 requests/minute

        try:
            issue = get_zenhub_issue(issue)
        except Exception:
            issue["migration"]["zenhub_downloaded"] = False
            logger.error(f"Error: {issue['path']}")
            error_count += 1
            continue

        if issue["is_epic"]:
            get_epic_issues(issue)

        fname = issue["path"]

        with open(fname, "w") as fout:
            logger.info(f"{issue['repo_name']} {issue['number']}")
            fout.write(json.dumps(issue))
            issue_count += 1

    logger.info(f"Issues Processed: {issue_count}")
    logger.info(f"Errors: {error_count}")
Code Example #4
def main():

    issues = _utils.load_issues(DIR)

    issue_count = 0
    
    for issue in issues:

        if issue.get("is_epic") and issue.get("repo_id") != 140626918:
            """
            we skip existing atd-data-tech epics.
            These issues already exist, but we need to connect the dependencies, etc.
            """

            # new issue number of issue that will be converted to epic
            issue_number = issue["migration"].get("new_issue_number")
            
            payload = {"issues": []}

            res = zenhub_request(DEST_REPO_ID, issue_number, payload)

            if not res:
                logger.error(f"ERROR: {issue['path']}")
                issue["migration"]["epic_created"] = False
            else:
                logger.info(issue["path"])
                issue["migration"]["epic_created"] = True

            write_issue(issue, DIR)
            issue_count += 1

    logger.info(f"Issues Processed: {issue_count}")
Code Example #5
def main():

    # test only
    test_pipes = get_test_pipelines()

    # assign positions to issues
    issues = _utils.load_issues(DIR)
    sorted_issues = sort_issues(issues)

    for issue_element in sorted_issues:
        issue = issue_element.get("data")
        issue_number = issue.get("migration").get("new_issue_number")
        pos = issue.get("migration").get("pipeline").get("position")

        # TODO: test only. In production, use the existing pipeline ID instead:
        # pipe_id = issue.get("migration").get("pipeline").get("pipeline_id")
        pipe_name = issue.get("migration").get("pipeline").get("pipeline_name")
        pipe_id = replace_pipe(test_pipes, pipe_name)

        payload = {"pipeline_id": pipe_id, "position": pos}

        res = zenhub_request(DEST_REPO_ID, WORKSPACE_ID, issue_number, payload)

        if not res:
            logger.error(f"ERROR: {issue['path']}")
            issue["migration"]["pipeline_processed"] = False
        else:
            logger.info(issue["path"])
            issue["migration"]["pipeline_processed"] = True

        write_issue(issue, DIR)
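
In this script zenhub_request also takes a workspace ID, which suggests ZenHub's workspace-scoped move endpoint. A minimal sketch under that assumption:

import os

import requests


def zenhub_request(repo_id, workspace_id, issue_number, payload):
    # Hypothetical helper: move an issue to a pipeline and position within a
    # ZenHub workspace. Returns True on success and None on failure.
    url = (
        f"https://api.zenhub.com/p2/workspaces/{workspace_id}"
        f"/repositories/{repo_id}/issues/{issue_number}/moves"
    )
    headers = {"X-Authentication-Token": os.environ["ZENHUB_ACCESS_TOKEN"]}
    res = requests.post(url, headers=headers, json=payload)
    return True if res.ok else None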
Code Example #6
def main():

    issues = _utils.load_issues(DIR)
    issue_count = 0
    epic_count = 0
    child_issue_count = 0
    for issue in issues:
        issue_count += 1
        if issue.get("is_epic"):

            # new issue number of issue that will be converted to epic
            issue_number = issue["migration"].get("new_issue_number")

            payload = {"add_issues": [], "remove_issues": []}

            for e in issue["epic_issues"]:

                if (e.get("repo_id") == 140626918
                        and issue.get("repo_id") == 140626918):
                    """
                    We do not need to add issues to epics that are already
                    in atd-data-tech when the issue is already in atd-data-tech.
                    Are you confused yet? This is a complication of the fact that
                    an epic might contain a mix of issues that are being migrated
                    and which already exist in the destination repo :/
                    """
                    continue

                if e.get("new_issue_number"):
                    child_issue_count += 1
                    payload["add_issues"].append({
                        "repo_id":
                        DEST_REPO_ID,
                        "issue_number":
                        e["new_issue_number"]
                    })
                else:
                    logger.error(
                        f"Child issue for issue #{issue_number} does not exist: {e}"
                    )

            if payload["add_issues"]:

                res = zenhub_request(DEST_REPO_ID, issue_number, payload)

                if not res:
                    logger.error(f"ERROR: {issue['path']}")
                    issue["migration"]["epic_created"] = False
                else:
                    logger.info(issue["path"])
                    issue["migration"]["epic_created"] = True

                write_issue(issue, DIR)
                epic_count += 1

    logger.info(f"Issues Processed: {issue_count}")
    logger.info(f"Epics Processed: {epic_count}")
    logger.info(f"Child Issues Processed: {child_issue_count}")
Code Example #7
def main():
    issues = _utils.load_issues(DIR)
    
    dest_milestones = get_milestones_from_repo(DEST_REPO)

    issues, update_count = update_milestones(issues, dest_milestones)

    issue_count = write_issues(issues, DIR)
    logger.info(f"Issues Processed: {issue_count}")
    logger.info(f"Milestones Updated: {update_count}")
Code Example #8
def main():
    test_pipes = get_pipelines()

    issues_with_positions = {}

    for repo in SOURCE_REPOS:
        # fetch all the issues in the workspace to get issue positions

        if repo["id"] == 140626918:
            """
            we skip in atd-data-tech
            Those issue will not have pipelines updated,
            but do need to reconnect the dependencies, etc.
            """
            print("yep")
            continue

        pipelines = zenhub_request_get(repo["id"], WORKSPACE_ID)

        for pipe in pipelines.get("pipelines"):
            for issue in pipe.get("issues"):

                key = f"{repo['id']}${issue['issue_number']}"

                issues_with_positions[key] = {
                    "old_issue_number": issue["issue_number"],
                    "pipeline_id": pipe.get("id"),
                    "pipeline_name": pipe.get("name"),
                    "position": issue.get("position"),
                    "repo_id": DEST_REPO_ID,
                }

    # assign positions to issues
    issues = _utils.load_issues(DIR)

    for issue in issues:
        repo_id = issue.get("repo_id")

        if repo["id"] == 140626918:
            """
            we skip in atd-data-tech
            Those issue will not have pipelines updated,
            but do need to reconnect the dependencies, etc.
            """
            continue

        issue_number = issue.get("number")
        key = f"{repo_id}${issue_number}"
        issue["migration"]["pipeline"] = issues_with_positions.get(key)
        write_issue(issue, DIR)
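
zenhub_request_get presumably reads the workspace board. A minimal sketch, assuming ZenHub's workspace board endpoint, whose response carries the "pipelines" and per-issue "position" fields consumed above:

import os

import requests


def zenhub_request_get(repo_id, workspace_id):
    # Hypothetical helper: fetch the ZenHub board for one repo in a workspace.
    # The response has a "pipelines" list; each pipeline contains "issues"
    # with "issue_number" and "position" values.
    url = (
        f"https://api.zenhub.com/p2/workspaces/{workspace_id}"
        f"/repositories/{repo_id}/board"
    )
    headers = {"X-Authentication-Token": os.environ["ZENHUB_ACCESS_TOKEN"]}
    res = requests.get(url, headers=headers)
    res.raise_for_status()
    return res.json()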
Code Example #9
def main():
    with open(LABEL_FILE, "r") as fin:
        reader = csv.DictReader(fin)
        label_map = [row for row in reader if "map" in row["action"]]

        label_lookup = build_lookup(label_map)

    issues = _utils.load_issues(DIR)

    for issue in issues:
        labels = issue.get("labels")
        labels = map_labels(labels, label_lookup)
        labels = map_repos(labels, issue["repo_name"], REPO_MAP)
        issue["migration"]["labels"] = labels
        write_issue(issue, DIR)
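
build_lookup and map_labels are not included. A minimal sketch of how the CSV-driven label mapping might work, assuming each CSV row has "label" and "new_label" columns and that issue labels are plain name strings (both are assumptions):

def build_lookup(label_map):
    # Hypothetical helper: turn the CSV rows into {old label: new label}.
    return {row["label"]: row["new_label"] for row in label_map}  # assumed columns


def map_labels(labels, label_lookup):
    # Hypothetical helper: translate each source label through the lookup;
    # labels with no mapping are kept unchanged.
    return [label_lookup.get(label, label) for label in labels]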
Code Example #10
def main():
    missing_dependency_issues = []

    # load all dependencies
    depends = load_dependencies(SOURCE_FILE)

    # make a dict of all issues that are blocking or blocked
    d_lookup = build_d_lookup(depends)

    # load all the issues
    issues = _utils.load_issues(DIR)

    for issue in issues:
        repo_id = issue.get("repo_id")
        issue_number = issue.get("number")
        key = f"{repo_id}${issue_number}"

        if key in d_lookup:
            new_issue_number = issue["migration"].get("new_issue_number")

            if not new_issue_number:
                # issue has apparently not been created in github
                logger.error(key)
                raise Exception(
                    f"New issue number for {issue_number} does not exist in GitHub!")

            d_lookup[key]["new_issue_number"] = new_issue_number

        else:
            # issue must be closed, because we haven't downloaded it
            missing_dependency_issues.append(key)

    # build new dependencies file
    depends = build_new_dependencies(depends, d_lookup)

    depends = drop_null_dependencies(depends)

    with open(DEST_FILE, "w") as fout:
        fout.write(json.dumps(depends))

    with open(MISSING_DEPEND_FILE, "w") as fout:
        fout.write(json.dumps(missing_dependency_issues))
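
build_d_lookup is not shown. A minimal sketch, assuming each dependency record has "blocking" and "blocked" issue references (the field names are assumptions; the key format comes from main() above):

def build_d_lookup(depends):
    # Hypothetical helper: index every issue that appears on either side of a
    # dependency so new issue numbers can be attached in a single pass.
    lookup = {}
    for dep in depends:
        for side in ("blocking", "blocked"):  # assumed field names
            ref = dep[side]
            key = f"{ref['repo_id']}${ref['issue_number']}"
            lookup[key] = {
                "repo_id": ref["repo_id"],
                "issue_number": ref["issue_number"],
                "new_issue_number": None,
            }
    return lookup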
Code Example #11
def main():
    issues = _utils.load_issues(DIR)
    
    issue_count = 0

    for issue in issues:
        if (
            not issue.get("migration").get("comments_retreived")
            and issue.get("repo_id") != 140626918
        ):
            """
            we skip comments atd-data-tech
            The issues already exist, but we need to connect the dependencies, etc.
            """
            issue["comments"] = get_comments(issue["repo_name"], issue["number"])
            issue["comments"] = parse_comments(issue["comments"])
            issue["migration"]["comments_retreived"] = True

        logger.info(issue["number"])
        write_issue(issue, DIR)
        issue_count += 1

    logger.info(f"Issues Processed: {issue_count}")
Code Example #12
def main():
    missing_epic_issues = []
    child_issues = {}

    issues = _utils.load_issues(DIR)

    issue_count = 0
    epic_count = 0
    child_issue_count = 0

    # iterate through all issues, identify epics, and collect their child issues
    for issue in issues:
        if not issue.get("is_epic"):
            continue

        for child_issue in issue["epic_issues"]:
            repo_id = child_issue["repo_id"]
            issue_number = child_issue["issue_number"]
            key = f"{repo_id}${issue_number}"
            child_issues[key] = {
                "repo_id": repo_id,
                "issue_number": issue_number
            }

    # iterate through all issues and identify new issue numbers of child issues
    for issue in issues:
        key = f"{issue['repo_id']}${issue['number']}"
        if key in child_issues:
            issue_number = issue["migration"].get("new_issue_number")

            if not issue_number:
                raise Exception(
                    f"{key} does not have a new github issue number!")

            child_issues[key]["new_issue_number"] = issue_number

    # update epics' child issues with their new issue numbers
    for issue in issues:
        issue_count += 1

        if not issue.get("is_epic"):
            continue

        for child_issue in issue["epic_issues"]:
            repo_id = child_issue["repo_id"]
            issue_number = child_issue["issue_number"]
            key = f"{repo_id}${issue_number}"
            new_issue_number = child_issues[key].get("new_issue_number")
            child_issue["new_issue_number"] = new_issue_number
            child_issue_count += 1

            if not new_issue_number:
                # child issue has not been processed, it's probably a closed issue
                # which we're not migrating
                missing_epic_issues.append({
                    "repo_id": child_issue["repo_id"],
                    "issue_number": child_issue["issue_number"],
                })

        # write the updated issue to file
        issue["migration"]["epics_staged"] = True
        write_issue(issue, DIR)
        epic_count += 1

    logger.info(f"Issues Processed: {issue_count}")
    logger.info(f"Epics Processed: {epic_count}")
    logger.info(f"Child Issues Processed: {child_issue_count}")

    with open(MISSING_CHILDREN_FILE, "w") as fout:
        fout.write(json.dumps(missing_epic_issues))