Example #1
def land_commit(
    sha: str,
    repo: Repository,
    base_repo: Repository,
    pr: Optional[PullRequest],
    files: Iterable[Union[File, str]],
    *,
    target_app: Optional[str] = None,
):
    """
    :param sha: The hash of the commit we are building
    :param repo: The repo containing the above commit
    :param base_repo: The *base* cs61a-apps repo containing the deploy.yaml config
    :param pr: The PR made to trigger the build, if any
    :param files: Files changed in the commit, used for target determination
    :param target_app: App to rebuild, if not all
    """
    if target_app:
        targets = [target_app]
    else:
        targets = determine_targets(
            repo, files if repo.full_name == base_repo.full_name else [])
    pr_number = pr.number if pr else 0
    enqueue_builds(targets, pr_number, pack(repo.clone_url, sha))
    dequeue_and_build(base_repo)
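The pack/unpack helpers referenced above are not shown in these examples. Judging from their usage, pack(repo.clone_url, sha) produces the packed_ref string handed to the build queue, and unpack later splits it back into a clone URL and a commit hash. A minimal sketch under that assumption (the delimiter is made up here) could be:

# Hypothetical sketch of the pack/unpack pair used by land_commit; the
# actual cs61a-apps implementation and its delimiter may differ.
from typing import Tuple


def pack(clone_url: str, sha: str) -> str:
    """Combine a clone URL and a commit hash into a single packed_ref string."""
    return f"{clone_url}|{sha}"


def unpack(packed_ref: str) -> Tuple[str, str]:
    """Split a packed_ref back into (clone_url, sha)."""
    clone_url, sha = packed_ref.split("|", 1)
    return clone_url, sha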
Example #2
def webhook():
    if not hmac.compare_digest(
        "sha1="
        + hmac.new(
            get_secret(secret_name="GITHUB_WEBHOOK_SECRET").encode("ascii"),
            request.get_data(),
            "sha1",
        ).hexdigest(),
        request.headers["X-Hub-Signature"],
    ):
        abort(401)

    payload = request.json

    g = Github(get_secret(secret_name="GITHUB_ACCESS_TOKEN"))

    if "pusher" in payload and payload["ref"] == "refs/heads/master":
        base_repo = g.get_repo(GITHUB_REPO)
        repo = g.get_repo(payload["repository"]["id"])
        sha = payload["after"]
        land_commit(
            sha,
            repo,
            base_repo,
            None,
            [
                file
                for commit in payload["commits"]
                for file in commit["added"] + commit["modified"] + commit["removed"]
            ],
        )
        delete_unused_services()

    if "pull_request" in payload:
        repo_id = payload["repository"]["id"]
        repo = g.get_repo(repo_id)
        pr = repo.get_pull(payload["pull_request"]["number"])

        if payload["action"] in ("opened", "synchronize", "reopened"):
            if repo.full_name != GITHUB_REPO:
                land_commit(pr.head.sha, repo, g.get_repo(GITHUB_REPO), pr, [])
            else:
                for target in determine_targets(repo, pr.get_files()):
                    report_build_status(
                        target,
                        pr.number,
                        pack(repo.clone_url, pr.head.sha),
                        BuildStatus.pushed,
                        None,
                        None,
                        private=True,
                    )

        elif payload["action"] == "closed":
            set_pr_comment("PR closed, shutting down PR builds...", pr)
            delete_unused_services(pr.number)
            set_pr_comment("All PR builds shut down.", pr)

    return ""
Example #3
def update_status(packed_ref: str, pr_number: int):
    g = Github(get_secret(secret_name="GITHUB_ACCESS_TOKEN"))
    repo_url, sha = unpack(packed_ref)
    # Turn the URL path "/owner/repo.git" into "owner/repo" (fragile, but it works)
    repo_name = urlparse(repo_url).path.split(".")[0][1:]
    repo = g.get_repo(repo_name)

    # First we will update the commit-specific status indicator
    with connect_db() as db:
        statuses = db(
            "SELECT app, status FROM builds WHERE packed_ref=%s", [packed_ref]
        ).fetchall()
    statuses = [(app, BuildStatus(status)) for app, status in statuses]
    if all(status == BuildStatus.success for _, status in statuses):
        repo.get_commit(sha).create_status(
            "success",
            "https://logs.cs61a.org/service/buildserver",
            "All modified services built!",
            "Pusher",
        )
    elif any(status == BuildStatus.failure for _, status in statuses):
        repo.get_commit(sha).create_status(
            "failure",
            "https://logs.cs61a.org/service/buildserver",
            "Pusher failed to build a modified service",
            "Pusher",
        )
    elif all(
        status in (BuildStatus.building, BuildStatus.queued) for _, status in statuses
    ):
        repo.get_commit(sha).create_status(
            "pending",
            "https://logs.cs61a.org/service/buildserver",
            "Pusher is building all modified services",
            "Pusher",
        )
    else:
        # There are no failures, but not everything is building / built
        repo.get_commit(sha).create_status(
            "pending",
            "https://logs.cs61a.org/service/buildserver",
            "You must build all modified apps before merging",
            "Pusher",
        )

    if pr_number == 0:
        return

    pr = repo.get_pull(pr_number)
    # Now we will update the PR comment, looking at builds for all packed_refs in the PR
    apps = determine_targets(repo, pr.get_files())
    success = []
    failure = []
    running = []
    queued = []
    triggerable = []
    with connect_db() as db:
        for app in apps:
            successful_build = db(
                "SELECT url, log_url, unix, packed_ref FROM builds WHERE app=%s AND pr_number=%s AND status='success' ORDER BY unix DESC LIMIT 1",
                [app, pr_number],
            ).fetchone()
            if successful_build:
                url, log_url, success_unix, packed_ref = successful_build
                _, sha = unpack(packed_ref)
                if url:
                    for link in url.split(","):
                        success.append((app, link, sha, log_url))
                else:
                    success.append((app, None, sha, log_url))

            failed_build = db(
                "SELECT unix, log_url, packed_ref FROM builds WHERE app=%s AND pr_number=%s AND status='failure' ORDER BY unix DESC LIMIT 1",
                [app, pr_number],
            ).fetchone()
            if failed_build:
                unix, log_url, packed_ref = failed_build
                if not successful_build or success_unix < unix:
                    _, sha = unpack(packed_ref)
                    failure.append((app, sha, log_url))

            running_build = db(
                "SELECT packed_ref FROM builds WHERE app=%s AND pr_number=%s AND status='building'",
                [app, pr_number],
            ).fetchone()
            if running_build:
                [packed_ref] = running_build
                _, sha = unpack(packed_ref)
                running.append((app, sha))

            queued_build = db(
                "SELECT packed_ref FROM builds WHERE app=%s AND pr_number=%s AND status='queued'",
                [app, pr_number],
            ).fetchone()
            if queued_build:
                [packed_ref] = queued_build
                _, sha = unpack(packed_ref)
                queued.append((app, sha))

            latest_commit_build = db(
                "SELECT * FROM builds WHERE app=%s AND pr_number=%s AND packed_ref=%s AND status!='pushed'",
                [app, pr_number, pack(repo_url, pr.head.sha)],
            ).fetchone()
            if not latest_commit_build:
                triggerable.append(app)

    if repo.name == "berkeley-cs61a":
        message = f"## Build Status ([pr/{pr_number}]({pr.html_url}))\n\n"
    elif repo.name == "cs61a-apps":
        message = f"## Build Status ([apps/{pr_number}]({pr.html_url}))\n\n"
    else:
        message = f"## Build Status (#{pr_number})\n\n"

    if success:
        message += (
            "**Successful Builds**\n"
            + "\n".join(
                f" - [{host}](https://{host}) ({sha}) [[logs]({log_url})]"
                if host
                else f" - `{app}` ({sha}) [[logs]({log_url})]"
                for app, host, sha, log_url in success
            )
            + "\n\n"
        )

    if failure:
        message += (
            "**Failed Builds**\n"
            + "\n".join(
                f" - `{app}` ({sha}) [[logs]({log_url})]"
                for app, sha, log_url in failure
            )
            + "\n\n"
        )

    if running:
        message += (
            "**Running Builds**\n"
            + "\n".join(f" - `{app}` ({sha})" for app, sha in running)
            + "\n\n"
        )

    if queued:
        message += (
            "**Queued Builds**\n"
            + "\n".join(f" - `{app}` ({sha})" for app, sha in queued)
            + "\n\n"
        )

    if (success or failure or running or queued) and triggerable:
        message += "-----\n"

    if triggerable:
        message += (
            f"**[Click here]({url_for('trigger_build', pr_number=pr.number)})** to trigger all builds "
            f"for the most recent commit ({pr.head.sha})\n\n"
            "Or trigger builds individually:\n"
        ) + "\n".join(
            f" - [Click here]({url_for('trigger_build', pr_number=pr.number, app=app)}) "
            f"to build `{app}` at the most recent commit ({pr.head.sha})"
            for app in triggerable
        )

    set_pr_comment(message, pr)
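update_status() depends on a BuildStatus enum and a builds table that are defined elsewhere. From the status strings stored in SQL ('pushed', 'queued', 'building', 'success', 'failure') and the columns the queries read (app, pr_number, packed_ref, status, url, log_url, unix), their shape can be guessed; the sketch below is an inference, not the actual cs61a-apps definitions.

# Inferred shape of BuildStatus and the builds table, based on the queries
# above; an assumption, not the real cs61a-apps schema.
from enum import Enum


class BuildStatus(Enum):
    pushed = "pushed"      # commit seen, build not yet triggered
    queued = "queued"      # build accepted, waiting to run
    building = "building"  # build currently running
    success = "success"    # build finished and deployed
    failure = "failure"    # build raised an error


BUILDS_TABLE = """
CREATE TABLE builds (
    app VARCHAR(128),        -- build target name
    pr_number INTEGER,       -- 0 for pushes to the default branch
    packed_ref VARCHAR(512), -- pack(clone_url, sha)
    status VARCHAR(16),      -- one of the BuildStatus values
    url TEXT,                -- comma-separated deployed hostnames, if any
    log_url TEXT,            -- link to the build logs
    unix INTEGER             -- timestamp of the build
);
"""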
Example #4
def land_commit(
    sha: str,
    repo: Repository,
    base_repo: Repository,
    pr: Optional[PullRequest],
    files: Iterable[Union[File, str]],
    *,
    target_app: Optional[str] = None,
    dequeue_only=False,
):
    """
    :param sha: The hash of the commit we are building
    :param repo: The repo containing the above commit
    :param base_repo: The *base* cs61a-apps repo containing the deploy.yaml config
    :param pr: The PR made to trigger the build, if any
    :param files: Files changed in the commit, used for target determination
    :param target_app: App to rebuild, if not all
    :param dequeue_only: Only pop targets off the queue, do not build any new targets
    """
    if dequeue_only:
        targets = []
    elif target_app:
        targets = [target_app]
    else:
        targets = determine_targets(
            repo, files if repo.full_name == base_repo.full_name else [])
    pr_number = pr.number if pr else 0
    grouped_targets = enqueue_builds(targets, pr_number,
                                     pack(repo.clone_url, sha))
    for packed_ref, targets in grouped_targets.items():
        repo_clone_url, sha = unpack(packed_ref)
        # If the commit is made on the base repo, take the config from the current commit.
        # Otherwise, retrieve it from the default branch.
        clone_commit(
            base_repo.clone_url,
            sha if repo_clone_url == base_repo.clone_url else
            base_repo.get_branch(base_repo.default_branch).commit.sha,
        )
        apps = [App(target) for target in targets]
        for app in apps:
            with tempfile.TemporaryFile("w+") as logs:
                try:
                    with redirect_descriptor(stdout, logs), redirect_descriptor(
                        stderr, logs
                    ):
                        land_app(app, pr_number, sha, repo)
                    if app.config is not None:
                        update_service_routes([app], pr_number)
                except:
                    traceback.print_exc(file=logs)
                    logs.seek(0)
                    report_build_status(
                        app.name,
                        pr_number,
                        pack(repo.clone_url, sha),
                        BuildStatus.failure,
                        None,
                        logs.read(),
                        private=repo.full_name == base_repo.full_name,
                    )
                else:
                    logs.seek(0)
                    report_build_status(
                        app.name,
                        pr_number,
                        pack(repo.clone_url, sha),
                        BuildStatus.success,
                        None if app.config is None else ",".join(
                            hostname.to_str()
                            for hostname in get_pr_subdomains(app, pr_number)),
                        logs.read(),
                        private=repo.full_name == base_repo.full_name,
                    )

    if grouped_targets:
        # because we ran a build, we need to clear the queue of anyone we blocked
        # we run this in a new worker to avoid timing out
        clear_queue(repo=repo.full_name, pr_number=pr_number, noreply=True)
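redirect_descriptor is used to capture everything a build writes to stdout and stderr, including output from any subprocesses, into the temporary log file that is later attached to the build report. A minimal sketch of such a helper, assumed to swap file descriptors with os.dup/os.dup2 (the real implementation in cs61a-apps may differ), could be:

# Minimal sketch of a descriptor-level redirect, assumed to behave like the
# redirect_descriptor helper used above; the actual helper may differ.
import os
from contextlib import contextmanager


@contextmanager
def redirect_descriptor(src, dest):
    """Temporarily point src's file descriptor at dest's, restoring it on exit."""
    src.flush()
    saved_fd = os.dup(src.fileno())       # remember where src used to point
    os.dup2(dest.fileno(), src.fileno())  # make writes to src land in dest
    try:
        yield
    finally:
        src.flush()
        os.dup2(saved_fd, src.fileno())   # restore the original destination
        os.close(saved_fd)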