Code example #1
 def act(self, *args, **kwargs):
     """ Run the command
     :param context: A context dict. `context.set_env` is a `dict` of env vars to set for command (overriding existing).
     """
     context = kwargs["context"]
     set_env = context["set_env"]
     with Dir(context['distgit_path']):
         cmd_assert(self.command, set_env=set_env)
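A minimal usage sketch for the action above; the class name, constructor, and context values are illustrative assumptions, not confirmed doozer API:

    # Hypothetical action class wrapping a `self.command` string, per the snippet above.
    action = RunCommandAction('rhpkg switch-branch rhaos-4.6-rhel-8')
    action.act(context={
        'distgit_path': '/tmp/distgits/my-image',  # directory to run the command in
        'set_env': {'KRB5CCNAME': '/tmp/krb5cc'},  # env var overrides for the command
    })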
Code example #2
File: image.py Project: tnozicka/doozer
    def covscan(self, result_archive, repo_type='unsigned', local_repo=None):
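        """Run a Coverity scan for this image's distgit and archive the results.
        :param result_archive: Base directory where results are archived per distgit key and commit hash.
        :param repo_type: Which .oit repo flavor (e.g. 'unsigned') to install from in the scanner image.
        :param local_repo: Optional list of local filesystem repo paths to mount into the scanner image build and prioritize.
        """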
        self.logger.info('Setting up for coverity scan')
        all_js = 'all_results.js'
        diff_js = 'diff_results.js'
        all_html = 'all_results.html'
        diff_html = 'diff_results.html'
        waived_flag = 'waived.flag'

        archive_path = pathlib.Path(result_archive)
        dg_archive_path = archive_path.joinpath(self.distgit_key)
        dg_archive_path.mkdir(parents=True,
                              exist_ok=True)  # /<archive-dir>/<dg-key>

        builders = self.config['from'].builder
        if builders is Missing:
            self.logger.info(
                'No builder images -- does not appear to be container-first. Skipping.'
            )
            return

        dgr = self.distgit_repo()
        with Dir(dgr.distgit_dir):
            dg_commit_hash, _ = exectools.cmd_assert('git rev-parse HEAD',
                                                     strip=True)
            archive_commit_results_path = dg_archive_path.joinpath(
                dg_commit_hash)  # /<archive-dir>/<dg-key>/<hash>
            archive_all_results_js_path = archive_commit_results_path.joinpath(
                all_js)  # /<archive-dir>/<dg-key>/<hash>/all_results.js
            archive_all_results_html_path = archive_commit_results_path.joinpath(
                all_html)  # /<archive-dir>/<dg-key>/<hash>/all_results.html
            archive_diff_results_js_path = archive_commit_results_path.joinpath(
                diff_js)
            archive_diff_results_html_path = archive_commit_results_path.joinpath(
                diff_html)
            archive_waived_flag_path = archive_commit_results_path.joinpath(
                waived_flag)

            def write_record():
                diff = json.loads(
                    archive_diff_results_js_path.read_text(encoding='utf-8'))
                diff_count = len(diff['issues'])
                if diff_count == 0:
                    self.logger.info('No new issues found during scan')
                    archive_waived_flag_path.write_text(
                        '')  # No new differences, mark as waived

                owners = ",".join(self.config.owners or [])
                self.runtime.add_record(
                    'covscan',
                    distgit=self.qualified_name,
                    distgit_key=self.distgit_key,
                    commit_results_path=str(archive_commit_results_path),
                    all_results_js_path=str(archive_all_results_js_path),
                    all_results_html_path=str(archive_all_results_html_path),
                    diff_results_js_path=str(archive_diff_results_js_path),
                    diff_results_html_path=str(archive_diff_results_html_path),
                    diff_count=str(diff_count),
                    waive_path=str(archive_waived_flag_path),
                    waived=str(archive_waived_flag_path.exists()).lower(),
                    owners=owners,
                    image=self.config.name,
                    commit_hash=dg_commit_hash)

            if archive_diff_results_html_path.exists():
                self.logger.info(
                    f'This commit already has scan results ({str(archive_all_results_js_path)}); skipping scan'
                )
                write_record()
                return

            archive_commit_results_path.mkdir(parents=True, exist_ok=True)

            dg_path = pathlib.Path(Dir.getcwd())
            cov_path = dg_path.joinpath('cov')

            dockerfile_path = dg_path.joinpath('Dockerfile')
            if not dockerfile_path.exists():
                self.logger.error(
                    'Dockerfile does not exist in distgit; not rebased yet?')
                return

            dfp = DockerfileParser(str(dockerfile_path))
            covscan_builder_df_path = dg_path.joinpath(
                'Dockerfile.covscan.builder')

            with covscan_builder_df_path.open(mode='w+') as df_out:
                first_parent = dfp.parent_images[0]
                # e.g. openshift/golang-builder:latest => ns=openshift, repo_tag=golang-builder:latest
                ns, repo_tag = first_parent.split('/')
                if '@' in repo_tag:
                    # e.g. golang-builder@sha256:12345 => repo=golang-builder, tag=sha256:12345
                    repo, tag = repo_tag.split('@')
                    tag = '@' + tag
                else:
                    if ':' in repo_tag:
                        # e.g. golang-builder:latest => repo=golang-builder, tag=latest
                        repo, tag = repo_tag.split(':')
                    else:
                        repo = repo_tag
                        tag = 'latest'
                    tag = ':' + tag

                first_parent_url = f'registry-proxy.engineering.redhat.com/rh-osbs/{ns}-{repo}{tag}'

                # build a local image name we can use.
                # We will build a local scanner image based on the target distgit's first parent.
                # It will have coverity tools installed.
                m = hashlib.md5()
                m.update(first_parent.encode('utf-8'))
                local_builder_tag = f'{repo}-{m.hexdigest()}'

                vol_mount_arg = ''
                make_image_repo_files = ''

                if local_repo:
                    for idx, lr in enumerate(local_repo):
                        make_image_repo_files += f"""
# Create a repo able to pull from the local filesystem and prioritize it for speed.
RUN echo '[covscan_local_{idx}]' > /etc/yum.repos.d/covscan_local_{idx}.repo
RUN echo 'baseurl=file:///covscan_local_{idx}' >> /etc/yum.repos.d/covscan_local_{idx}.repo
RUN echo skip_if_unavailable=True >> /etc/yum.repos.d/covscan_local_{idx}.repo
RUN echo gpgcheck=0 >> /etc/yum.repos.d/covscan_local_{idx}.repo
RUN echo enabled=1 >> /etc/yum.repos.d/covscan_local_{idx}.repo
RUN echo enabled_metadata=1 >> /etc/yum.repos.d/covscan_local_{idx}.repo
RUN echo priority=1 >> /etc/yum.repos.d/covscan_local_{idx}.repo
    """
                        vol_mount_arg += f' -mount {lr}:/covscan_local_{idx}'
                else:
                    make_image_repo_files = 'RUN wget https://cov01.lab.eng.brq.redhat.com/coverity/install/covscan/covscan-rhel-7.repo -O /etc/yum.repos.d/covscan.repo\n'

                df_out.write(f'''FROM {first_parent_url}
LABEL DOOZER_COVSCAN_PARENT_IMAGE=true
LABEL DOOZER_COVSCAN_FIRST_PARENT={local_builder_tag}
LABEL DOOZER_COVSCAN_GROUP={self.runtime.group_config.name}

{make_image_repo_files}

RUN yum install -y wget

RUN wget {self.cgit_url(".oit/" + repo_type + ".repo")} -O /etc/yum.repos.d/oit.repo
RUN yum install -y python36

# Enable epel for csmock
RUN wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
RUN yum -y install epel-release-latest-7.noarch.rpm

# Certs necessary to install from covscan repos
RUN wget https://password.corp.redhat.com/RH-IT-Root-CA.crt -O /etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt --no-check-certificate
RUN wget https://password.corp.redhat.com/legacy.crt -O /etc/pki/ca-trust/source/anchors/legacy.crt --no-check-certificate
RUN wget https://engineering.redhat.com/Eng-CA.crt -O /etc/pki/ca-trust/source/anchors/Eng-CA.crt --no-check-certificate
RUN update-ca-trust
RUN update-ca-trust enable

RUN yum install -y cov-sa csmock csmock-plugin-coverity csdiff
    ''')

            rc, out, err = exectools.cmd_gather(
                f'docker image inspect {local_builder_tag}')
            if rc != 0:
                rc, _, _ = exectools.cmd_gather(
                    f'imagebuilder {vol_mount_arg} -t {local_builder_tag} -f {str(covscan_builder_df_path)} {str(dg_path)}'
                )
                if rc != 0:
                    self.logger.error(
                        f'Unable to create scanner image based on builder image: {first_parent_url}'
                    )
                    # TODO: log this as a record and make sure the pipeline warns artist.
                    # until covscan can be installed on rhel-8, this is expected for some images.
                    return

            # We should now have an image tagged local_builder_tag based on the original image's builder image
            # but also with covscan tools installed.

            covscan_df_path = dg_path.joinpath('Dockerfile.covscan')

            runs = ['#!/bin/bash', 'set -o xtrace']
            setup = []
            first_from = False
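            # Replay the first stage of the distgit Dockerfile: COPY/ENV/WORKDIR
            # entries are reused verbatim in the scanner Dockerfile, while RUN
            # entries are collected into a shell script for cov-build to wrap.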
            for entry in dfp.structure:
                content = entry['content']
                instruction = entry['instruction'].upper()
                if instruction == 'FROM':
                    if first_from:
                        break
                    first_from = True

                if instruction == 'COPY':
                    setup.append(content)

                if instruction == 'ENV':
                    setup.append(content)

                if instruction == 'RUN':
                    runs.append(content.strip()[4:])

                if instruction == 'WORKDIR':
                    setup.append(content)  # Pass into setup so things like ADD work as expected
                    path = content.strip()[7:]
                    runs.append('mkdir -p ' + path)  # Also pass into RUN so they have the correct working dir
                    runs.append('cd ' + path)

            run_file = '\n'.join(runs)
            self.logger.info(f'Constructed run file:\n{run_file}')

            build_script_name = 'doozer_covscan_runit.sh'
            with dg_path.joinpath(build_script_name).open(mode='w+') as runit:
                runit.write(run_file)

            with covscan_df_path.open(mode='w+') as df_out:
                df_out.write(f'FROM {local_builder_tag}\n')
                df_out.write(
                    f'LABEL DOOZER_COVSCAN_GROUP={self.runtime.group_config.name}\n'
                )
                df_out.write(f'ADD {build_script_name} /\n')
                df_out.write(f'RUN chmod +x /{build_script_name}\n')
                df_out.write('\n'.join(setup) + '\n')
                df_out.write('ENV PATH=/opt/coverity/bin:${PATH}\n')

            run_tag = f'{local_builder_tag}.{self.image_name_short}'
            rc, out, err = exectools.cmd_gather(
                f'docker image inspect {run_tag}')
            if rc != 0:
                exectools.cmd_assert(
                    f'imagebuilder -t {run_tag} -f {str(covscan_df_path)} {str(dg_path)}'
                )

            cov_path.mkdir(exist_ok=True)
            emit_path = cov_path.joinpath('emit')
            if not emit_path.exists():
                rc, out, err = exectools.cmd_gather(
                    f'docker run --hostname=covscan --rm -v {str(cov_path)}:/cov:z {run_tag} cov-build --dir=/cov /{build_script_name}'
                )
                if rc != 0:
                    self.logger.error('Did not achieve full compilation')

                build_log_path = cov_path.joinpath('build-log.txt')
                build_log = build_log_path.read_text(encoding='utf-8')
                if '[WARNING] No files were emitted' in build_log:
                    self.logger.error(
                        f'Build did not emit anything. Check out the build-log.txt: {build_log_path}'
                    )
                    # TODO: log this as a record and make sure the pipeline warns artist
                    return

            else:
                self.logger.info(
                    'covscan emit already exists -- skipping this step')

            def run_docker_cov(cmd):
                return exectools.cmd_assert(
                    f'docker run --hostname=covscan --rm -v {str(cov_path)}:/cov:z {run_tag} {cmd}'
                )

            summary_path = cov_path.joinpath('output', 'summary.txt')
            if not summary_path.exists() or 'Time taken by analysis' not in summary_path.read_text(encoding='utf-8'):
                # This can take an extremely long time and use virtually all CPU
                run_docker_cov(
                    'cov-analyze  --dir=/cov "--wait-for-license" "-co" "ASSERT_SIDE_EFFECT:macro_name_lacks:^assert_(return|se)\\$" "-co" "BAD_FREE:allow_first_field:true" "--include-java" "--fb-max-mem=4096" "--security" "--concurrency" --allow-unmerged-emits'
                )
            else:
                self.logger.info(
                    'covscan analysis already exists -- skipping this step')

            all_results_js, _ = run_docker_cov(
                'cov-format-errors --json-output-v2 /dev/stdout --dir=/cov')
            all_results_js_path = cov_path.joinpath(all_js)
            all_results_js_path.write_text(all_results_js, encoding='utf-8')

            all_results_html = "<html>Error generating HTML report.</html>"
            try:
                # Rarely, cshtml just outputs empty html and rc==1; just ignore it.
                all_results_html, _ = run_docker_cov(f'cshtml /cov/{all_js}')
            except Exception:
                self.logger.warning(
                    f'Error generating HTML report for {str(archive_all_results_js_path)}'
                )

            all_results_html_path = cov_path.joinpath(all_html)
            all_results_html_path.write_text(all_results_html,
                                             encoding='utf-8')

            # Otherwise, root will own these files
            run_docker_cov(f'chown -R {os.getuid()}:{os.getgid()} /cov')

            # Write out the files to the archive directory as well
            archive_all_results_js_path.write_text(all_results_js,
                                                   encoding='utf-8')
            archive_all_results_html_path.write_text(all_results_html,
                                                     encoding='utf-8')

            # Now on to computing diffs

            # Search backwards through commit history; try to find a hash for this distgit that has been scanned before
            diff_results_js = all_results_js  # Unless we find an old hash, diff results match all results
            commit_log, _ = exectools.cmd_assert(
                "git --no-pager log --pretty='%H' -1000")
            for old_commit in commit_log.split()[1:]:
                old_all_results_js_path = dg_archive_path.joinpath(
                    old_commit, all_js)
                old_are_results_waived_path = dg_archive_path.joinpath(
                    old_commit, waived_flag)
                # Only compute diff from commit if results were actually waived
                # This file should be created by the Jenkins / scanning pipeline.
                if old_are_results_waived_path.exists():
                    diff_results_js, _ = exectools.cmd_assert(
                        f'csdiff {str(archive_all_results_js_path)} {str(old_all_results_js_path)}'
                    )
                    break

            archive_diff_results_js_path.write_text(diff_results_js,
                                                    encoding='utf-8')

            diff_results_html = "<html>Error generating HTML report.</html>"
            try:
                # Rarely, cshtml just outputs empty html and rc==1; just ignore it.
                diff_results_html, _ = exectools.cmd_assert(
                    f'cshtml {str(archive_diff_results_js_path)}')
            except Exception:
                self.logger.warning(
                    f'Error generating HTML report for {str(archive_diff_results_js_path)}'
                )

            archive_diff_results_html_path.write_text(diff_results_html,
                                                      encoding='utf-8')

            write_record()
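A minimal sketch of how covscan might be driven; runtime.image_metas() and the argument values are illustrative assumptions, not a confirmed doozer entry point:

    for image_meta in runtime.image_metas():
        image_meta.covscan(
            result_archive='/mnt/covscan-archive',   # results land under <archive>/<dg-key>/<hash>/
            repo_type='unsigned',                    # which .oit repo flavor to install from
            local_repo=['/mnt/local-rpms'],          # optional local yum repos to mount and prioritize
        )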
Code example #3
File: images_streams.py Project: adarshtri/doozer
def images_streams_prs(runtime, github_access_token, bug, interstitial,
                       ignore_ci_master, draft_prs, moist_run, add_labels):
    runtime.initialize(clone_distgits=False, clone_source=False)
    g = Github(login_or_token=github_access_token)
    github_user = g.get_user()

    major = runtime.group_config.vars['MAJOR']
    minor = runtime.group_config.vars['MINOR']
    interstitial = int(interstitial)

    master_major, master_minor = extract_version_fields(what_is_in_master(),
                                                        at_least=2)
    if not ignore_ci_master and (major > master_major or minor > master_minor):
        # ART is building a release before it is in master. Too early to open PRs.
        runtime.logger.warning(
            f'Target {major}.{minor} has not been in master yet (it is tracking {master_major}.{master_minor}); skipping PRs'
        )
        exit(0)

    prs_in_master = (major == master_major
                     and minor == master_minor) and not ignore_ci_master

    pr_links = {}  # map of distgit_key to PR URLs associated with updates
    new_pr_links = {}
    skipping_dgks = set()  # If a distgit key is skipped, its children will see it in this list and skip themselves.
    for image_meta in runtime.ordered_image_metas():
        dgk = image_meta.distgit_key
        logger = image_meta.logger
        logger.info('Analyzing image')

        alignment_prs_config = image_meta.config.content.source.ci_alignment.streams_prs

        if alignment_prs_config and alignment_prs_config.enabled is not Missing and not alignment_prs_config.enabled:
            # Make sure this is an explicit False. Missing means the default or True.
            logger.info('The image has alignment PRs disabled; ignoring')
            continue

        from_config = image_meta.config['from']
        if not from_config:
            logger.info('Skipping PRs since there is no configured .from')
            continue

        desired_parents = []
        builders = from_config.builder or []
        for builder in builders:
            upstream_image = resolve_upstream_from(runtime, builder)
            if not upstream_image:
                logger.warning(
                    f'Unable to resolve upstream image for: {builder}')
                break
            desired_parents.append(upstream_image)

        parent_upstream_image = resolve_upstream_from(runtime, from_config)
        if len(desired_parents) != len(builders) or not parent_upstream_image:
            logger.warning(
                'Unable to find all ART equivalent upstream images for this image'
            )
            continue

        desired_parents.append(parent_upstream_image)
        desired_parent_digest = calc_parent_digest(desired_parents)
        logger.info(
            f'Found desired FROM state of: {desired_parents} with digest: {desired_parent_digest}'
        )

        source_repo_url, source_repo_branch = _get_upstream_source(
            runtime, image_meta)

        if not source_repo_url:
            # No upstream to clone; no PRs to open
            continue

        public_repo_url, public_branch = runtime.get_public_upstream(
            source_repo_url)
        if not public_branch:
            public_branch = source_repo_branch

        # There are two standard upstream branching styles:
        # release-4.x   : CI fast-forwards from master when appropriate
        # openshift-4.x : Upstream team manages completely.
        # For the former style, we may need to open the PRs against master.
        # For the latter style, always open directly against the named branch.
        if public_branch.startswith('release-') and prs_in_master:
            # TODO: auto-detect default branch for repo instead of assuming master
            public_branch = 'master'

        _, org, repo_name = split_git_url(public_repo_url)

        public_source_repo = g.get_repo(f'{org}/{repo_name}')

        try:
            fork_repo_name = f'{github_user.login}/{repo_name}'
            fork_repo = g.get_repo(fork_repo_name)
        except UnknownObjectException:
            # Repo doesn't exist; fork it
            fork_repo = github_user.create_fork(public_source_repo)

        fork_branch_name = f'art-consistency-{runtime.group_config.name}-{dgk}'
        fork_branch_head = f'{github_user.login}:{fork_branch_name}'

        fork_branch = None
        try:
            fork_branch = fork_repo.get_branch(fork_branch_name)
        except UnknownObjectException:
            # Doesn't presently exist and will need to be created
            pass
        except GithubException as ge:
            # This API seems to return 404 instead of UnknownObjectException.
            # So allow 404 to pass through as well.
            if ge.status != 404:
                raise

        public_repo_url = convert_remote_git_to_ssh(public_repo_url)
        clone_dir = os.path.join(runtime.working_dir, 'clones', dgk)
        # Clone the private url to make the best possible use of our doozer_cache
        runtime.git_clone(source_repo_url, clone_dir)

        with Dir(clone_dir):
            exectools.cmd_assert(f'git remote add public {public_repo_url}')
            exectools.cmd_assert(
                f'git remote add fork {convert_remote_git_to_ssh(fork_repo.git_url)}'
            )
            exectools.cmd_assert('git fetch --all')

            # The path to the Dockerfile in the target branch
            if image_meta.config.content.source.dockerfile is not Missing:
                # Be aware that this attribute sometimes contains path elements too.
                dockerfile_name = image_meta.config.content.source.dockerfile
            else:
                dockerfile_name = "Dockerfile"

            df_path = Dir.getpath()
            if image_meta.config.content.source.path:
                dockerfile_name = os.path.join(
                    image_meta.config.content.source.path, dockerfile_name)

            df_path = df_path.joinpath(dockerfile_name)

            fork_branch_parent_digest = None
            fork_branch_parents = None
            if fork_branch:
                # If there is already an art reconciliation branch, get an MD5
                # of the FROM images in the Dockerfile in that branch.
                exectools.cmd_assert(f'git checkout fork/{fork_branch_name}')
                fork_branch_parent_digest, fork_branch_parents = extract_parent_digest(
                    df_path)

            # Now change over to the target branch in the actual public repo
            exectools.cmd_assert(f'git checkout public/{public_branch}')

            source_branch_parent_digest, source_branch_parents = extract_parent_digest(
                df_path)

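            # Three digests are in play: what ART wants (desired), what the public
            # branch currently has (source), and what our fork branch has (if any).
            # If the public branch already matches, there is nothing to reconcile.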
            if desired_parent_digest == source_branch_parent_digest:
                green_print(
                    'Desired digest and source digest match; Upstream is in a good state'
                )
                continue

            yellow_print(
                f'Upstream dockerfile does not match desired state in {public_repo_url}/blob/{public_branch}/{dockerfile_name}'
            )
            print(
                f'Desired parents: {desired_parents} ({desired_parent_digest})'
            )
            print(
                f'Source parents: {source_branch_parents} ({source_branch_parent_digest})'
            )
            print(
                f'Fork branch digest: {fork_branch_parents} ({fork_branch_parent_digest})'
            )

            first_commit_line = f"Updating {image_meta.name} builder & base images to be consistent with ART"
            reconcile_info = f"Reconciling with {convert_remote_git_to_https(runtime.gitdata.origin_url)}/tree/{runtime.gitdata.commit_hash}/images/{os.path.basename(image_meta.config_filename)}"

            diff_text = None
            if fork_branch_parent_digest != desired_parent_digest:
                # The fork branch either does not exist, or does not have the desired parent image state
                # Let's create a local branch that will contain the Dockerfile in the state we desire.
                work_branch_name = '__mod'
                exectools.cmd_assert(f'git checkout public/{public_branch}')
                exectools.cmd_assert(f'git checkout -b {work_branch_name}')
                with df_path.open(mode='r+') as handle:
                    dfp = DockerfileParser(cache_content=True,
                                           fileobj=io.BytesIO())
                    dfp.content = handle.read()
                    dfp.parent_images = desired_parents
                    handle.truncate(0)
                    handle.seek(0)
                    handle.write(dfp.content)

                diff_text, _ = exectools.cmd_assert(f'git diff {str(df_path)}')

                if not moist_run:
                    exectools.cmd_assert(f'git add {str(df_path)}')
                    commit_prefix = ''
                    if repo_name.startswith('kubernetes'):
                        # A couple of repos have this requirement; openshift/kubernetes & openshift/kubernetes-autoscaler.
                        # This check may suffice for now, but it may eventually need to be in doozer metadata.
                        commit_prefix = 'UPSTREAM: <carry>: '
                    commit_msg = f"""{commit_prefix}{first_commit_line}
{reconcile_info}
"""
                    exectools.cmd_assert(
                        f'git commit -m "{commit_msg}"'
                    )  # Add a commit atop the public branch's current state
                    # Create or update the remote fork branch
                    exectools.cmd_assert(
                        f'git push --force fork {work_branch_name}:{fork_branch_name}'
                    )

            # At this point, we have a fork branch in the proper state
            pr_body = f"""{first_commit_line}
{reconcile_info}

If you have any questions about this pull request, please reach out to `@art-team` in the `#aos-art` CoreOS Slack channel.
"""

            parent_pr_url = None
            parent_meta = image_meta.resolve_parent()
            if parent_meta:
                if parent_meta.distgit_key in skipping_dgks:
                    skipping_dgks.add(image_meta.distgit_key)
                    yellow_print(
                        f'Image has parent {parent_meta.distgit_key} which was skipped; skipping self: {image_meta.distgit_key}'
                    )
                    continue

                parent_pr_url = pr_links.get(parent_meta.distgit_key, None)
                if parent_pr_url:
                    if parent_meta.config.content.source.ci_alignment.streams_prs.merge_first:
                        skipping_dgks.add(image_meta.distgit_key)
                        yellow_print(
                            f'Image has parent {parent_meta.distgit_key} open PR ({parent_pr_url}) and streams_prs.merge_first==True; skipping PR opening for this image {image_meta.distgit_key}'
                        )
                        continue

                    # If the parent has an open PR associated with it, make sure the
                    # child PR notes that the parent PR should merge first.
                    pr_body += f'\nDepends on {parent_pr_url} . Allow it to merge and then run `/test all` on this PR.'

            # Let's see if there is a PR opened
            open_prs = list(
                public_source_repo.get_pulls(state='open',
                                             head=fork_branch_head))
            if open_prs:
                existing_pr = open_prs[0]
                # Update body, but never title; the upstream team may need to set something like a 'Bug XXXX:' prefix there.
                # Don't muck with it.

                if alignment_prs_config.auto_label and add_labels:
                    # If we are to automatically add labels to this upstream PR, do so.
                    existing_pr.set_labels(*alignment_prs_config.auto_label)

                existing_pr.edit(body=pr_body)
                pr_url = existing_pr.html_url
                pr_links[dgk] = pr_url
                yellow_print(
                    f'A PR is already open requesting desired reconciliation with ART: {pr_url}'
                )
                continue

            # Otherwise, we need to create a pull request
            if moist_run:
                pr_links[dgk] = f'MOIST-RUN-PR:{dgk}'
                green_print(
                    f'Would have opened PR against: {public_source_repo.html_url}/blob/{public_branch}/{dockerfile_name}.'
                )
                if parent_pr_url:
                    green_print(
                        f'Would have identified dependency on PR: {parent_pr_url}.'
                    )
                if diff_text:
                    yellow_print(diff_text)
                else:
                    yellow_print(
                        f'Fork from which PR would be created ({fork_branch_head}) is populated with desired state.'
                    )
            else:
                pr_title = first_commit_line
                if bug:
                    pr_title = f'Bug {bug}: {pr_title}'
                new_pr = public_source_repo.create_pull(title=pr_title,
                                                        body=pr_body,
                                                        base=public_branch,
                                                        head=fork_branch_head,
                                                        draft=draft_prs)
                if alignment_prs_config.auto_label and add_labels:
                    # If we are to automatically add labels to this upstream PR, do so.
                    new_pr.set_labels(*alignment_prs_config.auto_label)
                pr_msg = f'A new PR has been opened: {new_pr.html_url}'
                pr_links[dgk] = new_pr.html_url
                new_pr_links[dgk] = new_pr.html_url
                logger.info(pr_msg)
                yellow_print(pr_msg)
                print(
                    f'Sleeping {interstitial} seconds before opening another PR to prevent flooding prow...'
                )
                time.sleep(interstitial)

    if new_pr_links:
        print('Newly opened PRs:')
        print(yaml.safe_dump(new_pr_links))

    if pr_links:
        print('Currently open PRs:')
        print(yaml.safe_dump(pr_links))
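The reconciliation above hinges on calc_parent_digest and extract_parent_digest, which are not shown in this listing. A plausible minimal sketch, assuming the digest is simply a hash over the ordered FROM images (mirroring the md5-over-parent pattern in code example #2); this is illustrative, not the doozer source:

    import hashlib
    import io

    from dockerfile_parse import DockerfileParser


    def calc_parent_digest(parents):
        # Hash the ordered FROM images so any builder/base change shows up
        # as a digest mismatch. (Assumed implementation.)
        m = hashlib.md5()
        m.update(';'.join(parents).encode('utf-8'))
        return m.hexdigest()


    def extract_parent_digest(df_path):
        # Read the Dockerfile and digest its current parent images.
        with df_path.open(mode='r') as handle:
            dfp = DockerfileParser(cache_content=True, fileobj=io.BytesIO())
            dfp.content = handle.read()
        return calc_parent_digest(dfp.parent_images), dfp.parent_images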