Example #1
def find_bugs_blocker_cli(runtime: Runtime, include_status, exclude_status,
                          output):
    """
List active OCP blocker bugs for the target-releases.
default bug status to search: ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_DEV', 'ON_QA']
Use --exclude_status to filter out from default status list.

    Find blocker bugs for 4.6:
\b
    $ elliott -g openshift-4.6 find-bugs:blocker

    Output in json format:
\b
    $ elliott -g openshift-4.6 find-bugs:blocker --output json
"""
    runtime.initialize()
    find_bugs_obj = FindBugsBlocker()
    find_bugs_obj.include_status(include_status)
    find_bugs_obj.exclude_status(exclude_status)
    exit_code = 0
    for b in runtime.bug_trackers.values():
        try:
            find_bugs_blocker(runtime, output, find_bugs_obj, b)
        except Exception as e:
            runtime.logger.error(traceback.format_exc())
            runtime.logger.error(f'exception with {b.type} bug tracker: {e}')
            exit_code = 1
    sys.exit(exit_code)
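
The loop above isolates failures per bug tracker, so an outage in one tracker does not abort the run against the other; failures are only remembered in the exit code. A minimal, self-contained sketch of the same aggregation pattern (the Tracker stub below is hypothetical, not elliott's API):

import sys
import traceback


class Tracker:
    # hypothetical stand-in for an elliott bug tracker object
    def __init__(self, type_, fail=False):
        self.type = type_
        self._fail = fail

    def search(self):
        if self._fail:
            raise RuntimeError("connection refused")
        return []


def run_all(trackers):
    exit_code = 0
    for t in trackers:
        try:
            t.search()
        except Exception as e:
            traceback.print_exc()
            print(f"exception with {t.type} bug tracker: {e}", file=sys.stderr)
            exit_code = 1  # remember the failure but keep going
    return exit_code


if __name__ == "__main__":
    sys.exit(run_all([Tracker("jira"), Tracker("bugzilla", fail=True)]))
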
Example #2
def find_bugs_sweep_cli(runtime: Runtime, advisory_id, default_advisory_type,
                        check_builds, include_status, exclude_status, report,
                        output, into_default_advisories, brew_event, noop):
    """Find OCP bugs and (optional) add them to ADVISORY.

 The --group automatically determines the correct target-releases to search
for bugs claimed to be fixed, but not yet attached to advisories.
--check-builds flag forces bug validation with attached builds to rpm advisory.
It assumes builds have been attached and only attaches bugs with matching builds.
default statuses: ['MODIFIED', 'ON_QA', 'VERIFIED']

Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Use of this option silently overrides providing an
advisory with the --add option.

    List bugs that WOULD be swept into advisories (NOOP):

\b
    $ elliott -g openshift-4.8 --assembly 4.8.32 find-bugs:sweep

    Sweep bugs for an assembly into the advisories defined

\b
    $ elliott -g openshift-4.8 --assembly 4.8.32 find-bugs:sweep --into-default-advisories

    Sweep rpm bugs into the rpm advisory defined

\b
    $ elliott -g openshift-4.8 --assembly 4.8.32 find-bugs:sweep --use-default-advisory rpm

"""
    count_advisory_attach_flags = sum(
        map(bool,
            [advisory_id, default_advisory_type, into_default_advisories]))
    if count_advisory_attach_flags > 1:
        raise click.BadParameter(
            "Use only one of --use-default-advisory, --add, or --into-default-advisories"
        )

    runtime.initialize(mode="both")
    major_version, _ = runtime.get_major_minor()
    find_bugs_obj = FindBugsSweep()
    find_bugs_obj.include_status(include_status)
    find_bugs_obj.exclude_status(exclude_status)

    exit_code = 0
    for b in runtime.bug_trackers.values():
        try:
            find_bugs_sweep(runtime, advisory_id, default_advisory_type,
                            check_builds, major_version, find_bugs_obj, report,
                            output, brew_event, noop,
                            count_advisory_attach_flags, b)
        except Exception as e:
            runtime.logger.error(traceback.format_exc())
            runtime.logger.error(f'exception with {b.type} bug tracker: {e}')
            exit_code = 1
    sys.exit(exit_code)
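
The sum(map(bool, ...)) guard at the top of this command is a compact way to enforce mutually exclusive options: it counts how many attach modes were supplied, and anything above one is a usage error. A standalone illustration (the flag values here are made up):

# Count how many of the mutually exclusive attach options were supplied.
advisory_id = 123456          # --add
default_advisory_type = None  # --use-default-advisory
into_default_advisories = False
count = sum(map(bool, [advisory_id, default_advisory_type, into_default_advisories]))
assert count <= 1, "Use only one of --use-default-advisory, --add, or --into-default-advisories"
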
Example #3
def attach_bugs_cli(runtime: Runtime, advisory, default_advisory_type, bug_ids,
                    report, output, noop):
    """Attach OCP Bugs to ADVISORY
Print bug details with --report
For attaching use --advisory, --use-default-advisory <TYPE>

    Print bug report (no attach)

\b
    $ elliott -g openshift-4.10 attach-bugs 8675309 7001337 --report


    Print bug report for jira bugs (no attach)

\b
    $ USEJIRA=true elliott -g openshift-4.10 attach-bugs OCPBUGS-10 OCPBUGS-9 --report


    Attach bugs to the advisory 123456

\b
    $ elliott -g openshift-4.10 attach-bugs 8675309 7001337 --advisory 123456


    Attach bugs to the 4.10.2 assembly defined image advisory

\b
    $ elliott -g openshift-4.10 --assembly 4.10.2 attach-bugs 8675309 7001337 --use-default-advisory image

"""
    if advisory and default_advisory_type:
        raise click.BadParameter(
            "Use only one of --use-default-advisory <TYPE> or --advisory <ADVISORY_ID>"
        )

    runtime.initialize()
    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    bug_trackers = runtime.bug_trackers
    if runtime.use_jira or runtime.only_jira:
        bug_ids = cli_opts.id_convert_str(bug_ids)
        attach_bugs(runtime, advisory, bug_ids, report, output, noop,
                    bug_trackers['jira'])
    else:
        bug_ids = cli_opts.id_convert(bug_ids)
        attach_bugs(runtime, advisory, bug_ids, report, output, noop,
                    bug_trackers['bugzilla'])
Example #4
File: common.py Project: yazug/elliott
def cli(ctx, **kwargs):
    cfg = dotconfig.Config(
        'elliott', 'settings',
        template=cli_opts.CLI_CONFIG_TEMPLATE,
        envvars=cli_opts.CLI_ENV_VARS,
        cli_args=kwargs)
    ctx.obj = Runtime(cfg_obj=cfg, **cfg.to_dict())
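
dotconfig here merges the settings file, environment variables, and CLI arguments into a single config that seeds the Runtime. As an illustration of that kind of layering, here is a hypothetical sketch of the assumed precedence (file < environment < explicit CLI arguments); it is not dotconfig's actual implementation:

import os


def layered_config(file_cfg: dict, env_map: dict, cli_args: dict) -> dict:
    # assumed precedence: file settings, then environment, then CLI args
    cfg = dict(file_cfg)
    for key, envvar in env_map.items():
        if envvar in os.environ:
            cfg[key] = os.environ[envvar]
    cfg.update({k: v for k, v in cli_args.items() if v is not None})
    return cfg


cfg = layered_config({"group": None}, {"group": "ELLIOTT_GROUP"}, {"group": "openshift-4.8"})
assert cfg["group"] == "openshift-4.8"
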
Example #5
def _fetch_builds_by_kind_image(runtime: Runtime, tag_pv_map: Dict[str, str],
                                brew_session: koji.ClientSession,
                                payload_only: bool, non_payload_only: bool):
    image_metas: List[ImageMetadata] = []
    for image in runtime.image_metas():
        if image.base_only or not image.is_release:
            continue
        if (payload_only and not image.is_payload) or (non_payload_only
                                                       and image.is_payload):
            continue
        image_metas.append(image)

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components...'
    )

    brew_latest_builds: List[Dict] = []
    for image in image_metas:
        LOGGER.info("Getting latest build for %s...", image.distgit_key)
        brew_latest_builds.append(image.get_latest_build(brew_session))

    _ensure_accepted_tags(brew_latest_builds, brew_session, tag_pv_map)

    shipped = _find_shipped_builds([b["id"] for b in brew_latest_builds],
                                   brew_session)
    unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.'
    )
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    return nvrps
Example #6
def _fetch_builds_by_kind_image(runtime: Runtime, tag_pv_map: Dict[str, str],
                                brew_session: koji.ClientSession,
                                payload_only: bool, non_payload_only: bool):
    image_metas: List[ImageMetadata] = []
    for image in runtime.image_metas():
        if image.base_only or not image.is_release:
            continue
        if (payload_only and not image.is_payload) or (non_payload_only
                                                       and image.is_payload):
            continue
        image_metas.append(image)

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components...'
    )

    brew_latest_builds: List[Dict] = asyncio.get_event_loop(
    ).run_until_complete(
        asyncio.gather(*[
            exectools.to_thread(progress_func, image.get_latest_build)
            for image in image_metas
        ]))

    _ensure_accepted_tags(brew_latest_builds, brew_session, tag_pv_map)

    shipped = _find_shipped_builds([b["id"] for b in brew_latest_builds],
                                   brew_session)
    unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.'
    )
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    return nvrps
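
Example #6 differs from Example #5 only in how the Brew lookups are issued: each blocking call runs in a worker thread and the results are gathered concurrently instead of sequentially. A minimal sketch of that pattern using only the standard library (exectools.to_thread is elliott's own helper; asyncio.to_thread, available in Python 3.9+, plays the same role here; fetch_build is a hypothetical stand-in for image.get_latest_build):

import asyncio
import time


def fetch_build(name: str) -> dict:
    time.sleep(0.1)  # simulate a blocking koji/Brew round trip
    return {"name": name, "id": abs(hash(name)) % 100000}


async def fetch_all(names):
    # run every blocking call in a thread; results come back in input order
    return await asyncio.gather(*[asyncio.to_thread(fetch_build, n) for n in names])


builds = asyncio.run(fetch_all(["etcd", "oauth-server", "hypershift"]))
print([b["name"] for b in builds])
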
Example #7
def find_bugs_qe_cli(runtime: Runtime, noop):
    """Find MODIFIED bugs for the target-releases, and set them to ON_QA.
    with a release comment on each bug

\b
    $ elliott -g openshift-4.6 find-bugs:qe

"""
    runtime.initialize()
    find_bugs_obj = FindBugsQE()
    exit_code = 0
    for b in runtime.bug_trackers.values():
        try:
            find_bugs_qe(runtime, find_bugs_obj, noop, b)
        except Exception as e:
            runtime.logger.error(traceback.format_exc())
            runtime.logger.error(f'exception with {b.type} bug tracker: {e}')
            exit_code = 1
    sys.exit(exit_code)
Example #8
def _fetch_builds_by_kind_image(runtime: Runtime, tag_pv_map: Dict[str, str],
                                brew_event: Optional[int],
                                brew_session: koji.ClientSession, p: bool,
                                np: bool):
    # filter out base-only images like 'openshift-enterprise-base'
    image_metas = [i for i in runtime.image_metas() if not i.base_only]

    # predicate: does this image pass the payload/non-payload filter?
    def tj(image):
        if not image.is_release:
            return False
        if p:
            return image.is_payload
        if np:
            return not image.is_payload
        return True

    tag_component_tuples = [(tag, image.get_component_name())
                            for tag in tag_pv_map for image in image_metas
                            if tj(image)]

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components with tags {", ".join(tag_pv_map.keys())}...',
        tag_component_tuples)

    brew_builds = brew.get_tagged_builds(tag_component_tuples,
                                         "image",
                                         event=brew_event,
                                         session=brew_session)
    brew_latest_builds = list(
        _find_latest_builds(brew_builds, runtime.assembly))

    click.echo(
        f'Found {len(brew_latest_builds)} builds. Filtering out shipped builds...'
    )
    shipped = _find_shipped_builds([b["id"] for b in brew_latest_builds],
                                   brew_session)
    unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    return nvrps
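
The tj() predicate above reduces to three cases: non-release images never match, and when one of the payload filter flags is set, the image's payload membership must agree with it. A behavior-equivalent standalone rewrite (the image stub is hypothetical):

from types import SimpleNamespace


def matches(image, payload_only: bool, non_payload_only: bool) -> bool:
    if not image.is_release:
        return False
    if payload_only:
        return image.is_payload
    if non_payload_only:
        return not image.is_payload
    return True  # no filter flag set: accept every release image


img = SimpleNamespace(is_release=True, is_payload=False)
assert matches(img, payload_only=False, non_payload_only=True)
assert not matches(img, payload_only=True, non_payload_only=False)
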
Example #9
def find_builds_cli(runtime: Runtime, advisory, default_advisory_type, builds,
                    kind, from_diff, as_json, allow_attached, remove, clean,
                    no_cdn_repos, payload, non_payload):
    '''Automatically or manually find viable rpm or image builds and attach
them to (or remove them from) ADVISORY. Default behavior searches Brew for viable builds in the
given group. Provide builds manually by giving one or more --build
(-b) options. Manually provided builds are verified against the Errata
Tool API.

\b
  * Attach the builds to ADVISORY by giving --attach
  * Remove the builds from ADVISORY by giving --remove
  * Specify the build type using --kind KIND

Example: Assuming --group=openshift-3.7, then a build is a VIABLE
BUILD IFF it meets ALL of the following criteria:

\b
  * HAS the tag in brew: rhaos-3.7-rhel7-candidate
  * DOES NOT have the tag in brew: rhaos-3.7-rhel7
  * IS NOT attached to ANY existing RHBA, RHSA, or RHEA

That is to say, a viable build is tagged as a "candidate", has NOT
received the "shipped" tag yet, and is NOT attached to any PAST or
PRESENT advisory. Here are some examples:

    SHOW the latest OSE 3.6 image builds that would be attached to a
    3.6 advisory:

    $ elliott --group openshift-3.6 find-builds -k image

    ATTACH the latest OSE 3.6 rpm builds to advisory 123456:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm --attach 123456

    VERIFY (no --attach) that the manually provided RPM NVR and build
    ID are viable builds:

    $ elliott --group openshift-3.6 find-builds -k rpm -b megafrobber-1.0.1-2.el7 -a 93170

\b
    Remove specific RPM NVR and build ID from advisory:

    $ elliott --group openshift-4.3 find-builds -k image -b oauth-server-container-v4.3.22-202005212137 -a 55017 --remove
'''

    if from_diff and builds:
        raise click.BadParameter(
            'Use only one of --build or --from-diff/--between.')
    if clean and (remove or from_diff or builds):
        raise click.BadParameter(
            'Option --clean cannot be used with --build or --from-diff/--between.'
        )
    if not builds and remove:
        raise click.BadParameter(
            'Option --remove only supports removing specific builds given with -b.')
    if from_diff and kind != "image":
        raise click.BadParameter(
            'Option --from-diff/--between should be used with --kind/-k image.'
        )
    if advisory and default_advisory_type:
        raise click.BadParameter(
            'Use only one of --use-default-advisory or --attach')
    if payload and non_payload:
        raise click.BadParameter('Use only one of --payload or --non-payload.')

    runtime.initialize(mode='images' if kind == 'image' else 'rpms')
    replace_vars = runtime.group_config.vars.primitive(
    ) if runtime.group_config.vars else {}
    et_data = runtime.gitdata.load_data(key='erratatool',
                                        replace_vars=replace_vars).data
    tag_pv_map = et_data.get('brew_tag_product_version_mapping')

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    ensure_erratatool_auth(
    )  # before we waste time looking up builds we can't process

    unshipped_nvrps = []
    unshipped_builds = []
    to_remove = []

    # get the builds we want to add
    brew_session = runtime.build_retrying_koji_client(caching=True)
    if builds:
        green_prefix('Fetching builds...')
        unshipped_nvrps = _fetch_nvrps_by_nvr_or_id(
            builds,
            tag_pv_map,
            ignore_product_version=remove,
            brew_session=brew_session)
    elif clean:
        unshipped_builds = errata.get_brew_builds(advisory)
    elif from_diff:
        unshipped_nvrps = _fetch_builds_from_diff(from_diff[0], from_diff[1],
                                                  tag_pv_map)
    else:
        if kind == 'image':
            unshipped_nvrps = _fetch_builds_by_kind_image(
                runtime, tag_pv_map, brew_session, payload, non_payload)
        elif kind == 'rpm':
            unshipped_nvrps = _fetch_builds_by_kind_rpm(
                runtime, tag_pv_map, brew_session)

    pbar_header('Fetching builds from Errata: ',
                'Hold on a moment, fetching buildinfos from Errata Tool...',
                unshipped_builds if clean else unshipped_nvrps)

    if not clean and not remove:
        # With --clean, builds are batch-fetched from the erratum, so there is no
        # need to fetch them individually. Otherwise, fetch each build by its nvrp
        # tuple and construct elliottlib.brew.Build objects via get_brew_build(),
        # e.g.:
        # ('atomic-openshift-descheduler-container', 'v4.3.23', '202005250821', 'RHEL-7-OSE-4.3')
        # -> Build(atomic-openshift-descheduler-container-v4.3.23-202005250821)
        unshipped_builds = parallel_results_with_progress(
            unshipped_nvrps, lambda nvrp: elliottlib.errata.get_brew_build(
                '{}-{}-{}'.format(nvrp[0], nvrp[1], nvrp[2]),
                nvrp[3],
                session=requests.Session()))
        if not allow_attached:
            unshipped_builds = _filter_out_inviable_builds(
                kind, unshipped_builds, elliottlib.errata)

        _json_dump(as_json, unshipped_builds, kind, tag_pv_map)

        if not unshipped_builds:
            green_print('No builds needed to be attached.')
            return

    if not advisory:
        click.echo('The following {n} builds '.format(n=len(unshipped_builds)),
                   nl=False)
        if not (remove or clean):
            click.secho('may be attached', bold=True, nl=False)
            click.echo(' to an advisory:')
        else:
            click.secho('may be removed', bold=True, nl=False)
            click.echo(' from an advisory:')
        for b in sorted(unshipped_builds):
            click.echo(' ' + b.nvr)
        return

    if not unshipped_builds and not (remove and unshipped_nvrps):
        # Do not change advisory state unless strictly necessary
        return

    try:
        erratum = elliottlib.errata.Advisory(errata_id=advisory)
        erratum.ensure_state('NEW_FILES')
        if remove:
            to_remove = [
                f"{nvrp[0]}-{nvrp[1]}-{nvrp[2]}" for nvrp in unshipped_nvrps
            ]
        elif clean:
            to_remove = [b.nvr for b in unshipped_builds]

        if to_remove:
            erratum.remove_builds(to_remove)
        else:  # attach
            erratum.attach_builds(unshipped_builds, kind)
            cdn_repos = et_data.get('cdn_repos')
            if cdn_repos and not no_cdn_repos and kind == "image":
                erratum.set_cdn_repos(cdn_repos)

    except GSSError:
        exit_unauthenticated()
    except ErrataException as e:
        red_print(f'Cannot change advisory {advisory}: {e}')
        exit(1)
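
The "viable build" criteria from the docstring can be read as a three-part predicate over Brew tags and advisory attachments. A sketch under assumed inputs (tag names taken from the 3.7 example; the lookups themselves are hypothetical, not elliott's API):

def is_viable(tags: set, attached_advisories: list) -> bool:
    # tagged as a candidate, not yet shipped, not on any past/present advisory
    has_candidate = "rhaos-3.7-rhel7-candidate" in tags
    has_shipped = "rhaos-3.7-rhel7" in tags
    return has_candidate and not has_shipped and not attached_advisories


assert is_viable({"rhaos-3.7-rhel7-candidate"}, [])
assert not is_viable({"rhaos-3.7-rhel7-candidate", "rhaos-3.7-rhel7"}, [])
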
Example #10
def _fetch_builds_by_kind_rpm(runtime: Runtime, tag_pv_map: Dict[str, str],
                              brew_session: koji.ClientSession):
    assembly = runtime.assembly
    if runtime.assembly_basis_event:
        LOGGER.warning(
            f'Constraining rpm search to stream assembly due to assembly basis event {runtime.assembly_basis_event}'
        )
        # If an assembly has a basis event, its latest rpms can only be sourced from
        # "is:" or the stream assembly.
        assembly = 'stream'

        # ensures the runtime assembly doesn't include any image member specific or rhcos specific dependencies
        image_configs = [
            assembly_metadata_config(runtime.get_releases_config(),
                                     runtime.assembly, 'image',
                                     image.distgit_key, image.config)
            for _, image in runtime.image_map.items()
        ]
        if any(nvr for image_config in image_configs
               for dep in image_config.dependencies.rpms
               for _, nvr in dep.items()):
            raise ElliottFatalError(
                f"Assembly {runtime.assembly} is not applicable for build sweep because it contains image member specific dependencies for a custom release."
            )
        rhcos_config = assembly_rhcos_config(runtime.get_releases_config(),
                                             runtime.assembly)
        if any(nvr for dep in rhcos_config.dependencies.rpms
               for _, nvr in dep.items()):
            raise ElliottFatalError(
                f"Assembly {runtime.assembly} is not applicable for build sweep because it contains RHCOS specific dependencies for a custom release."
            )

    green_prefix('Generating list of rpms: ')
    click.echo('Hold on a moment, fetching Brew builds')

    builder = BuildFinder(brew_session, logger=LOGGER)
    builds: List[Dict] = []
    for tag in tag_pv_map:
        # keys are rpm component names, values are nvres
        component_builds: Dict[str, Dict] = builder.from_tag(
            "rpm",
            tag,
            inherit=False,
            assembly=assembly,
            event=runtime.brew_event)

        if runtime.assembly_basis_event:
            # If an assembly has a basis event, rpms pinned by "is" and group dependencies should take precedence over every build from the tag
            el_version = isolate_el_version_in_brew_tag(tag)
            if not el_version:
                continue  # Only honor pinned rpms if this tag is relevant to a RHEL version

            # Honors pinned NVRs by "is"
            pinned_by_is = builder.from_pinned_by_is(
                el_version, runtime.assembly, runtime.get_releases_config(),
                runtime.rpm_map)
            _ensure_accepted_tags(pinned_by_is.values(), brew_session,
                                  tag_pv_map)

            # Builds pinned by "is" should take precedence over every build from tag
            for component, pinned_build in pinned_by_is.items():
                if component in component_builds and pinned_build[
                        "id"] != component_builds[component]["id"]:
                    LOGGER.warning(
                        "Swapping stream nvr %s for pinned nvr %s...",
                        component_builds[component]["nvr"],
                        pinned_build["nvr"])

            component_builds.update(
                pinned_by_is
            )  # pinned rpms take precedence over those from tags

            # Honors group dependencies
            group_deps = builder.from_group_deps(
                el_version, runtime.group_config, runtime.rpm_map
            )  # the return value doesn't include any ART managed rpms
            # Group dependencies should take precedence over anything previously determined except those pinned by "is".
            for component, dep_build in group_deps.items():
                if component in component_builds and dep_build[
                        "id"] != component_builds[component]["id"]:
                    LOGGER.warning(
                        "Swapping stream nvr %s for group dependency nvr %s...",
                        component_builds[component]["nvr"], dep_build["nvr"])
            component_builds.update(group_deps)
        builds.extend(component_builds.values())

    _ensure_accepted_tags(builds,
                          brew_session,
                          tag_pv_map,
                          raise_exception=False)
    qualified_builds = [b for b in builds if "tag_name" in b]
    not_attachable_nvrs = [b["nvr"] for b in builds if "tag_name" not in b]

    if not_attachable_nvrs:
        yellow_print(
            f"The following NVRs will not be swept because they don't have allowed tags {list(tag_pv_map.keys())}:"
        )
        for nvr in not_attachable_nvrs:
            yellow_print(f"\t{nvr}")

    click.echo("Filtering out shipped builds...")
    shipped = _find_shipped_builds([b["id"] for b in qualified_builds],
                                   brew_session)
    unshipped = [b for b in qualified_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.'
    )
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    nvrps = sorted(set(nvrps))  # remove duplicates
    return nvrps
Example #11
def verify_cvp_cli(runtime: Runtime, all_images, nvrs, optional_checks,
                   all_optional_checks, fix, message):
    """ Verify CVP test results

    Example 1: Verify CVP test results for all latest 4.4 image builds, and warn about those with a failed content_set_check

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check

    Example 2: Apply patches to ocp-build-data to fix the redundant content sets error:

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check --fix

    Note:
    1. If `--message` is not given, `--fix` will leave changed ocp-build-data files uncommitted.
    2. Make sure your ocp-build-data directory is clean before running `--fix`.
    """
    if bool(all_images) + bool(nvrs) != 1:
        raise click.BadParameter('You must use one of --all or --build.')
    if all_optional_checks and optional_checks:
        raise click.BadParameter(
            'Use only one of --all-optional-checks or --include-optional-check.'
        )

    runtime.initialize(mode='images')
    tag_pv_map = runtime.gitdata.load_data(
        key='erratatool',
        replace_vars=runtime.group_config.vars.primitive()
        if runtime.group_config.vars else
        {}).data.get('brew_tag_product_version_mapping')
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub
                                      or constants.BREW_HUB)

    builds = []
    if all_images:
        runtime.logger.info("Getting latest image builds from Brew...")
        builds = get_latest_image_builds(brew_session, tag_pv_map.keys(),
                                         runtime.image_metas)
    elif nvrs:
        runtime.logger.info(f"Finding {len(nvrs)} builds from Brew...")
        builds = brew.get_build_objects(nvrs, brew_session)
    runtime.logger.info(f"Found {len(builds)} image builds.")

    resultsdb_api = ResultsDBAPI()
    nvrs = [b["nvr"] for b in builds]
    runtime.logger.info(
        f"Getting CVP test results for {len(builds)} image builds...")
    latest_cvp_results = get_latest_cvp_results(runtime, resultsdb_api, nvrs)

    # print a summary for all CVP results
    good_results = []  # good means PASSED or INFO
    bad_results = []  # bad means NEEDS_INSPECTION or FAILED
    incomplete_nvrs = []
    for nvr, result in zip(nvrs, latest_cvp_results):
        if not result:
            incomplete_nvrs.append(nvr)
            continue
        outcome = result.get(
            "outcome"
        )  # only PASSED, FAILED, INFO, NEEDS_INSPECTION are now valid outcome values (https://resultsdb20.docs.apiary.io/#introduction/changes-since-1.0)
        if outcome in {"PASSED", "INFO"}:
            good_results.append(result)
        elif outcome in {"NEEDS_INSPECTION", "FAILED"}:
            bad_results.append(result)
    green_prefix("good: {}".format(len(good_results)))
    click.echo(", ", nl=False)
    red_prefix("bad: {}".format(len(bad_results)))
    click.echo(", ", nl=False)
    yellow_print("incomplete: {}".format(len(incomplete_nvrs)))

    if bad_results:
        red_print("The following builds didn't pass CVP tests:")
        for r in bad_results:
            nvr = r["data"]["item"][0]
            red_print(f"{nvr} {r['outcome']}: {r['ref_url']}")

    if incomplete_nvrs:
        yellow_print(
            "We couldn't find CVP test results for the following builds:")
        for nvr in incomplete_nvrs:
            yellow_print(nvr)

    if not optional_checks and not all_optional_checks:
        return  # no need to print failed optional CVP checks
    # Find failed optional CVP checks in case some of them *will* become required.
    optional_checks = set(optional_checks)
    complete_results = good_results + bad_results
    runtime.logger.info(
        f"Getting optional checks for {len(complete_results)} CVP tests...")
    optional_check_results = get_optional_checks(runtime, complete_results)

    component_distgit_keys = {
    }  # a dict of brew component names to distgit keys
    content_set_repo_names = {
    }  # a map of x86_64 content set names to group.yml repo names
    if fix:  # Fixing redundant content sets requires those dicts
        for image in runtime.image_metas():
            component_distgit_keys[
                image.get_component_name()] = image.distgit_key
        for repo_name, repo_info in runtime.group_config.get("repos",
                                                             {}).items():
            content_set_name = repo_info.get(
                'content_set', {}).get('x86_64') or repo_info.get(
                    'content_set', {}).get('default')
            if content_set_name:
                content_set_repo_names[content_set_name] = repo_name

    ocp_build_data_updated = False

    for cvp_result, checks in zip(complete_results, optional_check_results):
        # example optional checks: http://external-ci-coldstorage.datahub.redhat.com/cvp/cvp-product-test/hive-container-v4.6.0-202008010302.p0/da01e36c-8c69-4a19-be7d-ba4593a7b085/sanity-tests-optional-results.json
        bad_checks = [
            check for check in checks["checks"]
            if check["status"] != "PASS" and (
                all_optional_checks or check["name"] in optional_checks)
        ]
        if not bad_checks:
            continue
        nvr = cvp_result["data"]["item"][0]
        yellow_print("----------")
        yellow_print(
            f"Build {nvr} has {len(bad_checks)} problematic CVP optional checks:"
        )
        for check in bad_checks:
            yellow_print(f"* {check['name']} {check['status']}")
            if fix and check["name"] == "content_set_check":
                if "Some content sets are redundant." in check["logs"]:
                    # fix redundant content sets
                    name = nvr.rsplit('-', 2)[0]
                    distgit_keys = component_distgit_keys.get(name)
                    if not distgit_keys:
                        runtime.logger.warning(
                            f"Will not apply the redundant content sets fix to image {name}: We don't know its distgit key."
                        )
                        continue
                    amd64_content_sets = list(
                        filter(lambda item: item.get("arch") == "amd64",
                               check["logs"][-1])
                    )  # seems only x86_64 (amd64) content sets are defined in ocp-build-data.
                    if not amd64_content_sets:
                        runtime.logger.warning(
                            f"Will not apply the redundant content sets fix to image {name}: It doesn't have redundant x86_64 (amd64) content sets"
                        )
                        continue
                    amd64_redundant_cs = amd64_content_sets[0]["redundant_cs"]
                    redundant_repos = [
                        content_set_repo_names[cs] for cs in amd64_redundant_cs
                        if cs in content_set_repo_names
                    ]
                    if len(redundant_repos) != len(amd64_redundant_cs):
                        runtime.logger.error(
                            f"Not all content sets have a repo entry in group.yml: #content_sets is {len(amd64_redundant_cs)}, #repos is {len(redundant_repos)}"
                        )
                    runtime.logger.info(
                        f"Applying redundant content sets fix to {distgit_keys}..."
                    )
                    fix_redundant_content_set(runtime, distgit_keys,
                                              redundant_repos)
                    ocp_build_data_updated = True
                    runtime.logger.info(
                        f"Fixed redundant content sets for {distgit_keys}")
        yellow_print(
            f"See {cvp_result['ref_url']}sanity-tests-optional-results.json for more details."
        )

    if message and ocp_build_data_updated:
        runtime.gitdata.commit(message)
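
The component name is recovered from an NVR above with nvr.rsplit('-', 2)[0]. This works because in an NVR (<name>-<version>-<release>) the version and release contain no dashes, even though the name itself often does. A quick check:

nvr = "hive-container-v4.6.0-202008010302.p0"
name, version, release = nvr.rsplit("-", 2)
assert (name, version, release) == ("hive-container", "v4.6.0", "202008010302.p0")
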
Example #12
async def verify_cvp_cli(runtime: Runtime, all_images, nvrs, optional_checks, all_optional_checks, fix, message):
    """ Verify CVP test results

    Example 1: Verify CVP test results for all latest 4.4 image builds, and warn about those with a failed content_set_check

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check

    Example 2: Apply patches to ocp-build-data to fix the redundant content sets error:

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check --fix

    Note:
    1. If `--message` is not given, `--fix` will leave changed ocp-build-data files uncommitted.
    2. Make sure your ocp-build-data directory is clean before running `--fix`.
    """
    if bool(all_images) + bool(nvrs) != 1:
        raise click.BadParameter('You must use one of --all or --build.')
    if all_optional_checks and optional_checks:
        raise click.BadParameter('Use only one of --all-optional-checks or --include-optional-check.')

    runtime.initialize(mode='images')
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub or constants.BREW_HUB)

    builds = []
    if all_images:
        image_metas = runtime.image_metas()
        builds = await get_latest_image_builds(image_metas)
    elif nvrs:
        runtime.logger.info(f"Finding {len(nvrs)} builds from Brew...")
        builds = brew.get_build_objects(nvrs, brew_session)
    runtime.logger.info(f"Found {len(builds)} image builds.")

    resultsdb_api = ResultsDBAPI()
    nvrs = [b["nvr"] for b in builds]
    runtime.logger.info(f"Getting CVP test results for {len(builds)} image builds...")
    latest_cvp_results = await get_latest_cvp_results(runtime, resultsdb_api, nvrs)

    # print a summary for all CVP results
    good_results = []  # good means PASSED or INFO
    bad_results = []  # bad means NEEDS_INSPECTION or FAILED
    incomplete_nvrs = []
    for nvr, result in zip(nvrs, latest_cvp_results):
        if not result:
            incomplete_nvrs.append(nvr)
            continue
        outcome = result.get("outcome")  # only PASSED, FAILED, INFO, NEEDS_INSPECTION are now valid outcome values (https://resultsdb20.docs.apiary.io/#introduction/changes-since-1.0)
        if outcome in {"PASSED", "INFO"}:
            good_results.append(result)
        elif outcome in {"NEEDS_INSPECTION", "FAILED"}:
            bad_results.append(result)
    green_prefix("good: {}".format(len(good_results)))
    click.echo(", ", nl=False)
    red_prefix("bad: {}".format(len(bad_results)))
    click.echo(", ", nl=False)
    yellow_print("incomplete: {}".format(len(incomplete_nvrs)))

    if bad_results:
        red_print("The following builds didn't pass CVP tests:")
        for r in bad_results:
            nvr = r["data"]["item"][0]
            red_print(f"{nvr} {r['outcome']}: {r['ref_url']}")

    if incomplete_nvrs:
        yellow_print("We couldn't find CVP test results for the following builds:")
        for nvr in incomplete_nvrs:
            yellow_print(nvr)

    if not optional_checks and not all_optional_checks:
        return  # no need to print failed optional CVP checks
    # Find failed optional CVP checks in case some of them *will* become required.
    optional_checks = set(optional_checks)
    complete_results = good_results + bad_results
    runtime.logger.info(f"Getting optional checks for {len(complete_results)} CVP tests...")
    optional_check_results = await get_optional_checks(runtime, complete_results)

    component_distgit_keys = {}  # a dict of brew component names to distgit keys
    content_set_to_repo_names = {}  # a map of content set names to group.yml repo names
    for image in runtime.image_metas():
        component_distgit_keys[image.get_component_name()] = image.distgit_key
    for repo_name, repo_info in runtime.group_config.get("repos", {}).items():
        for arch, cs_name in repo_info.get('content_set', {}).items():
            if arch == "optional":
                continue  # not a real arch name
            content_set_to_repo_names[cs_name] = repo_name

    nvr_to_builds = {build["nvr"]: build for build in builds}

    ocp_build_data_updated = False

    failed_with_not_covered_rpms = set()
    failed_with_redundant_repos = set()
    only_failed_in_non_x86_with_not_covered_rpms = set()
    only_failed_in_non_x86_with_redundant_repos = set()

    for cvp_result, checks in zip(complete_results, optional_check_results):
        # example optional checks: http://external-ci-coldstorage.datahub.redhat.com/cvp/cvp-product-test/hive-container-v4.6.0-202008010302.p0/da01e36c-8c69-4a19-be7d-ba4593a7b085/sanity-tests-optional-results.json
        bad_checks = [check for check in checks["checks"] if check["status"] != "PASS" and (all_optional_checks or check["name"] in optional_checks)]
        if not bad_checks:
            continue
        nvr = cvp_result["data"]["item"][0]
        build = nvr_to_builds[nvr]
        yellow_print("----------")
        yellow_print(f"Build {nvr} (https://brewweb.engineering.redhat.com/brew/buildinfo?buildID={nvr_to_builds[nvr]['id']}) has {len(bad_checks)} problematic CVP optional checks:")
        for check in bad_checks:
            yellow_print(f"* {check['name']} {check['status']}")
            try:
                amd64_result = list(filter(lambda item: item.get("arch") == "amd64", check["logs"][-1]))
            except AttributeError:
                red_print("CVP result malformed.")
                continue  # cannot parse this check's logs; skip it
            if len(amd64_result) != 1:
                red_print("WHAT?! This build doesn't include an amd64 image? This shouldn't happen. Check Brew and CVP logs with the CVP team!")
                continue
            amd64_result = amd64_result[0]
            image_component_name = nvr.rsplit('-', 2)[0]
            distgit_key = component_distgit_keys.get(image_component_name)

            amd64_redundant_cs = amd64_result.get("redundant_cs", [])
            amd64_redundant_repos = {content_set_to_repo_names[cs] for cs in amd64_redundant_cs}

            def _strip_arch_suffix(rpm):
                # rh-nodejs10-3.2-3.el7.x86_64 -> rh-nodejs10-3.2-3.el7
                rpm_split = rpm.rsplit(".", 1)
                return rpm_split[0]

            amd64_not_covered_rpms = {_strip_arch_suffix(rpm) for rpm in amd64_result.get("not_covered_rpms", [])}

            if check["name"] == "content_set_check":
                details = check["logs"][-1]  # example: http://external-ci-coldstorage.datahub.redhat.com/cvp/cvp-product-test/logging-fluentd-container-v4.6.0-202008261251.p0/dd9f2024-5440-4f33-b508-472ccf258439/sanity-tests-optional-results.json
                if not details:
                    red_print("content_set_check failed without any explanation. Report to CVP team!")
                    continue
                if len(details) > 1:  # if this build is multi-arch, check if all per-arch results are consistent
                    for result in details:
                        if result["arch"] == "amd64":
                            continue
                        redundant_repos = {content_set_to_repo_names[cs] for cs in result.get("redundant_cs", [])}
                        if redundant_repos != amd64_redundant_repos:
                            only_failed_in_non_x86_with_redundant_repos.add(nvr)
                            red_print(f"""content_set_check for {nvr} arch {result["arch"]} has different redundant_cs result from the one for amd64:
                            {result["arch"]} has redundant_cs {result.get("redundant_cs")},
                            but amd64 has redundant_cs {amd64_redundant_cs}.
                            Not sure what happened. Please see Brew and CVP logs and/or check with the CVP team.""")
                        not_covered_rpms = {_strip_arch_suffix(rpm) for rpm in result.get("not_covered_rpms", [])}
                        if not_covered_rpms != amd64_not_covered_rpms:
                            only_failed_in_non_x86_with_not_covered_rpms.add(nvr)
                            red_print(f"""content_set_check for {nvr} arch {result["arch"]} has different not_covered_rpms result from the one for amd64:
                            {result["arch"]} has extra not_covered_rpms {not_covered_rpms - amd64_not_covered_rpms},
                            and missing not_covered_rpms {amd64_not_covered_rpms - not_covered_rpms}.
                            Not sure what happened. Check Brew and CVP logs with the CVP team!""")

                if amd64_not_covered_rpms:  # This build has not_covered_rpms
                    failed_with_not_covered_rpms.add(nvr)
                    yellow_print(f"Image {distgit_key} has not_covered_rpms: {amd64_not_covered_rpms}")
                    brew_repos = await find_repos_for_rpms(amd64_not_covered_rpms, build)
                    yellow_print(f"Those repos shown in Brew logs might be a good hint: {brew_repos}")
                    runtime.logger.info("Looking for parent image's content_sets...")
                    parent = get_parent_build_ids([build])[0]
                    if parent:
                        parent_build = brew.get_build_objects([parent])[0]
                        parent_cs = await get_content_sets_for_build(parent_build)
                        parent_enabled_repos = {content_set_to_repo_names[cs] for cs in parent_cs.get("x86_64", [])}
                        enabled_repos = set(runtime.image_map[distgit_key].config.get("enabled_repos", []))
                        missing_repos = parent_enabled_repos - enabled_repos
                        yellow_print(f"""The following repos are defined in parent {parent_build["nvr"]} {component_distgit_keys.get(parent_build["name"], "?")}.yml but not in
                                     {component_distgit_keys[build["name"]]}.yml: {missing_repos}""")
                        if fix and missing_repos:
                            runtime.logger.info("Trying to merge parent image's content_sets...")
                            fix_missing_content_set(runtime, distgit_key, missing_repos)
                            ocp_build_data_updated = True
                            runtime.logger.info(f"{distgit_key}.yml patched")

                if amd64_redundant_repos:  # This build has redundant_cs
                    failed_with_redundant_repos.add(nvr)
                    yellow_print(f"Image {distgit_key} has redundant repos: {amd64_redundant_repos}")
                    if not fix:
                        yellow_print(f"Please add the following repos to non_shipping_repos in {distgit_key}.yml: {amd64_redundant_repos}")
                    else:
                        runtime.logger.info(f"Applying redundant content sets fix to {distgit_key}.yml...")
                        fix_redundant_content_set(runtime, distgit_key, amd64_redundant_repos)
                        ocp_build_data_updated = True
                        runtime.logger.info(f"{distgit_key}.yml patched")

        print(f"See {cvp_result['ref_url']}sanity-tests-optional-results.json for more details.")

    if failed_with_not_covered_rpms or failed_with_redundant_repos:
        yellow_print(f"{len(failed_with_not_covered_rpms | failed_with_redundant_repos)} images failed the content_set check. Breakdown:")

    if failed_with_not_covered_rpms:
        yellow_print(f"\t{len(failed_with_not_covered_rpms)} images failed content_sets check because of not_covered_rpms:")
        for nvr in failed_with_not_covered_rpms:
            line = f"\t\t{nvr}"
            if nvr in only_failed_in_non_x86_with_not_covered_rpms:
                line += " - non-x86 arch results differ from the x86_64 one"
            yellow_print(line)
    if failed_with_redundant_repos:
        yellow_print(f"\t{len(failed_with_redundant_repos)} images failed content_sets check because of redundant_repos:")
        for nvr in failed_with_redundant_repos:
            line = f"\t\t{nvr}"
            if nvr in only_failed_in_non_x86_with_redundant_repos:
                line += " - non-x86 arch results differ from the x86_64 one"
            yellow_print(line)

    if message and ocp_build_data_updated:
        runtime.gitdata.commit(message)
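
The content_set_to_repo_names inversion above maps per-arch content set names back to group.yml repo entries, skipping the 'optional' key, which is a flag rather than an arch. A toy run of the same loop (the repo shape below is assumed from the keys the code reads):

repos = {
    "rhel-server-ose-rpms": {
        "content_set": {"x86_64": "rhel-7-server-ose-rpms", "optional": True},
    },
}
content_set_to_repo_names = {}
for repo_name, repo_info in repos.items():
    for arch, cs_name in repo_info.get("content_set", {}).items():
        if arch == "optional":
            continue  # not a real arch name
        content_set_to_repo_names[cs_name] = repo_name
assert content_set_to_repo_names["rhel-7-server-ose-rpms"] == "rhel-server-ose-rpms"
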
Example #13
def find_bugs_cli(runtime: Runtime, advisory, default_advisory_type, mode, check_builds, status, exclude_status, id, cve_trackers, from_diff,
                  flag, report, into_default_advisories, brew_event, noop):
    """Find Red Hat Bugzilla bugs or add them to ADVISORY. Bugs can be
"swept" into the advisory either automatically (--mode sweep), or by
manually specifying one or more bugs using --mode list with the --id option.
Use cases are described below:

    Note: Using --id without --add is basically pointless

SWEEP: For this use-case the --group option MUST be provided. The
--group automatically determines the correct target-releases to search
for bugs claimed to be fixed, but not yet attached to advisories.
--check-builds flag forces bug validation with attached builds to rpm advisory.
It assumes builds have been attached and only attaches bugs with matching builds.
default --status: ['MODIFIED', 'ON_QA', 'VERIFIED']

LIST: The --group option is not required if you are specifying advisory
manually. Provide one or more --id's for manual bug addition. In LIST
mode you must provide a list of IDs to perform operation on with the --id option.
Supported operations: report with --report, attach with --attach and --into-default-advisories

DIFF: For this use case, you must provide the --between option using two
URLs to payloads.

QE: Find MODIFIED bugs for the target-releases, and set them to ON_QA.
The --group option MUST be provided. Cannot be used in combination
with --add, --use-default-advisory, --into-default-advisories, --exclude-status.

BLOCKER: List active blocker+ bugs for the target-releases.
The --group option MUST be provided. Cannot be used in combination
with --add, --use-default-advisory, --into-default-advisories.
default --status: ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_DEV', 'ON_QA']
Use --exclude-status to filter out from the default status list.
By default --cve-trackers is True.

Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Use of this option silently overrides providing an
advisory with the --add option.

    Automatically add bugs with target-release matching 3.7.Z or 3.7.0
    to advisory 123456:

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --add 123456

    List bugs that WOULD be added to an advisory and set the bro_ok flag on them (NOOP):

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --flag bro_ok

    Attach bugs to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image":

\b
    $ elliott --group=openshift-4.4 find-bugs --mode=sweep --into-default-advisories

    Add two bugs to advisory 123456. Note that --group is not required
    because we're not auto searching:

\b
    $ elliott find-bugs --mode list --id 8675309 --id 7001337 --add 123456

    Add given list of bugs to the appropriate advisories. This would apply sweep logic to the given bugs
    grouping them to be attached to rpm/extras/image advisories

\b
    $ elliott -g openshift-4.8 find-bugs --mode list --id 8675309,7001337 --into-default-advisories

    Automatically find bugs for openshift-4.1 and attach them to the
    rpm advisory defined in ocp-build-data:

\b
    $ elliott --group=openshift-4.1 find-bugs --mode sweep --use-default-advisory rpm

    Find bugs for 4.6 that are in MODIFIED state, and set them to ON_QA:

\b
    $ elliott --group=openshift-4.6 find-bugs --mode qe

\b
    $ elliott --group=openshift-4.6 find-bugs --mode blocker --report
"""
    count_advisory_attach_flags = sum(map(bool, [advisory, default_advisory_type, into_default_advisories]))

    if mode != 'list' and len(id) > 0:
        raise click.BadParameter("Combining the automatic and manual bug attachment options is not supported")

    if mode == 'list' and len(id) == 0:
        raise click.BadParameter("When using mode=list, you must provide a list of bug IDs")

    if mode == 'diff' and not len(from_diff) == 2:
        raise click.BadParameter("If using mode=diff, you must provide two payloads to compare")

    if count_advisory_attach_flags > 1:
        raise click.BadParameter("Use only one of --use-default-advisory, --add, or --into-default-advisories")

    if mode in ['qe', 'blocker'] and count_advisory_attach_flags > 0:
        raise click.BadParameter("Mode does not operate on an advisory. Do not specify any of "
                                 "`--use-default-advisory`, `--add`, or `--into-default-advisories`")

    runtime.initialize(mode="both")
    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    bzapi = bzutil.get_bzapi(bz_data)

    # filter out bugs ART does not manage
    m = re.match(r"rhaos-(\d+).(\d+)",
                 runtime.branch)  # extract OpenShift version from the branch name. there should be a better way...
    if not m:
        raise ElliottFatalError(f"Unable to determine OpenShift version from branch name {runtime.branch}.")
    major_version = int(m[1])
    minor_version = int(m[2])

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if mode in ['sweep', 'qe', 'blocker']:
        if cve_trackers is None:
            # default: include CVE trackers only in blocker mode
            cve_trackers = (mode == 'blocker')

        if not status:  # use default status filter according to mode
            if mode == 'sweep':
                status = ['MODIFIED', 'ON_QA', 'VERIFIED']
            if mode == 'qe':
                status = ['MODIFIED']
            if mode == 'blocker':
                status = ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_DEV', 'ON_QA']

        if mode != 'qe' and exclude_status:
            status = set(status) - set(exclude_status)

        green_prefix(f"Searching for bugs with status {' '.join(status)} and target release(s):")
        click.echo(" {tr}".format(tr=", ".join(bz_data['target_release'])))

        search_flag = 'blocker+' if mode == 'blocker' else None
        bugs = bzutil.search_for_bugs(bz_data, status, flag=search_flag, filter_out_security_bugs=not(cve_trackers),
                                      verbose=runtime.debug)

        sweep_cutoff_timestamp = 0
        if brew_event:
            green_print(f"Using command line specified cutoff event {runtime.assembly_basis_event}...")
            sweep_cutoff_timestamp = runtime.build_retrying_koji_client().getEvent(brew_event)["ts"]
        elif runtime.assembly_basis_event:
            green_print(f"Determining approximate cutoff timestamp from basis event {runtime.assembly_basis_event}...")
            brew_api = runtime.build_retrying_koji_client()
            sweep_cutoff_timestamp = bzutil.approximate_cutoff_timestamp(runtime.assembly_basis_event, brew_api, runtime.rpm_metas() + runtime.image_metas())

        if sweep_cutoff_timestamp:
            green_print(f"Filtering bugs that have changed to one of the desired statuses before the cutoff time {datetime.utcfromtimestamp(sweep_cutoff_timestamp)}...")
            qualified_bugs = bzutil.filter_bugs_by_cutoff_event(bzapi, bugs, status, sweep_cutoff_timestamp)
            click.echo(f"{len(qualified_bugs)} of {len(bugs)} bugs are qualified for the cutoff time {datetime.utcfromtimestamp(sweep_cutoff_timestamp)}...")
            bugs = qualified_bugs

        # Loads included/excluded bugs from assembly config
        issues_config = assembly_issues_config(runtime.get_releases_config(), runtime.assembly)
        # JIRA issues are not supported yet. Only loads issues with integer IDs.
        included_bug_ids: Set[int] = {int(issue["id"]) for issue in issues_config.include if isinstance(issue["id"], int) or issue["id"].isdigit()}
        excluded_bug_ids: Set[int] = {int(issue["id"]) for issue in issues_config.exclude if isinstance(issue["id"], int) or issue["id"].isdigit()}
        if included_bug_ids & excluded_bug_ids:
            raise ValueError(f"The following bugs are defined in both 'include' and 'exclude': {included_bug_ids & excluded_bug_ids}")
        if included_bug_ids:
            yellow_print(f"The following bugs will be additionally included because they are explicitly defined in the assembly config: {included_bug_ids}")
            included_bugs = bzapi.getbugs(included_bug_ids)
            bugs.extend(included_bugs)
        if excluded_bug_ids:
            yellow_print(f"The following bugs will be excluded because they are explicitly defined in the assembly config: {excluded_bug_ids}")
            bugs = [bug for bug in bugs if bug.id not in excluded_bug_ids]

    elif mode == 'list':
        bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]
        if not into_default_advisories:
            mode_list(advisory=advisory, bugs=bugs, flags=flag, report=report, noop=noop)
            return
    elif mode == 'diff':
        click.echo(runtime.working_dir)
        bug_id_strings = openshiftclient.get_bug_list(runtime.working_dir, from_diff[0], from_diff[1])
        bugs = [bzapi.getbug(i) for i in bug_id_strings]

    filtered_bugs = filter_bugs(bugs, major_version, minor_version, runtime)
    green_prefix(f"Found {len(filtered_bugs)} bugs ({len(bugs) - len(filtered_bugs)} ignored): ")
    bugs = filtered_bugs
    click.echo(", ".join(sorted(str(b.bug_id) for b in bugs)))

    if mode == 'qe':
        for bug in bugs:
            bzutil.set_state(bug, 'ON_QA', noop=noop, comment_for_release=f"{major_version}.{minor_version}")

    if len(flag) > 0:
        add_flags(bugs=bugs, flags=flag, noop=noop)

    if report:
        print_report(bugs)

    if advisory and not default_advisory_type:  # `--add ADVISORY_NUMBER` should respect the user's wish and attach all available bugs to whatever advisory is specified.
        errata.add_bugs_with_retry(advisory, bugs, noop=noop)
        return

    # If --use-default-advisory or --into-default-advisories is given, we need to determine which bugs should be swept into which advisory.
    # Otherwise we don't need to sweep bugs at all.
    if not (into_default_advisories or default_advisory_type):
        return

    # key is impetus ("rpm", "image", "extras"), value is a set of bug IDs.
    impetus_bugs = {
        "rpm": set(),
        "image": set(),
        "extras": set()
    }

    # @lmeyer: simple and stupid would still be keeping the logic in python,
    # possibly with config flags for branched logic.
    # until that logic becomes too ugly to keep in python, i suppose..
    if major_version < 4:  # for 3.x, all bugs should go to the rpm advisory
        impetus_bugs["rpm"] = set(bugs)
    else:  # for 4.x
        # sweep rpm cve trackers into "rpm" advisory
        rpm_bugs = dict()
        if mode == 'sweep' and cve_trackers:
            rpm_bugs = bzutil.get_valid_rpm_cves(bugs)
            green_prefix("RPM CVEs found: ")
            click.echo(sorted(b.id for b in rpm_bugs))

            if rpm_bugs:
                # if --check-builds flag is set
                # only attach bugs that have corresponding brew builds attached to rpm advisory
                if check_builds:
                    click.echo("Validating bugs with builds attached to the rpm advisory")
                    attached_builds = errata.get_advisory_nvrs(runtime.group_config.advisories["rpm"])
                    packages = attached_builds.keys()
                    not_found = []
                    for bug, package_name in rpm_bugs.items():
                        if package_name not in packages:
                            not_found.append((bug.id, package_name))
                        else:
                            click.echo(f"Build found for #{bug.id}, {package_name}")
                            impetus_bugs["rpm"].add(bug)

                    if not_found:
                        red_prefix("RPM CVE Warning: ")
                        click.echo("The following CVE (bug, package) were found but not attached, because no corresponding brew builds were found attached to the rpm advisory. First attach builds and then rerun to attach the bugs")
                        click.echo(not_found)
                else:
                    click.echo("Skipping attaching RPM CVEs. Use --check-builds flag to validate with builds.")

        impetus_bugs["extras"] = extras_bugs(bugs)

        # all other bugs should go into "image" advisory
        impetus_bugs["image"] = set(bugs) - impetus_bugs["extras"] - rpm_bugs.keys()

    if default_advisory_type and impetus_bugs.get(default_advisory_type):
        errata.add_bugs_with_retry(advisory, impetus_bugs[default_advisory_type], noop=noop)
    elif into_default_advisories:
        for impetus, bugs in impetus_bugs.items():
            if bugs:
                green_prefix(f'{impetus} advisory: ')
                errata.add_bugs_with_retry(runtime.group_config.advisories[impetus], bugs, noop=noop)
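
For 4.x, the advisory routing at the end is plain set arithmetic: extras bugs are classified first, valid rpm CVE trackers are pulled out, and everything left lands in the image advisory. The same logic in miniature (bug IDs made up):

bugs = {8675309, 7001337, 1234567, 7654321}
extras = {7001337}       # e.g. operator-related bugs
rpm_cves = {7654321}     # valid rpm CVE trackers
image = bugs - extras - rpm_cves
assert image == {8675309, 1234567}
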
Example #14
def tag_builds_cli(runtime: Runtime, advisories: Tuple[int],
                   default_advisory_type: str, product_version: str,
                   builds: Tuple[str], tag: str, dont_untag: bool,
                   dry_run: bool):
    """ Tag builds into Brew tag and optionally untag unspecified builds.

    Example 1: Tag RHEL7 RPMs that are on the ocp-build-data recorded advisory into rhaos-4.3-rhel-7-image-build

    $ elliott --group=openshift-4.3 tag-builds --use-default-advisory rpm --product-version RHEL-7-OSE-4.3 --tag rhaos-4.3-rhel-7-image-build

    Example 2: Tag RHEL8 RPMs that are on advisory 55016 into rhaos-4.3-rhel-8-image-build

    $ elliott --group=openshift-4.3 tag-builds --advisory 55016 --product-version OSE-4.4-RHEL-8 --tag rhaos-4.3-rhel-8-image-build

    Example 3: Tag specified builds into rhaos-4.3-rhel-8-image-build

    $ elliott --group=openshift-4.3 tag-builds --build buildah-1.11.6-6.rhaos4.3.el8 --build openshift-4.3.23-202005230952.g1.b596217.el8 --tag rhaos-4.3-rhel-8-image-build
    """
    if advisories and builds:
        raise click.BadParameter('Use only one of --build or --advisory/-a.')
    if advisories and default_advisory_type:
        raise click.BadParameter(
            'Use only one of --use-default-advisory or --advisory/-a.')
    if default_advisory_type and builds:
        raise click.BadParameter(
            'Use only one of --build or --use-default-advisory.')
    if product_version and not advisories and not default_advisory_type:
        raise click.BadParameter(
            '--product-version should only be used with --use-default-advisory or --advisory/-a.'
        )

    runtime.initialize()
    logger = runtime.logger
    if default_advisory_type:
        advisories = (find_default_advisory(runtime, default_advisory_type), )

    all_builds = set()  # All Brew builds that should be in the tag

    if advisories:
        errata_session = requests.session()
        for advisory in advisories:
            logger.info(
                f"Fetching attached Brew builds from advisory {advisory}...")
            errata_builds = errata.get_builds(advisory, errata_session)
            product_versions = list(errata_builds.keys())
            logger.debug(
                f"Advisory {advisory} has builds for {len(product_versions)} product versions: {product_versions}"
            )
            if product_version:  # Only this product version should be concerned
                product_versions = [product_version]
            for pv in product_versions:
                logger.debug(f"Extract Errata builds for product version {pv}")
                nvrs = _extract_nvrs_from_errata_build_list(errata_builds, pv)
                logger.info(
                    f"Found {len(nvrs)} builds from advisory {advisory} with product version {pv}"
                )
                logger.debug(
                    f"The following builds are found for product version {pv}:\n\t{list(nvrs)}"
                )
                all_builds |= set(nvrs)

    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub
                                      or constants.BREW_HUB)
    if builds:  # NVRs are directly specified with --build
        build_objs = brew.get_build_objects(list(builds), brew_session)
        all_builds = {build["nvr"] for build in build_objs}

    click.echo(
        f"The following {len(all_builds)} build(s) should be in tag {tag}:")
    for nvr in all_builds:
        green_print(f"\t{nvr}")

    # get NVRs that have been tagged
    tagged_build_objs = brew_session.listTagged(tag,
                                                latest=False,
                                                inherit=False)
    tagged_builds = {build["nvr"] for build in tagged_build_objs}

    # get NVRs that should be tagged
    missing_builds = all_builds - tagged_builds
    click.echo(f"{len(missing_builds)} build(s) need to be tagged into {tag}:")
    for nvr in missing_builds:
        green_print(f"\t{nvr}")

    # get NVRs that should be untagged
    extra_builds = tagged_builds - all_builds
    click.echo(f"{len(extra_builds)} build(s) need to be untagged from {tag}:")
    for nvr in extra_builds:
        green_print(f"\t{nvr}")

    if dry_run:
        yellow_print("Dry run: Do nothing.")
        return

    brew_session.gssapi_login()

    failed_to_untag = []
    if not dont_untag:
        # untag extra builds
        extra_builds = list(extra_builds)
        logger.info(f"Untagging {len(extra_builds)} build(s) from {tag}...")
        multicall_tasks = brew.untag_builds(tag, extra_builds, brew_session)
        for index, task in enumerate(multicall_tasks):
            nvr = extra_builds[index]
            try:
                task.result
                click.echo(f"{nvr} has been successfully untagged from {tag}")
            except Exception as ex:
                failed_to_untag.append(nvr)
                logger.error(f"Failed to untag {nvr}: {ex}")

    # tag missing builds
    missing_builds = list(missing_builds)
    task_id_nvr_map = {}
    logger.info(f"Tagging {len(missing_builds)} build(s) into {tag}...")
    multicall_tasks = brew.tag_builds(tag, missing_builds, brew_session)
    failed_to_tag = []
    for index, task in enumerate(multicall_tasks):
        nvr = missing_builds[index]
        try:
            task_id = task.result
            task_id_nvr_map[task_id] = nvr
        except Exception as ex:
            failed_to_tag.append(nvr)
            logger.error(f"Failed to tag {nvr}: {ex}")

    if task_id_nvr_map:
        # wait for tag task to finish
        logger.info("Waiting for tag tasks to finish")
        brew.wait_tasks(task_id_nvr_map.keys(), brew_session, logger=logger)
        # get tagging results
        stopped_tasks = list(task_id_nvr_map.keys())
        with brew_session.multicall(strict=False) as m:
            multicall_tasks = []
            for task_id in stopped_tasks:
                multicall_tasks.append(
                    m.getTaskResult(task_id, raise_fault=False))
        for index, t in enumerate(multicall_tasks):
            task_id = stopped_tasks[index]
            nvr = task_id_nvr_map[task_id]
            tag_res = t.result
            logger.debug(
                f"Tagging task {task_id} {nvr} returned result {tag_res}")
            if tag_res and 'faultCode' in tag_res:
                if "already tagged" not in tag_res["faultString"]:
                    failed_to_tag.append(nvr)
                    logger.error(
                        f'Failed to tag {nvr} into {tag}: {tag_res["faultString"]}'
                    )
                    continue
            click.echo(f"{nvr} has been successfully tagged into {tag}")

    if failed_to_untag:
        red_print("The following builds failed to untag:")
        for nvr in failed_to_untag:
            red_print(f"\t{nvr}")
    elif not dont_untag:
        green_print(
            f"All unspecified builds have been successfully untagged from {tag}."
        )

    if failed_to_tag:
        red_print("The following builds failed to tag:")
        for nvr in failed_to_tag:
            red_print(f"\t{nvr}")
    else:
        green_print(f"All builds have been successfully tagged into {tag}.")

    if failed_to_untag or failed_to_tag:
        raise exceptions.ElliottFatalError(
            "Not all builds were successfully tagged/untagged.")