Example #1
def _fetch_builds_by_kind_image(runtime: Runtime, tag_pv_map: Dict[str, str],
                                brew_session: koji.ClientSession,
                                payload_only: bool, non_payload_only: bool):
    image_metas: List[ImageMetadata] = []
    for image in runtime.image_metas():
        if image.base_only or not image.is_release:
            continue
        if (payload_only and not image.is_payload) or (non_payload_only
                                                       and image.is_payload):
            continue
        image_metas.append(image)

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components...'
    )

    brew_latest_builds: List[Dict] = asyncio.get_event_loop(
    ).run_until_complete(
        asyncio.gather(*[
            exectools.to_thread(progress_func, image.get_latest_build)
            for image in image_metas
        ]))

    _ensure_accepted_tags(brew_latest_builds, brew_session, tag_pv_map)

    shipped = _find_shipped_builds([b["id"] for b in brew_latest_builds],
                                   brew_session)
    unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.'
    )
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    return nvrps
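
Example #1 overlaps the blocking get_latest_build calls by pushing each one onto a worker thread and gathering the results on the event loop. Below is a minimal, self-contained sketch of that pattern using only the standard library; fetch() and the component names are hypothetical stand-ins for the Brew lookup, and asyncio.to_thread plays the role of exectools.to_thread.

import asyncio
import time
from typing import Dict, List


def fetch(component: str) -> Dict:
    # Hypothetical stand-in for a blocking Brew lookup such as get_latest_build.
    time.sleep(0.1)
    return {"name": component, "id": hash(component) % 1000}


async def fetch_all(components: List[str]) -> List[Dict]:
    # Each blocking call runs in the default thread pool, so the lookups
    # overlap instead of executing one after another.
    return await asyncio.gather(*[asyncio.to_thread(fetch, c) for c in components])


if __name__ == "__main__":
    builds = asyncio.run(fetch_all(["etcd", "oauth-server", "console"]))
    print(builds)
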
Example #2
def _fetch_builds_by_kind_image(runtime: Runtime, tag_pv_map: Dict[str, str],
                                brew_session: koji.ClientSession,
                                payload_only: bool, non_payload_only: bool):
    image_metas: List[ImageMetadata] = []
    for image in runtime.image_metas():
        if image.base_only or not image.is_release:
            continue
        if (payload_only and not image.is_payload) or (non_payload_only
                                                       and image.is_payload):
            continue
        image_metas.append(image)

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components...'
    )

    brew_latest_builds: List[Dict] = []
    for image in image_metas:
        LOGGER.info("Getting latest build for %s...", image.distgit_key)
        brew_latest_builds.append(image.get_latest_build(brew_session))

    _ensure_accepted_tags(brew_latest_builds, brew_session, tag_pv_map)

    shipped = _find_shipped_builds([b["id"] for b in brew_latest_builds],
                                   brew_session)
    unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.'
    )
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    return nvrps
Example #3
def _fetch_builds_by_kind_image(runtime, tag_pv_map, brew_event, brew_session, p, np):
    # filter out images like 'openshift-enterprise-base'
    image_metas = [i for i in runtime.image_metas() if not i.base_only]
    # Returns a list of (name, version, release, product_version) tuples of each build
    nvrps = []

    # Decide whether an image should be included, given the payload (p)
    # and non-payload (np) filter flags.
    def tj(image, p, np):
        if not image.is_release:
            return False
        if p:
            # p is truthy here, so keep only payload images
            return p == image.is_payload
        if np:
            # np is truthy here, so keep only non-payload images
            return np != image.is_payload
        else:
            return True

    tag_component_tuples = [(tag, image.get_component_name()) for tag in tag_pv_map for image in image_metas if tj(image, p, np)]

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components with tags {", ".join(tag_pv_map.keys())}...',
        tag_component_tuples)
    latest_builds = brew.get_latest_builds(tag_component_tuples, event=brew_event, session=brew_session)

    for i, build in enumerate(latest_builds):
        if not build:
            continue
        tag = tag_component_tuples[i][0]
        nvrps.append((build[0]['name'], build[0]['version'], build[0]['release'], tag_pv_map[tag]))

    return nvrps
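
The tj() helper above is a three-way filter over the --payload/--non-payload flags. Here is a standalone, logically equivalent version (assuming the flags and is_payload are plain booleans) with a quick check of each flag combination; FakeImage is a hypothetical stand-in for ImageMetadata.

from dataclasses import dataclass


@dataclass
class FakeImage:
    # Hypothetical stand-in exposing only the fields tj() reads.
    is_release: bool
    is_payload: bool


def tj(image: FakeImage, p: bool, np: bool) -> bool:
    # Equivalent to Example #3's tj() when the inputs are booleans.
    if not image.is_release:
        return False
    if p:
        return image.is_payload
    if np:
        return not image.is_payload
    return True


images = [FakeImage(is_release=True, is_payload=True),
          FakeImage(is_release=True, is_payload=False),
          FakeImage(is_release=False, is_payload=True)]
for p, np in [(False, False), (True, False), (False, True)]:
    kept = [img for img in images if tj(img, p, np)]
    print(f"p={p} np={np} -> {len(kept)} image(s) kept")
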
Example #4
def repair_bugs(runtime, advisory, auto, id, original_state, new_state,
                comment, close_placeholder, use_jira, noop,
                default_advisory_type, bug_tracker):
    changed_bug_count = 0

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if auto:
        click.echo("Fetching Advisory(errata_id={})".format(advisory))
        if use_jira:
            raw_bug_list = [
                issue["key"]
                for issue in errata.get_jira_issue_from_advisory(advisory)
            ]
        else:
            e = elliottlib.errata.Advisory(errata_id=advisory)
            raw_bug_list = e.errata_bugs
    else:
        click.echo("Bypassed fetching erratum, using provided BZs")
        raw_bug_list = cli_opts.id_convert(id)

    green_print("Getting bugs for advisory")

    # Fetch bugs in parallel because it can be really slow doing it
    # one-by-one when you have hundreds of bugs
    pbar_header("Fetching data for {} bugs: ".format(len(raw_bug_list)),
                "Hold on a moment, we have to grab each one", raw_bug_list)
    pool = ThreadPool(cpu_count())
    click.secho("[", nl=False)

    attached_bugs = pool.map(
        lambda bug: progress_func(lambda: bug_tracker.get_bug(bug), '*'),
        raw_bug_list)
    # Wait for results
    pool.close()
    pool.join()
    click.echo(']')

    green_print("Got bugs for advisory")
    for bug in attached_bugs:
        if close_placeholder and "Placeholder" in bug.summary:
            # when close_placeholder is set, close placeholder bugs regardless of their current state
            bug_tracker.update_bug_status(bug, "CLOSED")
            changed_bug_count += 1
        else:
            if bug.status in original_state:
                bug_tracker.update_bug_status(bug, new_state)
                # only add comments for non-placeholder bugs
                if comment and not noop:
                    bug_tracker.add_comment(bug, comment, private=False)
                changed_bug_count += 1

    green_print("{} bugs successfully modified (or would have been)".format(
        changed_bug_count))
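
Example #4 fans the per-bug lookups out over a thread pool and prints a '*' between '[' and ']' as each call completes. A minimal self-contained sketch of that progress pattern; slow_lookup() and the local progress_func() below are hypothetical stand-ins for bug_tracker.get_bug and elliott's progress helper.

import time
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool as ThreadPool


def slow_lookup(bug_id):
    # Hypothetical stand-in for a slow per-bug API call.
    time.sleep(0.1)
    return {"id": bug_id, "status": "MODIFIED"}


def progress_func(func, char='*'):
    # Minimal version of the progress helper: run func, then print a marker.
    result = func()
    print(char, end='', flush=True)
    return result


bug_ids = [101, 102, 103, 104]
pool = ThreadPool(cpu_count())
print('[', end='', flush=True)
bugs = pool.map(lambda b: progress_func(lambda: slow_lookup(b)), bug_ids)
pool.close()
pool.join()
print(']')
print(bugs)
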
Example #5
def _fetch_builds_by_kind_rpm(builds, base_tag, product_version, session):
    green_prefix('Generating list of rpms: ')
    click.echo('Hold on a moment, fetching Brew builds')
    candidates = elliottlib.brew.find_unshipped_build_candidates(
        base_tag, product_version, kind='rpm')

    pbar_header('Gathering additional information: ',
                'Brew buildinfo is required to continue', candidates)
    # We could easily be making scores of requests, one for each build
    # we need information about. May as well do it in parallel.
    results = parallel_results_with_progress(
        candidates, lambda nvr: elliottlib.brew.get_brew_build(
            nvr, product_version, session=session))
    return _attached_to_open_erratum_with_correct_product_version(
        results, product_version, elliottlib.errata)
Example #6
def _fetch_builds_by_kind_image(runtime: Runtime, tag_pv_map: Dict[str, str],
                                brew_event: Optional[int],
                                brew_session: koji.ClientSession, p: bool,
                                np: bool):
    # filter out images like 'openshift-enterprise-base'
    image_metas = [i for i in runtime.image_metas() if not i.base_only]

    # Decide whether an image should be included, given the payload (p)
    # and non-payload (np) filter flags from the enclosing scope.
    def tj(image):
        if not image.is_release:
            return False
        if p:
            # p is truthy here, so keep only payload images
            return p == image.is_payload
        if np:
            # np is truthy here, so keep only non-payload images
            return np != image.is_payload
        else:
            return True

    tag_component_tuples = [(tag, image.get_component_name())
                            for tag in tag_pv_map for image in image_metas
                            if tj(image)]

    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components with tags {", ".join(tag_pv_map.keys())}...',
        tag_component_tuples)

    brew_builds = brew.get_tagged_builds(tag_component_tuples,
                                         "image",
                                         event=brew_event,
                                         session=brew_session)
    brew_latest_builds = list(
        _find_latest_builds(brew_builds, runtime.assembly))

    click.echo(
        f'Found {len(brew_latest_builds)} builds. Filtering out shipped builds...'
    )
    shipped = _find_shipped_builds([b["id"] for b in brew_latest_builds],
                                   brew_session)
    unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    return nvrps
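
Examples #1, #2, and #6 all finish by dropping builds that have already shipped, using a set of shipped build IDs. A tiny illustration of that filtering step; the IDs and NVRs below are made up.

# Hypothetical Brew build records.
brew_latest_builds = [
    {"id": 111, "nvr": "foo-container-v4.9.0-1"},
    {"id": 222, "nvr": "bar-container-v4.9.0-2"},
    {"id": 333, "nvr": "baz-container-v4.9.0-3"},
]
# Pretend _find_shipped_builds() reported build 222 as already shipped.
shipped = {222}
unshipped = [b for b in brew_latest_builds if b["id"] not in shipped]
print([b["nvr"] for b in unshipped])  # only foo and baz remain
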
Example #7
def _fetch_builds_by_kind_image(runtime, default_product_version, session):
    image_metadata = []
    product_version_override = {}
    for b in runtime.image_metas():
        # filter out non_release builds
        if b not in runtime.group_config.get('non_release', []):
            product_version_override[b.name] = default_product_version
            if b.branch() != runtime.branch:
                product_version_override[b.name] = override_product_version(
                    default_product_version, b.branch())
            image_metadata.append(b)

    pbar_header('Generating list of images: ',
                'Hold on a moment, fetching Brew buildinfo', image_metadata)

    # Returns a list of (n, v, r, pv) tuples of each build
    image_tuples = parallel_results_with_progress(
        image_metadata,
        lambda build: build.get_latest_build_info(product_version_override))

    pbar_header('Generating build metadata: ',
                'Fetching data for {n} builds '.format(n=len(image_tuples)),
                image_tuples)

    # 'meta' here refers to the bits of metadata returned by
    # get_latest_build_info
    #
    # TODO: Update the ImageMetaData class to include the NVR as
    # an object attribute.
    results = parallel_results_with_progress(
        image_tuples,
        lambda meta: elliottlib.brew.get_brew_build('{}-{}-{}'.format(
            meta[0], meta[1], meta[2]),
                                                    product_version=meta[3],
                                                    session=session))

    return [
        b for b in results if not b.attached_to_open_erratum
        # filter out 'openshift-enterprise-base-container' since it's not needed in advisory
        if 'openshift-enterprise-base-container' not in b.nvr
    ]
Example #8
def find_builds_cli(runtime, advisory, default_advisory_type, builds, kind,
                    from_diff, as_json, allow_attached, remove, clean,
                    no_cdn_repos, payload, non_payload, brew_event):
    '''Automatically or manually find or attach/remove viable rpm or image builds
to ADVISORY. Default behavior searches Brew for viable builds in the
given group. Provide builds manually by giving one or more --build
(-b) options. Manually provided builds are verified against the Errata
Tool API.

\b
  * Attach the builds to ADVISORY by giving --attach
  * Remove the builds from ADVISORY by giving --remove
  * Specify the build type using --kind KIND

Example: Assuming --group=openshift-3.7, then a build is a VIABLE
BUILD IFF it meets ALL of the following criteria:

\b
  * HAS the tag in brew: rhaos-3.7-rhel7-candidate
  * DOES NOT have the tag in brew: rhaos-3.7-rhel7
  * IS NOT attached to ANY existing RHBA, RHSA, or RHEA

That is to say, a viable build is tagged as a "candidate", has NOT
received the "shipped" tag yet, and is NOT attached to any PAST or
PRESENT advisory. Here are some examples:

    SHOW the latest OSE 3.6 image builds that would be attached to a
    3.6 advisory:

    $ elliott --group openshift-3.6 find-builds -k image

    ATTACH the latest OSE 3.6 rpm builds to advisory 123456:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm --attach 123456

    VERIFY (no --attach) that the manually provided RPM NVR and build
    ID are viable builds:

    $ elliott --group openshift-3.6 find-builds -k rpm -b megafrobber-1.0.1-2.el7 -a 93170

\b
    Remove a specific image NVR from an advisory:

    $ elliott --group openshift-4.3 find-builds -k image -b oauth-server-container-v4.3.22-202005212137 -a 55017 --remove
'''

    if from_diff and builds:
        raise click.BadParameter(
            'Use only one of --build or --from-diff/--between.')
    if clean and (remove or from_diff or builds):
        raise click.BadParameter(
            'Option --clean cannot be used with --build or --from-diff/--between.'
        )
    if not builds and remove:
        raise click.BadParameter(
            'Option --remove only supports removing specific builds with -b.')
    if from_diff and kind != "image":
        raise click.BadParameter(
            'Option --from-diff/--between should be used with --kind/-k image.'
        )
    if advisory and default_advisory_type:
        raise click.BadParameter(
            'Use only one of --use-default-advisory or --attach')
    if payload and non_payload:
        raise click.BadParameter('Use only one of --payload or --non-payload.')

    runtime.initialize(mode='images' if kind == 'image' else 'none')
    replace_vars = runtime.group_config.vars.primitive(
    ) if runtime.group_config.vars else {}
    et_data = runtime.gitdata.load_data(key='erratatool',
                                        replace_vars=replace_vars).data
    tag_pv_map = et_data.get('brew_tag_product_version_mapping')

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    ensure_erratatool_auth(
    )  # before we waste time looking up builds we can't process

    # get the builds we want to add
    unshipped_nvrps = []
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub
                                      or constants.BREW_HUB)
    if builds:
        green_prefix('Fetching builds...')
        unshipped_nvrps = _fetch_nvrps_by_nvr_or_id(
            builds, tag_pv_map, ignore_product_version=remove)
    elif clean:
        unshipped_builds = errata.get_brew_builds(advisory)
    elif from_diff:
        unshipped_nvrps = _fetch_builds_from_diff(from_diff[0], from_diff[1],
                                                  tag_pv_map)
    else:
        if kind == 'image':
            unshipped_nvrps = _fetch_builds_by_kind_image(
                runtime, tag_pv_map, brew_event, brew_session, payload,
                non_payload)
        elif kind == 'rpm':
            unshipped_nvrps = _fetch_builds_by_kind_rpm(
                runtime, tag_pv_map, brew_event, brew_session)

    pbar_header('Fetching builds from Errata: ',
                'Hold on a moment, fetching buildinfos from Errata Tool...',
                unshipped_builds if clean else unshipped_nvrps)

    if not clean and not remove:
        # with --clean the builds were already batch-fetched from the erratum,
        # so there is no need to fetch them individually; otherwise fetch each
        # build by its nvrp tuple and construct elliottlib.brew.Build objects
        # via get_brew_build(), e.g.:
        # ('atomic-openshift-descheduler-container', 'v4.3.23', '202005250821', 'RHEL-7-OSE-4.3')
        # -> Build(atomic-openshift-descheduler-container-v4.3.23-202005250821)
        unshipped_builds = parallel_results_with_progress(
            unshipped_nvrps, lambda nvrp: elliottlib.errata.get_brew_build(
                '{}-{}-{}'.format(nvrp[0], nvrp[1], nvrp[2]),
                nvrp[3],
                session=requests.Session()))
        if not (allow_attached or builds):
            unshipped_builds = _filter_out_inviable_builds(
                kind, unshipped_builds, elliottlib.errata)

        _json_dump(as_json, unshipped_builds, kind, tag_pv_map)

        if not unshipped_builds:
            green_print('No builds needed to be attached.')
            return

    if advisory:
        if remove:
            _detach_builds(
                advisory,
                [f"{nvrp[0]}-{nvrp[1]}-{nvrp[2]}" for nvrp in unshipped_nvrps])
        elif clean:
            _detach_builds(advisory, [b.nvr for b in unshipped_builds])
        else:  # attach
            erratum = _update_to_advisory(unshipped_builds, kind, advisory,
                                          remove, clean)
            if not no_cdn_repos and kind == "image" and not (remove or clean):
                cdn_repos = et_data.get('cdn_repos')
                if cdn_repos:
                    # set up CDN repos
                    click.echo(
                        f"Configuring CDN repos {', '.join(cdn_repos)}...")
                    erratum.metadataCdnRepos(enable=cdn_repos)
                    click.echo("Done")

    else:
        click.echo('The following {n} builds '.format(n=len(unshipped_builds)),
                   nl=False)
        if not (remove or clean):
            click.secho('may be attached', bold=True, nl=False)
            click.echo(' to an advisory:')
        else:
            click.secho('may be removed', bold=True, nl=False)
            click.echo(' from an advisory:')
        for b in sorted(unshipped_builds):
            click.echo(' ' + b.nvr)
Example #9
def poll_signed(runtime, minutes, advisory, default_advisory_type, noop):
    """Poll for the signed-status of RPM builds attached to
ADVISORY. Returns rc=0 when all builds have been signed. Returns non-0
if MINUTES pass before all builds are signed; this non-0 return code
is the number of unsigned builds remaining. All
builds must show 'signed' for this command to succeed.

    NOTE: The two advisory options are mutually exclusive.

For testing in pipeline scripts this sub-command accepts a --noop
option. When --noop is used the value of --minutes is irrelevant. This
command will print out the signed state of all attached builds and
then exit with rc=0 if all builds are signed and non-0 if builds are
still unsigned. In the non-0 case the return code is the number of
unsigned builds.

    Wait 15 minutes for the default 4.2 advisory to show all RPMs have
    been signed:

    $ elliott -g openshift-4.2 poll-signed --use-default-advisory rpm

    Wait 5 minutes for the provided 4.2 advisory to show all RPMs have
    been signed:

    $ elliott -g openshift-4.2 poll-signed -m 5 --advisory 123456

    Print the signed status of all attached builds, exit
    immediately. Return code is the number of unsigned builds.

\b
    $ elliott -g openshift-4.2 poll-signed --noop --use-default-advisory rpm
"""
    if not (bool(advisory) ^ bool(default_advisory_type)):
        raise click.BadParameter(
            "Use only one of --use-default-advisory or --advisory")

    runtime.initialize(no_group=default_advisory_type is None)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if not noop:
        click.echo("Polling up to {} minutes for all RPMs to be signed".format(
            minutes))

    try:
        e = elliottlib.errata.Advisory(errata_id=advisory)
        all_builds = set([])
        all_signed = False
        # `errata_builds` is a dict with brew tags as keys, values are
        # lists of builds on the advisory with that tag
        for k, v in e.errata_builds.items():
            all_builds = all_builds.union(set(v))
        green_prefix("Fetching initial states: ")
        click.echo("{} builds to check".format(len(all_builds)))
        start_time = datetime.datetime.now()
        while datetime.datetime.now() - start_time < datetime.timedelta(
                minutes=minutes):
            pbar_header("Getting build signatures: ", "Should be pretty quick",
                        all_builds)
            pool = ThreadPool(cpu_count())
            # Look up builds concurrently
            click.secho("[", nl=False)

            build_sigs = pool.map(
                lambda build: progress_func(
                    lambda: elliottlib.errata.build_signed(build), '*'),
                all_builds)
            # Wait for results
            pool.close()
            pool.join()
            click.echo(']')

            if all(build_sigs):
                all_signed = True
                break
            elif noop:
                # Escape the time-loop
                break
            else:
                yellow_prefix("Not all builds signed: ")
                click.echo("re-checking")
                continue

        if not all_signed:
            red_prefix("Signing incomplete: ")
            if noop:
                click.echo("All builds not signed. ")
            else:
                click.echo(
                    "All builds not signed in given window ({} minutes). ".
                    format(minutes))
                exit(1)
        else:
            green_prefix("All builds signed: ")
            click.echo("Enjoy!")
    except ErrataException as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
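
The heart of Example #9 is a poll-until-deadline loop: keep re-checking every build until all of them pass or the time window closes. A minimal self-contained sketch of that shape; check() is a hypothetical stand-in for elliottlib.errata.build_signed and simply succeeds at random, and the real command checks builds concurrently via a thread pool, which is omitted here.

import datetime
import random
import time


def check(item) -> bool:
    # Hypothetical stand-in for a single signed-status lookup.
    return random.random() < 0.3


def poll_until_signed(minutes: float, items) -> bool:
    # Re-check every item until all pass or the time window closes.
    deadline = datetime.datetime.now() + datetime.timedelta(minutes=minutes)
    while datetime.datetime.now() < deadline:
        if all(check(item) for item in items):
            return True
        time.sleep(1)  # brief pause before re-checking
    return False


if __name__ == "__main__":
    ok = poll_until_signed(minutes=0.05, items=["build-a", "build-b"])
    print("all signed" if ok else "window expired with unsigned builds")
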
Example #10
async def get_latest_image_builds(image_metas: Iterable[ImageMetadata]):
    pbar_header(
        'Generating list of images: ',
        f'Hold on a moment, fetching Brew builds for {len(image_metas)} components...')
    builds: List[Dict] = await asyncio.gather(*[exectools.to_thread(progress_func, image.get_latest_build) for image in image_metas])
    return builds
Example #11
def repair_bugs(runtime, advisory, auto, id, original_state, new_state, noop,
                default_advisory_type):
    """Move bugs attached to the advisory from one state to another
state. This is useful if the bugs have changed states *after* they
were attached. Similar to `find-bugs` but in reverse. `repair-bugs`
begins by reading bugs from an advisory, whereas `find-bugs` reads
from bugzilla.

This looks at attached bugs in the provided --from state and moves
them to the provided --to state.

\b
    Background: This is intended for bugs which went to MODIFIED, were
    attached to advisories, set to ON_QA, and then failed
    testing. When this happens their state is reset back to ASSIGNED.

Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Most likely you will only want to use the `rpm` key,
but that could change in the future. Use of this option conflicts with
providing an advisory with the -a/--advisory option.

    Move bugs on 123456 FROM the MODIFIED state back TO ON_QA state:

\b
    $ elliott --group=openshift-4.1 repair-bugs --auto --advisory 123456 --from MODIFIED --to ON_QA

    As above, but using the default RPM advisory defined in ocp-build-data:

\b
    $ elliott --group=openshift-4.1 repair-bugs --auto --use-default-advisory rpm --from MODIFIED --to ON_QA

    The previous examples could also be run like this (MODIFIED and ON_QA are both defaults):

\b
    $ elliott --group=openshift-4.1 repair-bugs --auto --use-default-advisory rpm

    Bug ids may be given manually instead of using --auto:

\b
    $ elliott --group=openshift-4.1 repair-bugs --id 170899 --id 8675309 --use-default-advisory rpm
"""
    if auto and len(id) > 0:
        raise click.BadParameter(
            "Combining the automatic and manual bug modification options is not supported"
        )

    if not auto and len(id) == 0:
        # No bugs were provided
        raise click.BadParameter(
            "If not using --auto then one or more --id's must be provided")

    if advisory and default_advisory_type:
        raise click.BadParameter(
            "Use only one of --use-default-advisory or --advisory")

    if len(id) == 0 and advisory is None and default_advisory_type is None:
        # error, no bugs, advisory, or default selected
        raise click.BadParameter(
            "No input provided: Must use one of --id, --advisory, or --use-default-advisory"
        )

    # Load bugzilla information and get a reference to the API
    runtime.initialize()
    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    bzapi = elliottlib.bzutil.get_bzapi(bz_data)
    changed_bug_count = 0
    attached_bugs = []

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    raw_bug_list = []
    if auto:
        click.echo("Fetching Erratum(errata_id={})".format(advisory))
        e = Erratum(errata_id=advisory)
        raw_bug_list = e.errata_bugs
    else:
        click.echo("Bypassed fetching erratum, using provided BZs")
        raw_bug_list = cli_opts.id_convert(id)

    green_print("Getting bugs for advisory")

    # Fetch bugs in parallel because it can be really slow doing it
    # one-by-one when you have hundreds of bugs
    pbar_header("Fetching data for {} bugs: ".format(len(raw_bug_list)),
                "Hold on a moment, we have to grab each one", raw_bug_list)
    pool = ThreadPool(cpu_count())
    click.secho("[", nl=False)

    attached_bugs = pool.map(
        lambda bug: progress_func(lambda: bzapi.getbug(bug), '*'),
        raw_bug_list)
    # Wait for results
    pool.close()
    pool.join()
    click.echo(']')

    green_print("Got bugs for advisory")
    for bug in attached_bugs:
        if bug.status in original_state:
            changed_bug_count += 1
            elliottlib.bzutil.set_state(bug, new_state, noop=noop)

    green_print("{} bugs successfullly modified (or would have been)".format(
        changed_bug_count))