Esempio n. 1
0
    def resolve_metadata(self):
        """
        Locate and load the group control data.

        The data can live on a local filesystem or in a git repository
        that can be checked out (some day perhaps a database).

        If the scheme is empty, assume file:///...
        Allow http, https, ssh and ssh+git (all valid git clone URLs)

        :raises ElliottFatalError: if no data path is configured or the
            git data cannot be fetched.
        """

        if self.data_path is None:
            # No usable source for metadata -- tell the user every way it can be set.
            raise ElliottFatalError(
                f"No metadata path provided. Must be set via one of:\n"
                f"* data_path key in {self.cfg_obj.full_path}\n"
                f"* elliott --data-path [PATH|URL]\n"
                f"* Environment variable ELLIOTT_DATA_PATH\n")

        try:
            data = gitdata.GitData(data_path=self.data_path,
                                   clone_dir=self.working_dir,
                                   branch=self.group,
                                   logger=self.logger)
            self.gitdata = data
            self.data_dir = data.data_dir
        except gitdata.GitDataException as err:
            # Surface clone/checkout problems as a fatal CLI error.
            raise ElliottFatalError(err)
Esempio n. 2
0
def remove_bugs(advisory_id, bug_ids, remove_all, bug_tracker, noop):
    """Detach bugs from an advisory.

    Removes either the given bug IDs (restricted to those actually
    attached) or, with remove_all, every bug currently on the advisory.
    """
    try:
        advisory = errata.Advisory(errata_id=advisory_id)
    except GSSError:
        # No valid kerberos ticket -- cannot talk to Errata Tool.
        exit_unauthenticated()

    if not advisory:
        raise ElliottFatalError(f"Error: Could not locate advisory {advisory_id}")

    try:
        attached = bug_tracker.advisory_bug_ids(advisory)
        # With remove_all, operate on everything attached; otherwise keep
        # only the requested IDs that are actually on the advisory.
        bug_ids = attached if remove_all else [b for b in bug_ids if b in attached]

        green_prefix(f"Found {len(bug_ids)} bugs attached to advisory: ")
        click.echo(f"{bug_ids}")

        if not bug_ids:
            return

        green_prefix(f"Removing bugs from advisory {advisory_id}..")
        bug_tracker.remove_bugs(advisory, bug_ids, noop)
    except ErrataException as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 3
0
def verify_attached_operators_cli(runtime, advisories):
    """
    Verify attached operator manifest references are shipping or already shipped.

    Takes a list of advisories that may contain operator metadata/bundle builds
    or image builds that are shipping alongside. Then determines whether the
    operator manifests refer only to images that have shipped in the past or
    are shipping in these advisories. An error is raised if there are no
    manifest builds attached, or if any references are missing.

    NOTE: this will fail before 4.3 because they referred to images not manifest lists.
    """

    runtime.initialize()
    # Prefer the brew hub URL configured for this group; fall back to the default hub.
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub
                                      or constants.BREW_HUB)
    # All image builds attached across the given advisories.
    image_builds = _get_attached_image_builds(brew_session, advisories)

    referenced_specs = _extract_operator_manifest_image_references(
        image_builds)
    if not referenced_specs:
        # you are probably using this because you expect attached operator bundles or metadata
        raise ElliottFatalError(
            f"No bundle or appregistry builds found in advisories {advisories}."
        )

    # check if references are satisfied by any image we are shipping or have shipped
    image_builds.extend(_get_shipped_images(runtime, brew_session))
    available_shasums = _extract_available_image_shasums(image_builds)
    if _any_references_are_missing(referenced_specs, available_shasums):
        raise ElliottFatalError(
            "Some references were missing. Ensure all manifest references are shipped or shipping."
        )
    green_print("All operator manifest references were found.")
def _download_appregistry_image_references(appregistry_build):
    """Download an appregistry build's operator manifests archive and return
    the image pullspecs listed in its CSV's spec.relatedImages."""
    # for appregistry, image references are buried in the CSV in an archive
    url = constants.BREW_DOWNLOAD_TEMPLATE.format(
        name=appregistry_build['package_name'],
        version=appregistry_build['version'],
        release=appregistry_build['release'],
        file_path="operator-manifests/operator_manifests.zip",
    )
    try:
        res = requests.get(url, timeout=10.0)
    except Exception as ex:
        # Connection-level failures become a fatal CLI error with the URL for context.
        raise ElliottFatalError(f"appregistry data download {url} failed: {ex}")
    if res.status_code != 200:
        raise ElliottFatalError(f"appregistry data download {url} failed (status_code={res.status_code}): {res.text}")

    # e.g. "v4.2.0..." -> "4.2". NOTE(review): assumes the version always starts
    # with v<major>.<minor>; re.match returns None otherwise and this line would
    # raise AttributeError -- confirm upstream guarantees the format.
    minor_version = re.match(r'^v(\d+\.\d+)', appregistry_build['version']).groups()[0]
    csv = {}
    with ZipFile(BytesIO(res.content)) as z:
        for filename in z.namelist():
            # Only consider the CSV under this minor version's directory.
            if re.match(f"^{minor_version}/.*clusterserviceversion.yaml", filename):
                with z.open(filename) as csv_file:
                    if csv:
                        # A second match means the archive layout is not what we expect.
                        raise ElliottFatalError(f"found more than one CSV in {appregistry_build['nvr']}?!? (unknown)")
                    csv = yaml.full_load(csv_file)

    if not csv:
        raise ElliottFatalError(f"could not find the csv for appregistry {appregistry_build['nvr']}")
    return [ref['image'] for ref in csv['spec']['relatedImages']]
Esempio n. 5
0
def _attach_to_advisory(builds, kind, advisory):
    """Attach brew builds of the given kind to an Errata Tool advisory.

    :param builds: brew build objects exposing .nvr and .product_version
    :param kind: 'image' or 'rpm' (None is a usage error)
    :param advisory: advisory (errata) id to attach to
    :raises ElliottFatalError: on missing kind or brew/errata failures
    """
    if kind is None:
        raise ElliottFatalError(
            f'Need to specify with --kind=image or --kind=rpm with packages: {builds}')

    try:
        erratum = Erratum(errata_id=advisory)
        # Image builds ship as 'tar' archives; rpm builds as 'rpm'.
        file_type = 'tar' if kind == 'image' else 'rpm'
        file_types = {build.nvr: [file_type] for build in builds}

        # Errata Tool wants builds added per product version.
        for pv in {build.product_version for build in builds}:
            nvrs_for_pv = [build.nvr for build in builds
                           if build.product_version == pv]
            erratum.addBuilds(buildlist=nvrs_for_pv,
                              release=pv,
                              file_types=file_types)
            erratum.commit()

        green_print('Attached build(s) successfully:')
        for nvr in sorted(build.nvr for build in builds):
            click.echo(' ' + nvr)

    except GSSError:
        # No valid kerberos ticket for Errata Tool.
        exit_unauthenticated()
    except elliottlib.exceptions.BrewBuildException as ex:
        raise ElliottFatalError('Error attaching builds: {}'.format(
            getattr(ex, 'message', repr(ex))))
Esempio n. 6
0
def remove_bugs(runtime, advisory, default_advisory_type, id):
    """Remove given BUGS from ADVISORY.

    Remove bugs that have been attached an advisory:

\b
    $ elliott --group openshift-3.7 remove-bugs --id 123456 --advisory 1234123

    Remove two bugs from default rpm advisory. Note that --group is required
    because default advisory is from ocp-build-data:

\b
    $ elliott --group openshift-3.7 remove-bugs --id 123456 --id 3412311 --use-default-advisory rpm


"""
    if bool(advisory) == bool(default_advisory_type):
        raise click.BadParameter(
            "Specify exactly one of --use-default-advisory or advisory arg")

    runtime.initialize()
    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    bzapi = elliottlib.bzutil.get_bzapi(bz_data)

    # Resolve the requested IDs into full bugzilla Bug objects.
    bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]

    green_prefix("Found {} bugs:".format(len(bugs)))
    click.echo(" {}".format(", ".join(str(b.bug_id) for b in bugs)))

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if advisory is not False:
        try:
            advs = Erratum(errata_id=advisory)
        except GSSError:
            # No valid kerberos ticket for Errata Tool.
            exit_unauthenticated()

        if advs is False:
            raise ElliottFatalError(
                "Error: Could not locate advisory {advs}".format(
                    advs=advisory))

        try:
            green_prefix("Removing {count} bugs from advisory:".format(
                count=len(bugs)))
            click.echo(" {advs}".format(advs=advisory))
            advs.removeBugs([b.id for b in bugs])
            advs.commit()
        except ErrataException as ex:
            raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 7
0
def create_placeholder_cli(runtime, kind, advisory, default_advisory_type):
    """Create a placeholder bug for attaching to an advisory.

    KIND - The kind of placeholder to create ({}).
    ADVISORY - Optional. The advisory to attach the bug to.

    $ elliott --group openshift-4.1 create-placeholder --kind rpm --attach 12345
""".format('/'.join(elliottlib.constants.standard_advisory_types))

    runtime.initialize()
    # --advisory and --use-default-advisory are mutually exclusive.
    if advisory and default_advisory_type:
        raise click.BadParameter(
            "Use only one of --use-default-advisory or --advisory")

    if default_advisory_type is not None:
        # Look up this group's advisory of the requested type; its type
        # doubles as the placeholder kind.
        advisory = find_default_advisory(runtime, default_advisory_type)
        kind = default_advisory_type

    if kind is None:
        raise click.BadParameter(
            "--kind must be specified when not using --use-default-advisory")

    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    target_release = bz_data['target_release'][0]
    newbug = elliottlib.bzutil.create_placeholder(bz_data, kind,
                                                  target_release)

    click.echo("Created BZ: {} {}".format(newbug.id, newbug.weburl))

    # NOTE(review): `is not False` suggests the CLI option presumably defaults
    # to False when no advisory is given -- confirm against the option decl.
    if advisory is not False:
        click.echo("Attaching to advisory...")

        try:
            advs = Erratum(errata_id=advisory)
        except GSSError:
            # No valid kerberos ticket for Errata Tool.
            exit_unauthenticated()

        if advs is False:
            raise ElliottFatalError(
                "Error: Could not locate advisory {advs}".format(
                    advs=advisory))

        try:
            green_prefix("Adding placeholder bug to advisory:")
            click.echo(" {advs}".format(advs=advisory))
            advs.addBugs([newbug.id])
            advs.commit()
        except ErrataException as ex:
            raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 8
0
def _update_to_advisory(builds, kind, advisory, remove, clean):
    """Attach brew builds of the given kind to an advisory and return the Erratum.

    :param builds: brew build objects exposing .nvr and .product_version
    :param kind: 'rpm' or 'image'
    :param advisory: advisory (errata) id to attach to
    :raises ValueError: if kind is not 'rpm' or 'image'
    :raises ElliottFatalError: on brew build failures

    NOTE(review): the `remove` and `clean` parameters are accepted but never
    used in this body -- confirm whether callers still rely on passing them.
    """
    click.echo(f"Attaching to advisory {advisory}...")
    if kind not in {"rpm", "image"}:
        raise ValueError(f"{kind} should be one of 'rpm' or 'image'")
    try:
        erratum = Erratum(errata_id=advisory)
        # Image builds ship as 'tar' archives; rpm builds as 'rpm'.
        file_type = 'tar' if kind == 'image' else 'rpm'
        product_version_set = {build.product_version for build in builds}
        # Errata Tool wants builds added per product version.
        for pv in product_version_set:
            erratum.addBuilds(buildlist=[
                build.nvr for build in builds if build.product_version == pv
            ],
                              release=pv,
                              file_types={
                                  build.nvr: [file_type]
                                  for build in builds
                              })
            erratum.commit()

        build_nvrs = sorted(build.nvr for build in builds)
        green_print('Attached build(s) successfully:')
        for b in build_nvrs:
            click.echo(' ' + b)
        return erratum

    except GSSError:
        # No valid kerberos ticket for Errata Tool.
        exit_unauthenticated()
    except elliottlib.exceptions.BrewBuildException as ex:
        raise ElliottFatalError(f'Error attaching/removing builds: {str(ex)}')
Esempio n. 9
0
def get_image_nvr(image):
    """
    Get brew NVR from `oc image info` output.

    :param str image: reference to an image in the payload

    :return: A brew NVR string "<component>-<version>-<release>"
    :raises ElliottFatalError: when the oc command exits non-zero
    :raises KeyError/ValueError: when the image json lacks the expected labels
    """
    try:
        oc_output = check_output(['oc', 'image', 'info', '--output=json', image])
    except CalledProcessError as e:
        raise ElliottFatalError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))

    try:
        image_json = json.loads(oc_output)
        labels = image_json['config']['config']['Labels']
        image_name = labels['com.redhat.component']
        image_version = labels['version']
        image_release = labels['release']
    except Exception:
        # check_output() returns bytes; decode before concatenating with str,
        # otherwise this error path itself raises TypeError and masks the
        # original problem.
        print("This image json does not have the expected fields:\n"
              + oc_output.decode('utf-8', errors='replace'))
        raise

    return "{}-{}-{}".format(image_name, image_version, image_release)
Esempio n. 10
0
def add_metadata_cli(runtime, kind, impetus, advisory):
    """Add metadata to an advisory. This is usually called by
create immediately after creation. It is only useful to you if
you are going back and adding metadata to older advisories.

    Note: Requires you provide a --group

Example to add standard metadata to a 3.10 images release

\b
    $ elliott --group=openshift-3.10 add-metadata --impetus standard --kind image
"""
    runtime.initialize()
    # Derive the X.Y release string from the group's branch name.
    release = release_from_branch(runtime.group_config.branch)

    try:
        advisory = Erratum(errata_id=advisory)
    except GSSError:
        # No valid kerberos ticket for Errata Tool.
        exit_unauthenticated()

    # ART metadata is stored as a structured comment on the advisory.
    result = elliottlib.errata.add_comment(
        advisory.errata_id, {'release': release, 'kind': kind, 'impetus': impetus})

    if result.status_code == 201:  # 201 Created -> comment added
        green_prefix("Added metadata successfully")
        click.echo()
    elif result.status_code == 403:  # 403 Forbidden -> not authorized
        exit_unauthorized()
    else:
        red_print("Something weird may have happened")
        raise ElliottFatalError(
            "Unexpected response from ET API: {code}".format(code=result.status_code))
Esempio n. 11
0
def list_cli(ctx, filter_id, n):
    """Print a list of one-line informational strings of RHOSE
advisories. By default the 5 most recently created advisories are
printed. Note, they are NOT sorted by release date.

    NOTE: new filters must be created in the Errata Tool web
    interface.

Default filter definition: RHBA; Active; Product: RHOSE; Devel Group:
ENG OpenShift Enterprise; sorted by newest. Browse this filter
yourself online: https://errata.devel.redhat.com/filter/1965

    List 10 advisories instead of the default 6 with your custom
    filter #1337:

    $ elliott list -n 10 -f 1337
"""
    # NOTE(review): the help text above says "5 most recently" in one place and
    # "default 6" in another -- the actual default of n is declared elsewhere;
    # confirm and reconcile.
    try:
        # One line per advisory: release date, state, synopsis, URL.
        for erratum in elliottlib.errata.get_filtered_list(filter_id, limit=n):
            click.echo(
                "{release_date:11s} {state:15s} {synopsis:80s} {url}".format(
                    release_date=erratum.publish_date_override,
                    state=erratum.errata_state,
                    synopsis=erratum.synopsis,
                    url=erratum.url()))
    except GSSError:
        # No valid kerberos ticket for Errata Tool.
        exit_unauthenticated()
    except elliottlib.exceptions.ErrataToolError as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 12
0
def get_bug_list(working_dir, old, new):
    """
    Get fixed bugzilla IDs between two payloads. Needs to clone
    the entire okd repo, so it can be quite slow.

    :param str working_dir: file location to clone okd repo
    :param str old: URL to the previous payload
    :param str new: URL to the current payload

    :return: A list of BZ IDs (note: check_output returns bytes, so the
        list elements are presumably bytes lines -- confirm against callers)
    :raises ElliottFatalError: when oc returns a non-zero exit

    """
    bug_list = []
    try:
        bug_list = check_output([
            'oc',
            'adm',
            'release',
            'info',
            '-o',
            'name',  # only output BZ IDs
            '--bugs={}/origin'.format(
                working_dir),  # clone origin to working dir
            '--changes-from={}'.format(old),
            new  # payloads to compare
        ]).splitlines()
    except CalledProcessError as e:
        # Wrap the subprocess failure in the CLI's fatal error type.
        raise ElliottFatalError(
            "command '{}' return with error (code {}): {}".format(
                e.cmd, e.returncode, e.output))

    return bug_list
Esempio n. 13
0
def get_changelog(working_dir, old, new):
    """
    Return the changelog between two payloads. Needs to clone the entire
    okd repo, so it can be quite slow.

    :param str working_dir: file location to clone okd repo
    :param str old: URL to the previous payload
    :param str new: URL to the current payload

    :return: the changelog text as produced by `oc adm release info`
    :raises ElliottFatalError: when oc returns a non-zero exit
    """
    cmd = [
        'oc',
        'adm',
        'release',
        'info',
        f'--changelog={working_dir}/origin',  # clone origin to working dir
        f'--changes-from={old}',
        new,  # payloads to compare
    ]
    try:
        return check_output(cmd)
    except CalledProcessError as e:
        # Wrap the subprocess failure in the CLI's fatal error type.
        raise ElliottFatalError(
            "command '{}' return with error (code {}): {}".format(
                e.cmd, e.returncode, e.output))
Esempio n. 14
0
def _assert_bugs_are_viable(errata_type, bugs, bug_objects):
    """Fail fast unless every bug exists, is viable, is a CVE tracker when the
    advisory is an RHSA, and is not already attached to another advisory."""
    for index, bug in enumerate(bug_objects):
        bug_id = bugs[index]
        if not bug:
            raise ElliottFatalError(f"Couldn't find bug {bug_id}. Did you log in?")
        if not elliottlib.bzutil.is_viable_bug(bug):
            raise ElliottFatalError(f"Bug {bug_id} is not viable: Status is {bug.status}.")
        # RHSAs may only carry CVE tracker bugs.
        if errata_type == 'RHSA' and not elliottlib.bzutil.is_cve_tracker(bug):
            raise ElliottFatalError(f"Bug {bug_id} is not a CVE tracker: Keywords are {bug.keywords}.")
        LOGGER.info(f"Checking if bug {bug_id} is already attached to an advisory...")
        advisories = elliottlib.errata.get_advisories_for_bug(bug_id)
        if advisories:
            attached = " ".join(str(item["id"]) for item in advisories)
            raise ElliottFatalError(
                f"Bug {bug_id} is already attached to advisories: {attached}")
        LOGGER.info(f"Bug {bug_id} is viable.")
Esempio n. 15
0
def _fetch_builds_by_id(builds, product_version, session):
    """Resolve each provided NVR to a brew build, failing fast on any miss."""
    green_prefix('Build NVRs provided: ')
    click.echo('Manually verifying the builds exist')
    try:
        fetched = []
        for nvr in builds:
            fetched.append(
                elliottlib.brew.get_brew_build(nvr, product_version,
                                               session=session))
        return fetched
    except elliottlib.exceptions.BrewBuildException as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 16
0
def _determine_errata_info(runtime):
    """Return (base_tag, product_version) for the runtime's branch.

    :raises ElliottFatalError: when no branch is configured.
    """
    if not runtime.branch:
        raise ElliottFatalError(
            'Need to specify a branch either in group.yml or with --branch option'
        )

    et_data = runtime.gitdata.load_data(key='erratatool').data
    # The product version may be overridden based on the branch (base tag).
    product_version = override_product_version(
        et_data.get('product_version'), runtime.branch)
    return runtime.branch, product_version
Esempio n. 17
0
def _assert_bugs_are_viable(bugs, bug_objects):
    """Fail fast unless every bug exists, is viable, and is not already
    attached to some other advisory."""
    for index, bug in enumerate(bug_objects):
        bug_id = bugs[index]
        if not bug:
            raise ElliottFatalError(f"Couldn't find bug {bug_id}. Did you log in?")
        if not elliottlib.bzutil.is_viable_bug(bug):
            raise ElliottFatalError(
                f"Bug {bug_id} is not viable: Status is {bug.status}.")
        LOGGER.info(f"Checking if bug {bug_id} is already attached to an advisory...")
        advisories = elliottlib.errata.get_advisories_for_bug(bug_id)
        if advisories:
            attached = " ".join(str(item["id"]) for item in advisories)
            raise ElliottFatalError(
                f"Bug {bug_id} is already attached to advisories: {attached}")
        LOGGER.info(f"Bug {bug_id} is viable.")
Esempio n. 18
0
def show(ctx, advisory, yaml, json):
    """ Show RPMDiff failures for an advisory.
    """
    runtime = ctx.obj  # type: Runtime
    if not advisory:
        # No advisory given: fall back to the group's configured RPM advisory.
        runtime.initialize()
        advisory = runtime.group_config.advisories.get("rpm", 0)
        if not advisory:
            raise ElliottFatalError(
                "No RPM advisory number configured in ocp-build-data.")
    else:
        # Explicit advisory number: group data is not needed.
        runtime.initialize(no_group=True)
    logger = runtime.logger
    logger.info(
        "Fetching RPMDiff runs from Errata Tool for advisory {}...".format(
            advisory))
    rpmdiff_runs = list(errata.get_rpmdiff_runs(advisory))
    logger.info("Found {} RPMDiff runs.".format(len(rpmdiff_runs)))
    # "good" means PASSED, INFO, or WAIVED
    good_runs = []
    # "bad" means NEEDS_INSPECTION or FAILED
    bad_runs = []
    incomplete_runs = []
    # Bucket each run by its external-test status.
    for rpmdiff_run in rpmdiff_runs:
        attr = rpmdiff_run['attributes']
        if attr["status"] in constants.ET_GOOD_EXTERNAL_TEST_STATUSES:
            good_runs.append(rpmdiff_run)
        elif attr["status"] in constants.ET_BAD_EXTERNAL_TEST_STATUSES:
            bad_runs.append(rpmdiff_run)
        else:
            incomplete_runs.append(rpmdiff_run)
    # One-line colored summary of the bucket sizes.
    util.green_prefix("good: {}".format(len(good_runs)))
    click.echo(", ", nl=False)
    util.red_prefix("bad: {}".format(len(bad_runs)))
    click.echo(", ", nl=False)
    util.yellow_print("incomplete: {}".format(len(incomplete_runs)))

    if not bad_runs:
        return

    logger.info(
        "Fetching detailed information from RPMDiff for bad RPMDiff runs...")
    rpmdiff_client = RPMDiffClient(constants.RPMDIFF_HUB_URL)
    rpmdiff_client.authenticate()

    # yaml/json flags select machine-readable output; otherwise human-readable.
    if yaml or json:
        _structured_output(bad_runs, rpmdiff_client, yaml)
    else:
        _unstructured_output(bad_runs, rpmdiff_client)
Esempio n. 19
0
def puddle_advisories_cli(runtime, filter_id, details):
    """Print a comma separated list of advisory numbers which can be used
when filling in the 'errata_whitelist' parameter in a signed puddle
config.

Uses an Errata Tool filter to find in-progress and being-released
advisories for OpenShift. This list is trimmed down to only advisories
matching the given --group by parsing the ART metadata embedded in the
first comment.

    List advisories required to create a signed 4.2 puddle:

\b
    $ elliott --group=openshift-4.1 puddle-advisories
    44849, 44740
"""
    use_in_puddle_conf = []
    runtime.initialize()
    # Build the "X.Y" release string from the group branch.
    major = major_from_branch(runtime.group_config.branch)
    minor = minor_from_branch(runtime.group_config.branch)
    release = "{}.{}".format(major, minor)

    try:
        for erratum in elliottlib.errata.get_filtered_list(filter_id,
                                                           limit=50):
            metadata_comments_json = elliottlib.errata.get_metadata_comments_json(
                erratum.errata_id)
            if not metadata_comments_json:
                # Does not contain ART metadata, skip it
                sys.stderr.write("Does not contain ART metadata: {}\n".format(
                    erratum.errata_id))
                continue

            # Only the first metadata comment is considered authoritative.
            metadata = metadata_comments_json[0]
            # Keep advisories for this release, excluding test-impetus ones.
            if str(metadata['release']) == str(release) and (
                    metadata['impetus'] != 'test'):
                use_in_puddle_conf.append(str(erratum.errata_id))
                if details:
                    # Full erratum dump goes to stderr so stdout stays parseable.
                    sys.stderr.write(str(erratum))
                    sys.stderr.flush()

        click.echo(", ".join(use_in_puddle_conf))
    except GSSError:
        # No valid kerberos ticket for Errata Tool.
        exit_unauthenticated()
    except elliottlib.exceptions.ErrataToolError as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 20
0
    def late_resolve_image(self, distgit_name, add=False, data_obj=None):
        """Resolve image and retrieve meta, optionally adding to image_map.
        If image not found, error will be thrown

        :param distgit_name: distgit key identifying the image
        :param add: when True, cache the resolved metadata in image_map
        :param data_obj: pre-loaded gitdata object; loaded from 'images' if None
        :return: the ImageMetadata for the image
        :raises ElliottFatalError: if the image metadata cannot be resolved
        """

        # Already resolved and cached -- return it directly.
        if distgit_name in self.image_map:
            return self.image_map[distgit_name]
        if not data_obj:
            # Template variables for the group, plus the runtime assembly if set.
            replace_vars = self.group_config.vars.primitive() if self.group_config.vars else {}
            if self.assembly:
                replace_vars['runtime_assembly'] = self.assembly
            data_obj = self.gitdata.load_data(path='images', key=distgit_name, replace_vars=replace_vars)
            if not data_obj:
                # Fix: error message previously misspelled "resolve" as "resovle".
                raise ElliottFatalError('Unable to resolve image metadata for {}'.format(distgit_name))

        meta = ImageMetadata(self, data_obj)
        if add:
            self.image_map[distgit_name] = meta
        return meta
Esempio n. 21
0
def get_build_list(old, new):
    """
    Get changed container builds between two payloads.

    :param str old: URL to the previous payload
    :param str new: URL to the current payload

    :return: A list of brew NVRs
    :raises ElliottFatalError: when oc returns a non-zero exit

    """
    build_list = []
    oc_output = ""
    try:
        # JSON diff of the two payloads, including the changed image set.
        oc_output = check_output([
            'oc',
            'adm',
            'release',
            'info',
            '--output=json',
            '--changes-from={}'.format(old),
            new  # payloads to compare
        ])
    except CalledProcessError as e:
        # Wrap the subprocess failure in the CLI's fatal error type.
        raise ElliottFatalError(
            "command '{}' return with error (code {}): {}".format(
                e.cmd, e.returncode, e.output))

    payload_json = json.loads(oc_output)
    changed_images = []

    # Collect the pullspec of every image that changed between payloads.
    for k, v in payload_json["changedImages"].items():
        if k == "machine-os-content":
            continue  # no use in comparing this as it doesn't go in the advisory
        if v["to"]:
            changed_images.append(v["to"]["from"]["name"])

    # Resolve each changed pullspec to its brew NVR (one oc call per image).
    for i in changed_images:
        build_list.append(get_image_nvr(i))

    return build_list
Esempio n. 22
0
def create_placeholder(kind, advisory_id, bug_tracker, noop):
    """Create a placeholder bug and, if an advisory id is given, attach it."""
    bug = bug_tracker.create_placeholder(kind, noop)
    if noop:
        # Dry run: nothing was created, so there is nothing to report or attach.
        return

    click.echo(f"Created Bug: {bug.id} {bug.weburl}")

    if not advisory_id:
        return

    try:
        advisory = Erratum(errata_id=advisory_id)
    except GSSError:
        # No valid kerberos ticket for Errata Tool.
        exit_unauthenticated()

    if advisory is False:
        raise ElliottFatalError(f"Error: Could not locate advisory {advisory_id}")

    click.echo("Attaching bug to advisory...")
    bug_tracker.attach_bugs(advisory_id, [bug.id])
Esempio n. 23
0
def show(ctx, advisory):
    """ Show RPMDiff failures for an advisory.
    """
    runtime = ctx.obj  # type: Runtime
    if not advisory:
        # No advisory given: fall back to the group's configured RPM advisory.
        runtime.initialize()
        advisory = runtime.group_config.advisories.get("rpm", 0)
        if not advisory:
            raise ElliottFatalError(
                "No RPM advisory number configured in ocp-build-data.")
    else:
        # Explicit advisory number: group data is not needed.
        runtime.initialize(no_group=True)
    logger = runtime.logger
    logger.info(
        "Fetching RPMDiff runs from Errata Tool for advisory {}...".format(
            advisory))
    rpmdiff_runs = list(errata.get_rpmdiff_runs(advisory))
    logger.info("Found {} RPMDiff runs.".format(len(rpmdiff_runs)))
    # "good" means PASSED, INFO, or WAIVED
    good_runs = []
    # "bad" means NEEDS_INSPECTION or FAILED
    bad_runs = []
    incomplete_runs = []
    # Bucket each run by its external-test status.
    for rpmdiff_run in rpmdiff_runs:
        attr = rpmdiff_run['attributes']
        if attr["status"] in constants.ET_GOOD_EXTERNAL_TEST_STATUSES:
            good_runs.append(rpmdiff_run)
        elif attr["status"] in constants.ET_BAD_EXTERNAL_TEST_STATUSES:
            bad_runs.append(rpmdiff_run)
        else:
            incomplete_runs.append(rpmdiff_run)
    util.green_prefix("good: {}".format(len(good_runs)))
    click.echo(", ", nl=False)
    # Fix: add the missing space after the colon so the label matches the
    # "good: " / "incomplete: " labels in this summary line.
    util.red_prefix("bad: {}".format(len(bad_runs)))
    click.echo(", ", nl=False)
    util.yellow_print("incomplete: {}".format(len(incomplete_runs)))

    if not bad_runs:
        return

    logger.info(
        "Fetching detailed information from RPMDiff for bad RPMDiff runs...")
    rpmdiff_client = RPMDiffClient(constants.RPMDIFF_HUB_URL)
    rpmdiff_client.authenticate()
    for run in bad_runs:
        attr = run["attributes"]
        run_id = attr["external_id"]
        run_url = "{}/run/{}/".format(constants.RPMDIFF_WEB_URL, run_id)
        print("----------------")
        msg = "{0} {1}".format(run["relationships"]["brew_build"]["nvr"],
                               attr["status"])
        if attr["status"] == "NEEDS_INSPECTION":
            util.yellow_print(msg)
        else:
            util.red_print(msg)
        test_results = rpmdiff_client.get_test_results(run_id)
        run_obj = rpmdiff_client.get_run(run_id)

        for result in test_results:
            score = result["score"]
            if score >= 0 and score < 3:  # good test result
                continue
            result_id = result["result_id"]
            test = result["test"]
            details = result["details"]
            test_id = test["test_id"]
            package_name = run_obj["package_name"]
            result_url = run_url + str(test_id) + "/"
            result_msg = "* TEST {0} {2} {1} {3}".format(
                result_id, constants.RPMDIFF_SCORE_NAMES[score],
                test["description"], result_url)
            if score == 3:  # NEEDS_INSPECTION
                util.yellow_print(result_msg)
            else:
                util.red_print(result_msg)
            # get last waiver message
            waivers = rpmdiff_client.list_waivers(package_name,
                                                  test_id,
                                                  limit=1)
            if waivers:
                util.green_print("    Last waiver: @" +
                                 waivers[0]["owner"]["username"] + ": " +
                                 waivers[0]["description"])
            else:
                util.yellow_print("    No last waiver found.")
            for detail in details:
                detail_msg = "    * {1} {0}".format(
                    constants.RPMDIFF_SCORE_NAMES[detail["score"]],
                    detail["subpackage"])
                if detail["score"] == 3:
                    util.yellow_print(detail_msg)
                else:
                    util.red_print(detail_msg)
                # Indent the raw diff content under its detail line.
                content = re.sub('^',
                                 '        ',
                                 detail["content"],
                                 flags=re.MULTILINE)
                print(content)
        print()
Esempio n. 24
0
def create_cli(ctx, runtime, errata_type, kind, impetus, date, assigned_to,
               manager, package_owner, with_placeholder, with_liveid, yes,
               bugs):
    """Create a new advisory. The kind of advisory must be specified with
'--kind'. Valid choices are 'rpm' and 'image'.

    You MUST specify a group (ex: "openshift-3.9") manually using the
    --group option. See examples below.

You must set a Release Date by providing a YYYY-Mon-DD formatted string to the
--date option.

The default behavior for this command is to show what the generated
advisory would look like. The raw JSON used to create the advisory
will be printed to the screen instead of posted to the Errata Tool
API.

The impetus option only affects the metadata added to the new
advisory and its synopsis.

The --assigned-to, --manager and --package-owner options are required.
They are the email addresses of the parties responsible for managing and
approving the advisory.

Adding a list of bug ids with one or more --bugs arguments attaches those bugs to the
advisory on creation.

Provide the '--yes' or '-y' option to confirm creation of the
advisory.

    PREVIEW an RPM Advisory 21 days from now (the default release date) for OSE 3.9:

    $ elliott --group openshift-3.9 create

    CREATE Image Advisory for the 3.5 series on the first Monday in March:

\b
    $ elliott --group openshift-3.5 create --yes -k image --date 2018-Mar-05
"""
    runtime.initialize()

    # Per-group Errata Tool configuration from the group metadata repo.
    et_data = runtime.gitdata.load_data(key='erratatool').data

    # User entered a valid value for --date, set the release date
    release_date = datetime.datetime.strptime(date, YMD)

    ######################################################################

    # De-duplicate bug ids; --bugs may be passed multiple times with repeats.
    unique_bugs = set(bugs)

    if bugs:
        bug_tracker = BugzillaBugTracker(
            BugzillaBugTracker.get_config(runtime))
        LOGGER.info("Fetching bugs {} from Bugzilla...".format(" ".join(
            map(str, bugs))))
        bug_objects = bug_tracker.get_bugs(bugs)
        # assert bugs are viable for a new advisory (raises otherwise).
        _assert_bugs_are_viable(bugs, bug_objects)

    ######################################################################

    try:
        erratum = elliottlib.errata.new_erratum(
            et_data,
            errata_type=errata_type,
            kind=kind,
            # The "standard" impetus falls back to the per-kind boilerplate;
            # any other impetus selects its own boilerplate entry.
            boilerplate_name=(impetus if impetus != "standard" else kind),
            release_date=release_date.strftime(YMD),
            assigned_to=assigned_to,
            manager=manager,
            package_owner=package_owner)
    except elliottlib.exceptions.ErrataToolUnauthorizedException:
        exit_unauthorized()
    except elliottlib.exceptions.ErrataToolError as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))

    erratum.addBugs(unique_bugs)

    if yes:
        # --yes given: actually create the advisory in the Errata Tool.
        erratum.commit()
        green_prefix("Created new advisory: ")
        click.echo(str(erratum))

        # This is a little strange, I grant you that. For reference you
        # may wish to review the click docs
        #
        # http://click.pocoo.org/5/advanced/#invoking-other-commands
        #
        # You may be thinking, "But, add_metadata doesn't take keyword
        # arguments!" and that would be correct. However, we're not
        # calling that function directly. We actually use the context
        # 'invoke' method to call the _command_ (remember, it's wrapped
        # with click to create a 'command'). 'invoke' ensures the correct
        # options/arguments are mapped to the right parameters.
        ctx.invoke(add_metadata_cli,
                   kind=kind,
                   impetus=impetus,
                   advisory=erratum.errata_id)
        click.echo(str(erratum))

        if with_placeholder:
            click.echo("Creating and attaching placeholder bug...")
            ctx.invoke(create_placeholder_cli,
                       kind=kind,
                       advisory=erratum.errata_id)

        if with_liveid:
            # Live ID can only be requested once the advisory exists; uses
            # a --negotiate (kerberos) authenticated POST, retried on failure.
            click.echo("Requesting Live ID...")
            base_url = "https://errata.devel.redhat.com/errata/set_live_advisory_name"
            cmd_assert(
                f"curl -X POST --fail --negotiate -u : {base_url}/{erratum.errata_id}",
                retries=3,
                pollrate=10,
            )

    else:
        # Dry run (default): only show what would have been created.
        green_prefix("Would have created advisory: ")
        click.echo("")
        click.echo(erratum)
Esempio n. 25
0
    def initialize(self, mode='none', no_group=False):
        """Prepare this runtime for use.

        Validates flag combinations, creates (or reuses) the working
        directory, sets up logging, and — unless ``no_group`` — clones the
        group metadata and loads image configs.

        :param mode: which metadata to load; 'images' or 'both' loads image
            configs, anything else skips them.
        :param no_group: when True, stop after basic setup; nothing that
            requires --group is touched.
        :raises ElliottFatalError: if explicitly requested images/rpms were
            missing or filtered out.
        """
        # Idempotent: a second call is a no-op.
        if self.initialized:
            return

        if self.quiet and self.verbose:
            click.echo("Flags --quiet and --verbose are mutually exclusive")
            exit(1)

        # We could mark these as required and the click library would do this for us,
        # but this seems to prevent getting help from the various commands (unless you
        # specify the required parameters). This can probably be solved more cleanly, but TODO
        if not no_group and self.group is None:
            click.echo("Group must be specified")
            exit(1)

        if self.working_dir is None:
            # No working dir supplied: use a throwaway temp dir removed at exit.
            self.working_dir = tempfile.mkdtemp(".tmp", "elliott-")
            # This can be set to False by operations which want the working directory to be left around
            self.remove_tmp_working_dir = True
            atexit.register(remove_tmp_working_dir, self)
        else:
            self.working_dir = os.path.abspath(self.working_dir)
            if not os.path.isdir(self.working_dir):
                os.makedirs(self.working_dir)

        self.initialize_logging()

        if no_group:
            return  # nothing past here should be run without a group

        # Clone/locate the group metadata repository (sets self.gitdata).
        self.resolve_metadata()

        self.group_dir = self.gitdata.data_dir
        self.group_config = self.get_group_config()
        if self.group_config.name != self.group:
            raise IOError(
                "Name in group.yml does not match group name. Someone may have copied this group without updating group.yml (make sure to check branch)"
            )

        # Branch resolution precedence: command line > group.yml > unset.
        if self.branch is not None:
            self.logger.info("Using branch from command line: %s" %
                             self.branch)
        elif self.group_config.branch is not Missing:
            self.branch = self.group_config.branch
            self.logger.info("Using branch from group.yml: %s" % self.branch)
        else:
            self.logger.info(
                "No branch specified either in group.yml or on the command line; all included images will need to specify their own."
            )

        # Flattens a list like like [ 'x', 'y,z' ] into [ 'x.yml', 'y.yml', 'z.yml' ]
        # for later checking we need to remove from the lists, but they are tuples. Clone to list
        def flatten_list(names):
            # Split comma/space separated entries, flatten, de-duplicate.
            if not names:
                return []
            # split csv values
            result = []
            for n in names:
                result.append(
                    [x for x in n.replace(' ', ',').split(',') if x != ''])
            # flatten result and remove dupes
            return list(set([y for x in result for y in x]))

        def filter_enabled(n, d):
            # Keep only metadata whose 'mode' is 'enabled' (the default).
            return d.get('mode', 'enabled') == 'enabled'

        exclude_keys = flatten_list(self.exclude)
        image_keys = flatten_list(self.images)

        filter_func = filter_enabled

        # Variables from group.yml substituted into metadata files on load.
        replace_vars = self.group_config.vars.primitive(
        ) if self.group_config.vars else {}

        image_data = {}
        if mode in ['images', 'both']:
            # When explicit image keys were requested, skip the enabled-only
            # filter so a disabled image can still be selected by name.
            image_data = self.gitdata.load_data(
                path='images',
                keys=image_keys,
                exclude=exclude_keys,
                filter_funcs=None if len(image_keys) else filter_func,
                replace_vars=replace_vars)
            for i in image_data.values():
                self.late_resolve_image(i.key, add=True, data_obj=i)
            if not self.image_map:
                self.logger.warning(
                    "No image metadata directories found for given options within: {}"
                    .format(self.group_dir))

        # Fail loudly if the user asked for keys that were not loaded.
        missed_include = set(image_keys) - set(image_data.keys())
        if len(missed_include) > 0:
            raise ElliottFatalError(
                'The following images or rpms were either missing or filtered out: {}'
                .format(', '.join(missed_include)))
Esempio n. 26
0
def find_builds_cli(runtime, advisory, default_advisory_type, builds, kind,
                    from_diff, as_json):
    '''Automatically or manually find or attach viable rpm or image builds
to ADVISORY. Default behavior searches Brew for viable builds in the
given group. Provide builds manually by giving one or more --build
(-b) options. Manually provided builds are verified against the Errata
Tool API.

\b
  * Attach the builds to ADVISORY by giving --attach
  * Specify the build type using --kind KIND

Example: Assuming --group=openshift-3.7, then a build is a VIABLE
BUILD IFF it meets ALL of the following criteria:

\b
  * HAS the tag in brew: rhaos-3.7-rhel7-candidate
  * DOES NOT have the tag in brew: rhaos-3.7-rhel7
  * IS NOT attached to ANY existing RHBA, RHSA, or RHEA

That is to say, a viable build is tagged as a "candidate", has NOT
received the "shipped" tag yet, and is NOT attached to any PAST or
PRESENT advisory. Here are some examples:

    SHOW the latest OSE 3.6 image builds that would be attached to a
    3.6 advisory:

    $ elliott --group openshift-3.6 find-builds -k image

    ATTACH the latest OSE 3.6 rpm builds to advisory 123456:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm --attach 123456

    VERIFY (no --attach) that the manually provided RPM NVR and build
    ID are viable builds:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm -b megafrobber-1.0.1-2.el7 -b 93170
'''

    # The manual-build and payload-diff sources are mutually exclusive,
    # as are the two ways of naming the target advisory.
    if from_diff and builds:
        raise ElliottFatalError('Use only one of --build or --from-diff.')
    if advisory and default_advisory_type:
        raise click.BadParameter(
            'Use only one of --use-default-advisory or --attach')

    runtime.initialize()
    # Brew candidate tag and Errata Tool product version for this group.
    base_tag, product_version = _determine_errata_info(runtime)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    ensure_erratatool_auth(
    )  # before we waste time looking up builds we can't process

    # get the builds we want to add
    unshipped_builds = []
    session = requests.Session()
    if builds:
        # Builds named explicitly via one or more -b/--build options.
        unshipped_builds = _fetch_builds_by_id(builds, product_version,
                                               session)
    elif from_diff:
        unshipped_builds = _fetch_builds_from_diff(from_diff[0], from_diff[1],
                                                   product_version, session)
    else:
        if kind == 'image':
            unshipped_builds = _fetch_builds_by_kind_image(
                runtime, product_version, session)
        elif kind == 'rpm':
            # NOTE(review): 'builds' is empty/falsy in this branch (the
            # non-empty case was handled above) — confirm that
            # _fetch_builds_by_kind_rpm really expects it as first argument.
            unshipped_builds = _fetch_builds_by_kind_rpm(
                builds, base_tag, product_version, session)

    _json_dump(as_json, unshipped_builds, base_tag, kind)

    if not unshipped_builds:
        green_print('No builds needed to be attached.')
        return

    # 'advisory' is False when no --attach was given (presumably click's
    # option default — verify against the option declaration). In that
    # case only report the candidate builds instead of attaching.
    if advisory is not False:
        _attach_to_advisory(unshipped_builds, kind, advisory)
    else:
        click.echo('The following {n} builds '.format(n=len(unshipped_builds)),
                   nl=False)
        click.secho('may be attached ', bold=True, nl=False)
        click.echo('to an advisory:')
        for b in sorted(unshipped_builds):
            click.echo(' ' + b.nvr)
Esempio n. 27
0
def find_bugs_cli(runtime, advisory, default_advisory_type, mode, status, id,
                  cve_trackers, from_diff, flag, report,
                  into_default_advisories, noop):
    """Find Red Hat Bugzilla bugs or add them to ADVISORY. Bugs can be
"swept" into the advisory either automatically (--mode sweep), or by
manually specifying one or more bugs using --mode list and the --id option.
Use cases are described below:

    Note: Using --id without --add is basically pointless

SWEEP: For this use-case the --group option MUST be provided. The
--group automatically determines the correct target-releases to search
for bugs claimed to be fixed, but not yet attached to advisories.

LIST: The --group option is not required if you are specifying bugs
manually. Provide one or more --id's for manual bug addition. In LIST
mode you must provide a list of IDs to attach with the --id option.

DIFF: For this use case, you must provide the --between option using two
URLs to payloads.

QE: Find MODIFIED bugs for the target-releases, and set them to ON_QA.
The --group option MUST be provided. Cannot be used in combination
with --into-default-advisories, --add, --into-default-advisories

Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Use of this option silently overrides providing an
advisory with the --add option.

    Automatically add bugs with target-release matching 3.7.Z or 3.7.0
    to advisory 123456:

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --add 123456

    List bugs that WOULD be added to an advisory and have set the bro_ok flag on them (NOOP):

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --flag bro_ok

    Attach bugs to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image":

\b
    $ elliott --group=openshift-4.4 find-bugs --mode=sweep --into-default-advisories

    Add two bugs to advisory 123456. Note that --group is not required
    because we're not auto searching:

\b
    $ elliott find-bugs --mode list --id 8675309 --id 7001337 --add 123456

    Automatically find bugs for openshift-4.1 and attach them to the
    rpm advisory defined in ocp-build-data:

\b
    $ elliott --group=openshift-4.1 --mode sweep --use-default-advisory rpm

    Find bugs for 4.6 that are in MODIFIED state, and set them to ON_QA:

\b
    $ elliott --group=openshift-4.6 --mode qe
"""
    # --- Validate mutually exclusive option combinations -----------------
    if mode != 'list' and len(id) > 0:
        raise click.BadParameter(
            "Combining the automatic and manual bug attachment options is not supported"
        )

    if mode == 'list' and len(id) == 0:
        raise click.BadParameter(
            "When using mode=list, you must provide a list of bug IDs")

    # NOTE(review): this validates mode 'payload', but the dispatch below
    # handles mode 'diff'. If click actually accepts mode=payload, 'bugs'
    # would be left unbound further down — confirm the allowed mode choices.
    if mode == 'payload' and not len(from_diff) == 2:
        raise click.BadParameter(
            "If using mode=payload, you must provide two payloads to compare")

    # At most one destination option may be supplied.
    if sum(
            map(bool,
                [advisory, default_advisory_type, into_default_advisories
                 ])) > 1:
        raise click.BadParameter(
            "Use only one of --use-default-advisory, --add, or --into-default-advisories"
        )

    # qe mode changes bug states only; no advisory option makes sense.
    if mode == 'qe' and sum(
            map(bool,
                [advisory, default_advisory_type, into_default_advisories
                 ])) > 0:
        raise click.BadParameter(
            "--mode=qe does not operate on an advisory. Do not specify any of `--use-default-advisory`, `--add`, or `--into-default-advisories`"
        )

    runtime.initialize()
    # Group-specific Bugzilla config (server, target releases, ...).
    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    bzapi = bzutil.get_bzapi(bz_data)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    # --- Collect bugs according to the chosen mode -----------------------
    if mode == 'sweep' or mode == 'qe':
        if mode == 'qe':
            # qe mode only looks at MODIFIED bugs (they are moved to ON_QA below).
            status = ['MODIFIED']
        green_prefix(
            f"Searching for bugs with status {' '.join(status)} and target release(s):"
        )
        click.echo(" {tr}".format(tr=", ".join(bz_data['target_release'])))
        bugs = bzutil.search_for_bugs(
            bz_data,
            status,
            filter_out_security_bugs=not (cve_trackers),
            verbose=runtime.debug)
    elif mode == 'list':
        bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]
    elif mode == 'diff':
        click.echo(runtime.working_dir)
        bug_id_strings = openshiftclient.get_bug_list(runtime.working_dir,
                                                      from_diff[0],
                                                      from_diff[1])
        bugs = [bzapi.getbug(i) for i in bug_id_strings]

    # Some bugs should goes to CPaaS so we should ignore them
    m = re.match(
        r"rhaos-(\d+).(\d+)", runtime.branch
    )  # extract OpenShift version from the branch name. there should be a better way...
    if not m:
        raise ElliottFatalError(
            f"Unable to determine OpenShift version from branch name {runtime.branch}."
        )
    major_version = int(m[1])
    minor_version = int(m[2])

    def _filter_bugs(bugs):  # returns a list of bugs that should be processed
        r = []
        ignored_repos = set()  # GitHub repos that should be ignored
        if major_version == 4 and minor_version == 5:
            # per https://issues.redhat.com/browse/ART-997: these repos should have their release-4.5 branches ignored by ART:
            ignored_repos = {
                "https://github.com/openshift/aws-ebs-csi-driver",
                "https://github.com/openshift/aws-ebs-csi-driver-operator",
                "https://github.com/openshift/cloud-provider-openstack",
                "https://github.com/openshift/csi-driver-nfs",
                "https://github.com/openshift/csi-driver-manila-operator"
            }
        for bug in bugs:
            external_links = [
                ext["type"]["full_url"].replace("%id%", ext["ext_bz_bug_id"])
                for ext in bug.external_bugs
            ]  # https://github.com/python-bugzilla/python-bugzilla/blob/7aa70edcfea9b524cd8ac51a891b6395ca40dc87/bugzilla/_cli.py#L750
            public_links = [
                runtime.get_public_upstream(url)[0] for url in external_links
            ]  # translate openshift-priv org to openshift org when comparing to filter (i.e. prow may link to a PR on the private org).
            # if a bug has 1 or more public_links, we should ignore the bug if ALL of the public_links are ANY of `ignored_repos`
            if public_links and all(
                    map(
                        lambda url: any(
                            map(
                                lambda repo: url != repo and url.startswith(
                                    repo), ignored_repos)), public_links)):
                continue
            r.append(bug)
        return r

    if len(
            id
    ) == 0:  # unless --id is given, we should ignore bugs that don't belong to ART. e.g. some bugs should go to CPaaS
        filtered_bugs = _filter_bugs(bugs)
        green_prefix(
            f"Found {len(filtered_bugs)} bugs ({len(bugs) - len(filtered_bugs)} ignored):"
        )
        bugs = filtered_bugs
    else:
        green_prefix("Found {} bugs:".format(len(bugs)))
    click.echo(" {}".format(", ".join([str(b.bug_id) for b in bugs])))

    if mode == 'qe':
        # Move each MODIFIED bug found above to ON_QA.
        for bug in bugs:
            bzutil.set_state(bug, 'ON_QA', noop=noop)

    if len(flag) > 0:
        # Apply each requested Bugzilla flag (e.g. bro_ok) with a '+' value.
        for bug in bugs:
            for f in flag:
                if noop:
                    click.echo(
                        f'Would have updated bug {bug.id} by setting flag {f}')
                    continue
                bug.updateflags({f: "+"})

    if report:
        # Fixed-width tabular summary of the bugs found.
        green_print("{:<8s} {:<25s} {:<12s} {:<7s} {:<10s} {:60s}".format(
            "ID", "COMPONENT", "STATUS", "SCORE", "AGE", "SUMMARY"))
        for bug in bugs:
            created_date = datetime.datetime.strptime(str(bug.creation_time),
                                                      '%Y%m%dT%H:%M:%S')
            days_ago = (datetime.datetime.today() - created_date).days
            click.echo(
                "{:<8d} {:<25s} {:<12s} {:<7s} {:<3d} days   {:60s} ".format(
                    bug.id, bug.component, bug.status,
                    bug.cf_pm_score if hasattr(bug, "cf_pm_score") else '?',
                    days_ago, bug.summary[:60]))

    if advisory and not default_advisory_type:  # `--add ADVISORY_NUMBER` should respect the user's wish and attach all available bugs to whatever advisory is specified.
        errata.add_bugs_with_retry(advisory, bugs, noop=noop)
        return

    # If --use-default-advisory or --into-default-advisories is given, we need to determine which bugs should be swept into which advisory.
    # Otherwise we don't need to sweep bugs at all.
    if not (into_default_advisories or default_advisory_type):
        return
    impetus_bugs = {
    }  # key is impetus ("rpm", "image", "extras"), value is a set of bug IDs.
    # @lmeyer: simple and stupid would still be keeping the logic in python, possibly with config flags for branched logic. until that logic becomes too ugly to keep in python, i suppose..
    if major_version < 4:  # for 3.x, all bugs should go to the rpm advisory
        impetus_bugs["rpm"] = set(bugs)
    else:  # for 4.x
        # optional operators bugs should be swept to the "extras" advisory, while other bugs should be swept to "image" advisory.
        # a way to identify operator-related bugs is by its "Component" value. temporarily hardcode here until we need to move it to ocp-build-data.
        extra_components = {
            "Logging", "Service Brokers", "Metering Operator",
            "Node Feature Discovery Operator"
        }  # we will probably find more
        impetus_bugs["extras"] = {
            b
            for b in bugs if b.component in extra_components
        }
        impetus_bugs["image"] = {
            b
            for b in bugs if b.component not in extra_components
        }

    if default_advisory_type and impetus_bugs.get(default_advisory_type):
        errata.add_bugs_with_retry(advisory,
                                   impetus_bugs[default_advisory_type],
                                   noop=noop)
    elif into_default_advisories:
        # Re-binds 'bugs' in the loop; safe only because nothing reads the
        # original list after this point.
        for impetus, bugs in impetus_bugs.items():
            if bugs:
                errata.add_bugs_with_retry(
                    runtime.group_config.advisories[impetus], bugs, noop=noop)
Esempio n. 28
0
def poll_signed(runtime, minutes, advisory, default_advisory_type, noop):
    """Poll for the signed-status of RPM builds attached to
ADVISORY. Returns rc=0 when all builds have been signed. Returns non-0
after MINUTES have passed and all builds have not been signed. This
non-0 return code is the number of unsigned builds remaining. All
builds must show 'signed' for this command to succeed.

    NOTE: The two advisory options are mutually exclusive.

For testing in pipeline scripts this sub-command accepts a --noop
option. When --noop is used the value of --minutes is irrelevant. This
command will print out the signed state of all attached builds and
then exit with rc=0 if all builds are signed and non-0 if builds are
still unsigned. In the non-0 case the return code is the number of
unsigned builds.

    Wait 15 minutes for the default 4.2 advisory to show all RPMS have
    been signed:

    $ elliott -g openshift-4.2 poll-signed --use-default-advisory rpm

    Wait 5 mintes for the provided 4.2 advisory to show all RPMs have
    been signed:

    $ elliott -g openshift-4.2 poll-signed -m 5 --advisory 123456

    Print the signed status of all attached builds, exit
    immediately. Return code is the number of unsigned builds.

\b
    $ elliott -g openshift-4.2 poll-signed --noop --use-default-advisory rpm
"""
    # Exactly one of --advisory / --use-default-advisory must be given (XOR).
    if not (bool(advisory) ^ bool(default_advisory_type)):
        raise click.BadParameter(
            "Use only one of --use-default-advisory or --advisory")

    runtime.initialize(no_group=default_advisory_type is None)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if not noop:
        click.echo("Polling up to {} minutes for all RPMs to be signed".format(
            minutes))

    try:
        e = elliottlib.errata.Advisory(errata_id=advisory)
        all_builds = set([])
        all_signed = False
        # `errata_builds` is a dict with brew tags as keys, values are
        # lists of builds on the advisory with that tag
        for k, v in e.errata_builds.items():
            all_builds = all_builds.union(set(v))
        green_prefix("Fetching initial states: ")
        click.echo("{} builds to check".format(len(all_builds)))
        start_time = datetime.datetime.now()
        # Poll until the deadline; every pass re-checks each build's
        # signature status concurrently via a thread pool.
        while datetime.datetime.now() - start_time < datetime.timedelta(
                minutes=minutes):
            pbar_header("Getting build signatures: ", "Should be pretty quick",
                        all_builds)
            pool = ThreadPool(cpu_count())
            # Look up builds concurrently
            click.secho("[", nl=False)

            build_sigs = pool.map(
                lambda build: progress_func(
                    lambda: elliottlib.errata.build_signed(build), '*'),
                all_builds)
            # Wait for results
            pool.close()
            pool.join()
            click.echo(']')

            if all(build_sigs):
                all_signed = True
                break
            elif noop:
                # Escape the time-loop
                break
            else:
                yellow_prefix("Not all builds signed: ")
                click.echo("re-checking")
                continue

        if not all_signed:
            red_prefix("Signing incomplete: ")
            if noop:
                # NOTE(review): the docstring promises rc = number of
                # unsigned builds, but the noop path exits 0 here and the
                # non-noop path exits 1 below — confirm intended contract.
                click.echo("All builds not signed. ")
            else:
                click.echo(
                    "All builds not signed in given window ({} minutes). ".
                    format(minutes))
                exit(1)
        else:
            green_prefix("All builds signed: ")
            click.echo("Enjoy!")
    except ErrataException as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))
Esempio n. 29
0
def change_state_cli(runtime, state, advisory, default_advisory_type, noop):
    """Change the state of an ADVISORY. Additional permissions may be
required to change an advisory to certain states.

An advisory may not move between some states until all criteria have
been met. For example, an advisory can not move from NEW_FILES to QE
unless Bugzilla Bugs or JIRA Issues have been attached.

    NOTE: The two advisory options are mutually exclusive and can not
    be used together.

See the find-bugs help for additional information on adding
Bugzilla Bugs.

    Move the advisory 123456 from NEW_FILES to QE state:

    $ elliott change-state --state QE --advisory 123456

    Move the advisory 123456 back to NEW_FILES (short option flag):

    $ elliott change-state -s NEW_FILES -a 123456

    Do not actually change state, just check that the command could
    have ran (for example, when testing out pipelines)

    $ elliott change-state -s NEW_FILES -a 123456 --noop
"""
    # Exactly one of --advisory / --use-default-advisory must be supplied.
    if bool(advisory) == bool(default_advisory_type):
        raise click.BadParameter(
            "Use only one of --use-default-advisory or --advisory")

    runtime.initialize(no_group=default_advisory_type is None)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    prefix = "[NOOP] " if noop else ""

    try:
        e = Erratum(errata_id=advisory)

        # Nothing to do if the advisory is already in the requested state.
        if e.errata_state == state:
            green_prefix("{}No change to make: ".format(prefix))
            click.echo("Target state is same as current state")
            return

        # Of the five possible states (NEW_FILES, QE, REL_PREP, PUSH_READY,
        # IN_PUSH) we only allow transitions out of NEW_FILES or QE.
        if e.errata_state not in ('NEW_FILES', 'QE'):
            if default_advisory_type is not None:
                raise ElliottFatalError(
                    "Error: Could not change '{state}' advisory {advs}, group.yml is probably pointing at old one"
                    .format(state=e.errata_state, advs=advisory))
            raise ElliottFatalError(
                "Error: we can only change the state if it's in NEW_FILES or QE, current state is {s}"
                .format(s=e.errata_state))

        if noop:
            green_prefix("{}Would have changed state: ".format(prefix))
            click.echo("{} ➔ {}".format(e.errata_state, state))
            return

        # Capture current state because `e.commit()` will
        # refresh the `e.errata_state` attribute
        old_state = e.errata_state
        e.setState(state)
        e.commit()
        green_prefix("Changed state: ")
        click.echo("{old_state} ➔ {new_state}".format(
            old_state=old_state, new_state=state))
    except ErrataException as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))

    green_print("Successfully changed advisory state")
Esempio n. 30
0
def _fetch_builds_by_kind_rpm(runtime: Runtime, tag_pv_map: Dict[str, str],
                              brew_session: koji.ClientSession):
    """Collect unshipped rpm builds that are eligible to be swept onto an advisory.

    For each Brew tag in ``tag_pv_map`` the latest rpm builds are fetched.
    When the assembly has a basis event, the search is constrained to the
    'stream' assembly and then overridden by NVRs pinned via "is:" and by
    group dependencies.  Builds lacking any allowed tag are reported and
    dropped, and builds that already shipped are filtered out.

    :param runtime: elliott Runtime (supplies assembly, releases config,
        image/rpm maps, and the brew event to query at)
    :param tag_pv_map: mapping of Brew tag name -> product version
    :param brew_session: active Koji client session
    :return: sorted, de-duplicated list of nvrp tuples for unshipped builds
        (as produced by ``_gen_nvrp_tuples``)
    :raises ElliottFatalError: if the assembly declares image-member-specific
        or RHCOS-specific rpm dependencies (sweeping is not applicable then)
    """
    assembly = runtime.assembly
    if runtime.assembly_basis_event:
        # Lazy %-style args so the message is only formatted if emitted.
        LOGGER.warning(
            'Constraining rpm search to stream assembly due to assembly basis event %s',
            runtime.assembly_basis_event)
        # If an assembly has a basis event, its latest rpms can only be sourced from
        # "is:" or the stream assembly.
        assembly = 'stream'

        # ensures the runtime assembly doesn't include any image member specific or rhcos specific dependencies
        image_configs = [
            assembly_metadata_config(runtime.get_releases_config(),
                                     runtime.assembly, 'image',
                                     image.distgit_key, image.config)
            for _, image in runtime.image_map.items()
        ]
        if any(nvr for image_config in image_configs
               for dep in image_config.dependencies.rpms
               for _, nvr in dep.items()):
            raise ElliottFatalError(
                f"Assembly {runtime.assembly} is not applicable for build sweep because it contains image member specific dependencies for a custom release."
            )
        rhcos_config = assembly_rhcos_config(runtime.get_releases_config(),
                                             runtime.assembly)
        if any(nvr for dep in rhcos_config.dependencies.rpms
               for _, nvr in dep.items()):
            raise ElliottFatalError(
                f"Assembly {runtime.assembly} is not applicable for build sweep because it contains RHCOS specific dependencies for a custom release."
            )

    green_prefix('Generating list of rpms: ')
    click.echo('Hold on a moment, fetching Brew builds')

    builder = BuildFinder(brew_session, logger=LOGGER)
    builds: List[Dict] = []
    for tag in tag_pv_map:
        # keys are rpm component names, values are nvres
        component_builds: Dict[str, Dict] = builder.from_tag(
            "rpm",
            tag,
            inherit=False,
            assembly=assembly,
            event=runtime.brew_event)

        if runtime.assembly_basis_event:
            # If an assembly has a basis event, rpms pinned by "is" and group dependencies should take precedence over every build from the tag
            el_version = isolate_el_version_in_brew_tag(tag)
            if not el_version:
                continue  # Only honor pinned rpms if this tag is relevant to a RHEL version

            # Honors pinned NVRs by "is"
            pinned_by_is = builder.from_pinned_by_is(
                el_version, runtime.assembly, runtime.get_releases_config(),
                runtime.rpm_map)
            _ensure_accepted_tags(pinned_by_is.values(), brew_session,
                                  tag_pv_map)

            # Builds pinned by "is" should take precedence over every build from tag
            for component, pinned_build in pinned_by_is.items():
                if component in component_builds and pinned_build[
                        "id"] != component_builds[component]["id"]:
                    LOGGER.warning(
                        "Swapping stream nvr %s for pinned nvr %s...",
                        component_builds[component]["nvr"],
                        pinned_build["nvr"])

            component_builds.update(
                pinned_by_is
            )  # pinned rpms take precedence over those from tags

            # Honors group dependencies
            group_deps = builder.from_group_deps(
                el_version, runtime.group_config, runtime.rpm_map
            )  # the return value doesn't include any ART managed rpms
            # Group dependencies should take precedence over anything previously determined except those pinned by "is".
            for component, dep_build in group_deps.items():
                if component in component_builds and dep_build[
                        "id"] != component_builds[component]["id"]:
                    LOGGER.warning(
                        "Swapping stream nvr %s for group dependency nvr %s...",
                        component_builds[component]["nvr"], dep_build["nvr"])
            component_builds.update(group_deps)
        builds.extend(component_builds.values())

    # Tag-check every candidate; with raise_exception=False, unacceptable
    # builds simply lack a "tag_name" key rather than aborting the sweep.
    _ensure_accepted_tags(builds,
                          brew_session,
                          tag_pv_map,
                          raise_exception=False)
    qualified_builds = [b for b in builds if "tag_name" in b]
    not_attachable_nvrs = [b["nvr"] for b in builds if "tag_name" not in b]

    if not_attachable_nvrs:
        yellow_print(
            f"The following NVRs will not be swept because they don't have allowed tags {list(tag_pv_map.keys())}:"
        )
        for nvr in not_attachable_nvrs:
            yellow_print(f"\t{nvr}")

    click.echo("Filtering out shipped builds...")
    shipped = _find_shipped_builds([b["id"] for b in qualified_builds],
                                   brew_session)
    unshipped = [b for b in qualified_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.'
    )
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    nvrps = sorted(set(nvrps))  # remove duplicates
    return nvrps