def attach_builds(self, builds, kind):
    """ Attach a list of builds to Advisory

    :param builds: List of brew builds
    :param kind: rpm or image
    :raises ValueError: When wrong kind
    :raises ErrataException:
    """
    click.echo(f"Attaching to advisory {self.errata_id}...")
    if kind not in {"rpm", "image"}:
        raise ValueError(f"{kind} should be one of 'rpm' or 'image'")

    file_type = 'tar' if kind == 'image' else 'rpm'
    product_version_set = {build.product_version for build in builds}
    for pv in product_version_set:
        self.addBuilds(
            buildlist=[build.nvr for build in builds if build.product_version == pv],
            release=pv,
            file_types={build.nvr: [file_type] for build in builds if build.product_version == pv})

    build_nvrs = sorted(build.nvr for build in builds)
    green_print('Attached build(s) successfully:')
    click.echo(' '.join(build_nvrs))

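# Usage sketch for attach_builds (hedged: the advisory ID and NVR are
# hypothetical; assumes the Advisory class carrying this method subclasses
# errata_tool.Erratum, and that elliottlib.errata.get_brew_build is available
# as used elsewhere in this listing):
#
#     import requests
#     import elliottlib.errata
#
#     advisory = elliottlib.errata.Advisory(errata_id=123456)
#     build = elliottlib.errata.get_brew_build(
#         'megafrobber-1.0.1-2.el7', 'RHEL-7-OSE-3.6', session=requests.Session())
#     advisory.attach_builds([build], 'rpm')  # ValueError for any other kind
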
def _attach_to_advisory(builds, kind, advisory):
    if kind is None:
        raise ElliottFatalError(
            'Need to specify with --kind=image or --kind=rpm with packages: {}'.format(builds))

    try:
        erratum = Erratum(errata_id=advisory)
        file_type = 'tar' if kind == 'image' else 'rpm'
        product_version_set = {build.product_version for build in builds}
        for pv in product_version_set:
            erratum.addBuilds(
                buildlist=[build.nvr for build in builds if build.product_version == pv],
                release=pv,
                # restrict file_types to the builds in this product version
                file_types={build.nvr: [file_type] for build in builds if build.product_version == pv})
        erratum.commit()

        build_nvrs = sorted(build.nvr for build in builds)
        green_print('Attached build(s) successfully:')
        for b in build_nvrs:
            click.echo(' ' + b)
    except GSSError:
        exit_unauthenticated()
    except elliottlib.exceptions.BrewBuildException as ex:
        raise ElliottFatalError('Error attaching builds: {}'.format(
            getattr(ex, 'message', repr(ex))))

def _update_to_advisory(builds, kind, advisory, remove, clean):
    click.echo(f"Attaching to advisory {advisory}...")
    if kind not in {"rpm", "image"}:
        raise ValueError(f"{kind} should be one of 'rpm' or 'image'")

    try:
        erratum = Erratum(errata_id=advisory)
        file_type = 'tar' if kind == 'image' else 'rpm'
        product_version_set = {build.product_version for build in builds}
        for pv in product_version_set:
            erratum.addBuilds(
                buildlist=[build.nvr for build in builds if build.product_version == pv],
                release=pv,
                # restrict file_types to the builds in this product version
                file_types={build.nvr: [file_type] for build in builds if build.product_version == pv})
        erratum.commit()

        build_nvrs = sorted(build.nvr for build in builds)
        green_print('Attached build(s) successfully:')
        for b in build_nvrs:
            click.echo(' ' + b)
        return erratum
    except GSSError:
        exit_unauthenticated()
    except elliottlib.exceptions.BrewBuildException as ex:
        raise ElliottFatalError(f'Error attaching/removing builds: {str(ex)}')

def verify_attached_operators_cli(runtime, advisories):
    """
    Verify attached operator manifest references are shipping or already shipped.

    Takes a list of advisories that may contain operator metadata/bundle builds
    or image builds that are shipping alongside. Then determines whether the
    operator manifests refer only to images that have shipped in the past or
    are shipping in these advisories. An error is raised if there are no
    manifest builds attached, or if any references are missing.

    NOTE: this will fail before 4.3 because they referred to images not manifest lists.
    """
    runtime.initialize()
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub or constants.BREW_HUB)
    image_builds = _get_attached_image_builds(brew_session, advisories)

    referenced_specs = _extract_operator_manifest_image_references(image_builds)
    if not referenced_specs:
        # you are probably using this because you expect attached operator bundles or metadata
        raise ElliottFatalError(f"No bundle or appregistry builds found in advisories {advisories}.")

    # check if references are satisfied by any image we are shipping or have shipped
    image_builds.extend(_get_shipped_images(runtime, brew_session))
    available_shasums = _extract_available_image_shasums(image_builds)
    if _any_references_are_missing(referenced_specs, available_shasums):
        raise ElliottFatalError(
            "Some references were missing. Ensure all manifest references are shipped or shipping.")
    green_print("All operator manifest references were found.")

def print_report(bugs: type_bug_list, output: str = 'text') -> None:
    if output == 'slack':
        for bug in bugs:
            click.echo("<{}|{}> - {:<25s} ".format(bug.weburl, bug.id, bug.component))
    elif output == 'json':
        print(json.dumps(
            [
                {
                    "id": bug.id,
                    "component": bug.component,
                    "status": bug.status,
                    "date": str(bug.creation_time_parsed()),
                    "summary": bug.summary[:60],
                    "url": bug.weburl
                }
                for bug in bugs
            ],
            indent=4
        ))
    else:  # output == 'text'
        green_print("{:<13s} {:<25s} {:<12s} {:<7s} {:<10s} {:60s}".format(
            "ID", "COMPONENT", "STATUS", "SCORE", "AGE", "SUMMARY"))
        for bug in bugs:
            days_ago = bug.created_days_ago()
            cf_pm_score = bug.cf_pm_score if hasattr(bug, "cf_pm_score") else '?'
            click.echo("{:<13s} {:<25s} {:<12s} {:<7s} {:<3d} days {:60s} ".format(
                str(bug.id), bug.component, bug.status, cf_pm_score, days_ago, bug.summary[:60]))

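# Sketch of the three print_report output modes (hedged: `bugs` is any list of
# bug objects as returned by the bug trackers used above):
#
#     print_report(bugs)                   # text table: ID/COMPONENT/STATUS/SCORE/AGE/SUMMARY
#     print_report(bugs, output='slack')   # one "<weburl|id> - component" line per bug
#     print_report(bugs, output='json')    # JSON array of id/component/status/date/summary/url
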
def _detach_builds(advisory, nvrs):
    session = requests.Session()
    click.echo(f"Removing build(s) from advisory {advisory}...")
    for nvr in nvrs:
        errata.detach_build(advisory, nvr, session)
    green_print('Removed build(s) successfully:')
    for nvr in nvrs:
        click.echo(' ' + nvr)

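# Usage sketch for _detach_builds (hedged: the advisory number and NVR are
# hypothetical, reusing the examples from the find-builds docstring below):
#
#     _detach_builds(55017, ['oauth-server-container-v4.3.22-202005212137'])
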
def validate(self, advisories):
    bugs = self._get_attached_filtered_bugs(advisories)
    blocking_bugs_for = self._get_blocking_bugs_for(bugs)
    self._verify_blocking_bugs(blocking_bugs_for)

    if self.problems:
        red_print("Some bug problems were listed above. Please investigate.")
        exit(1)
    green_print("All bugs were verified.")

def _fetch_builds_from_diff(from_payload, to_payload, product_version, session):
    green_print('Fetching changed images between payloads...')
    changed_builds = elliottlib.openshiftclient.get_build_list(from_payload, to_payload)
    return [
        elliottlib.brew.get_brew_build(b, product_version, session=session)
        for b in changed_builds
    ]

def remove_builds(self, to_remove):
    """ Remove list of builds from Advisory

    :param to_remove: List of NVRs to remove
    """
    click.echo(f"Removing build(s) from advisory {self.errata_id}: {' '.join(to_remove)}")
    self.removeBuilds(to_remove)
    green_print('Removed build(s) successfully')

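# Counterpart sketch to the attach_builds example above (hedged: the NVR is
# hypothetical; `advisory` is the same Advisory instance):
#
#     advisory.remove_builds(['megafrobber-1.0.1-2.el7'])
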
def _unstructured_output(bad_runs, rpmdiff_client):
    for run in bad_runs:
        attr = run["attributes"]
        run_id = attr["external_id"]
        run_url = "{}/run/{}/".format(constants.RPMDIFF_WEB_URL, run_id)
        test_results = rpmdiff_client.get_test_results(run_id)
        run_obj = rpmdiff_client.get_run(run_id)
        print("----------------")
        msg = "{0} {1}".format(run["relationships"]["brew_build"]["nvr"], attr["status"])
        if attr["status"] == "NEEDS_INSPECTION":
            util.yellow_print(msg)
        else:
            util.red_print(msg)
        for result in test_results:
            score = result["score"]
            if score >= 0 and score < 3:  # good test result
                continue
            result_id = result["result_id"]
            test = result["test"]
            details = result["details"]
            test_id = test["test_id"]
            package_name = run_obj["package_name"]
            result_url = run_url + str(test_id) + "/"
            result_msg = "* TEST {0} {2} {1} {3}".format(
                result_id, constants.RPMDIFF_SCORE_NAMES[score],
                test["description"], result_url)
            if score == 3:  # NEEDS_INSPECTION
                util.yellow_print(result_msg)
            else:
                util.red_print(result_msg)
            # get last waiver message
            waivers = rpmdiff_client.list_waivers(package_name, test_id, limit=1)
            if waivers:
                util.green_print("  Last waiver: @" + waivers[0]["owner"]["username"]
                                 + ": " + waivers[0]["description"])
            else:
                util.yellow_print("  No last waiver found.")
            for detail in details:
                detail_msg = "  * {1} {0}".format(
                    constants.RPMDIFF_SCORE_NAMES[detail["score"]],
                    detail["subpackage"])
                if detail["score"] == 3:
                    util.yellow_print(detail_msg)
                else:
                    util.red_print(detail_msg)
                content = re.sub('^', '    ', detail["content"], flags=re.MULTILINE)
                print(content)
            print()

def validate(self, bugs, verify_bug_status):
    blocking_bugs_for = self._get_blocking_bugs_for(bugs)
    self._verify_blocking_bugs(blocking_bugs_for)

    if verify_bug_status:
        self._verify_bug_status(bugs)

    if self.problems:
        red_print("Some bug problems were listed above. Please investigate.")
        exit(1)
    green_print("All bugs were verified.")

def _get_attached_bugs(self, advisories):
    # get bugs attached to all advisories
    bugs = set()
    try:
        for advisory in advisories:
            green_print(f"Retrieving bugs for advisory {advisory}")
            bugs.update(errata.get_bug_ids(advisory))
    except GSSError:
        exit_unauthenticated()
    green_print(f"Found {len(bugs)} bugs")
    return list(bzutil.get_bugs(self.bzapi, list(bugs)).values())

def print_report(bugs: type_bug_list) -> None:
    green_print("{:<8s} {:<25s} {:<12s} {:<7s} {:<10s} {:60s}".format(
        "ID", "COMPONENT", "STATUS", "SCORE", "AGE", "SUMMARY"))
    for bug in bugs:
        created_date = datetime.datetime.strptime(str(bug.creation_time), '%Y%m%dT%H:%M:%S')
        days_ago = (datetime.datetime.today() - created_date).days
        click.echo("{:<8d} {:<25s} {:<12s} {:<7s} {:<3d} days {:60s} ".format(
            bug.id, bug.component, bug.status,
            bug.cf_pm_score if hasattr(bug, "cf_pm_score") else '?',
            days_ago, bug.summary[:60]))

def get_sweep_cutoff_timestamp(runtime, cli_brew_event):
    sweep_cutoff_timestamp = 0
    if cli_brew_event:
        green_print(f"Using command line specified cutoff event {cli_brew_event}...")
        sweep_cutoff_timestamp = runtime.build_retrying_koji_client().getEvent(cli_brew_event)["ts"]
    elif runtime.assembly_basis_event:
        green_print(f"Determining approximate cutoff timestamp from basis event "
                    f"{runtime.assembly_basis_event}...")
        brew_api = runtime.build_retrying_koji_client()
        sweep_cutoff_timestamp = bzutil.approximate_cutoff_timestamp(
            runtime.assembly_basis_event, brew_api,
            runtime.rpm_metas() + runtime.image_metas())
    return sweep_cutoff_timestamp

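# Sketch: get_sweep_cutoff_timestamp returns 0 when neither a --brew-event nor
# an assembly basis event is set, so callers can gate on truthiness (hedged:
# the event ID is hypothetical and `runtime` must already be initialized):
#
#     from datetime import datetime
#     ts = get_sweep_cutoff_timestamp(runtime, cli_brew_event=41161132)
#     if ts:
#         print(f"Sweeping bugs that changed status before {datetime.utcfromtimestamp(ts)}")
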
def _get_attached_image_builds(brew_session, advisories):
    # get all attached image builds
    build_nvrs = []
    try:
        for advisory in advisories:
            green_print(f"Retrieving builds from advisory {advisory}")
            advisory = Erratum(errata_id=advisory)
            for build_list in advisory.errata_builds.values():  # one per product version
                build_nvrs.extend(build_list)
    except GSSError:
        exit_unauthenticated()

    green_print(f"Found {len(build_nvrs)} builds")
    return [build for build in brew.get_build_objects(build_nvrs, brew_session) if _is_image(build)]

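# Usage sketch for _get_attached_image_builds (hedged: advisory IDs are
# hypothetical; the brew session is constructed the same way as in the
# callers in this listing):
#
#     import koji
#     brew_session = koji.ClientSession(constants.BREW_HUB)
#     image_builds = _get_attached_image_builds(brew_session, [76788, 76789])
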
async def verify_attached_flaws(self, advisory_bugs: Dict[int, List[Bug]]):
    futures = []
    for advisory_id, attached_bugs in advisory_bugs.items():
        attached_trackers = [b for b in attached_bugs if b.is_tracker_bug()]
        attached_flaws = [b for b in attached_bugs if b.is_flaw_bug()]
        futures.append(self._verify_attached_flaws_for(advisory_id, attached_trackers, attached_flaws))
    await asyncio.gather(*futures)

    if self.problems:
        red_print("Some bug problems were listed above. Please investigate.")
        exit(1)
    green_print("All CVE flaw bugs were verified.")

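# Usage sketch for the verify_attached_flaws coroutine (hedged: `validator`
# stands for whatever object carries this method; the advisory ID and bug
# list are hypothetical):
#
#     import asyncio
#     advisory_bugs = {76788: attached_bugs}  # {advisory_id: [Bug, ...]}
#     asyncio.run(validator.verify_attached_flaws(advisory_bugs))
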
def validate(self, non_flaw_bugs: Iterable[Bug], verify_bug_status: bool):
    non_flaw_bugs = self.filter_bugs_by_release(non_flaw_bugs, complain=True)
    blocking_bugs_for = self._get_blocking_bugs_for(non_flaw_bugs)
    self._verify_blocking_bugs(blocking_bugs_for)

    if verify_bug_status:
        self._verify_bug_status(non_flaw_bugs)

    if self.problems:
        if self.output != 'slack':
            red_print("Some bug problems were listed above. Please investigate.")
        exit(1)
    green_print("All bugs were verified. This check doesn't cover CVE flaw bugs.")

def print_success_message(tarball_sources_list, out_dir):
    relative_paths = [
        os.path.join(os.path.relpath(os.path.dirname(path), out_dir), os.path.basename(path))
        for path in tarball_sources_list
    ]
    relative_paths.sort()
    util.green_print("""
All tarball sources are successfully created.

To send all tarball sources to rcm-guest, run:

    rsync -avz --no-perms --no-owner --no-group {} [email protected]:/mnt/rcm-guest/ocp-client-handoff/

Then notify RCM (https://projects.engineering.redhat.com/projects/RCM/issues)
that the following tarball sources have been uploaded to rcm-guest:

{}
""".format(pipes.quote(os.path.abspath(out_dir) + "/"), "\n".join(relative_paths)))

def _via_build_id(build_id, arch, version, packages, go, logger):
    if not build_id:
        raise Exception('Cannot find build_id')

    arch = util.brew_arch_for_go_arch(arch)
    util.green_print(f'Build: {build_id} Arch: {arch}')
    nvrs = rhcos.get_rpm_nvrs(build_id, version, arch)
    if not nvrs:
        return
    if packages:
        packages = [p.strip() for p in packages.split(',')]
        if 'openshift' in packages:
            packages.remove('openshift')
            packages.append('openshift-hyperkube')
        nvrs = [p for p in nvrs if p[0] in packages]
    if go:
        go_rpm_nvrs = util.get_golang_rpm_nvrs(nvrs, logger)
        util.pretty_print_nvrs_go(go_rpm_nvrs, ignore_na=True)
        return
    for nvr in sorted(nvrs):
        print('-'.join(nvr))

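# Usage sketch for _via_build_id (hedged: the RHCOS build ID is hypothetical;
# note that a requested 'openshift' package is transparently rewritten to
# 'openshift-hyperkube' before filtering the NVR list):
#
#     import logging
#     _via_build_id('43.81.202005200338.0', 'x86_64', '4.3',
#                   packages='openshift,runc', go=False,
#                   logger=logging.getLogger(__name__))
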
async def get_attached_bugs(self, advisory_ids: Iterable[str]) -> Dict[int, Set[Bug]]:
    """ Get bugs attached to specified advisories
    :return: a dict with advisory id as key and set of bug objects as value
    """
    green_print(f"Retrieving bugs for advisories {advisory_ids}")
    if self.use_jira:
        issue_keys = {
            advisory_id: [issue["key"] for issue in errata.get_jira_issue_from_advisory(advisory_id)]
            for advisory_id in advisory_ids
        }
        bug_map = self.bug_tracker.get_bugs_map(
            [key for keys in issue_keys.values() for key in keys])
        result = {
            advisory_id: {bug_map[key] for key in issue_keys[advisory_id]}
            for advisory_id in advisory_ids
        }
    else:
        advisories = await asyncio.gather(
            *[self.errata_api.get_advisory(advisory_id) for advisory_id in advisory_ids])
        bug_map = self.bug_tracker.get_bugs_map(
            list({b["bug"]["id"] for ad in advisories for b in ad["bugs"]["bugs"]}))
        result = {
            ad["content"]["content"]["errata_id"]: {bug_map[b["bug"]["id"]] for b in ad["bugs"]["bugs"]}
            for ad in advisories
        }
    return result

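# Usage sketch for get_attached_bugs (hedged: `checker` stands for the object
# carrying this method; advisory IDs are hypothetical):
#
#     import asyncio
#     bugs_by_advisory = asyncio.run(checker.get_attached_bugs(["76788", "76789"]))
#     for advisory_id, bugs in bugs_by_advisory.items():
#         print(advisory_id, sorted(b.id for b in bugs))
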
def repair_bugs(runtime, advisory, auto, id, original_state, new_state, comment,
                close_placeholder, use_jira, noop, default_advisory_type, bug_tracker):
    changed_bug_count = 0

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if auto:
        click.echo("Fetching Advisory(errata_id={})".format(advisory))
        if use_jira:
            raw_bug_list = [issue["key"] for issue in errata.get_jira_issue_from_advisory(advisory)]
        else:
            e = elliottlib.errata.Advisory(errata_id=advisory)
            raw_bug_list = e.errata_bugs
    else:
        click.echo("Bypassed fetching erratum, using provided BZs")
        raw_bug_list = cli_opts.id_convert(id)

    green_print("Getting bugs for advisory")

    # Fetch bugs in parallel because it can be really slow doing it
    # one-by-one when you have hundreds of bugs
    pbar_header("Fetching data for {} bugs: ".format(len(raw_bug_list)),
                "Hold on a moment, we have to grab each one",
                raw_bug_list)
    pool = ThreadPool(cpu_count())
    click.secho("[", nl=False)

    attached_bugs = pool.map(
        lambda bug: progress_func(lambda: bug_tracker.get_bug(bug), '*'),
        raw_bug_list)

    # Wait for results
    pool.close()
    pool.join()
    click.echo(']')

    green_print("Got bugs for advisory")
    for bug in attached_bugs:
        if close_placeholder and "Placeholder" in bug.summary:
            # if set close placeholder, ignore bug state
            bug_tracker.update_bug_status(bug, "CLOSED")
            changed_bug_count += 1
        else:
            if bug.status in original_state:
                bug_tracker.update_bug_status(bug, new_state)
                # only add comments for non-placeholder bugs
                if comment and not noop:
                    bug_tracker.add_comment(bug, comment, private=False)
                changed_bug_count += 1

    green_print("{} bugs successfully modified (or would have been)".format(changed_bug_count))

def create(ctx, advisories, out_dir, out_layout, components, force):
    """ Create tarball sources for advisories.

    To create tarball sources for Brew component (package) logging-fluentd-container
    that was shipped on advisories 45606, 45527, and 46049:

    $ elliott tarball-sources create --component logging-fluentd-container --out-dir=out/ 45606 45527 46049
    """
    if not force and os.path.isdir(out_dir) and os.listdir(out_dir):
        util.red_print("Output directory {} is not empty.\n\
Use --force to add new tarball sources to an existing directory.".format(os.path.abspath(out_dir)))
        exit(1)
    mkdirs(out_dir)

    working_dir = os.path.join(ctx.obj.working_dir, "tarball-sources")
    LOGGER.debug("Use working directory {}.".format(os.path.abspath(working_dir)))
    mkdirs(working_dir)

    # `nvr_dirs` is a dict with brew build NVRs as keys; values are
    # sets of directories for the generated tarballs,
    # since a build can be attached to multiple advisories.
    # For example:
    # nvr_dirs = {
    #     "logging-fluentd-container-v3.11.141-2": {"RHOSE/RHEL-7-OSE-3.11/45606/release/"},
    #     "logging-fluentd-container-v4.1.14-201908291507": {"RHOSE/RHEL-7-OSE-4.1/45527/release/"},
    #     "logging-fluentd-container-v4.1.15-201909041605": {"RHOSE/RHEL-7-OSE-4.1/46049/release/"},
    # }
    nvr_dirs = {}  # type: Dict[str, Set[str]]

    # Getting build NVRs for specified Koji/Brew components from advisories.
    # NOTE This is SLOW. However doing this in parallel doesn't work
    # due to a race condition in the implementation of `errata_tool.Erratum`'s parent class ErrataConnector.
    for advisory in advisories:
        click.echo("Finding builds from advisory {}...".format(advisory))
        builds = tarball_sources.find_builds_from_advisory(advisory, components)
        if not builds:
            util.yellow_print(
                "No matched builds found from advisory {}. Wrong advisory number?".format(advisory))
            continue
        util.green_print("Found {} matched build(s) from advisory {}".format(len(builds), advisory))
        for nvr, product, product_version in builds:
            util.green_print("\t{}\t{}\t{}".format(nvr, product, product_version))

        for nvr, product, product_version in builds:
            if nvr not in nvr_dirs:
                nvr_dirs[nvr] = set()
            if out_layout == "flat":
                nvr_dirs[nvr].add(out_dir)
            else:
                nvr_dirs[nvr].add(os.path.join(out_dir, product_version, str(advisory), "release"))

    if not nvr_dirs:
        util.red_print("Exiting because no matched builds from all specified advisories.")
        exit(1)

    # Check build infos from Koji/Brew
    # in order to figure out the source Git repo and commit hash for each build.
    click.echo("Fetching build infos for {} from Koji/Brew...".format(", ".join(nvr_dirs.keys())))
    brew_session = koji.ClientSession(constants.BREW_HUB)
    brew_builds = brew.get_build_objects(nvr_dirs.keys(), brew_session)

    # Ready to generate tarballs
    tarball_sources_list = []
    for build_info in brew_builds:
        nvr = build_info["nvr"]
        tarball_filename = nvr + ".tar.gz"
        click.echo("Generating tarball source {} for {}...".format(tarball_filename, nvr))

        with tempfile.NamedTemporaryFile(suffix="-" + tarball_filename, dir=working_dir) as temp_tarball:
            temp_tarball_path = temp_tarball.name
            LOGGER.debug("Temporary tarball file is {}".format(temp_tarball_path))

            tarball_sources.generate_tarball_source(
                temp_tarball, nvr + "/",
                os.path.join(working_dir, "repos", build_info["name"]),
                build_info["source"])
            for dest_dir in nvr_dirs[nvr]:
                mkdirs(dest_dir)
                tarball_abspath = os.path.abspath(os.path.join(dest_dir, tarball_filename))
                if os.path.exists(tarball_abspath):
                    util.yellow_print("File {} will be overwritten.".format(tarball_abspath))

                LOGGER.debug("Copying {} to {}...".format(temp_tarball_path, tarball_abspath))
                shutil.copyfile(temp_tarball_path, tarball_abspath)  # `shutil.copyfile` uses default umask
                tarball_sources_list.append(tarball_abspath)
                util.green_print("Created tarball source {}.".format(tarball_abspath))

    print_success_message(tarball_sources_list, out_dir)

async def verify_payload(ctx, payload, advisory):
    """Cross-check that the builds present in PAYLOAD match the builds
    attached to ADVISORY. The payload is treated as the source of truth.
    If something is absent or different in the advisory, it is treated as
    an error with the advisory.

\b
    PAYLOAD - Full pullspec of the payload to verify
    ADVISORY - Numerical ID of the advisory

    Two checks are made:

\b
    1. Missing in Advisory - No payload components are absent from the given advisory
    2. Payload Advisory Mismatch - The version-release of each payload item matches what is in the advisory

    Results are summarily printed at the end of the run. They are also
    written out to summary_results.json.

    Verify builds in the given payload match the builds attached to
    advisory 41567:

\b
    $ elliott verify-payload quay.io/openshift-release-dev/ocp-release:4.1.0-rc.6 41567
    """
    all_advisory_nvrs = elliottlib.errata.get_advisory_nvrs(advisory)
    click.echo("Found {} builds".format(len(all_advisory_nvrs)))

    all_payload_nvrs = {}
    click.echo("Fetching release info")
    release_export_cmd = 'oc adm release info {} -o json'.format(payload)

    rc, stdout, stderr = exectools.cmd_gather(release_export_cmd)
    if rc != 0:
        # Probably no point in continuing.. can't contact brew?
        print("Unable to run oc release info: out={} ; err={}".format(stdout, stderr))
        exit(1)
    else:
        click.echo("Got release info")

    payload_json = json.loads(stdout)

    green_prefix("Looping over payload images: ")
    click.echo("{} images to check".format(len(payload_json['references']['spec']['tags'])))
    cmds = [['oc', 'image', 'info', '-o', 'json', tag['from']['name']]
            for tag in payload_json['references']['spec']['tags']]

    green_prefix("Querying image infos...")
    cmd_results = await asyncio.gather(*[exectools.cmd_gather_async(cmd) for cmd in cmds])

    for image, cmd, cmd_result in zip(payload_json['references']['spec']['tags'], cmds, cmd_results):
        click.echo("----")
        image_name = image['name']
        rc, stdout, stderr = cmd_result
        if rc != 0:
            # Probably no point in continuing.. can't contact brew?
            red_prefix("Unable to run oc image info: ")
            red_print(f"cmd={cmd!r}, out={stdout} ; err={stderr}")
            exit(1)

        image_info = json.loads(stdout)
        labels = image_info['config']['config']['Labels']

        # The machine-os-content image doesn't follow the standard
        # pattern. We need to skip that image when we find it, it is
        # not attached to advisories.
        if 'com.coreos.ostree-commit' in labels:
            yellow_prefix("Skipping machine-os-content image: ")
            click.echo("Not required for checks")
            continue

        if labels:
            component = labels['com.redhat.component']
            click.echo("Payload name: {}".format(image_name))
            click.echo("Brew name: {}".format(component))
            v = labels['version']
            r = labels['release']
            all_payload_nvrs[component] = "{}-{}".format(v, r)
        else:
            print("Labels do not exist for image {}, image_info: {}".format(image_name, image_info))

    missing_in_errata = {}
    payload_doesnt_match_errata = {}
    in_pending_advisory = []
    in_shipped_advisory = []
    output = {
        'missing_in_advisory': missing_in_errata,
        'payload_advisory_mismatch': payload_doesnt_match_errata,
        "in_pending_advisory": in_pending_advisory,
        "in_shipped_advisory": in_shipped_advisory,
    }

    green_prefix("Analyzing data: ")
    click.echo("{} images to consider from payload".format(len(all_payload_nvrs)))

    for image, vr in all_payload_nvrs.items():
        yellow_prefix("Cross-checking from payload: ")
        click.echo("{}-{}".format(image, vr))
        if image not in all_advisory_nvrs:
            missing_in_errata[image] = "{}-{}".format(image, vr)
            click.echo("{} in payload not found in advisory".format("{}-{}".format(image, vr)))
        elif image in all_advisory_nvrs and vr != all_advisory_nvrs[image]:
            click.echo("{} from payload has version {} which does not match {} from advisory".format(
                image, vr, all_advisory_nvrs[image]))
            payload_doesnt_match_errata[image] = {
                'payload': vr,
                'errata': all_advisory_nvrs[image]
            }

    if missing_in_errata:  # check if missing images are already shipped or pending to ship
        advisory_nvrs: Dict[int, List[str]] = {}  # a dict mapping advisory numbers to lists of NVRs
        green_print(f"Checking if {len(missing_in_errata)} missing images are shipped...")
        for nvr in missing_in_errata.copy().values():
            # get the list of advisories that this build has been attached to
            build = elliottlib.errata.get_brew_build(nvr)
            # filter out dropped advisories
            advisories = [ad for ad in build.all_errata if ad["status"] != "DROPPED_NO_SHIP"]
            if not advisories:
                red_print(f"Build {nvr} is not attached to any advisories.")
                continue
            for advisory in advisories:
                if advisory["status"] == "SHIPPED_LIVE":
                    green_print(f"Missing build {nvr} has been shipped with advisory {advisory}.")
                else:
                    yellow_print(f"Missing build {nvr} is in another pending advisory.")
                advisory_nvrs.setdefault(advisory["id"], []).append(nvr)
            name = nvr.rsplit("-", 2)[0]
            del missing_in_errata[name]

        if advisory_nvrs:
            click.echo(f"Getting information of {len(advisory_nvrs)} advisories...")
            for advisory, nvrs in advisory_nvrs.items():
                advisory_obj = elliottlib.errata.get_raw_erratum(advisory)
                adv_type, adv_info = next(iter(advisory_obj["errata"].items()))
                item = {
                    "id": advisory,
                    "type": adv_type.upper(),
                    "url": elliottlib.constants.errata_url + f"/{advisory}",
                    "summary": adv_info["synopsis"],
                    "state": adv_info["status"],
                    "nvrs": nvrs,
                }
                if adv_info["status"] == "SHIPPED_LIVE":
                    in_shipped_advisory.append(item)
                else:
                    in_pending_advisory.append(item)

    green_print("Summary results:")
    click.echo(json.dumps(output, indent=4))
    with open('summary_results.json', 'w') as fp:
        json.dump(output, fp, indent=4)
    green_prefix("Wrote out summary results: ")
    click.echo("summary_results.json")

def find_bugs_sweep(runtime: Runtime, advisory_id, default_advisory_type, check_builds,
                    major_version, find_bugs_obj, report, output, brew_event, noop,
                    count_advisory_attach_flags, bug_tracker):
    if output == 'text':
        statuses = sorted(find_bugs_obj.status)
        tr = bug_tracker.target_release()
        green_prefix(f"Searching {bug_tracker.type} for bugs with status {statuses} "
                     f"and target releases: {tr}\n")

    bugs = find_bugs_obj.search(bug_tracker_obj=bug_tracker, verbose=runtime.debug)

    sweep_cutoff_timestamp = get_sweep_cutoff_timestamp(runtime, cli_brew_event=brew_event)
    if sweep_cutoff_timestamp:
        utc_ts = datetime.utcfromtimestamp(sweep_cutoff_timestamp)
        green_print(f"Filtering bugs that have changed ({len(bugs)}) to one of the desired statuses "
                    f"before the cutoff time {utc_ts}...")
        qualified_bugs = []
        for chunk_of_bugs in chunk(bugs, constants.BUG_LOOKUP_CHUNK_SIZE):
            b = bug_tracker.filter_bugs_by_cutoff_event(chunk_of_bugs, find_bugs_obj.status,
                                                        sweep_cutoff_timestamp)
            qualified_bugs.extend(b)
        click.echo(f"{len(qualified_bugs)} of {len(bugs)} bugs are qualified for the cutoff time "
                   f"{utc_ts}...")
        bugs = qualified_bugs

    included_bug_ids, excluded_bug_ids = get_assembly_bug_ids(runtime)
    if included_bug_ids & excluded_bug_ids:
        raise ValueError("The following bugs are defined in both 'include' and 'exclude': "
                         f"{included_bug_ids & excluded_bug_ids}")
    if included_bug_ids:
        yellow_print("The following bugs will be additionally included because they are "
                     f"explicitly defined in the assembly config: {included_bug_ids}")
        included_bugs = bug_tracker.get_bugs(included_bug_ids)
        bugs.extend(included_bugs)
    if excluded_bug_ids:
        yellow_print("The following bugs will be excluded because they are explicitly "
                     f"defined in the assembly config: {excluded_bug_ids}")
        bugs = [bug for bug in bugs if bug.id not in excluded_bug_ids]

    if output == 'text':
        green_prefix(f"Found {len(bugs)} bugs: ")
        click.echo(", ".join(sorted(str(b.id) for b in bugs)))

    if report:
        print_report(bugs, output)

    if count_advisory_attach_flags < 1:
        return

    # `--add ADVISORY_NUMBER` should respect the user's wish
    # and attach all available bugs to whatever advisory is specified.
    if advisory_id and not default_advisory_type:
        bug_tracker.attach_bugs(advisory_id, [b.id for b in bugs], noop=noop)
        return

    rpm_advisory_id = common.find_default_advisory(runtime, 'rpm') if check_builds else None
    bugs_by_type = categorize_bugs_by_type(bugs, rpm_advisory_id=rpm_advisory_id,
                                           major_version=major_version,
                                           check_builds=check_builds)
    advisory_types_to_attach = [default_advisory_type] if default_advisory_type else bugs_by_type.keys()
    for advisory_type in sorted(advisory_types_to_attach):
        bugs = bugs_by_type.get(advisory_type)
        green_prefix(f'{advisory_type} advisory: ')
        if bugs:
            adv_id = common.find_default_advisory(runtime, advisory_type)
            bug_tracker.attach_bugs(adv_id, [b.id for b in bugs], noop=noop)
        else:
            click.echo("0 bugs found")

def find_bugs_cli(runtime, advisory, default_advisory_type, mode, status, id, cve_trackers,
                  from_diff, flag, report, into_default_advisories, noop):
    """Find Red Hat Bugzilla bugs or add them to ADVISORY. Bugs can be
    "swept" into the advisory either automatically (--mode sweep), or by
    manually specifying one or more bugs using --mode list and the --id
    option. Use cases are described below:

    Note: Using --id without --add is basically pointless

    SWEEP: For this use-case the --group option MUST be provided. The
    --group automatically determines the correct target-releases to search
    for bugs claimed to be fixed, but not yet attached to advisories.

    LIST: The --group option is not required if you are specifying bugs
    manually. Provide one or more --id's for manual bug addition. In LIST
    mode you must provide a list of IDs to attach with the --id option.

    DIFF: For this use case, you must provide the --between option using
    two URLs to payloads.

    QE: Find MODIFIED bugs for the target-releases, and set them to ON_QA.
    The --group option MUST be provided. Cannot be used in combination
    with --add, --use-default-advisory, or --into-default-advisories.

    Using --use-default-advisory without a value set for the matching key
    in the build-data will cause an error and elliott will exit in a
    non-zero state. Use of this option silently overrides providing an
    advisory with the --add option.

    Automatically add bugs with target-release matching 3.7.Z or 3.7.0
    to advisory 123456:

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --add 123456

    List bugs that WOULD be added to an advisory and set the bro_ok flag
    on them (NOOP):

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --flag bro_ok

    Attach bugs to their correct default advisories, e.g. operator-related
    bugs go to "extras" instead of the default "image":

\b
    $ elliott --group=openshift-4.4 find-bugs --mode=sweep --into-default-advisories

    Add two bugs to advisory 123456. Note that --group is not required
    because we're not auto searching:

\b
    $ elliott find-bugs --mode list --id 8675309 --id 7001337 --add 123456

    Automatically find bugs for openshift-4.1 and attach them to the rpm
    advisory defined in ocp-build-data:

\b
    $ elliott --group=openshift-4.1 --mode sweep --use-default-advisory rpm

    Find bugs for 4.6 that are in MODIFIED state, and set them to ON_QA:

\b
    $ elliott --group=openshift-4.6 --mode qe
    """
    if mode != 'list' and len(id) > 0:
        raise click.BadParameter(
            "Combining the automatic and manual bug attachment options is not supported")

    if mode == 'list' and len(id) == 0:
        raise click.BadParameter("When using mode=list, you must provide a list of bug IDs")

    if mode == 'payload' and not len(from_diff) == 2:
        raise click.BadParameter("If using mode=payload, you must provide two payloads to compare")

    if sum(map(bool, [advisory, default_advisory_type, into_default_advisories])) > 1:
        raise click.BadParameter(
            "Use only one of --use-default-advisory, --add, or --into-default-advisories")

    if mode == 'qe' and sum(map(bool, [advisory, default_advisory_type, into_default_advisories])) > 0:
        raise click.BadParameter(
            "--mode=qe does not operate on an advisory. Do not specify any of "
            "`--use-default-advisory`, `--add`, or `--into-default-advisories`")

    runtime.initialize()
    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    bzapi = bzutil.get_bzapi(bz_data)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if mode == 'sweep' or mode == 'qe':
        if mode == 'qe':
            status = ['MODIFIED']
        green_prefix(f"Searching for bugs with status {' '.join(status)} and target release(s):")
        click.echo(" {tr}".format(tr=", ".join(bz_data['target_release'])))
        bugs = bzutil.search_for_bugs(bz_data, status,
                                      filter_out_security_bugs=not (cve_trackers),
                                      verbose=runtime.debug)
    elif mode == 'list':
        bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]
    elif mode == 'diff':
        click.echo(runtime.working_dir)
        bug_id_strings = openshiftclient.get_bug_list(runtime.working_dir, from_diff[0], from_diff[1])
        bugs = [bzapi.getbug(i) for i in bug_id_strings]

    # Some bugs should go to CPaaS, so we should ignore them
    m = re.match(r"rhaos-(\d+).(\d+)", runtime.branch)  # extract OpenShift version from the branch name. there should be a better way...
    if not m:
        raise ElliottFatalError(f"Unable to determine OpenShift version from branch name {runtime.branch}.")
    major_version = int(m[1])
    minor_version = int(m[2])

    def _filter_bugs(bugs):  # returns a list of bugs that should be processed
        r = []
        ignored_repos = set()  # GitHub repos that should be ignored
        if major_version == 4 and minor_version == 5:
            # per https://issues.redhat.com/browse/ART-997: these repos should have their release-4.5 branches ignored by ART:
            ignored_repos = {
                "https://github.com/openshift/aws-ebs-csi-driver",
                "https://github.com/openshift/aws-ebs-csi-driver-operator",
                "https://github.com/openshift/cloud-provider-openstack",
                "https://github.com/openshift/csi-driver-nfs",
                "https://github.com/openshift/csi-driver-manila-operator"
            }
        for bug in bugs:
            # https://github.com/python-bugzilla/python-bugzilla/blob/7aa70edcfea9b524cd8ac51a891b6395ca40dc87/bugzilla/_cli.py#L750
            external_links = [ext["type"]["full_url"].replace("%id%", ext["ext_bz_bug_id"])
                              for ext in bug.external_bugs]
            # translate openshift-priv org to openshift org when comparing to filter (i.e. prow may link to a PR on the private org).
            public_links = [runtime.get_public_upstream(url)[0] for url in external_links]
            # if a bug has 1 or more public_links, we should ignore the bug if ALL of the public_links are ANY of `ignored_repos`
            if public_links and all(
                    map(lambda url: any(
                        map(lambda repo: url != repo and url.startswith(repo), ignored_repos)),
                        public_links)):
                continue
            r.append(bug)
        return r

    if len(id) == 0:
        # unless --id is given, we should ignore bugs that don't belong to ART;
        # e.g. some bugs should go to CPaaS
        filtered_bugs = _filter_bugs(bugs)
        green_prefix(f"Found {len(filtered_bugs)} bugs ({len(bugs) - len(filtered_bugs)} ignored):")
        bugs = filtered_bugs
    else:
        green_prefix("Found {} bugs:".format(len(bugs)))
    click.echo(" {}".format(", ".join([str(b.bug_id) for b in bugs])))

    if mode == 'qe':
        for bug in bugs:
            bzutil.set_state(bug, 'ON_QA', noop=noop)

    if len(flag) > 0:
        for bug in bugs:
            for f in flag:
                if noop:
                    click.echo(f'Would have updated bug {bug.id} by setting flag {f}')
                    continue
                bug.updateflags({f: "+"})

    if report:
        green_print("{:<8s} {:<25s} {:<12s} {:<7s} {:<10s} {:60s}".format(
            "ID", "COMPONENT", "STATUS", "SCORE", "AGE", "SUMMARY"))
        for bug in bugs:
            created_date = datetime.datetime.strptime(str(bug.creation_time), '%Y%m%dT%H:%M:%S')
            days_ago = (datetime.datetime.today() - created_date).days
            click.echo("{:<8d} {:<25s} {:<12s} {:<7s} {:<3d} days {:60s} ".format(
                bug.id, bug.component, bug.status,
                bug.cf_pm_score if hasattr(bug, "cf_pm_score") else '?',
                days_ago, bug.summary[:60]))

    if advisory and not default_advisory_type:
        # `--add ADVISORY_NUMBER` should respect the user's wish and attach
        # all available bugs to whatever advisory is specified.
        errata.add_bugs_with_retry(advisory, bugs, noop=noop)
        return

    # If --use-default-advisory or --into-default-advisories is given, we need to
    # determine which bugs should be swept into which advisory.
    # Otherwise we don't need to sweep bugs at all.
    if not (into_default_advisories or default_advisory_type):
        return

    impetus_bugs = {}  # key is impetus ("rpm", "image", "extras"), value is a set of bug IDs.
    # @lmeyer: simple and stupid would still be keeping the logic in python, possibly with config flags for branched logic. until that logic becomes too ugly to keep in python, i suppose..
    if major_version < 4:
        # for 3.x, all bugs should go to the rpm advisory
        impetus_bugs["rpm"] = set(bugs)
    else:  # for 4.x
        # optional operators bugs should be swept to the "extras" advisory,
        # while other bugs should be swept to the "image" advisory.
        # a way to identify operator-related bugs is by their "Component" value.
        # temporarily hardcode here until we need to move it to ocp-build-data.
        extra_components = {"Logging", "Service Brokers", "Metering Operator",
                            "Node Feature Discovery Operator"}  # we will probably find more
        impetus_bugs["extras"] = {b for b in bugs if b.component in extra_components}
        impetus_bugs["image"] = {b for b in bugs if b.component not in extra_components}

    if default_advisory_type and impetus_bugs.get(default_advisory_type):
        errata.add_bugs_with_retry(advisory, impetus_bugs[default_advisory_type], noop=noop)
    elif into_default_advisories:
        for impetus, bugs in impetus_bugs.items():
            if bugs:
                errata.add_bugs_with_retry(runtime.group_config.advisories[impetus], bugs, noop=noop)

def find_builds_cli(runtime, advisory, default_advisory_type, builds, kind, from_diff, as_json):
    '''Automatically or manually find or attach viable rpm or image builds
    to ADVISORY. Default behavior searches Brew for viable builds in the
    given group. Provide builds manually by giving one or more --build
    (-b) options. Manually provided builds are verified against the Errata
    Tool API.

\b
  * Attach the builds to ADVISORY by giving --attach
  * Specify the build type using --kind KIND

    Example: Assuming --group=openshift-3.7, then a build is a VIABLE
    BUILD IFF it meets ALL of the following criteria:

\b
  * HAS the tag in brew: rhaos-3.7-rhel7-candidate
  * DOES NOT have the tag in brew: rhaos-3.7-rhel7
  * IS NOT attached to ANY existing RHBA, RHSA, or RHEA

    That is to say, a viable build is tagged as a "candidate", has NOT
    received the "shipped" tag yet, and is NOT attached to any PAST or
    PRESENT advisory. Here are some examples:

    SHOW the latest OSE 3.6 image builds that would be attached to a
    3.6 advisory:

    $ elliott --group openshift-3.6 find-builds -k image

    ATTACH the latest OSE 3.6 rpm builds to advisory 123456:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm --attach 123456

    VERIFY (no --attach) that the manually provided RPM NVR and build ID
    are viable builds:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm -b megafrobber-1.0.1-2.el7 -b 93170
    '''
    if from_diff and builds:
        raise ElliottFatalError('Use only one of --build or --from-diff.')
    if advisory and default_advisory_type:
        raise click.BadParameter('Use only one of --use-default-advisory or --attach')

    runtime.initialize()
    base_tag, product_version = _determine_errata_info(runtime)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    ensure_erratatool_auth()  # before we waste time looking up builds we can't process

    # get the builds we want to add
    unshipped_builds = []
    session = requests.Session()
    if builds:
        unshipped_builds = _fetch_builds_by_id(builds, product_version, session)
    elif from_diff:
        unshipped_builds = _fetch_builds_from_diff(from_diff[0], from_diff[1], product_version, session)
    else:
        if kind == 'image':
            unshipped_builds = _fetch_builds_by_kind_image(runtime, product_version, session)
        elif kind == 'rpm':
            unshipped_builds = _fetch_builds_by_kind_rpm(builds, base_tag, product_version, session)

    _json_dump(as_json, unshipped_builds, base_tag, kind)

    if not unshipped_builds:
        green_print('No builds needed to be attached.')
        return

    if advisory is not False:
        _attach_to_advisory(unshipped_builds, kind, advisory)
    else:
        click.echo('The following {n} builds '.format(n=len(unshipped_builds)), nl=False)
        click.secho('may be attached ', bold=True, nl=False)
        click.echo('to an advisory:')
        for b in sorted(unshipped_builds):
            click.echo(' ' + b.nvr)

def show(ctx, advisory):
    """ Show RPMDiff failures for an advisory. """
    runtime = ctx.obj  # type: Runtime
    if not advisory:
        runtime.initialize()
        advisory = runtime.group_config.advisories.get("rpm", 0)
        if not advisory:
            raise ElliottFatalError("No RPM advisory number configured in ocp-build-data.")
    else:
        runtime.initialize(no_group=True)

    logger = runtime.logger
    logger.info("Fetching RPMDiff runs from Errata Tool for advisory {}...".format(advisory))
    rpmdiff_runs = list(errata.get_rpmdiff_runs(advisory))
    logger.info("Found {} RPMDiff runs.".format(len(rpmdiff_runs)))

    # "good" means PASSED, INFO, or WAIVED
    good_runs = []
    # "bad" means NEEDS_INSPECTION or FAILED
    bad_runs = []
    incomplete_runs = []
    for rpmdiff_run in rpmdiff_runs:
        attr = rpmdiff_run['attributes']
        if attr["status"] in constants.ET_GOOD_EXTERNAL_TEST_STATUSES:
            good_runs.append(rpmdiff_run)
        elif attr["status"] in constants.ET_BAD_EXTERNAL_TEST_STATUSES:
            bad_runs.append(rpmdiff_run)
        else:
            incomplete_runs.append(rpmdiff_run)

    util.green_prefix("good: {}".format(len(good_runs)))
    click.echo(", ", nl=False)
    util.red_prefix("bad: {}".format(len(bad_runs)))
    click.echo(", ", nl=False)
    util.yellow_print("incomplete: {}".format(len(incomplete_runs)))

    if not bad_runs:
        return

    logger.info("Fetching detailed information from RPMDiff for bad RPMDiff runs...")
    rpmdiff_client = RPMDiffClient(constants.RPMDIFF_HUB_URL)
    rpmdiff_client.authenticate()
    for run in bad_runs:
        attr = run["attributes"]
        run_id = attr["external_id"]
        run_url = "{}/run/{}/".format(constants.RPMDIFF_WEB_URL, run_id)
        print("----------------")
        msg = "{0} {1}".format(run["relationships"]["brew_build"]["nvr"], attr["status"])
        if attr["status"] == "NEEDS_INSPECTION":
            util.yellow_print(msg)
        else:
            util.red_print(msg)
        test_results = rpmdiff_client.get_test_results(run_id)
        run_obj = rpmdiff_client.get_run(run_id)
        for result in test_results:
            score = result["score"]
            if score >= 0 and score < 3:  # good test result
                continue
            result_id = result["result_id"]
            test = result["test"]
            details = result["details"]
            test_id = test["test_id"]
            package_name = run_obj["package_name"]
            result_url = run_url + str(test_id) + "/"
            result_msg = "* TEST {0} {2} {1} {3}".format(
                result_id, constants.RPMDIFF_SCORE_NAMES[score],
                test["description"], result_url)
            if score == 3:  # NEEDS_INSPECTION
                util.yellow_print(result_msg)
            else:
                util.red_print(result_msg)
            # get last waiver message
            waivers = rpmdiff_client.list_waivers(package_name, test_id, limit=1)
            if waivers:
                util.green_print("  Last waiver: @" + waivers[0]["owner"]["username"]
                                 + ": " + waivers[0]["description"])
            else:
                util.yellow_print("  No last waiver found.")
            for detail in details:
                detail_msg = "  * {1} {0}".format(
                    constants.RPMDIFF_SCORE_NAMES[detail["score"]],
                    detail["subpackage"])
                if detail["score"] == 3:
                    util.yellow_print(detail_msg)
                else:
                    util.red_print(detail_msg)
                content = re.sub('^', '    ', detail["content"], flags=re.MULTILINE)
                print(content)
            print()

def change_state_cli(runtime, state, advisory, default_advisory_type, noop):
    """Change the state of an ADVISORY. Additional permissions may be
    required to change an advisory to certain states.

    An advisory may not move between some states until all criteria have
    been met. For example, an advisory can not move from NEW_FILES to QE
    unless Bugzilla Bugs or JIRA Issues have been attached.

    NOTE: The two advisory options are mutually exclusive and can not be
    used together.

    See the find-bugs help for additional information on adding
    Bugzilla Bugs.

    Move the advisory 123456 from NEW_FILES to QE state:

    $ elliott change-state --state QE --advisory 123456

    Move the advisory 123456 back to NEW_FILES (short option flag):

    $ elliott change-state -s NEW_FILES -a 123456

    Do not actually change the state, just check that the command could
    have run (for example, when testing out pipelines):

    $ elliott change-state -s NEW_FILES -a 123456 --noop
    """
    if not (bool(advisory) ^ bool(default_advisory_type)):
        raise click.BadParameter("Use only one of --use-default-advisory or --advisory")

    runtime.initialize(no_group=default_advisory_type is None)

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if noop:
        prefix = "[NOOP] "
    else:
        prefix = ""

    try:
        e = Erratum(errata_id=advisory)

        if e.errata_state == state:
            green_prefix("{}No change to make: ".format(prefix))
            click.echo("Target state is same as current state")
            return
        # There are 5 different states, but we can only change the state
        # if the advisory is in NEW_FILES or QE:
        # "NEW_FILES",
        # "QE",
        # "REL_PREP",
        # "PUSH_READY",
        # "IN_PUSH"
        if e.errata_state != 'NEW_FILES' and e.errata_state != 'QE':
            if default_advisory_type is not None:
                raise ElliottFatalError(
                    "Error: Could not change '{state}' advisory {advs}, "
                    "group.yml is probably pointing at old one".format(
                        state=e.errata_state, advs=advisory))
            else:
                raise ElliottFatalError(
                    "Error: we can only change the state if it's in NEW_FILES or QE, "
                    "current state is {s}".format(s=e.errata_state))
        else:
            if noop:
                green_prefix("{}Would have changed state: ".format(prefix))
                click.echo("{} ➔ {}".format(e.errata_state, state))
                return
            else:
                # Capture current state because `e.commit()` will
                # refresh the `e.errata_state` attribute
                old_state = e.errata_state
                e.setState(state)
                e.commit()
                green_prefix("Changed state: ")
                click.echo("{old_state} ➔ {new_state}".format(old_state=old_state, new_state=state))
    except ErrataException as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))

    green_print("Successfully changed advisory state")

def find_builds_cli(runtime, advisory, default_advisory_type, builds, kind, from_diff, as_json,
                    allow_attached, remove, clean, no_cdn_repos, payload, non_payload, brew_event):
    '''Automatically or manually find or attach/remove viable rpm or image
    builds to ADVISORY. Default behavior searches Brew for viable builds
    in the given group. Provide builds manually by giving one or more
    --build (-b) options. Manually provided builds are verified against
    the Errata Tool API.

\b
  * Attach the builds to ADVISORY by giving --attach
  * Remove the builds from ADVISORY by giving --remove
  * Specify the build type using --kind KIND

    Example: Assuming --group=openshift-3.7, then a build is a VIABLE
    BUILD IFF it meets ALL of the following criteria:

\b
  * HAS the tag in brew: rhaos-3.7-rhel7-candidate
  * DOES NOT have the tag in brew: rhaos-3.7-rhel7
  * IS NOT attached to ANY existing RHBA, RHSA, or RHEA

    That is to say, a viable build is tagged as a "candidate", has NOT
    received the "shipped" tag yet, and is NOT attached to any PAST or
    PRESENT advisory. Here are some examples:

    SHOW the latest OSE 3.6 image builds that would be attached to a
    3.6 advisory:

    $ elliott --group openshift-3.6 find-builds -k image

    ATTACH the latest OSE 3.6 rpm builds to advisory 123456:

\b
    $ elliott --group openshift-3.6 find-builds -k rpm --attach 123456

    VERIFY (no --attach) that the manually provided RPM NVR and build ID
    are viable builds:

    $ elliott --group openshift-3.6 find-builds -k rpm -b megafrobber-1.0.1-2.el7 -b 93170

\b
    REMOVE a specific RPM NVR and build ID from an advisory:

    $ elliott --group openshift-4.3 find-builds -k image -b oauth-server-container-v4.3.22-202005212137 -a 55017 --remove
    '''
    if from_diff and builds:
        raise click.BadParameter('Use only one of --build or --from-diff/--between.')
    if clean and (remove or from_diff or builds):
        raise click.BadParameter('Option --clean cannot be used with --build or --from-diff/--between.')
    if not builds and remove:
        raise click.BadParameter('Option --remove only supports removing specific builds with -b.')
    if from_diff and kind != "image":
        raise click.BadParameter('Option --from-diff/--between should be used with --kind/-k image.')
    if advisory and default_advisory_type:
        raise click.BadParameter('Use only one of --use-default-advisory or --attach')
    if payload and non_payload:
        raise click.BadParameter('Use only one of --payload or --non-payload.')

    runtime.initialize(mode='images' if kind == 'image' else 'none')
    replace_vars = runtime.group_config.vars.primitive() if runtime.group_config.vars else {}
    et_data = runtime.gitdata.load_data(key='erratatool', replace_vars=replace_vars).data
    tag_pv_map = et_data.get('brew_tag_product_version_mapping')

    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    ensure_erratatool_auth()  # before we waste time looking up builds we can't process

    # get the builds we want to add
    unshipped_nvrps = []
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub or constants.BREW_HUB)
    if builds:
        green_prefix('Fetching builds...')
        unshipped_nvrps = _fetch_nvrps_by_nvr_or_id(builds, tag_pv_map, ignore_product_version=remove)
    elif clean:
        unshipped_builds = errata.get_brew_builds(advisory)
    elif from_diff:
        unshipped_nvrps = _fetch_builds_from_diff(from_diff[0], from_diff[1], tag_pv_map)
    else:
        if kind == 'image':
            unshipped_nvrps = _fetch_builds_by_kind_image(runtime, tag_pv_map, brew_event,
                                                          brew_session, payload, non_payload)
        elif kind == 'rpm':
            unshipped_nvrps = _fetch_builds_by_kind_rpm(runtime, tag_pv_map, brew_event, brew_session)

    pbar_header('Fetching builds from Errata: ',
                'Hold on a moment, fetching buildinfos from Errata Tool...',
                unshipped_builds if clean else unshipped_nvrps)

    if not clean and not remove:
        # With --clean, builds are batch-fetched from the Erratum above, so there is no
        # need to fetch them individually. Otherwise, fetch individually using nvrp tuples,
        # then get specific elliottlib.brew.Build objects via get_brew_build(), e.g.:
        # ('atomic-openshift-descheduler-container', 'v4.3.23', '202005250821', 'RHEL-7-OSE-4.3')
        # -> Build(atomic-openshift-descheduler-container-v4.3.23-202005250821)
        unshipped_builds = parallel_results_with_progress(
            unshipped_nvrps,
            lambda nvrp: elliottlib.errata.get_brew_build(
                '{}-{}-{}'.format(nvrp[0], nvrp[1], nvrp[2]), nvrp[3], session=requests.Session()))
        if not (allow_attached or builds):
            unshipped_builds = _filter_out_inviable_builds(kind, unshipped_builds, elliottlib.errata)

        _json_dump(as_json, unshipped_builds, kind, tag_pv_map)

        if not unshipped_builds:
            green_print('No builds needed to be attached.')
            return

    if advisory:
        if remove:
            _detach_builds(advisory,
                           [f"{nvrp[0]}-{nvrp[1]}-{nvrp[2]}" for nvrp in unshipped_nvrps])
        elif clean:
            _detach_builds(advisory, [b.nvr for b in unshipped_builds])
        else:  # attach
            erratum = _update_to_advisory(unshipped_builds, kind, advisory, remove, clean)
            if not no_cdn_repos and kind == "image" and not (remove or clean):
                cdn_repos = et_data.get('cdn_repos')
                if cdn_repos:
                    # set up CDN repos
                    click.echo(f"Configuring CDN repos {', '.join(cdn_repos)}...")
                    erratum.metadataCdnRepos(enable=cdn_repos)
                    click.echo("Done")
    else:
        click.echo('The following {n} builds '.format(n=len(unshipped_builds)), nl=False)
        if not (remove or clean):
            click.secho('may be attached', bold=True, nl=False)
            click.echo(' to an advisory:')
        else:
            click.secho('may be removed', bold=True, nl=False)
            click.echo(' from an advisory:')
        for b in sorted(unshipped_builds):
            click.echo(' ' + b.nvr)

def _fetch_builds_from_diff(from_payload, to_payload, tag_pv_map):
    green_print('Fetching changed images between payloads...')
    nvrs = elliottlib.openshiftclient.get_build_list(from_payload, to_payload)
    return _fetch_nvrps_by_nvr_or_id(nvrs, tag_pv_map)

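# Usage sketch for _fetch_builds_from_diff (hedged: the payload pullspecs are
# hypothetical; tag_pv_map comes from the erratatool config's
# brew_tag_product_version_mapping, as loaded in find_builds_cli above):
#
#     nvrps = _fetch_builds_from_diff(
#         'quay.io/openshift-release-dev/ocp-release:4.1.0-rc.5',
#         'quay.io/openshift-release-dev/ocp-release:4.1.0-rc.6',
#         tag_pv_map)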