def get_tracker_bugs(bzapi, bugs):
    """Returns a list of tracking bugs from a list of bug ids.

    For a definition of these terms see
    https://docs.engineering.redhat.com/display/PRODSEC/%5BDRAFT%5D+Security+bug+types

    :param bzapi: An instance of the python-bugzilla Bugzilla class
    :param bugs: The IDs of the bugs you want to create an Erratum for. These
        can be security tracking bugs or non-security tracking bugs. This
        method will determine if they are security tracking bugs or not
    :returns: A list of tracking bugs
    :raises: BugzillaFatalError: If bugs contains invalid bug ids, or if some
        other error occurs trying to use the Bugzilla XMLRPC api. Could be
        because you are not logged in to Bugzilla or the login session has
        expired.
    """
    # Return an empty list (not None) so callers can iterate the result
    # unconditionally.
    if not bugs:
        return []
    bugs = bzapi.getbugs(bugs)
    tracker_bugs = []
    for t in bugs:
        # getbugs() yields None entries for ids that could not be found
        if t is None:
            raise exceptions.BugzillaFatalError(
                "Couldn't find bug with list of ids provided")
        # A security tracker must carry BOTH keywords; anything else is
        # reported but not treated as a tracker.
        if "SecurityTracking" not in t.keywords or "Security" not in t.keywords:
            util.yellow_print("Non-SecurityTracking bug to be added: %s" % t.id)
        else:
            tracker_bugs.append(t)
    return tracker_bugs
def show(ctx, advisory, yaml, json):
    """ Show RPMDiff failures for an advisory. """
    runtime = ctx.obj  # type: Runtime
    if advisory:
        runtime.initialize(no_group=True)
    else:
        # No advisory given on the command line: fall back to the group's
        # configured rpm advisory.
        runtime.initialize()
        advisory = runtime.group_config.advisories.get("rpm", 0)
        if not advisory:
            raise ElliottFatalError(
                "No RPM advisory number configured in ocp-build-data.")
    logger = runtime.logger
    logger.info(
        "Fetching RPMDiff runs from Errata Tool for advisory {}...".format(
            advisory))
    rpmdiff_runs = list(errata.get_rpmdiff_runs(advisory))
    logger.info("Found {} RPMDiff runs.".format(len(rpmdiff_runs)))

    # Bucket every run: "good" (PASSED, INFO, or WAIVED), "bad"
    # (NEEDS_INSPECTION or FAILED), or incomplete (anything else).
    good_runs, bad_runs, incomplete_runs = [], [], []
    for run in rpmdiff_runs:
        status = run['attributes']["status"]
        if status in constants.ET_GOOD_EXTERNAL_TEST_STATUSES:
            good_runs.append(run)
        elif status in constants.ET_BAD_EXTERNAL_TEST_STATUSES:
            bad_runs.append(run)
        else:
            incomplete_runs.append(run)

    util.green_prefix("good: {}".format(len(good_runs)))
    click.echo(", ", nl=False)
    util.red_prefix("bad: {}".format(len(bad_runs)))
    click.echo(", ", nl=False)
    util.yellow_print("incomplete: {}".format(len(incomplete_runs)))
    if not bad_runs:
        return

    # Only the bad runs need per-test detail from the RPMDiff service.
    logger.info(
        "Fetching detailed information from RPMDiff for bad RPMDiff runs...")
    client = RPMDiffClient(constants.RPMDIFF_HUB_URL)
    client.authenticate()
    if yaml or json:
        _structured_output(bad_runs, client, yaml)
    else:
        _unstructured_output(bad_runs, client)
def _unstructured_output(bad_runs, rpmdiff_client):
    """Print a colorized, human-readable report for each failed RPMDiff run.

    For every run: the build NVR and status, then each failing test result
    with its score name and URL, the most recent waiver (if any), and the
    per-subpackage details with their captured output.
    """
    for bad_run in bad_runs:
        attributes = bad_run["attributes"]
        external_id = attributes["external_id"]
        base_url = "{}/run/{}/".format(constants.RPMDIFF_WEB_URL, external_id)
        results = rpmdiff_client.get_test_results(external_id)
        run_info = rpmdiff_client.get_run(external_id)
        print("----------------")
        headline = "{0} {1}".format(
            bad_run["relationships"]["brew_build"]["nvr"],
            attributes["status"])
        if attributes["status"] == "NEEDS_INSPECTION":
            util.yellow_print(headline)
        else:
            util.red_print(headline)
        for outcome in results:
            score = outcome["score"]
            if 0 <= score < 3:
                # good test result
                continue
            result_id = outcome["result_id"]
            test_info = outcome["test"]
            details = outcome["details"]
            test_id = test_info["test_id"]
            package = run_info["package_name"]
            result_url = base_url + str(test_id) + "/"
            line = "* TEST {0} {2} {1} {3}".format(
                result_id, constants.RPMDIFF_SCORE_NAMES[score],
                test_info["description"], result_url)
            if score == 3:  # NEEDS_INSPECTION
                util.yellow_print(line)
            else:
                util.red_print(line)
            # show the most recent waiver, if one exists
            waivers = rpmdiff_client.list_waivers(package, test_id, limit=1)
            if waivers:
                util.green_print(" Last waiver: @" +
                                 waivers[0]["owner"]["username"] + ": " +
                                 waivers[0]["description"])
            else:
                util.yellow_print(" No last waiver found.")
            for detail in details:
                detail_line = " * {1} {0}".format(
                    constants.RPMDIFF_SCORE_NAMES[detail["score"]],
                    detail["subpackage"])
                if detail["score"] == 3:
                    util.yellow_print(detail_line)
                else:
                    util.red_print(detail_line)
                # indent the captured output under its subpackage line
                content = re.sub('^', ' ', detail["content"],
                                 flags=re.MULTILINE)
                print(content)
                print()
def get_flaw_bugs(trackers):
    """Get a list of flaw bugs blocked by a list of tracking bugs.

    For a definition of these terms see
    https://docs.engineering.redhat.com/display/PRODSEC/%5BDRAFT%5D+Security+bug+types

    :param trackers: A list of tracking bugs
    :return: A list of flaw bug ids
    """
    flaw_ids = []
    for tracker in trackers:
        # A tracker may block several flaw bugs, but it must block at least
        # one; an empty `blocks` list indicates bad data, so warn about it.
        blocked = tracker.blocks
        if blocked:
            flaw_ids.extend(blocked)
        else:
            util.yellow_print(
                "Warning: found tracker bugs which doesn't block any other bugs")
    return flaw_ids
def find_bugs_sweep(runtime: Runtime, advisory_id, default_advisory_type, check_builds, major_version, find_bugs_obj,
                    report, output, brew_event, noop, count_advisory_attach_flags, bug_tracker):
    """Search the bug tracker for qualifying bugs, then optionally attach them to advisories.

    :param runtime: the Runtime; supplies debug flag, assembly config, and group advisories
    :param advisory_id: explicit advisory number; used only when no default advisory type is given
    :param default_advisory_type: restrict attachment to the group-configured advisory of this type
    :param check_builds: when truthy, look up the default 'rpm' advisory and pass it to categorize_bugs_by_type
    :param major_version: forwarded to categorize_bugs_by_type
    :param find_bugs_obj: search spec; provides .status, .search() and is passed to the cutoff filter
    :param report: when truthy, print a report of the found bugs via print_report
    :param output: output format; 'text' enables the colored progress/summary output
    :param brew_event: optional brew event forwarded to get_sweep_cutoff_timestamp
    :param noop: dry-run flag forwarded to bug_tracker.attach_bugs
    :param count_advisory_attach_flags: when < 1, stop after searching/reporting (no attachment)
    :param bug_tracker: bug tracker client (provides target_release, search, get_bugs, attach_bugs)
    """
    if output == 'text':
        statuses = sorted(find_bugs_obj.status)
        tr = bug_tracker.target_release()
        green_prefix(f"Searching {bug_tracker.type} for bugs with status {statuses} and target releases: {tr}\n")

    bugs = find_bugs_obj.search(bug_tracker_obj=bug_tracker, verbose=runtime.debug)

    # If a sweep cutoff is in effect, keep only bugs that reached one of the
    # desired statuses before that timestamp. Filtering is chunked to stay
    # within the tracker's lookup limits.
    sweep_cutoff_timestamp = get_sweep_cutoff_timestamp(runtime, cli_brew_event=brew_event)
    if sweep_cutoff_timestamp:
        utc_ts = datetime.utcfromtimestamp(sweep_cutoff_timestamp)
        green_print(f"Filtering bugs that have changed ({len(bugs)}) to one of the desired statuses before the "
                    f"cutoff time {utc_ts}...")
        qualified_bugs = []
        for chunk_of_bugs in chunk(bugs, constants.BUG_LOOKUP_CHUNK_SIZE):
            b = bug_tracker.filter_bugs_by_cutoff_event(chunk_of_bugs, find_bugs_obj.status,
                                                        sweep_cutoff_timestamp)
            qualified_bugs.extend(b)
        click.echo(f"{len(qualified_bugs)} of {len(bugs)} bugs are qualified for the cutoff time "
                   f"{utc_ts}...")
        bugs = qualified_bugs

    # Honor explicit include/exclude lists from the assembly config; a bug in
    # both lists is a configuration error.
    included_bug_ids, excluded_bug_ids = get_assembly_bug_ids(runtime)
    if included_bug_ids & excluded_bug_ids:
        raise ValueError("The following bugs are defined in both 'include' and 'exclude': "
                         f"{included_bug_ids & excluded_bug_ids}")
    if included_bug_ids:
        yellow_print("The following bugs will be additionally included because they are "
                     f"explicitly defined in the assembly config: {included_bug_ids}")
        included_bugs = bug_tracker.get_bugs(included_bug_ids)
        bugs.extend(included_bugs)
    if excluded_bug_ids:
        yellow_print("The following bugs will be excluded because they are explicitly "
                     f"defined in the assembly config: {excluded_bug_ids}")
        bugs = [bug for bug in bugs if bug.id not in excluded_bug_ids]

    if output == 'text':
        green_prefix(f"Found {len(bugs)} bugs: ")
        click.echo(", ".join(sorted(str(b.id) for b in bugs)))

    if report:
        print_report(bugs, output)

    # No advisory/attach flags given: search-and-report only.
    if count_advisory_attach_flags < 1:
        return
    # `--add ADVISORY_NUMBER` should respect the user's wish
    # and attach all available bugs to whatever advisory is specified.
    if advisory_id and not default_advisory_type:
        bug_tracker.attach_bugs(advisory_id, [b.id for b in bugs], noop=noop)
        return

    rpm_advisory_id = common.find_default_advisory(runtime, 'rpm') if check_builds else None
    bugs_by_type = categorize_bugs_by_type(bugs, rpm_advisory_id=rpm_advisory_id,
                                           major_version=major_version, check_builds=check_builds)
    # Either attach only to the requested advisory type, or to every type a
    # bug was categorized under.
    advisory_types_to_attach = [default_advisory_type] if default_advisory_type else bugs_by_type.keys()
    for advisory_type in sorted(advisory_types_to_attach):
        bugs = bugs_by_type.get(advisory_type)
        green_prefix(f'{advisory_type} advisory: ')
        if bugs:
            adv_id = common.find_default_advisory(runtime, advisory_type)
            bug_tracker.attach_bugs(adv_id, [b.id for b in bugs], noop=noop)
        else:
            click.echo("0 bugs found")
def _fetch_builds_by_kind_rpm(runtime: Runtime, tag_pv_map: Dict[str, str], brew_session: koji.ClientSession):
    """Collect the rpm builds eligible for sweeping into an advisory.

    For each Brew tag in tag_pv_map, the latest rpm builds are gathered; for
    assemblies with a basis event, NVRs pinned by "is" and group dependencies
    take precedence over builds found in the tags. Builds lacking an allowed
    tag or already shipped are filtered out.

    :param runtime: the Runtime; supplies assembly info, image/rpm maps, and the brew event
    :param tag_pv_map: maps Brew tag names to product versions
    :param brew_session: an active koji client session
    :return: sorted, de-duplicated tuples from _gen_nvrp_tuples (presumably
        (name, version, release, product_version) — confirm against that helper)
    """
    assembly = runtime.assembly
    if runtime.assembly_basis_event:
        LOGGER.warning(
            f'Constraining rpm search to stream assembly due to assembly basis event {runtime.assembly_basis_event}')
        # If an assembly has a basis event, its latest rpms can only be sourced from
        # "is:" or the stream assembly.
        assembly = 'stream'
        # ensures the runtime assembly doesn't include any image member specific or rhcos specific dependencies
        image_configs = [
            assembly_metadata_config(runtime.get_releases_config(), runtime.assembly, 'image', image.distgit_key,
                                     image.config) for _, image in runtime.image_map.items()
        ]
        if any(nvr for image_config in image_configs for dep in image_config.dependencies.rpms
               for _, nvr in dep.items()):
            raise ElliottFatalError(
                f"Assembly {runtime.assembly} is not appliable for build sweep because it contains image member specific dependencies for a custom release."
            )
        rhcos_config = assembly_rhcos_config(runtime.get_releases_config(), runtime.assembly)
        if any(nvr for dep in rhcos_config.dependencies.rpms for _, nvr in dep.items()):
            raise ElliottFatalError(
                f"Assembly {runtime.assembly} is not appliable for build sweep because it contains RHCOS specific dependencies for a custom release."
            )

    green_prefix('Generating list of rpms: ')
    click.echo('Hold on a moment, fetching Brew builds')
    builder = BuildFinder(brew_session, logger=LOGGER)
    builds: List[Dict] = []
    for tag in tag_pv_map:
        # keys are rpm component names, values are nvres
        component_builds: Dict[str, Dict] = builder.from_tag(
            "rpm", tag, inherit=False, assembly=assembly, event=runtime.brew_event)
        if runtime.assembly_basis_event:
            # If an assembly has a basis event, rpms pinned by "is" and group dependencies should take precedence over every build from the tag
            el_version = isolate_el_version_in_brew_tag(tag)
            if not el_version:
                continue  # Only honor pinned rpms if this tag is relevant to a RHEL version

            # Honors pinned NVRs by "is"
            pinned_by_is = builder.from_pinned_by_is(
                el_version, runtime.assembly, runtime.get_releases_config(), runtime.rpm_map)
            _ensure_accepted_tags(pinned_by_is.values(), brew_session, tag_pv_map)

            # Builds pinned by "is" should take precedence over every build from tag
            for component, pinned_build in pinned_by_is.items():
                if component in component_builds and pinned_build["id"] != component_builds[component]["id"]:
                    LOGGER.warning("Swapping stream nvr %s for pinned nvr %s...",
                                   component_builds[component]["nvr"], pinned_build["nvr"])
            component_builds.update(pinned_by_is)  # pinned rpms take precedence over those from tags

            # Honors group dependencies
            group_deps = builder.from_group_deps(
                el_version, runtime.group_config,
                runtime.rpm_map)  # the return value doesn't include any ART managed rpms
            # Group dependencies should take precedence over anything previously determined except those pinned by "is".
            for component, dep_build in group_deps.items():
                if component in component_builds and dep_build["id"] != component_builds[component]["id"]:
                    LOGGER.warning("Swapping stream nvr %s for group dependency nvr %s...",
                                   component_builds[component]["nvr"], dep_build["nvr"])
            component_builds.update(group_deps)
        builds.extend(component_builds.values())

    # Builds without an allowed tag ("tag_name" absent after the check) are
    # reported and dropped rather than treated as an error.
    _ensure_accepted_tags(builds, brew_session, tag_pv_map, raise_exception=False)
    qualified_builds = [b for b in builds if "tag_name" in b]
    not_attachable_nvrs = [b["nvr"] for b in builds if "tag_name" not in b]
    if not_attachable_nvrs:
        yellow_print(
            f"The following NVRs will not be swept because they don't have allowed tags {list(tag_pv_map.keys())}:")
        for nvr in not_attachable_nvrs:
            yellow_print(f"\t{nvr}")

    click.echo("Filtering out shipped builds...")
    shipped = _find_shipped_builds([b["id"] for b in qualified_builds], brew_session)
    unshipped = [b for b in qualified_builds if b["id"] not in shipped]
    click.echo(
        f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.')
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    nvrps = sorted(set(nvrps))  # remove duplicates
    return nvrps
def show(ctx, advisory):
    """ Show RPMDiff failures for an advisory.

    If no advisory is given, the group's configured rpm advisory is used.
    Prints a good/bad/incomplete summary, then a detailed colorized report
    for each bad (NEEDS_INSPECTION or FAILED) run.
    """
    runtime = ctx.obj  # type: Runtime
    if not advisory:
        runtime.initialize()
        advisory = runtime.group_config.advisories.get("rpm", 0)
        if not advisory:
            raise ElliottFatalError(
                "No RPM advisory number configured in ocp-build-data.")
    else:
        runtime.initialize(no_group=True)
    logger = runtime.logger
    logger.info(
        "Fetching RPMDiff runs from Errata Tool for advisory {}...".format(
            advisory))
    rpmdiff_runs = list(errata.get_rpmdiff_runs(advisory))
    logger.info("Found {} RPMDiff runs.".format(len(rpmdiff_runs)))

    # "good" means PASSED, INFO, or WAIVED
    good_runs = []
    # "bad" means NEEDS_INSPECTION or FAILED
    bad_runs = []
    incomplete_runs = []
    for rpmdiff_run in rpmdiff_runs:
        attr = rpmdiff_run['attributes']
        if attr["status"] in constants.ET_GOOD_EXTERNAL_TEST_STATUSES:
            good_runs.append(rpmdiff_run)
        elif attr["status"] in constants.ET_BAD_EXTERNAL_TEST_STATUSES:
            bad_runs.append(rpmdiff_run)
        else:
            incomplete_runs.append(rpmdiff_run)

    util.green_prefix("good: {}".format(len(good_runs)))
    click.echo(", ", nl=False)
    # "bad: {}" (with a space) for consistency with the other summary labels.
    util.red_prefix("bad: {}".format(len(bad_runs)))
    click.echo(", ", nl=False)
    util.yellow_print("incomplete: {}".format(len(incomplete_runs)))
    if not bad_runs:
        return

    logger.info(
        "Fetching detailed information from RPMDiff for bad RPMDiff runs...")
    rpmdiff_client = RPMDiffClient(constants.RPMDIFF_HUB_URL)
    rpmdiff_client.authenticate()
    for run in bad_runs:
        attr = run["attributes"]
        run_id = attr["external_id"]
        run_url = "{}/run/{}/".format(constants.RPMDIFF_WEB_URL, run_id)
        print("----------------")
        msg = "{0} {1}".format(run["relationships"]["brew_build"]["nvr"],
                               attr["status"])
        if attr["status"] == "NEEDS_INSPECTION":
            util.yellow_print(msg)
        else:
            util.red_print(msg)
        test_results = rpmdiff_client.get_test_results(run_id)
        run_obj = rpmdiff_client.get_run(run_id)
        for result in test_results:
            score = result["score"]
            if score >= 0 and score < 3:
                # good test result
                continue
            result_id = result["result_id"]
            test = result["test"]
            details = result["details"]
            test_id = test["test_id"]
            package_name = run_obj["package_name"]
            result_url = run_url + str(test_id) + "/"
            result_msg = "* TEST {0} {2} {1} {3}".format(
                result_id, constants.RPMDIFF_SCORE_NAMES[score],
                test["description"], result_url)
            if score == 3:  # NEEDS_INSPECTION
                util.yellow_print(result_msg)
            else:
                util.red_print(result_msg)
            # get last waiver message
            waivers = rpmdiff_client.list_waivers(package_name, test_id,
                                                  limit=1)
            if waivers:
                util.green_print(" Last waiver: @" +
                                 waivers[0]["owner"]["username"] + ": " +
                                 waivers[0]["description"])
            else:
                util.yellow_print(" No last waiver found.")
            for detail in details:
                detail_msg = " * {1} {0}".format(
                    constants.RPMDIFF_SCORE_NAMES[detail["score"]],
                    detail["subpackage"])
                if detail["score"] == 3:
                    util.yellow_print(detail_msg)
                else:
                    util.red_print(detail_msg)
                # indent the captured output under its subpackage line
                content = re.sub('^', ' ', detail["content"],
                                 flags=re.MULTILINE)
                print(content)
                print()
async def verify_payload(ctx, payload, advisory):
    """Cross-check that the builds present in PAYLOAD match the builds
    attached to ADVISORY. The payload is treated as the source of truth. If
    something is absent or different in the advisory it is treated as an
    error with the advisory.

    \b
        PAYLOAD - Full pullspec of the payload to verify
        ADVISORY - Numerical ID of the advisory

    Two checks are made:

    \b
     1. Missing in Advisory - No payload components are absent from the given advisory
     2. Payload Advisory Mismatch - The version-release of each payload item match what is in the advisory

    Results are summarily printed at the end of the run. They are also
    written out to summary_results.json.

        Verify builds in the given payload match the builds attached to
        advisory 41567

    \b
        $ elliott verify-payload quay.io/openshift-release-dev/ocp-release:4.1.0-rc.6 41567
    """
    all_advisory_nvrs = elliottlib.errata.get_advisory_nvrs(advisory)
    click.echo("Found {} builds".format(len(all_advisory_nvrs)))

    # Maps brew component name -> "version-release" extracted from the payload images.
    all_payload_nvrs = {}
    click.echo("Fetching release info")
    release_export_cmd = 'oc adm release info {} -o json'.format(payload)
    rc, stdout, stderr = exectools.cmd_gather(release_export_cmd)
    if rc != 0:
        # Probably no point in continuing.. can't contact brew?
        print("Unable to run oc release info: out={} ; err={}".format(
            stdout, stderr))
        exit(1)
    else:
        click.echo("Got release info")

    payload_json = json.loads(stdout)

    green_prefix("Looping over payload images: ")
    click.echo("{} images to check".format(
        len(payload_json['references']['spec']['tags'])))
    # One `oc image info` invocation per payload tag, run concurrently below.
    cmds = [['oc', 'image', 'info', '-o', 'json', tag['from']['name']]
            for tag in payload_json['references']['spec']['tags']]

    green_prefix("Querying image infos...")
    cmd_results = await asyncio.gather(
        *[exectools.cmd_gather_async(cmd) for cmd in cmds])

    for image, cmd, cmd_result in zip(
            payload_json['references']['spec']['tags'], cmds, cmd_results):
        click.echo("----")
        image_name = image['name']
        rc, stdout, stderr = cmd_result
        if rc != 0:
            # Probably no point in continuing.. can't contact brew?
            red_prefix("Unable to run oc image info: ")
            red_print(f"cmd={cmd!r}, out={stdout} ; err={stderr}")
            exit(1)

        image_info = json.loads(stdout)
        labels = image_info['config']['config']['Labels']

        # The machine-os-content image doesn't follow the standard
        # pattern. We need to skip that image when we find it, it is
        # not attached to advisories.
        if 'com.coreos.ostree-commit' in labels:
            yellow_prefix("Skipping machine-os-content image: ")
            click.echo("Not required for checks")
            continue

        component = labels['com.redhat.component']
        n = image_name
        click.echo("Payload name: {}".format(n))
        click.echo("Brew name: {}".format(component))
        if labels:
            v = labels['version']
            r = labels['release']
            all_payload_nvrs[component] = "{}-{}".format(v, r)
        else:
            print("For image {} Labels doesn't exist, image_info: {}".format(
                image_name, image_info))

    # These containers are referenced by `output` below, so mutating them
    # also updates the final summary in place.
    missing_in_errata = {}
    payload_doesnt_match_errata = {}
    in_pending_advisory = []
    in_shipped_advisory = []
    output = {
        'missing_in_advisory': missing_in_errata,
        'payload_advisory_mismatch': payload_doesnt_match_errata,
        "in_pending_advisory": in_pending_advisory,
        "in_shipped_advisory": in_shipped_advisory,
    }

    green_prefix("Analyzing data: ")
    click.echo("{} images to consider from payload".format(
        len(all_payload_nvrs)))

    for image, vr in all_payload_nvrs.items():
        yellow_prefix("Cross-checking from payload: ")
        click.echo("{}-{}".format(image, vr))
        if image not in all_advisory_nvrs:
            missing_in_errata[image] = "{}-{}".format(image, vr)
            click.echo("{} in payload not found in advisory".format(
                "{}-{}".format(image, vr)))
        elif image in all_advisory_nvrs and vr != all_advisory_nvrs[image]:
            click.echo(
                "{} from payload has version {} which does not match {} from advisory"
                .format(image, vr, all_advisory_nvrs[image]))
            payload_doesnt_match_errata[image] = {
                'payload': vr,
                'errata': all_advisory_nvrs[image]
            }

    if missing_in_errata:
        # check if missing images are already shipped or pending to ship
        advisory_nvrs: Dict[int, List[str]] = {
        }  # a dict mapping advisory numbers to lists of NVRs
        green_print(
            f"Checking if {len(missing_in_errata)} missing images are shipped..."
        )
        # Iterate over a copy because entries found in another advisory are
        # deleted from missing_in_errata inside the loop.
        for nvr in missing_in_errata.copy().values():
            # get the list of advisories that this build has been attached to
            build = elliottlib.errata.get_brew_build(nvr)
            # filter out dropped advisories
            advisories = [
                ad for ad in build.all_errata
                if ad["status"] != "DROPPED_NO_SHIP"
            ]
            if not advisories:
                red_print(f"Build {nvr} is not attached to any advisories.")
                continue
            for advisory in advisories:
                if advisory["status"] == "SHIPPED_LIVE":
                    green_print(
                        f"Missing build {nvr} has been shipped with advisory {advisory}."
                    )
                else:
                    yellow_print(
                        f"Missing build {nvr} is in another pending advisory.")
                advisory_nvrs.setdefault(advisory["id"], []).append(nvr)
            # missing_in_errata is keyed by component name, so recover the
            # name from the NVR before deleting.
            name = nvr.rsplit("-", 2)[0]
            del missing_in_errata[name]

        if advisory_nvrs:
            click.echo(
                f"Getting information of {len(advisory_nvrs)} advisories...")
            for advisory, nvrs in advisory_nvrs.items():
                advisory_obj = elliottlib.errata.get_raw_erratum(advisory)
                adv_type, adv_info = next(iter(advisory_obj["errata"].items()))
                item = {
                    "id": advisory,
                    "type": adv_type.upper(),
                    "url": elliottlib.constants.errata_url + f"/{advisory}",
                    "summary": adv_info["synopsis"],
                    "state": adv_info["status"],
                    "nvrs": nvrs,
                }
                if adv_info["status"] == "SHIPPED_LIVE":
                    in_shipped_advisory.append(item)
                else:
                    in_pending_advisory.append(item)

    green_print("Summary results:")
    click.echo(json.dumps(output, indent=4))
    with open('summary_results.json', 'w') as fp:
        json.dump(output, fp, indent=4)
    green_prefix("Wrote out summary results: ")
    click.echo("summary_results.json")
def verify_cvp_cli(runtime: Runtime, all_images, nvrs, optional_checks,
                   all_optional_checks, fix, message):
    """ Verify CVP test results

    Example 1: Verify CVP test results for all latest 4.4 image builds, also warn those with failed content_set_check

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check

    Example 2: Apply patches to ocp-build-data to fix the redundant content sets error:

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check --fix

    Note:
    1. If `--message` is not given, `--fix` will leave changed ocp-build-data files uncommitted.
    2. Make sure your ocp-build-data directory is clean before running `--fix`.
    """
    if bool(all_images) + bool(nvrs) != 1:
        raise click.BadParameter('You must use one of --all or --build.')
    if all_optional_checks and optional_checks:
        raise click.BadParameter(
            'Use only one of --all-optional-checks or --include-optional-check.'
        )

    runtime.initialize(mode='images')
    tag_pv_map = runtime.gitdata.load_data(
        key='erratatool',
        replace_vars=runtime.group_config.vars.primitive()
        if runtime.group_config.vars else
        {}).data.get('brew_tag_product_version_mapping')
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub
                                      or constants.BREW_HUB)
    builds = []
    if all_images:
        runtime.logger.info("Getting latest image builds from Brew...")
        builds = get_latest_image_builds(brew_session, tag_pv_map.keys(),
                                         runtime.image_metas)
    elif nvrs:
        # BUG FIX: previously logged len(builds), which is always 0 at this
        # point; the user-supplied nvrs is what we are about to look up.
        runtime.logger.info(f"Finding {len(nvrs)} builds from Brew...")
        builds = brew.get_build_objects(nvrs, brew_session)
    runtime.logger.info(f"Found {len(builds)} image builds.")

    resultsdb_api = ResultsDBAPI()
    nvrs = [b["nvr"] for b in builds]
    runtime.logger.info(
        f"Getting CVP test results for {len(builds)} image builds...")
    latest_cvp_results = get_latest_cvp_results(runtime, resultsdb_api, nvrs)

    # print a summary for all CVP results
    good_results = []  # good means PASSED or INFO
    bad_results = []  # bad means NEEDS_INSPECTION or FAILED
    incomplete_nvrs = []
    for nvr, result in zip(nvrs, latest_cvp_results):
        if not result:
            incomplete_nvrs.append(nvr)
            continue
        outcome = result.get(
            "outcome"
        )  # only PASSED, FAILED, INFO, NEEDS_INSPECTION are now valid outcome values (https://resultsdb20.docs.apiary.io/#introduction/changes-since-1.0)
        if outcome in {"PASSED", "INFO"}:
            good_results.append(result)
        elif outcome in {"NEEDS_INSPECTION", "FAILED"}:
            bad_results.append(result)

    green_prefix("good: {}".format(len(good_results)))
    click.echo(", ", nl=False)
    red_prefix("bad: {}".format(len(bad_results)))
    click.echo(", ", nl=False)
    yellow_print("incomplete: {}".format(len(incomplete_nvrs)))

    if bad_results:
        red_print("The following builds didn't pass CVP tests:")
        for r in bad_results:
            nvr = r["data"]["item"][0]
            red_print(f"{nvr} {r['outcome']}: {r['ref_url']}")

    if incomplete_nvrs:
        yellow_print(
            "We couldn't find CVP test results for the following builds:")
        for nvr in incomplete_nvrs:
            yellow_print(nvr)

    if not optional_checks and not all_optional_checks:
        return  # no need to print failed optional CVP checks
    # Find failed optional CVP checks in case some of the items *will* become required.
    optional_checks = set(optional_checks)
    complete_results = good_results + bad_results
    runtime.logger.info(
        f"Getting optional checks for {len(complete_results)} CVP tests...")
    optional_check_results = get_optional_checks(runtime, complete_results)

    component_distgit_keys = {
    }  # a dict of brew component names to distgit keys
    content_set_repo_names = {
    }  # a map of x86_64 content set names to group.yml repo names
    if fix:  # Fixing redundant content sets requires those dicts
        for image in runtime.image_metas():
            component_distgit_keys[
                image.get_component_name()] = image.distgit_key
        for repo_name, repo_info in runtime.group_config.get("repos",
                                                             {}).items():
            content_set_name = repo_info.get(
                'content_set', {}).get('x86_64') or repo_info.get(
                    'content_set', {}).get('default')
            if content_set_name:
                content_set_repo_names[content_set_name] = repo_name

    ocp_build_data_updated = False

    for cvp_result, checks in zip(complete_results, optional_check_results):
        # example optional checks: http://external-ci-coldstorage.datahub.redhat.com/cvp/cvp-product-test/hive-container-v4.6.0-202008010302.p0/da01e36c-8c69-4a19-be7d-ba4593a7b085/sanity-tests-optional-results.json
        bad_checks = [
            check for check in checks["checks"]
            if check["status"] != "PASS" and (
                all_optional_checks or check["name"] in optional_checks)
        ]
        if not bad_checks:
            continue
        nvr = cvp_result["data"]["item"][0]
        yellow_print("----------")
        yellow_print(
            f"Build {nvr} has {len(bad_checks)} problematic CVP optional checks:"
        )
        for check in bad_checks:
            yellow_print(f"* {check['name']} {check['status']}")
            if fix and check["name"] == "content_set_check":
                if "Some content sets are redundant." in check["logs"]:
                    # fix redundant content sets
                    name = nvr.rsplit('-', 2)[0]
                    distgit_keys = component_distgit_keys.get(name)
                    if not distgit_keys:
                        runtime.logger.warning(
                            f"Will not apply the redundant content sets fix to image {name}: We don't know its distgit key."
                        )
                        continue
                    amd64_content_sets = list(
                        filter(lambda item: item.get("arch") == "amd64",
                               check["logs"][-1])
                    )  # seems only x86_64 (amd64) content sets are defined in ocp-build-data.
                    if not amd64_content_sets:
                        runtime.logger.warning(
                            f"Will not apply the redundant content sets fix to image {name}: It doesn't have redundant x86_64 (amd64) content sets"
                        )
                        continue
                    amd64_redundant_cs = amd64_content_sets[0]["redundant_cs"]
                    redundant_repos = [
                        content_set_repo_names[cs] for cs in amd64_redundant_cs
                        if cs in content_set_repo_names
                    ]
                    # NOTE(review): on a mismatch this logs an error but still
                    # applies the (partial) fix below — confirm intended.
                    if len(redundant_repos) != len(amd64_redundant_cs):
                        runtime.logger.error(
                            f"Not all content sets have a repo entry in group.yml: #content_sets is {len(amd64_redundant_cs)}, #repos is {len(redundant_repos)}"
                        )
                    runtime.logger.info(
                        f"Applying redundant content sets fix to {distgit_keys}..."
                    )
                    fix_redundant_content_set(runtime, distgit_keys,
                                              redundant_repos)
                    ocp_build_data_updated = True
                    runtime.logger.info(
                        f"Fixed redundant content sets for {distgit_keys}")
        yellow_print(
            f"See {cvp_result['ref_url']}sanity-tests-optional-results.json for more details."
        )

    if message and ocp_build_data_updated:
        runtime.gitdata.commit(message)
async def verify_cvp_cli(runtime: Runtime, all_images, nvrs, optional_checks, all_optional_checks, fix, message):
    """ Verify CVP test results

    Example 1: Verify CVP test results for all latest 4.4 image builds, also warn those with failed content_set_check

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check

    Example 2: Apply patches to ocp-build-data to fix the redundant content sets error:

    $ elliott --group openshift-4.4 verify-cvp --all --include-optional-check content_set_check --fix

    Note:
    1. If `--message` is not given, `--fix` will leave changed ocp-build-data files uncommitted.
    2. Make sure your ocp-build-data directory is clean before running `--fix`.
    """
    # --all and --build are mutually exclusive, and exactly one is required.
    if bool(all_images) + bool(nvrs) != 1:
        raise click.BadParameter('You must use one of --all or --build.')
    if all_optional_checks and optional_checks:
        raise click.BadParameter('Use only one of --all-optional-checks or --include-optional-check.')
    runtime.initialize(mode='images')
    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub or constants.BREW_HUB)
    builds = []
    if all_images:
        image_metas = runtime.image_metas()
        builds = await get_latest_image_builds(image_metas)
    elif nvrs:
        # FIX: previously logged len(builds), which is always 0 at this point.
        runtime.logger.info(f"Finding {len(nvrs)} builds from Brew...")
        builds = brew.get_build_objects(nvrs, brew_session)
    runtime.logger.info(f"Found {len(builds)} image builds.")

    resultsdb_api = ResultsDBAPI()
    nvrs = [b["nvr"] for b in builds]
    runtime.logger.info(f"Getting CVP test results for {len(builds)} image builds...")
    latest_cvp_results = await get_latest_cvp_results(runtime, resultsdb_api, nvrs)

    # print a summary for all CVP results
    good_results = []  # good means PASSED or INFO
    bad_results = []  # bad means NEEDS_INSPECTION or FAILED
    incomplete_nvrs = []  # builds for which no CVP result was found at all
    for nvr, result in zip(nvrs, latest_cvp_results):
        if not result:
            incomplete_nvrs.append(nvr)
            continue
        outcome = result.get("outcome")
        # only PASSED, FAILED, INFO, NEEDS_INSPECTION are now valid outcome values (https://resultsdb20.docs.apiary.io/#introduction/changes-since-1.0)
        if outcome in {"PASSED", "INFO"}:
            good_results.append(result)
        elif outcome in {"NEEDS_INSPECTION", "FAILED"}:
            bad_results.append(result)
    green_prefix("good: {}".format(len(good_results)))
    click.echo(", ", nl=False)
    red_prefix("bad: {}".format(len(bad_results)))
    click.echo(", ", nl=False)
    yellow_print("incomplete: {}".format(len(incomplete_nvrs)))

    if bad_results:
        red_print("The following builds didn't pass CVP tests:")
        for r in bad_results:
            nvr = r["data"]["item"][0]
            red_print(f"{nvr} {r['outcome']}: {r['ref_url']}")

    if incomplete_nvrs:
        yellow_print("We couldn't find CVP test results for the following builds:")
        for nvr in incomplete_nvrs:
            yellow_print(nvr)

    if not optional_checks and not all_optional_checks:
        return  # no need to print failed optional CVP checks
    # Find failed optional CVP checks in case some of them *will* become required.
    optional_checks = set(optional_checks)
    complete_results = good_results + bad_results
    runtime.logger.info(f"Getting optional checks for {len(complete_results)} CVP tests...")
    optional_check_results = await get_optional_checks(runtime, complete_results)

    component_distgit_keys = {}  # a dict of brew component names to distgit keys
    content_set_to_repo_names = {}  # a map of content set names to group.yml repo names
    for image in runtime.image_metas():
        component_distgit_keys[image.get_component_name()] = image.distgit_key
    for repo_name, repo_info in runtime.group_config.get("repos", {}).items():
        for arch, cs_name in repo_info.get('content_set', {}).items():
            if arch == "optional":
                continue  # not a real arch name
            content_set_to_repo_names[cs_name] = repo_name

    nvr_to_builds = {build["nvr"]: build for build in builds}

    ocp_build_data_updated = False  # tracks whether --fix changed any group/image yml

    # Accounting buckets for the final summary.
    failed_with_not_covered_rpms = set()
    failed_with_redundant_repos = set()
    only_failed_in_non_x86_with_not_covered_rpms = set()
    only_failed_in_non_x86_with_redundant_repos = set()

    for cvp_result, checks in zip(complete_results, optional_check_results):
        # example optional checks: http://external-ci-coldstorage.datahub.redhat.com/cvp/cvp-product-test/hive-container-v4.6.0-202008010302.p0/da01e36c-8c69-4a19-be7d-ba4593a7b085/sanity-tests-optional-results.json
        bad_checks = [check for check in checks["checks"] if check["status"] != "PASS" and (all_optional_checks or check["name"] in optional_checks)]
        if not bad_checks:
            continue
        nvr = cvp_result["data"]["item"][0]
        build = nvr_to_builds[nvr]
        yellow_print("----------")
        yellow_print(f"Build {nvr} (https://brewweb.engineering.redhat.com/brew/buildinfo?buildID={nvr_to_builds[nvr]['id']}) has {len(bad_checks)} problematic CVP optional checks:")
        for check in bad_checks:
            yellow_print(f"* {check['name']} {check['status']}")
            try:
                # last entry of check["logs"] holds the per-arch detail records
                amd64_result = list(filter(lambda item: item.get("arch") == "amd64", check["logs"][-1]))
            except AttributeError:
                red_print("CVP result malformed.")
                # FIX: previously fell through with amd64_result unbound, raising
                # NameError on the next statement. Skip this check instead.
                continue
            if len(amd64_result) != 1:
                red_print("WHAT?! This build doesn't include an amd64 image? This shouldn't happen. Check Brew and CVP logs with the CVP team!")
                continue
            amd64_result = amd64_result[0]
            image_component_name = nvr.rsplit('-', 2)[0]
            distgit_key = component_distgit_keys.get(image_component_name)

            amd64_redundant_cs = amd64_result.get("redundant_cs", [])
            amd64_redundant_repos = {content_set_to_repo_names[cs] for cs in amd64_redundant_cs}

            def _strip_arch_suffix(rpm):
                # rh-nodejs10-3.2-3.el7.x86_64 -> rh-nodejs10-3.2-3.el7
                rpm_split = rpm.rsplit(".", 1)
                return rpm_split[0]

            amd64_not_covered_rpms = {_strip_arch_suffix(rpm) for rpm in amd64_result.get("not_covered_rpms", [])}

            if check["name"] == "content_set_check":
                details = check["logs"][-1]
                # example: http://external-ci-coldstorage.datahub.redhat.com/cvp/cvp-product-test/logging-fluentd-container-v4.6.0-202008261251.p0/dd9f2024-5440-4f33-b508-472ccf258439/sanity-tests-optional-results.json
                if not details:
                    red_print("content_set_check failed without any explanation. Report to CVP team!")
                    continue
                if len(details) > 1:
                    # if this build is multi-arch, check if all per-arch results are consistent with amd64
                    for result in details:
                        if result["arch"] == "amd64":
                            continue
                        redundant_repos = {content_set_to_repo_names[cs] for cs in result.get("redundant_cs", [])}
                        if redundant_repos != amd64_redundant_repos:
                            only_failed_in_non_x86_with_redundant_repos.add(nvr)
                            red_print(f"""content_set_check for {nvr} arch {result["arch"]} has different redundant_cs result from the one for amd64:
{result["arch"]} has redundant_cs {result.get("redundant_cs")},
but amd64 has redundant_cs {amd64_redundant_cs}.
Not sure what happened. Please see Brew and CVP logs and/or check with the CVP team.""")
                        not_covered_rpms = {_strip_arch_suffix(rpm) for rpm in result.get("not_covered_rpms", [])}
                        if not_covered_rpms != amd64_not_covered_rpms:
                            only_failed_in_non_x86_with_not_covered_rpms.add(nvr)
                            red_print(f"""content_set_check for {nvr} arch {result["arch"]} has different not_covered_rpms result from the one for amd64:
{result["arch"]} has extra not_covered_rpms {not_covered_rpms - amd64_not_covered_rpms},
and missing not_covered_rpms {amd64_not_covered_rpms - not_covered_rpms}.
Not sure what happened. Check Brew and CVP logs with the CVP team!""")
                if amd64_not_covered_rpms:  # This build has not_covered_rpms
                    failed_with_not_covered_rpms.add(nvr)
                    yellow_print(f"Image {distgit_key} has not_covered_rpms: {amd64_not_covered_rpms}")
                    brew_repos = await find_repos_for_rpms(amd64_not_covered_rpms, build)
                    yellow_print(f"Those repos shown in Brew logs might be a good hint: {brew_repos}")
                    runtime.logger.info("Looking for parent image's content_sets...")
                    parent = get_parent_build_ids([build])[0]
                    if parent:
                        parent_build = brew.get_build_objects([parent])[0]
                        parent_cs = await get_content_sets_for_build(parent_build)
                        parent_enabled_repos = {content_set_to_repo_names[cs] for cs in parent_cs.get("x86_64", [])}
                        enabled_repos = set(runtime.image_map[distgit_key].config.get("enabled_repos", []))
                        missing_repos = parent_enabled_repos - enabled_repos
                        yellow_print(f"""The following repos are defined in parent {parent_build["nvr"]} {component_distgit_keys.get(parent_build["name"], "?")}.yml but not in {component_distgit_keys[build["name"]]}.yml: {missing_repos}""")
                        if fix and missing_repos:
                            runtime.logger.info("Trying to merge parent image's content_sets...")
                            fix_missing_content_set(runtime, distgit_key, missing_repos)
                            ocp_build_data_updated = True
                            runtime.logger.info(f"{distgit_key}.yml patched")
                if amd64_redundant_repos:  # This build has redundant_cs
                    failed_with_redundant_repos.add(nvr)
                    yellow_print(f"Image {distgit_key} has redundant repos: {amd64_redundant_repos}")
                    if not fix:
                        yellow_print(f"Please add the following repos to non_shipping_repos in {distgit_key}.yml: {amd64_redundant_repos}")
                    else:
                        runtime.logger.info(f"Applying redundant content sets fix to {distgit_key}.yml...")
                        fix_redundant_content_set(runtime, distgit_key, amd64_redundant_repos)
                        ocp_build_data_updated = True
                        runtime.logger.info(f"{distgit_key}.yml patched")
        print(f"See {cvp_result['ref_url']}sanity-tests-optional-results.json for more details.")

    # Final summary of all content_set_check failures.
    if failed_with_not_covered_rpms or failed_with_redundant_repos:
        yellow_print(f"{len(failed_with_not_covered_rpms | failed_with_redundant_repos)} images failed content_sets.\n Where")
    if failed_with_not_covered_rpms:
        yellow_print(f"\t{len(failed_with_not_covered_rpms)} images failed content_sets check because of not_covered_rpms:")
        for rpm in failed_with_not_covered_rpms:
            line = f"\t\t{rpm}"
            if rpm in only_failed_in_non_x86_with_not_covered_rpms:
                line += " - non-x86 arches are different from x86 one"
            yellow_print(line)
    if failed_with_redundant_repos:
        yellow_print(f"\t{len(failed_with_redundant_repos)} images failed content_sets check because of redundant_repos:")
        for rpm in failed_with_redundant_repos:
            line = f"\t\t{rpm}"
            if rpm in only_failed_in_non_x86_with_redundant_repos:
                line += " - non-x86 arches are different from x86 one"
            yellow_print(line)

    if message and ocp_build_data_updated:
        runtime.gitdata.commit(message)
def create(ctx, advisories, out_dir, out_layout, components, force):
    """ Create tarball sources for advisories.

    Downloads the source Git repo of each matched Brew build attached to the
    given advisories and packs it into a ``<NVR>.tar.gz`` under ``out_dir``
    (either flat, or nested per product-version/advisory, per --out-layout).

    To create tarball sources for Brew component (package) logging-fluentd-container that was shipped on advisories 45606, 45527, and 46049:

    $ elliott tarball-sources create --component logging-fluentd-container --out-dir=out/ 45606 45527 46049
    """
    # Refuse to write into a non-empty output directory unless --force is given,
    # so previously generated tarballs are not silently mixed with new ones.
    if not force and os.path.isdir(out_dir) and os.listdir(out_dir):
        util.red_print("Output directory {} is not empty.\n\
Use --force to add new tarball sources to an existing directory.".format(
            os.path.abspath(out_dir)))
        exit(1)
    mkdirs(out_dir)

    working_dir = os.path.join(ctx.obj.working_dir, "tarball-sources")
    LOGGER.debug("Use working directory {}.".format(
        os.path.abspath(working_dir)))
    mkdirs(working_dir)

    # `nvr_dirs` is a dict with brew build NVRs as keys, values are
    # a set of directories for the generated tarballs,
    # since a build can be attached to multiple advisories.
    # For example:
    # nvr_dirs = {
    #     "logging-fluentd-container-v3.11.141-2": {
    #         "RHOSE/RHEL-7-OSE-3.11/45606/release/"
    #     },
    #     "logging-fluentd-container-v4.1.14-201908291507": {
    #         "RHOSE/RHEL-7-OSE-4.1/45527/release/"
    #     },
    #     "logging-fluentd-container-v4.1.15-201909041605": {
    #         "RHOSE/RHEL-7-OSE-4.1/46049/release/"
    #     }
    # }
    nvr_dirs = {}  # type: Dict[str, Set[str]]

    # Getting build NVRs for specified Koji/Brew components from advisories
    # NOTE This is SLOW. However doing this in parallel doesn't work
    # due to a race condition existing in the implementation of `errata_tool.Erratum`'s parent class ErrataConnector.
    for advisory in advisories:
        click.echo("Finding builds from advisory {}...".format(advisory))
        builds = tarball_sources.find_builds_from_advisory(
            advisory, components)
        if not builds:
            # An advisory with no matching builds is reported but not fatal;
            # we bail out later only if *no* advisory yielded builds.
            util.yellow_print(
                "No matched builds found from advisory {}. Wrong advisory number?"
                .format(advisory))
            continue
        util.green_print("Found {} matched build(s) from advisory {}".format(
            len(builds), advisory))
        for nvr, product, product_version in builds:
            util.green_print("\t{}\t{}\t{}".format(nvr, product,
                                                   product_version))

        # Record a destination directory for each NVR; a build attached to
        # several advisories accumulates several destinations.
        for nvr, product, product_version in builds:
            if nvr not in nvr_dirs:
                nvr_dirs[nvr] = set()
            if out_layout == "flat":
                nvr_dirs[nvr].add(out_dir)
            else:
                nvr_dirs[nvr].add(
                    os.path.join(out_dir, product_version, str(advisory),
                                 "release"))

    if not nvr_dirs:
        util.red_print(
            "Exiting because no matched builds from all specified advisories.")
        exit(1)

    # Check build infos from Koji/Brew
    # in order to figure out the source Git repo and commit hash for each build.
    click.echo("Fetching build infos for {} from Koji/Brew...".format(
        ", ".join(nvr_dirs.keys())))
    brew_session = koji.ClientSession(constants.BREW_HUB)
    brew_builds = brew.get_build_objects(nvr_dirs.keys(), brew_session)

    # Ready to generate tarballs
    tarball_sources_list = []
    for build_info in brew_builds:
        nvr = build_info["nvr"]
        tarball_filename = nvr + ".tar.gz"
        click.echo("Generating tarball source {} for {}...".format(
            tarball_filename, nvr))

        # Generate into a temp file first, then copy into each destination;
        # the temp file is removed automatically when the `with` block exits.
        with tempfile.NamedTemporaryFile(suffix="-" + tarball_filename,
                                         dir=working_dir) as temp_tarball:
            temp_tarball_path = temp_tarball.name
            LOGGER.debug(
                "Temporary tarball file is {}".format(temp_tarball_path))

            tarball_sources.generate_tarball_source(
                temp_tarball, nvr + "/",
                os.path.join(working_dir, "repos", build_info["name"]),
                build_info["source"])
            for dest_dir in nvr_dirs[nvr]:
                mkdirs(dest_dir)
                tarball_abspath = os.path.abspath(
                    os.path.join(dest_dir, tarball_filename))
                if os.path.exists(tarball_abspath):
                    util.yellow_print(
                        "File {} will be overwritten.".format(tarball_abspath))

                LOGGER.debug("Copying {} to {}...".format(
                    temp_tarball_path, tarball_abspath))
                shutil.copyfile(
                    temp_tarball_path,
                    tarball_abspath)  # `shutil.copyfile` uses default umask
                tarball_sources_list.append(tarball_abspath)
                util.green_print(
                    "Created tarball source {}.".format(tarball_abspath))

    print_success_message(tarball_sources_list, out_dir)
def find_bugs_cli(runtime: Runtime, advisory, default_advisory_type, mode, check_builds, status, exclude_status, id, cve_trackers, from_diff, flag, report, into_default_advisories, brew_event, noop):
    """Find Red Hat Bugzilla bugs or add them to ADVISORY. Bugs can be
"swept" into the advisory either automatically (--mode sweep), or by
manually specifying one or more bugs using --mode list with the --id option.
Use cases are described below:

    Note: Using --id without --add is basically pointless

SWEEP: For this use-case the --group option MUST be provided. The --group
automatically determines the correct target-releases to search for bugs
claimed to be fixed, but not yet attached to advisories.
--check-builds flag forces bug validation with attached builds to rpm advisory.
It assumes builds have been attached and only attaches bugs with matching builds.
default --status: ['MODIFIED', 'ON_QA', 'VERIFIED']

LIST: The --group option is not required if you are specifying advisory
manually. Provide one or more --id's for manual bug addition. In LIST mode
you must provide a list of IDs to perform operation on with the --id option.
Supported operations: report with --report, attach with --attach and --into-default-advisories

DIFF: For this use case, you must provide the --between option using two
URLs to payloads.

QE: Find MODIFIED bugs for the target-releases, and set them to ON_QA.
The --group option MUST be provided. Cannot be used in combination
with --add, --use-default-advisory, --into-default-advisories, --exclude-status.

BLOCKER: List active blocker+ bugs for the target-releases.
The --group option MUST be provided. Cannot be used in combination with
--add, --use-default-advisory, --into-default-advisories.
default --status: ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_DEV', 'ON_QA']
Use --exclude_status to filter out from default status list.
By default --cve-trackers is True.

Using --use-default-advisory without a value set for the matching key
in the build-data will cause an error and elliott will exit in a
non-zero state. Use of this option silently overrides providing an
advisory with the --add option.

    Automatically add bugs with target-release matching 3.7.Z or 3.7.0
    to advisory 123456:

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --add 123456

    List bugs that WOULD be added to an advisory and have set the bro_ok flag on them (NOOP):

\b
    $ elliott --group openshift-3.7 find-bugs --mode sweep --flag bro_ok

    Attach bugs to their correct default advisories, e.g. operator-related bugs go to "extras" instead of the default "image":

\b
    $ elliott --group=openshift-4.4 find-bugs --mode=sweep --into-default-advisories

    Add two bugs to advisory 123456. Note that --group is not required
    because we're not auto searching:

\b
    $ elliott find-bugs --mode list --id 8675309 --id 7001337 --add 123456

    Add given list of bugs to the appropriate advisories. This would apply sweep logic to the given bugs
    grouping them to be attached to rpm/extras/image advisories

\b
    $ elliott -g openshift-4.8 find-bugs --mode list --id 8675309,7001337 --into-default-advisories

    Automatically find bugs for openshift-4.1 and attach them to the
    rpm advisory defined in ocp-build-data:

\b
    $ elliott --group=openshift-4.1 --mode sweep --use-default-advisory rpm

    Find bugs for 4.6 that are in MODIFIED state, and set them to ON_QA:

\b
    $ elliott --group=openshift-4.6 --mode qe

\b
    $ elliott --group=openshift-4.6 --mode blocker --report
    """
    # At most one of --add / --use-default-advisory / --into-default-advisories may be used.
    count_advisory_attach_flags = sum(map(bool, [advisory, default_advisory_type, into_default_advisories]))

    if mode != 'list' and len(id) > 0:
        raise click.BadParameter("Combining the automatic and manual bug attachment options is not supported")

    if mode == 'list' and len(id) == 0:
        raise click.BadParameter("When using mode=list, you must provide a list of bug IDs")

    if mode == 'diff' and not len(from_diff) == 2:
        raise click.BadParameter("If using mode=diff, you must provide two payloads to compare")

    if count_advisory_attach_flags > 1:
        raise click.BadParameter("Use only one of --use-default-advisory, --add, or --into-default-advisories")

    if mode in ['qe', 'blocker'] and count_advisory_attach_flags > 0:
        raise click.BadParameter("Mode does not operate on an advisory. Do not specify any of "
                                 "`--use-default-advisory`, `--add`, or `--into-default-advisories`")

    runtime.initialize(mode="both")
    bz_data = runtime.gitdata.load_data(key='bugzilla').data
    bzapi = bzutil.get_bzapi(bz_data)

    # filter out bugs ART does not manage
    m = re.match(r"rhaos-(\d+).(\d+)",
                 runtime.branch)  # extract OpenShift version from the branch name. there should be a better way...
    if not m:
        raise ElliottFatalError(f"Unable to determine OpenShift version from branch name {runtime.branch}.")

    major_version = int(m[1])
    minor_version = int(m[2])

    # --use-default-advisory overrides any advisory given via --add (documented behavior).
    if default_advisory_type is not None:
        advisory = find_default_advisory(runtime, default_advisory_type)

    if mode in ['sweep', 'qe', 'blocker']:
        # NOTE(review): despite the docstring ("By default --cve-trackers is True"),
        # an unset --cve-trackers only defaults to True in blocker mode — confirm intent.
        if not cve_trackers:
            if mode == 'blocker':
                cve_trackers = True
            else:
                cve_trackers = False

        if not status:  # use default status filter according to mode
            if mode == 'sweep':
                status = ['MODIFIED', 'ON_QA', 'VERIFIED']
            if mode == 'qe':
                status = ['MODIFIED']
            if mode == 'blocker':
                status = ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_DEV', 'ON_QA']

        # qe mode ignores --exclude-status (CLI validation forbids it there anyway).
        if mode != 'qe' and exclude_status:
            status = set(status) - set(exclude_status)

        green_prefix(f"Searching for bugs with status {' '.join(status)} and target release(s):")
        click.echo(" {tr}".format(tr=", ".join(bz_data['target_release'])))

        search_flag = 'blocker+' if mode == 'blocker' else None
        bugs = bzutil.search_for_bugs(bz_data, status, flag=search_flag, filter_out_security_bugs=not(cve_trackers),
                                      verbose=runtime.debug)

        # Determine a cutoff timestamp so bugs that changed status after the
        # assembly basis event (or an explicitly given brew event) are excluded.
        sweep_cutoff_timestamp = 0
        if brew_event:
            green_print(f"Using command line specified cutoff event {runtime.assembly_basis_event}...")
            sweep_cutoff_timestamp = runtime.build_retrying_koji_client().getEvent(brew_event)["ts"]
        elif runtime.assembly_basis_event:
            green_print(f"Determining approximate cutoff timestamp from basis event {runtime.assembly_basis_event}...")
            brew_api = runtime.build_retrying_koji_client()
            sweep_cutoff_timestamp = bzutil.approximate_cutoff_timestamp(runtime.assembly_basis_event, brew_api,
                                                                         runtime.rpm_metas() + runtime.image_metas())

        if sweep_cutoff_timestamp:
            green_print(f"Filtering bugs that have changed to one of the desired statuses before the cutoff time {datetime.utcfromtimestamp(sweep_cutoff_timestamp)}...")
            qualified_bugs = bzutil.filter_bugs_by_cutoff_event(bzapi, bugs, status, sweep_cutoff_timestamp)
            click.echo(f"{len(qualified_bugs)} of {len(bugs)} bugs are qualified for the cutoff time {datetime.utcfromtimestamp(sweep_cutoff_timestamp)}...")
            bugs = qualified_bugs

        # Loads included/excluded bugs from assembly config
        issues_config = assembly_issues_config(runtime.get_releases_config(), runtime.assembly)
        # JIRA issues are not supported yet. Only loads issues with integer IDs.
        included_bug_ids: Set[int] = {int(issue["id"]) for issue in issues_config.include if isinstance(issue["id"], int) or issue["id"].isdigit()}
        excluded_bug_ids: Set[int] = {int(issue["id"]) for issue in issues_config.exclude if isinstance(issue["id"], int) or issue["id"].isdigit()}
        if included_bug_ids & excluded_bug_ids:
            raise ValueError(f"The following bugs are defined in both 'include' and 'exclude': {included_bug_ids & excluded_bug_ids}")
        if included_bug_ids:
            yellow_print(f"The following bugs will be additionally included because they are explicitly defined in the assembly config: {included_bug_ids}")
            included_bugs = bzapi.getbugs(included_bug_ids)
            bugs.extend(included_bugs)
        if excluded_bug_ids:
            yellow_print(f"The following bugs will be excluded because they are explicitly defined in the assembly config: {excluded_bug_ids}")
            bugs = [bug for bug in bugs if bug.id not in excluded_bug_ids]

    elif mode == 'list':
        bugs = [bzapi.getbug(i) for i in cli_opts.id_convert(id)]
        if not into_default_advisories:
            # Plain list mode: operate directly on the given advisory and return.
            mode_list(advisory=advisory, bugs=bugs, flags=flag, report=report, noop=noop)
            return
    elif mode == 'diff':
        click.echo(runtime.working_dir)
        bug_id_strings = openshiftclient.get_bug_list(runtime.working_dir, from_diff[0], from_diff[1])
        bugs = [bzapi.getbug(i) for i in bug_id_strings]

    # Drop bugs whose target release doesn't match this group's OpenShift version.
    filtered_bugs = filter_bugs(bugs, major_version, minor_version, runtime)
    green_prefix(f"Found {len(filtered_bugs)} bugs ({len(bugs) - len(filtered_bugs)} ignored): ")
    bugs = filtered_bugs
    click.echo(", ".join(sorted(str(b.bug_id) for b in bugs)))

    if mode == 'qe':
        for bug in bugs:
            bzutil.set_state(bug, 'ON_QA', noop=noop, comment_for_release=f"{major_version}.{minor_version}")

    if len(flag) > 0:
        add_flags(bugs=bugs, flags=flag, noop=noop)

    if report:
        print_report(bugs)

    if advisory and not default_advisory_type:  # `--add ADVISORY_NUMBER` should respect the user's wish and attach all available bugs to whatever advisory is specified.
        errata.add_bugs_with_retry(advisory, bugs, noop=noop)
        return

    # If --use-default-advisory or --into-default-advisories is given, we need to determine which bugs should be swept into which advisory.
    # Otherwise we don't need to sweep bugs at all.
    if not (into_default_advisories or default_advisory_type):
        return

    # key is impetus ("rpm", "image", "extras"), value is a set of bug IDs.
    impetus_bugs = {
        "rpm": set(),
        "image": set(),
        "extras": set()
    }

    # @lmeyer: simple and stupid would still be keeping the logic in python,
    # possibly with config flags for branched logic.
    # until that logic becomes too ugly to keep in python, i suppose..
    if major_version < 4:  # for 3.x, all bugs should go to the rpm advisory
        impetus_bugs["rpm"] = set(bugs)
    else:  # for 4.x
        # sweep rpm cve trackers into "rpm" advisory
        rpm_bugs = dict()
        if mode == 'sweep' and cve_trackers:
            rpm_bugs = bzutil.get_valid_rpm_cves(bugs)
            green_prefix("RPM CVEs found: ")
            click.echo(sorted(b.id for b in rpm_bugs))

        if rpm_bugs:
            # if --check-builds flag is set
            # only attach bugs that have corresponding brew builds attached to rpm advisory
            if check_builds:
                click.echo("Validating bugs with builds attached to the rpm advisory")
                attached_builds = errata.get_advisory_nvrs(runtime.group_config.advisories["rpm"])
                packages = attached_builds.keys()
                not_found = []
                for bug, package_name in rpm_bugs.items():
                    if package_name not in packages:
                        not_found.append((bug.id, package_name))
                    else:
                        click.echo(f"Build found for #{bug.id}, {package_name}")
                        impetus_bugs["rpm"].add(bug)

                if not_found:
                    red_prefix("RPM CVE Warning: ")
                    click.echo("The following CVE (bug, package) were found but not attached, because no corresponding brew builds were found attached to the rpm advisory. First attach builds and then rerun to attach the bugs")
                    click.echo(not_found)
            else:
                click.echo("Skipping attaching RPM CVEs. Use --check-builds flag to validate with builds.")

        impetus_bugs["extras"] = extras_bugs(bugs)

        # all other bugs should go into "image" advisory
        impetus_bugs["image"] = set(bugs) - impetus_bugs["extras"] - rpm_bugs.keys()

    if default_advisory_type and impetus_bugs.get(default_advisory_type):
        errata.add_bugs_with_retry(advisory, impetus_bugs[default_advisory_type], noop=noop)
    elif into_default_advisories:
        for impetus, bugs in impetus_bugs.items():
            if bugs:
                green_prefix(f'{impetus} advisory: ')
                errata.add_bugs_with_retry(runtime.group_config.advisories[impetus], bugs, noop=noop)
def tag_builds_cli(runtime: Runtime, advisories: Tuple[int], default_advisory_type: str, product_version: str,
                   builds: Tuple[str], tag: str, dont_untag: bool, dry_run: bool):
    """ Tag builds into Brew tag and optionally untag unspecified builds.

    Example 1: Tag RHEL7 RPMs that on ocp-build-data recorded advisory into rhaos-4.3-rhel-7-image-build

    $ elliott --group=openshift-4.3 tag-builds --use-default-advisory rpm --product-version RHEL-7-OSE-4.3 --tag rhaos-4.3-rhel-7-image-build

    Example 2: Tag RHEL8 RPMs that are on advisory 55016 into rhaos-4.3-rhel-8-image-build

    $ elliott --group=openshift-4.3 tag-builds --advisory 55016 --product-version OSE-4.4-RHEL-8 --tag rhaos-4.3-rhel-8-image-build

    Example 3: Tag specified builds into rhaos-4.3-rhel-8-image-build

    $ elliott --group=openshift-4.3 tag-builds --build buildah-1.11.6-6.rhaos4.3.el8 --build openshift-4.3.23-202005230952.g1.b596217.el8 --tag rhaos-4.3-rhel-8-image-build

    :raises ElliottFatalError: if any build fails to be tagged or untagged.
    """
    # Build sources are mutually exclusive: explicit --build NVRs, explicit
    # advisories, or the group's default advisory.
    if advisories and builds:
        raise click.BadParameter('Use only one of --build or --advisory/-a.')
    if advisories and default_advisory_type:
        raise click.BadParameter(
            'Use only one of --use-default-advisory or --advisory/-a.')
    if default_advisory_type and builds:
        raise click.BadParameter(
            'Use only one of --build or --use-default-advisory.')
    if product_version and not advisories and not default_advisory_type:
        raise click.BadParameter(
            '--product-version should only be used with --use-default-advisory or --advisory/-a.'
        )
    runtime.initialize()
    logger = runtime.logger

    if default_advisory_type:
        advisories = (find_default_advisory(runtime, default_advisory_type), )

    all_builds = set()  # All Brew builds that should be in the tag
    if advisories:
        errata_session = requests.session()
        for advisory in advisories:
            logger.info(
                f"Fetching attached Brew builds from advisory {advisory}...")
            errata_builds = errata.get_builds(advisory, errata_session)
            product_versions = list(errata_builds.keys())
            logger.debug(
                f"Advisory {advisory} has builds for {len(product_versions)} product versions: {product_versions}"
            )
            if product_version:  # Only this product version should be concerned
                product_versions = [product_version]
            for pv in product_versions:
                logger.debug(f"Extract Errata builds for product version {pv}")
                nvrs = _extract_nvrs_from_errata_build_list(errata_builds, pv)
                logger.info(
                    f"Found {len(nvrs)} builds from advisory {advisory} with product version {pv}"
                )
                logger.debug(
                    f"The following builds are found for product version {pv}:\n\t{list(nvrs)}"
                )
                all_builds |= set(nvrs)

    brew_session = koji.ClientSession(runtime.group_config.urls.brewhub or constants.BREW_HUB)
    if builds:  # NVRs are directly specified with --build
        build_objs = brew.get_build_objects(list(builds), brew_session)
        all_builds = {build["nvr"] for build in build_objs}

    click.echo(
        f"The following {len(all_builds)} build(s) should be in tag {tag}:")
    for nvr in all_builds:
        green_print(f"\t{nvr}")

    # get NVRs that have been tagged
    tagged_build_objs = brew_session.listTagged(tag, latest=False, inherit=False)
    tagged_builds = {build["nvr"] for build in tagged_build_objs}

    # get NVRs that should be tagged
    missing_builds = all_builds - tagged_builds
    click.echo(f"{len(missing_builds)} build(s) need to be tagged into {tag}:")
    for nvr in missing_builds:
        green_print(f"\t{nvr}")

    # get NVRs that should be untagged
    extra_builds = tagged_builds - all_builds
    click.echo(f"{len(extra_builds)} build(s) need to be untagged from {tag}:")
    for nvr in extra_builds:
        green_print(f"\t{nvr}")

    if dry_run:
        yellow_print("Dry run: Do nothing.")
        return

    brew_session.gssapi_login()

    # FIX: failed_to_untag was previously initialized inside `if not dont_untag:`
    # but read unconditionally below, raising NameError with --dont-untag.
    failed_to_untag = []
    if not dont_untag:
        # untag extra builds
        extra_builds = list(extra_builds)
        logger.info(f"Untagging {len(extra_builds)} build(s) from {tag}...")
        multicall_tasks = brew.untag_builds(tag, extra_builds, brew_session)
        for index, task in enumerate(multicall_tasks):
            # FIX: nvr must be bound before the try block; previously it was only
            # assigned in the except branch, so the success message printed a
            # stale value from the earlier display loops.
            nvr = extra_builds[index]
            try:
                task.result  # raises if the untag call faulted
                click.echo(f"{nvr} has been successfully untagged from {tag}")
            except Exception as ex:
                failed_to_untag.append(nvr)
                logger.error(f"Failed to untag {nvr}: {ex}")

    # tag missing builds
    missing_builds = list(missing_builds)
    task_id_nvr_map = {}  # Brew task ID -> NVR being tagged
    logger.info(f"Tagging {len(missing_builds)} build(s) into {tag}...")
    multicall_tasks = brew.tag_builds(tag, missing_builds, brew_session)
    failed_to_tag = []
    for index, task in enumerate(multicall_tasks):
        nvr = missing_builds[index]
        try:
            task_id = task.result
            task_id_nvr_map[task_id] = nvr
        except Exception as ex:
            failed_to_tag.append(nvr)
            logger.error(f"Failed to tag {nvr}: {ex}")

    if task_id_nvr_map:
        # wait for tag task to finish
        logger.info("Waiting for tag tasks to finish")
        brew.wait_tasks(task_id_nvr_map.keys(), brew_session, logger=logger)
        # get tagging results
        stopped_tasks = list(task_id_nvr_map.keys())
        with brew_session.multicall(strict=False) as m:
            multicall_tasks = [
                m.getTaskResult(task_id, raise_fault=False)
                for task_id in stopped_tasks
            ]
        for index, t in enumerate(multicall_tasks):
            task_id = stopped_tasks[index]
            nvr = task_id_nvr_map[task_id]
            tag_res = t.result
            logger.debug(
                f"Tagging task {task_id} {nvr} returned result {tag_res}")
            if tag_res and 'faultCode' in tag_res:
                # "already tagged" faults are benign; anything else is a failure.
                # FIX: the success message used to be printed before this fault
                # check, claiming success even for failed tasks.
                if "already tagged" not in tag_res["faultString"]:
                    failed_to_tag.append(nvr)
                    logger.error(
                        f'Failed to tag {nvr} into {tag}: {tag_res["faultString"]}'
                    )
                else:
                    click.echo(f"{nvr} has been successfully tagged into {tag}")
            else:
                click.echo(f"{nvr} has been successfully tagged into {tag}")

    if failed_to_untag:
        red_print("The following builds were failed to untag:")
        for nvr in failed_to_untag:
            red_print(f"\t{nvr}")
    elif not dont_untag:
        green_print(
            f"All unspecified builds have been successfully untagged from {tag}."
        )
    if failed_to_tag:
        red_print("The following builds were failed to tag:")
        for nvr in failed_to_tag:
            red_print(f"\t{nvr}")
    else:
        green_print(f"All builds have been successfully tagged into {tag}.")
    if failed_to_untag or failed_to_tag:
        raise exceptions.ElliottFatalError(
            "Not all builds were successfully tagged/untagged.")
def create_cli(ctx, runtime, errata_type, kind, impetus, date, assigned_to, manager, package_owner, with_placeholder, yes, bugs):
    """Create a new advisory. The kind of advisory must be specified with
    '--kind'. Valid choices are 'rpm' and 'image'.

    You MUST specify a group (ex: "openshift-3.9") manually using the
    --group option. See examples below.

    You must set a Release Date by providing a YYYY-Mon-DD formatted string to
    the --date option.

    The default behavior for this command is to show what the generated
    advisory would look like. The raw JSON used to create the advisory will be
    printed to the screen instead of posted to the Errata Tool API.

    The impetus option only affects the metadata added to the new advisory and
    its synopsis.

    The --assigned-to, --manager and --package-owner options are required.
    They are the email addresses of the parties responsible for managing and
    approving the advisory.

    Adding a list of bug ids with one or more --bugs arguments attaches those
    bugs to the advisory on creation. When creating a security advisory, the
    list of bugs will also be checked for any CVE flaw bugs which they are
    blocking, and those will be added as well. Any CVE flaw bugs being added
    will also calculate the Impact for the release if its type is RHSA.

    Provide the '--yes' or '-y' option to confirm creation of the advisory.

    \b
    PREVIEW an RPM Advisory 21 days from now (the default release date) for OSE 3.9:

    \b
    $ elliott --group openshift-3.9 create

    \b
    CREATE Image Advisory for the 3.5 series on the first Monday in March:

    \b
    $ elliott --group openshift-3.5 create --yes -k image --date 2018-Mar-05
    """
    # Perform sanity checks and provide default values.
    # RHSA advisories must carry at least one tracker bug and always use the
    # 'cve' impetus; everything else defaults to 'standard'.
    if errata_type == 'RHSA':
        if not bugs:
            raise ElliottFatalError(
                "When creating an RHSA, you must provide a list of bug id(s) using one or more `--bug` options.")
        if not impetus:
            impetus = 'cve'
        elif impetus != 'cve':
            raise ElliottFatalError("Invalid impetus")
    elif not impetus:
        impetus = 'standard'

    runtime.initialize()
    et_data = runtime.gitdata.load_data(key='erratatool').data
    bz_data = runtime.gitdata.load_data(key='bugzilla').data

    # User entered a valid value for --date; set the release date.
    # Raises ValueError if the string does not match the YMD format.
    release_date = datetime.datetime.strptime(date, YMD)

    ######################################################################
    # Resolve the bug list: validate the given trackers and, for RHSA,
    # pull in the CVE flaw bugs they block plus the computed Impact.
    flaw_cve_map = {}  # flaw bug id -> CVE alias
    impact = None      # stays None for non-RHSA advisories
    unique_bugs = set(bugs)

    if bugs:
        bzapi = elliottlib.bzutil.get_bzapi(bz_data)
        LOGGER.info("Fetching bugs {} from Bugzilla...".format(
            " ".join(map(str, bugs))))
        bug_objects = bzapi.getbugs(bugs)
        # Assert bugs are viable for a new advisory.
        _assert_bugs_are_viable(errata_type, bugs, bug_objects)
        if errata_type == 'RHSA':
            LOGGER.info("Fetching flaw bugs for trackers {}...".format(
                " ".join(map(str, bugs))))
            tracker_flaws_map = elliottlib.bzutil.get_tracker_flaws_map(bzapi, bug_objects)
            impact = elliottlib.bzutil.get_highest_impact(bug_objects, tracker_flaws_map)
            # Flatten {tracker: [flaws]} into a single list of flaw bugs.
            flaw_bugs = [flaw for tracker, flaws in tracker_flaws_map.items() for flaw in flaws]
            flaw_cve_map = elliottlib.bzutil.get_flaw_aliases(flaw_bugs)
            # Attach the flaw bugs alongside the trackers.
            unique_bugs |= set(flaw_cve_map.keys())
    ######################################################################

    try:
        erratum = elliottlib.errata.new_erratum(
            et_data,
            errata_type=errata_type,
            kind=kind,
            boilerplate_name=(impetus if impetus != "standard" else kind),
            release_date=release_date.strftime(YMD),
            assigned_to=assigned_to,
            manager=manager,
            package_owner=package_owner,
            impact=impact,
            cves=' '.join(flaw_cve_map.values())
        )
    except elliottlib.exceptions.ErrataToolUnauthorizedException:
        exit_unauthorized()
    except elliottlib.exceptions.ErrataToolError as ex:
        raise ElliottFatalError(getattr(ex, 'message', repr(ex)))

    erratum.addBugs(unique_bugs)

    if yes:
        erratum.commit()
        green_prefix("Created new advisory: ")
        click.echo(str(erratum))
        if errata_type == 'RHSA':
            yellow_print("Remember to manually set the Security Reviewer in the Errata Tool Web UI")

        # This is a little strange, I grant you that. For reference you
        # may wish to review the click docs
        #
        # http://click.pocoo.org/5/advanced/#invoking-other-commands
        #
        # You may be thinking, "But, add_metadata doesn't take keyword
        # arguments!" and that would be correct. However, we're not
        # calling that function directly. We actually use the context
        # 'invoke' method to call the _command_ (remember, it's wrapped
        # with click to create a 'command'). 'invoke' ensures the correct
        # options/arguments are mapped to the right parameters.
        ctx.invoke(add_metadata_cli, kind=kind, impetus=impetus, advisory=erratum.errata_id)
        # Echo again: the erratum state may have changed after add_metadata.
        click.echo(str(erratum))

        if with_placeholder:
            click.echo("Creating and attaching placeholder bug...")
            ctx.invoke(create_placeholder_cli, kind=kind, advisory=erratum.errata_id)
    else:
        # Dry run: show the advisory JSON that would have been posted.
        green_prefix("Would have created advisory: ")
        click.echo("")
        click.echo(erratum)