def __init__(self, meta_type, runtime, data_obj):
    """
    Load and validate metadata for one ART component.

    :param meta_type: a string. Index to the sub-class <'rpm'|'image'>.
    :param runtime: a Runtime object.
    :param data_obj: the ocp-build-data object to load as metadata (provides
        base_dir, filename, path, key and the raw config dict).
    :raises ValueError: if the config has no 'name' field or an invalid 'mode'.
    """
    self.meta_type = meta_type
    self.runtime = runtime
    self.data_obj = data_obj
    self.base_dir = data_obj.base_dir
    self.config_filename = data_obj.filename
    self.full_config_path = data_obj.path

    # Some config filenames have suffixes to avoid name collisions; strip off the suffix to find the real
    # distgit repo name (which must be combined with the distgit namespace).
    # e.g. openshift-enterprise-mediawiki.apb.yml
    #      distgit_key=openshift-enterprise-mediawiki.apb
    #      name (repo name)=openshift-enterprise-mediawiki
    self.distgit_key = data_obj.key
    self.name = self.distgit_key.split('.')[0]  # Split off any '.apb' style differentiator (if present)

    self.runtime.logger.debug("Loading metadata from {}".format(self.full_config_path))

    self.raw_config = Model(data_obj.data)  # Config straight from ocp-build-data
    # Explicit raise instead of `assert` so the validation survives `python -O`.
    if self.raw_config.name is Missing:
        raise ValueError('Metadata config {} must define "name"'.format(self.config_filename))

    # Apply any assembly-level overrides on top of the raw config.
    self.config = assembly_metadata_config(runtime.get_releases_config(), runtime.assembly, meta_type, self.distgit_key, self.raw_config)
    self.namespace, self._component_name = Metadata.extract_component_info(meta_type, self.name, self.config)

    self.mode = self.config.get('mode', CONFIG_MODE_DEFAULT).lower()
    if self.mode not in CONFIG_MODES:
        raise ValueError('Invalid mode for {}'.format(self.config_filename))
    self.enabled = (self.mode == CONFIG_MODE_DEFAULT)

    self.qualified_name = "%s/%s" % (self.namespace, self.name)
    self.qualified_key = "%s/%s" % (self.namespace, self.distgit_key)

    # Includes information to identify the metadata being used with each log message
    self.logger = logutil.EntityLoggingAdapter(logger=self.runtime.logger, extra={'entity': self.qualified_key})

    self._distgit_repo = None  # lazily populated distgit repo handle
def from_image_member_deps(self, el_version: int, assembly: str, releases_config: Model, image_meta: ImageMetadata, rpm_map: Dict[str, RPMMetadata]) -> Dict[str, Dict]:
    """ Returns RPM builds defined in image member dependencies
    :param el_version: RHEL version
    :param assembly: Assembly name to query. If None, this method will return true latest builds.
    :param releases_config: a Model for releases.yaml
    :param image_meta: An instance of ImageMetadata
    :param rpm_map: Map of rpm_distgit_key -> RPMMetadata
    :return: a dict; keys are component names, values are Brew build dicts
    """
    builds_by_component: Dict[str, Dict] = {}  # keys are rpm component names, values are brew build dicts
    meta_config = assembly_metadata_config(releases_config, assembly, 'image', image_meta.distgit_key, image_meta.config)

    # honor image member dependencies: collect nvrs declared for this RHEL version,
    # keyed by rpm package name
    nvrs_by_name = {}
    for dep in meta_config.dependencies.rpms:
        dep_nvr = dep[f"el{el_version}"]
        if dep_nvr:
            nvrs_by_name[parse_nvr(dep_nvr)["name"]] = dep_nvr

    if not nvrs_by_name:
        return builds_by_component

    nvr_list = list(nvrs_by_name.values())
    self._logger.info("Found %s NVRs defined in image member '%s' dependencies. Fetching build infos from Brew...", len(nvr_list), image_meta.distgit_key)
    build_dicts = self._get_builds(nvr_list)

    # Every declared nvr must resolve to an actual Brew build.
    unresolved = [nvr for nvr, build in zip(nvr_list, build_dicts) if not build]
    if unresolved:
        raise IOError(f"The following image member dependency NVRs don't exist: {unresolved}")

    # Make sure image member dependencies have no ART managed rpms.
    art_managed_names = {meta.rpm_name for meta in rpm_map.values()}
    art_overlap = {build["name"] for build in build_dicts} & art_managed_names
    if art_overlap:
        raise ValueError(f"attachableImage member dependencies cannot have ART managed RPMs: {art_overlap}")

    for build in build_dicts:
        builds_by_component[build["name"]] = build
    return builds_by_component
def from_pinned_by_is(self, el_version: int, assembly: str, releases_config: Model, rpm_map: Dict[str, RPMMetadata]) -> Dict[str, Dict]:
    """ Returns RPM builds pinned by "is" in assembly config
    :param el_version: RHEL version
    :param assembly: Assembly name to query. If None, this method will return true latest builds.
    :param releases_config: a Model for releases.yaml
    :param rpm_map: Map of rpm_distgit_key -> RPMMetadata
    :return: a dict; keys are component names, values are Brew build dicts
    """
    nvrs_by_name: Dict[str, str] = {}  # rpms pinned to the runtime assembly; keys are rpm component names, values are nvrs
    builds_by_component: Dict[str, Dict] = {}  # rpms pinned to the runtime assembly; keys are rpm component names, values are brew build dicts

    # Honor pinned rpm nvrs pinned by "is"
    for distgit_key, rpm_meta in rpm_map.items():
        meta_config = assembly_metadata_config(releases_config, assembly, 'rpm', distgit_key, rpm_meta.config)
        pinned_nvr = meta_config["is"][f"el{el_version}"]
        if not pinned_nvr:
            continue
        parsed = parse_nvr(str(pinned_nvr))
        # A pin must name the same package the distgit key produces.
        if parsed["name"] != rpm_meta.rpm_name:
            raise ValueError(f"RPM {pinned_nvr} is pinned to assembly {assembly} for distgit key {distgit_key}, but its package name is not {rpm_meta.rpm_name}.")
        nvrs_by_name[parsed["name"]] = pinned_nvr

    if not nvrs_by_name:
        return builds_by_component

    nvr_list = list(nvrs_by_name.values())
    self._logger.info("Found %s NVRs pinned to the runtime assembly %s. Fetching build infos from Brew...", len(nvr_list), assembly)
    build_dicts = self._get_builds(nvr_list)

    # Every pinned nvr must resolve to an actual Brew build.
    unresolved = [nvr for nvr, build in zip(nvr_list, build_dicts) if not build]
    if unresolved:
        raise IOError(f"The following NVRs pinned by 'is' don't exist: {unresolved}")

    for build in build_dicts:
        builds_by_component[build["name"]] = build
    return builds_by_component
def _fetch_builds_by_kind_rpm(runtime: Runtime, tag_pv_map: Dict[str, str], brew_session: koji.ClientSession):
    """
    Sweep candidate RPM builds from the configured Brew tags for attachment to an advisory.

    :param runtime: the Runtime object
    :param tag_pv_map: map of Brew tag name -> Errata Tool product version
    :param brew_session: a koji client session
    :return: sorted, de-duplicated list of (name, version, release, product_version) tuples
    :raises ElliottFatalError: if the assembly declares image-member- or RHCOS-specific
        rpm dependencies (such assemblies are custom releases and cannot be swept).
    """
    assembly = runtime.assembly
    if runtime.assembly_basis_event:
        LOGGER.warning(f'Constraining rpm search to stream assembly due to assembly basis event {runtime.assembly_basis_event}')
        # If an assembly has a basis event, its latest rpms can only be sourced from
        # "is:" or the stream assembly.
        assembly = 'stream'

        # ensures the runtime assembly doesn't include any image member specific or rhcos specific dependencies
        image_configs = [
            assembly_metadata_config(runtime.get_releases_config(), runtime.assembly, 'image', image.distgit_key, image.config)
            for _, image in runtime.image_map.items()
        ]
        if any(nvr for image_config in image_configs for dep in image_config.dependencies.rpms for _, nvr in dep.items()):
            # fixed typo: "appliable" -> "applicable"
            raise ElliottFatalError(f"Assembly {runtime.assembly} is not applicable for build sweep because it contains image member specific dependencies for a custom release.")
        rhcos_config = assembly_rhcos_config(runtime.get_releases_config(), runtime.assembly)
        if any(nvr for dep in rhcos_config.dependencies.rpms for _, nvr in dep.items()):
            # fixed typo: "appliable" -> "applicable"
            raise ElliottFatalError(f"Assembly {runtime.assembly} is not applicable for build sweep because it contains RHCOS specific dependencies for a custom release.")

    green_prefix('Generating list of rpms: ')
    click.echo('Hold on a moment, fetching Brew builds')
    builder = BuildFinder(brew_session, logger=LOGGER)

    builds: List[Dict] = []
    for tag in tag_pv_map:
        # keys are rpm component names, values are nvres
        component_builds: Dict[str, Dict] = builder.from_tag("rpm", tag, inherit=False, assembly=assembly, event=runtime.brew_event)

        if runtime.assembly_basis_event:
            # If an assembly has a basis event, rpms pinned by "is" and group dependencies
            # should take precedence over every build from the tag
            el_version = isolate_el_version_in_brew_tag(tag)
            if not el_version:
                continue  # Only honor pinned rpms if this tag is relevant to a RHEL version

            # Honors pinned NVRs by "is"
            pinned_by_is = builder.from_pinned_by_is(el_version, runtime.assembly, runtime.get_releases_config(), runtime.rpm_map)
            _ensure_accepted_tags(pinned_by_is.values(), brew_session, tag_pv_map)
            # Builds pinned by "is" should take precedence over every build from tag
            for component, pinned_build in pinned_by_is.items():
                if component in component_builds and pinned_build["id"] != component_builds[component]["id"]:
                    LOGGER.warning("Swapping stream nvr %s for pinned nvr %s...", component_builds[component]["nvr"], pinned_build["nvr"])
            component_builds.update(pinned_by_is)  # pinned rpms take precedence over those from tags

            # Honors group dependencies
            group_deps = builder.from_group_deps(el_version, runtime.group_config, runtime.rpm_map)  # the return value doesn't include any ART managed rpms
            # Group dependencies should take precedence over anything previously determined
            # except those pinned by "is".
            for component, dep_build in group_deps.items():
                if component in component_builds and dep_build["id"] != component_builds[component]["id"]:
                    LOGGER.warning("Swapping stream nvr %s for group dependency nvr %s...", component_builds[component]["nvr"], dep_build["nvr"])
            component_builds.update(group_deps)

        builds.extend(component_builds.values())

    # Builds without an allowed tag cannot be attached; warn rather than fail.
    _ensure_accepted_tags(builds, brew_session, tag_pv_map, raise_exception=False)
    qualified_builds = [b for b in builds if "tag_name" in b]
    not_attachable_nvrs = [b["nvr"] for b in builds if "tag_name" not in b]

    if not_attachable_nvrs:
        yellow_print(f"The following NVRs will not be swept because they don't have allowed tags {list(tag_pv_map.keys())}:")
        for nvr in not_attachable_nvrs:
            yellow_print(f"\t{nvr}")

    click.echo("Filtering out shipped builds...")
    shipped = _find_shipped_builds([b["id"] for b in qualified_builds], brew_session)
    unshipped = [b for b in qualified_builds if b["id"] not in shipped]
    click.echo(f'Found {len(shipped)+len(unshipped)} builds, of which {len(unshipped)} are new.')
    nvrps = _gen_nvrp_tuples(unshipped, tag_pv_map)
    nvrps = sorted(set(nvrps))  # remove duplicates
    return nvrps
def test_asembly_metadata_config(self):
    meta_config = Model(dict_to_model={
        'owners': ['*****@*****.**'],
        'content': {
            'source': {
                'git': {
                    'url': '[email protected]:openshift-priv/kuryr-kubernetes.git',
                    'branch': {
                        'target': 'release-4.8',
                    }
                },
                'specfile': 'openshift-kuryr-kubernetes-rhel8.spec'
            }
        },
        'name': 'openshift-kuryr'
    })

    def assert_no_loss(cfg):
        # Fields the assembly does not override must be carried through unchanged.
        self.assertEqual(cfg.name, 'openshift-kuryr')
        self.assertEqual(len(cfg.owners), 1)
        self.assertEqual(cfg.owners[0], '*****@*****.**')

    config = assembly_metadata_config(self.releases_config, 'ART_1', 'rpm', 'openshift-kuryr', meta_config)
    assert_no_loss(config)
    # Check that things were overridden
    self.assertEqual(config.content.source.git.url, '[email protected]:jupierce/kuryr-kubernetes.git')
    self.assertEqual(config.content.source.git.branch.target, '1_hash')

    config = assembly_metadata_config(self.releases_config, 'ART_5', 'rpm', 'openshift-kuryr', meta_config)
    assert_no_loss(config)
    # Check that things were overridden
    self.assertEqual(config.content.source.git.url, '[email protected]:jupierce/kuryr-kubernetes.git')
    self.assertEqual(config.content.source.git.branch.target, '2_hash')

    config = assembly_metadata_config(self.releases_config, 'ART_6', 'rpm', 'openshift-kuryr', meta_config)
    assert_no_loss(config)
    # Check that things were overridden. ART_6 changes branches for all rpms
    # (comment reconstructed from a garbled line break — TODO confirm against upstream)
    self.assertEqual(config.content.source.git.url, '[email protected]:jupierce/kuryr-kubernetes.git')
    self.assertEqual(config.content.source.git.branch.target, 'customer_6')

    config = assembly_metadata_config(self.releases_config, 'ART_8', 'image', 'openshift-kuryr', meta_config)
    # Ensure no loss
    self.assertEqual(config.name, 'openshift-kuryr')
    self.assertEqual(config.content.source.git.url, '[email protected]:jupierce/kuryr-kubernetes.git')
    self.assertEqual(config.content.source.git.branch.target, '1_hash')
    # Ensure that 'is' comes from ART_8 and not ART_7
    self.assertEqual(config['is'], 'kuryr-nvr2')
    # Ensure that 'dependencies' were accumulate
    self.assertEqual(len(config.dependencies.rpms), 2)

    try:
        assembly_metadata_config(self.releases_config, 'ART_INFINITE', 'rpm', 'openshift-kuryr', meta_config)
        self.fail('Expected ValueError on assembly infinite recursion')
    except ValueError:
        pass
    except Exception as e:
        self.fail(f'Expected ValueError on assembly infinite recursion but got: {type(e)}: {e}')