def execute(self, init_build_dirs: Optional[bool] = False):
    """Execute the plugins defined in plugins_def.

    :param init_build_dirs: bool, whether to initialize build dirs
    :return: None
    """
    workflow = self.prepare_workflow()

    if init_build_dirs:
        workflow.build_dir.init_build_dirs(get_platforms(workflow.data), workflow.source)

    try:
        workflow.build_docker_image()
    except Exception as e:
        logger.error("task failed: %s", e)
        raise
    finally:
        # Whatever the reason a build fails, always write the workflow
        # data into the data file.
        workflow.data.save(self.get_context_dir())

    # OSBS2 TBD: OSBS used to log the original Dockerfile after executing the workflow.
    # It probably doesn't make sense to do that here, but it would be good to log the
    # Dockerfile somewhere at the end of the build process.
    logger.info(r"task finished successfully \o/")
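# The try/except/finally above guarantees the workflow data is persisted whether the
# build succeeds or fails, while the bare `raise` preserves the original traceback.
# A minimal, self-contained sketch of the same pattern; all names here are
# illustrative stand-ins, not the project's API:
def run_and_always_save(build, save, log_error):
    try:
        build()
    except Exception as e:
        log_error("task failed: %s", e)
        raise  # re-raise; the finally block still runs before propagation
    finally:
        save()  # runs on success, on failure, and on re-raise alike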
def run(self):
    """Run the plugin."""
    metadatas: Dict[str, Dict[str, Any]] = {}
    wf_data = self.workflow.data

    enabled_platforms = get_platforms(wf_data)
    if not enabled_platforms:
        raise ValueError("No enabled platforms.")

    for platform in enabled_platforms:
        koji_metadata, output_files = self._get_build_metadata(platform)
        self._update_remote_host_metadata(platform, koji_metadata)

        if not is_scratch_build(self.workflow):
            for output in output_files:
                wf_data.koji_upload_files.append({
                    "local_filename": output.filename,
                    "dest_filename": output.metadata["filename"],
                })

        metadatas[platform] = koji_metadata

    return metadatas
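# A minimal, self-contained sketch of the shape run() returns: a mapping from
# platform name to that platform's Koji metadata fragment. Platform names and
# metadata keys below are illustrative, not taken from a real build.
from typing import Any, Dict

def collect_per_platform(platforms, get_metadata) -> Dict[str, Dict[str, Any]]:
    """Fan a metadata-collection callback out over every enabled platform."""
    if not platforms:
        raise ValueError("No enabled platforms.")
    return {platform: get_metadata(platform) for platform in platforms}

# Example usage with a stub callback:
metadatas = collect_per_platform(
    ["x86_64", "aarch64"],
    lambda platform: {"arch": platform, "components": []},
)
assert set(metadatas) == {"x86_64", "aarch64"}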
def set_manifest_list_expectations(self, expected_media_types):
    if not self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY):
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because group manifests plugin did not run')
        return expected_media_types

    platforms = get_platforms(self.workflow)
    if not platforms:
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because we have no platforms list')
        return expected_media_types

    try:
        platform_to_goarch = get_platform_to_goarch_mapping(self.workflow)
    except KeyError:
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because there are no platform descriptors')
        return expected_media_types

    for plat in platforms:
        if platform_to_goarch[plat] == 'amd64':
            self.log.debug('amd64 was built, all media types available')
            return expected_media_types

    self.log.debug('amd64 was not built, only manifest list digest is available')
    return [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
def __init__(self, tasker, workflow, poll_interval=DEFAULT_POLL_INTERVAL,
             poll_timeout=DEFAULT_POLL_TIMEOUT):
    """
    :param tasker: ContainerTasker instance
    :param workflow: DockerBuildWorkflow instance
    :param poll_interval: int, seconds between polling for Koji build
    :param poll_timeout: int, max amount of seconds to wait for Koji build
    """
    super(KojiParentPlugin, self).__init__(tasker, workflow)

    self.koji_session = get_koji_session(self.workflow)

    self.poll_interval = poll_interval
    self.poll_timeout = poll_timeout

    self._base_image_nvr = None
    self._base_image_build = None
    self._parent_builds = {}
    self._poll_start = None
    self.platforms = get_platforms(self.workflow)
    # RegistryClient instances cached by registry name
    self.registry_clients = {}
    self._deep_manifest_list_inspection = get_deep_manifest_list_inspection(
        self.workflow, fallback=True)
def execute(self, init_build_dirs: Optional[bool] = False):
    """Execute the plugins defined in plugins_def.

    :param init_build_dirs: bool, whether to initialize build dirs
    :return: None
    """
    util.validate_with_schema({"plugins_conf": self.plugins_conf}, "schemas/plugins.json")

    workflow = self.prepare_workflow()

    if init_build_dirs:
        workflow.build_dir.init_build_dirs(get_platforms(workflow.data), workflow.source)

    try:
        workflow.build_docker_image()
    except Exception as e:
        logger.error("task failed: %s", e)
        raise

    # OSBS2 TBD: OSBS used to log the original Dockerfile after executing the workflow.
    # It probably doesn't make sense to do that here, but it would be good to log the
    # Dockerfile somewhere at the end of the build process.
    logger.info(r"task finished successfully \o/")
def __init__(self, workflow, poll_interval=5, blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
             repos=None, koji_target=None):
    """
    :param workflow: DockerBuildWorkflow instance
    :param poll_interval: int, seconds between polling Koji while waiting
                          for task completion
    :param blocksize: int, chunk size for downloading files from koji
    :param repos: list<str>: list of yum repo URLs to be used during
                  base filesystem creation. First value will also
                  be used as install_tree. Only baseurl value is used
                  from each repo file.
    :param koji_target: str, koji target name
    """
    # call parent constructor
    super(AddFilesystemPlugin, self).__init__(workflow)

    self.poll_interval = poll_interval
    self.blocksize = blocksize
    self.repos = repos or []
    self.architectures = get_platforms(self.workflow.data)
    self.scratch = util.is_scratch_build(self.workflow)
    self.koji_target = koji_target
    self.session = None
def get_manifest_list_only_expectation(self):
    """
    Get expectation for manifest list only

    :return: bool, expect manifest list only?
    """
    manifest_results = self.workflow.data.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY)
    if not manifest_results or not is_manifest_list(manifest_results.get("media_type")):
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because group manifests plugin did not run')
        return False

    platforms = get_platforms(self.workflow.data)
    if not platforms:
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because we have no platforms list')
        return False

    platform_to_goarch = self.workflow.conf.platform_to_goarch_mapping

    for plat in platforms:
        if platform_to_goarch[plat] == 'amd64':
            self.log.debug('amd64 was built, all media types available')
            return False

    self.log.debug('amd64 was not built, only manifest list digest is available')
    return True
def read_configs(self):
    self.odcs_config = get_config(self.workflow).get_odcs_config()
    if not self.odcs_config:
        raise SkipResolveComposesPlugin('ODCS config not found')

    data = self.workflow.source.config.compose
    if not data and not self.all_compose_ids:
        raise SkipResolveComposesPlugin('"compose" config not set and compose_ids not given')

    pulp_data = util.read_content_sets(self.workflow) or {}

    platforms = get_platforms(self.workflow)
    if platforms:
        platforms = sorted(platforms)  # sorted to keep predictable for tests

    koji_tag = None
    if self.koji_target:
        target_info = self.koji_session.getBuildTarget(self.koji_target, strict=True)
        koji_tag = target_info['build_tag_name']

    self.compose_config = ComposeConfig(data, pulp_data, self.odcs_config,
                                        koji_tag=koji_tag, arches=platforms)
def get_built_images(self, session: RegistrySession) -> List[BuiltImage]:
    """Get information about all the per-arch images that were built by the build tasks."""
    tag_conf = self.workflow.data.tag_conf
    client = RegistryClient(session)

    built_images = []

    for platform in get_platforms(self.workflow.data):
        # At this point, only the unique image has been built and pushed. Primary tags will
        # be pushed by this plugin, floating tags by the push_floating_tags plugin.
        image = tag_conf.get_unique_images_with_platform(platform)[0]
        manifest_digests = client.get_manifest_digests(image, versions=("v2", "oci"))

        if len(manifest_digests) != 1:
            raise RuntimeError(
                f"Expected to find a single manifest digest for {image}, "
                f"but found multiple: {manifest_digests}")

        manifest_version, manifest_digest = manifest_digests.popitem()
        built_images.append(BuiltImage(image, platform, manifest_digest, manifest_version))

    return built_images
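# BuiltImage is not defined in this snippet; a plausible minimal stand-in is a
# NamedTuple carrying the four values appended above. This definition is an
# assumption for illustration, not the project's actual one.
from typing import NamedTuple

class BuiltImage(NamedTuple):
    image: str             # pullable image reference (an ImageName in the real code)
    platform: str          # e.g. "x86_64"
    manifest_digest: str   # e.g. "sha256:..."
    manifest_version: str  # "v2" or "oci"

example = BuiltImage("registry.example.com/ns/app:tag", "x86_64", "sha256:abc123", "v2")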
def set_manifest_list_expectations(self):
    # Decide whether we expect v2schema2list based on whether
    # group_manifests grouped any manifests
    if self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY):
        self.expect_v2schema2list = True

    platforms = get_platforms(self.workflow)
    if not platforms:
        self.log.debug('Cannot check if only manifest list digest should be checked '
                       'because we have no platforms list')
        return

    try:
        platform_to_goarch = get_platform_to_goarch_mapping(self.workflow)
    except KeyError:
        self.log.debug('Cannot check if only manifest list digest should be checked '
                       'because there are no platform descriptors')
        return

    for plat in platforms:
        if platform_to_goarch[plat] == 'amd64':
            break
    else:
        self.log.debug('amd64 was not built, only manifest list digest is available')
        self.expect_v2schema2list_only = True
        self.expect_v2schema2 = False
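# The loop above relies on Python's for/else: the else branch runs only when the
# loop finishes without hitting `break`, i.e. when no platform maps to amd64.
# A standalone illustration of that control flow:
def built_amd64(platforms, platform_to_goarch):
    for plat in platforms:
        if platform_to_goarch.get(plat) == 'amd64':
            break  # amd64 found; the else branch is skipped
    else:
        return False  # loop exhausted without break: amd64 was not built
    return True

assert built_amd64(['x86_64'], {'x86_64': 'amd64'}) is True
assert built_amd64(['ppc64le'], {'ppc64le': 'ppc64le'}) is False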
def read_configs(self):
    self.odcs_config = get_config(self.workflow).get_odcs_config()
    if not self.odcs_config:
        raise SkipResolveComposesPlugin('ODCS config not found')

    data = self.workflow.source.config.compose
    if not data and not self.all_compose_ids:
        raise SkipResolveComposesPlugin('"compose" config not set and compose_ids not given')

    workdir = self.workflow.source.get_build_file_path()[1]
    file_path = os.path.join(workdir, REPO_CONTENT_SETS_CONFIG)
    pulp_data = None
    if os.path.exists(file_path):
        with open(file_path) as f:
            pulp_data = yaml.safe_load(f) or {}

    platforms = get_platforms(self.workflow)
    if platforms:
        platforms = sorted(platforms)  # sorted to keep predictable for tests

    self.compose_config = ComposeConfig(data, pulp_data, self.odcs_config, arches=platforms)
def read_configs(self):
    self.odcs_config = get_config(self.workflow).get_odcs_config()
    if not self.odcs_config:
        raise SkipResolveComposesPlugin('ODCS config not found')

    data = self.workflow.source.config.compose
    if not data and not self.all_compose_ids:
        raise SkipResolveComposesPlugin('"compose" config not set and compose_ids not given')

    workdir = self.workflow.source.get_build_file_path()[1]
    file_path = os.path.join(workdir, REPO_CONTENT_SETS_CONFIG)
    pulp_data = None
    if os.path.exists(file_path):
        pulp_data = read_yaml_from_file_path(file_path, 'schemas/content_sets.json') or {}

    platforms = get_platforms(self.workflow)
    if platforms:
        platforms = sorted(platforms)  # sorted to keep predictable for tests

    self.compose_config = ComposeConfig(data, pulp_data, self.odcs_config, arches=platforms)

    if self.compose_config.has_complete_repos():
        self.has_complete_repos = True
def get_manifest_list_only_expectation(self):
    """
    Get expectation for manifest list only

    :return: bool, expect manifest list only?
    """
    if not self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY):
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because group manifests plugin did not run')
        return False

    platforms = get_platforms(self.workflow)
    if not platforms:
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because we have no platforms list')
        return False

    try:
        platform_to_goarch = get_platform_to_goarch_mapping(self.workflow)
    except KeyError:
        self.log.debug('Cannot check if only manifest list digest should be returned '
                       'because there are no platform descriptors')
        return False

    for plat in platforms:
        if platform_to_goarch[plat] == 'amd64':
            self.log.debug('amd64 was built, all media types available')
            return False

    self.log.debug('amd64 was not built, only manifest list digest is available')
    return True
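# A pure-function distillation of the decision above, for clarity. Inputs are
# plain dicts and lists; the names are illustrative only, not the plugin API.
def expect_manifest_list_only(group_manifests_ran, platforms, platform_to_goarch):
    if not group_manifests_ran or not platforms or platform_to_goarch is None:
        return False  # cannot decide; assume all media types are available
    return all(platform_to_goarch.get(p) != 'amd64' for p in platforms)

assert expect_manifest_list_only(True, ['s390x'], {'s390x': 's390x'}) is True
assert expect_manifest_list_only(True, ['x86_64'], {'x86_64': 'amd64'}) is False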
def __init__(self, tasker, workflow, from_task_id=None, poll_interval=5,
             blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE, repos=None,
             architectures=None, architecture=None, koji_target=None):
    """
    :param tasker: ContainerTasker instance
    :param workflow: DockerBuildWorkflow instance
    :param from_task_id: int, use existing Koji image task ID
    :param poll_interval: int, seconds between polling Koji while waiting
                          for task completion
    :param blocksize: int, chunk size for streaming files from koji
    :param repos: list<str>: list of yum repo URLs to be used during
                  base filesystem creation. First value will also
                  be used as install_tree. Only baseurl value is used
                  from each repo file.
    :param architectures: list<str>, list of arches to build on (orchestrator) - UNUSED
    :param architecture: str, arch to build on (worker)
    :param koji_target: str, koji target name
    """
    # call parent constructor
    super(AddFilesystemPlugin, self).__init__(tasker, workflow)

    self.from_task_id = from_task_id
    self.poll_interval = poll_interval
    self.blocksize = blocksize
    self.repos = repos or []
    self.architectures = get_platforms(self.workflow)
    self.architecture = architecture
    self.scratch = util.is_scratch_build(self.workflow)
    self.koji_target = koji_target
    self.session = None
def is_orchestrator(self):
    """
    Check if the plugin is running in orchestrator.

    :return: bool
    """
    return bool(get_platforms(self.workflow))
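# The orchestrator is identified purely by the presence of a platforms list; a
# worker build has none. A minimal illustration with a stand-in value:
def is_orchestrator(platforms):
    return bool(platforms)

assert is_orchestrator(['x86_64', 'aarch64']) is True
assert is_orchestrator(None) is False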
def __init__(self, tasker, workflow, koji_hub=None, koji_proxyuser=None,
             koji_ssl_certs_dir=None, koji_krb_principal=None, koji_krb_keytab=None,
             from_task_id=None, poll_interval=5, blocksize=DEFAULT_DOWNLOAD_BLOCK_SIZE,
             repos=None, architectures=None, architecture=None, koji_target=None):
    """
    :param tasker: DockerTasker instance
    :param workflow: DockerBuildWorkflow instance
    :param koji_hub: str, koji hub (xmlrpc)
    :param koji_proxyuser: str, proxy user
    :param koji_ssl_certs_dir: str, path to "cert", "ca", and "serverca"
    :param koji_krb_principal: str, name of Kerberos principal
    :param koji_krb_keytab: str, Kerberos keytab
    :param from_task_id: int, use existing Koji image task ID
    :param poll_interval: int, seconds between polling Koji while waiting
                          for task completion
    :param blocksize: int, chunk size for streaming files from koji
    :param repos: list<str>: list of yum repo URLs to be used during
                  base filesystem creation. First value will also
                  be used as install_tree. Only baseurl value is used
                  from each repo file.
    :param architectures: list<str>, list of arches to build on (orchestrator) - UNUSED
    :param architecture: str, arch to build on (worker)
    :param koji_target: str, koji target name
    """
    # call parent constructor
    super(AddFilesystemPlugin, self).__init__(tasker, workflow)

    self.koji_fallback = {
        'hub_url': koji_hub,
        'auth': {
            'proxyuser': koji_proxyuser,
            'ssl_certs_dir': koji_ssl_certs_dir,
            'krb_principal': str(koji_krb_principal),
            'krb_keytab_path': str(koji_krb_keytab)
        }
    }

    self.from_task_id = from_task_id
    self.poll_interval = poll_interval
    self.blocksize = blocksize
    self.repos = repos or []
    self.architectures = get_platforms(self.workflow)
    self.is_orchestrator = True if self.architectures else False
    self.architecture = architecture
    self.scratch = util.is_scratch_build()
    self.koji_target = koji_target
def _validate_platforms_in_image(self, image):
    """Ensure that the image provides all platforms expected for the build."""
    expected_platforms = get_platforms(self.workflow)
    if not expected_platforms:
        self.log.info('Skipping validation of available platforms '
                      'because expected platforms are unknown')
        return

    if not image.registry:
        self.log.info('Cannot validate available platforms for base image '
                      'because base image registry is not defined')
        return

    try:
        platform_to_arch = get_platform_to_goarch_mapping(self.workflow)
    except KeyError:
        self.log.info('Cannot validate available platforms for base image '
                      'because platform descriptors are not defined')
        return

    manifest_list = self._get_manifest_list(image)

    if not manifest_list:
        if len(expected_platforms) == 1:
            self.log.warning('Skipping validation of available platforms for base image: '
                             'this is a single platform build and base image has no '
                             'manifest list')
            return
        else:
            raise RuntimeError('Unable to fetch manifest list for base image {}'
                               .format(image))

    all_manifests = manifest_list.json()['manifests']
    manifest_list_arches = set(manifest['platform']['architecture']
                               for manifest in all_manifests)

    expected_arches = set(platform_to_arch[platform]
                          for platform in expected_platforms)

    self.log.info('Manifest list arches: %s, expected arches: %s',
                  manifest_list_arches, expected_arches)

    missing_arches = expected_arches - manifest_list_arches
    if missing_arches:
        arches_str = ', '.join(sorted(missing_arches))
        raise RuntimeError('Base image {} not available for arches: {}'
                           .format(image, arches_str))

    self.log.info('Base image is a manifest list for all required platforms')
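# The core set arithmetic above, extracted as a self-contained helper. The
# manifest-list JSON layout ({"manifests": [{"platform": {"architecture": ...}}]})
# follows the Docker/OCI image index format; the rest is illustrative.
def missing_arches(manifest_list_json, expected_arches):
    available = {m['platform']['architecture'] for m in manifest_list_json['manifests']}
    return set(expected_arches) - available

example_list = {'manifests': [{'platform': {'architecture': 'amd64'}},
                              {'platform': {'architecture': 'arm64'}}]}
assert missing_arches(example_list, {'amd64', 'arm64'}) == set()
assert missing_arches(example_list, {'amd64', 's390x'}) == {'s390x'}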
def get_output(self, buildroot_id: str) -> List[Dict[str, Any]]:
    # Both binary and source build have log files.
    outputs: List[Dict[str, Any]] = []
    koji_upload_files = self.workflow.data.koji_upload_files

    osbs_logs = OSBSLogs(self.log, get_platforms(self.workflow.data))
    log_files_outputs = osbs_logs.get_log_files(
        self.workflow.osbs, self.workflow.pipeline_run_name)

    for output in log_files_outputs:
        metadata = output.metadata
        metadata['buildroot_id'] = buildroot_id
        outputs.append(metadata)
        koji_upload_files.append({
            "local_filename": output.filename,
            "dest_filename": metadata["filename"],
        })

    return outputs
def run(self) -> Optional[str]:
    """
    Run the plugin.

    This plugin extracts the operator manifest files from an image, saves
    them as a zip archive, and returns its path

    :return: str, path to operator manifests zip file
    """
    if not (has_operator_bundle_manifest(self.workflow)
            or has_operator_appregistry_manifest(self.workflow)):
        self.log.info("Operator manifests label not set in Dockerfile. Skipping")
        return None

    platforms = get_platforms(self.workflow.data)
    image: ImageName = self.workflow.data.tag_conf.get_unique_images_with_platform(
        platforms[0])[0]

    tmp_dir = tempfile.mkdtemp(dir=self.workflow.build_dir.any_platform.path)
    manifests_dir = os.path.join(tmp_dir, MANIFESTS_DIR_NAME)
    os.mkdir(manifests_dir)
    self.workflow.imageutil.extract_file_from_image(image, IMG_MANIFESTS_PATH, manifests_dir)

    if has_operator_bundle_manifest(self.workflow):
        self._verify_csv(manifests_dir)

    manifests_zipfile_path = (self.workflow.build_dir.any_platform.path /
                              OPERATOR_MANIFESTS_ARCHIVE)
    with zipfile.ZipFile(manifests_zipfile_path, 'w') as archive:
        for root, _, files in os.walk(manifests_dir):
            for f in files:
                filedir = os.path.relpath(root, manifests_dir)
                filepath = os.path.join(filedir, f)
                archive.write(os.path.join(root, f), filepath, zipfile.ZIP_DEFLATED)
        manifest_files = archive.namelist()
        self.log.debug("Archiving operator manifests: %s", manifest_files)

    shutil.rmtree(tmp_dir)

    return str(manifests_zipfile_path)
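# Self-contained sketch of the archiving pattern used above: walk a directory and
# store each file under its path relative to that directory, so the zip contains
# "subdir/file.yaml" rather than absolute paths. Names and paths are illustrative.
import os
import zipfile

def zip_tree(src_dir: str, zip_path: str) -> None:
    with zipfile.ZipFile(zip_path, 'w') as archive:
        for root, _, files in os.walk(src_dir):
            for name in files:
                arcname = os.path.join(os.path.relpath(root, src_dir), name)
                archive.write(os.path.join(root, name), arcname, zipfile.ZIP_DEFLATED)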
def __init__(self, workflow, build_kwargs=None, worker_build_image=None,
             config_kwargs=None, find_cluster_retry_delay=FIND_CLUSTER_RETRY_DELAY,
             failure_retry_delay=FAILURE_RETRY_DELAY, max_cluster_fails=MAX_CLUSTER_FAILS):
    """
    constructor

    :param workflow: DockerBuildWorkflow instance
    :param build_kwargs: dict, keyword arguments for starting worker builds
    :param worker_build_image: str, the builder image to use for worker builds
                               (not used, image is inherited from the orchestrator)
    :param config_kwargs: dict, keyword arguments to override worker configuration
    :param find_cluster_retry_delay: the delay in seconds to try again reaching a cluster
    :param failure_retry_delay: the delay in seconds to try again starting a build
    :param max_cluster_fails: the maximum number of times a cluster can fail before
                              being ignored
    """
    super(OrchestrateBuildPlugin, self).__init__(workflow)
    self.platforms = get_platforms(self.workflow.data)

    self.build_kwargs = build_kwargs or {}
    self.config_kwargs = config_kwargs or {}

    self.adjust_build_kwargs()
    self.adjust_config_kwargs()
    self.reactor_config = self.workflow.conf

    self.find_cluster_retry_delay = find_cluster_retry_delay
    self.failure_retry_delay = failure_retry_delay
    self.max_cluster_fails = max_cluster_fails
    self.koji_upload_dir = generate_koji_upload_dir()
    self.fs_task_id = self.get_fs_task_id()
    self.release = self.get_release()

    if worker_build_image:
        self.log.warning('worker_build_image is deprecated')

    self.worker_builds = []
    self.namespace = get_build_json().get('metadata', {}).get('namespace', None)
    self.build_image_digests = {}  # by platform
    self._openshift_session = None
    self.build_image_override = workflow.conf.build_image_override
def __init__(self, workflow, koji_target=None, signing_intent=None, compose_ids=tuple(),
             repourls=None, minimum_time_to_expire=MINIMUM_TIME_TO_EXPIRE):
    """
    :param workflow: DockerBuildWorkflow instance
    :param koji_target: str, koji target contains build tag to be used
                        when requesting compose from "tag"
    :param signing_intent: override the signing intent from git repo configuration
    :param compose_ids: use the given compose_ids instead of requesting a new one
    :param repourls: list of str, URLs to the repo files
    :param minimum_time_to_expire: int, used in deciding when to extend compose's time
                                   to expire in seconds
    """
    super(ResolveComposesPlugin, self).__init__(workflow)

    if signing_intent and compose_ids:
        raise ValueError('signing_intent and compose_ids cannot be used at the same time')

    self.signing_intent = signing_intent
    self.compose_ids = compose_ids
    self.koji_target = koji_target
    self.minimum_time_to_expire = minimum_time_to_expire

    self._koji_session = None
    self._odcs_client = None
    self.odcs_config = None
    self.compose_config = None
    self.composes_info = None
    self._parent_signing_intent = None
    self.repourls = repourls or []
    self.has_complete_repos = len(self.repourls) > 0
    self.plugin_result = self.workflow.data.prebuild_results.get(PLUGIN_KOJI_PARENT_KEY)
    self.all_compose_ids = list(self.compose_ids)
    self.new_compose_ids = []
    self.parent_compose_ids = []
    self.include_koji_repo = False
    self.yum_repourls = defaultdict(list)
    self.architectures = get_platforms(self.workflow.data)
def __init__(self, workflow, target=None, inject_proxy=None):
    """
    constructor

    :param workflow: DockerBuildWorkflow instance
    :param target: string, koji target to use as a source
    :param inject_proxy: set proxy server for this repo
    """
    super().__init__(workflow)
    self.target = target

    self.repourls = {}
    self.inject_proxy = inject_proxy
    self.yum_repos = defaultdict(list)
    self.allowed_domains = self.workflow.conf.yum_repo_allowed_domains
    self.include_koji_repo = False
    self._builder_ca_bundle = None
    self._ca_bundle_pem = None
    self.platforms = get_platforms(workflow.data)

    resolve_comp_result = self.workflow.data.plugins_results.get(PLUGIN_RESOLVE_COMPOSES_KEY)
    self.include_koji_repo = resolve_comp_result['include_koji_repo']
    self.repourls = resolve_comp_result['yum_repourls']
def __init__(self, tasker, workflow, build_kwargs, platforms=None, osbs_client_config=None,
             worker_build_image=None, config_kwargs=None,
             find_cluster_retry_delay=FIND_CLUSTER_RETRY_DELAY,
             failure_retry_delay=FAILURE_RETRY_DELAY,
             max_cluster_fails=MAX_CLUSTER_FAILS,
             url=None, verify_ssl=True, use_auth=True, goarch=None):
    """
    constructor

    :param tasker: ContainerTasker instance
    :param workflow: DockerBuildWorkflow instance
    :param build_kwargs: dict, keyword arguments for starting worker builds
    :param platforms: list<str>, platforms to build
                      (used via utils.get_orchestrator_platforms())
    :param osbs_client_config: str, path to directory containing osbs.conf
    :param worker_build_image: str, the builder image to use for worker builds
                               (not used, image is inherited from the orchestrator)
    :param config_kwargs: dict, keyword arguments to override worker configuration
    :param find_cluster_retry_delay: the delay in seconds to try again reaching a cluster
    :param failure_retry_delay: the delay in seconds to try again starting a build
    :param max_cluster_fails: the maximum number of times a cluster can fail before
                              being ignored
    :param goarch: dict, keys are platform, values are go language platform names
    """
    super(OrchestrateBuildPlugin, self).__init__(tasker, workflow)
    self.platforms = get_platforms(self.workflow)

    self.build_kwargs = build_kwargs
    self.osbs_client_config_fallback = osbs_client_config
    self.config_kwargs = config_kwargs or {}

    self.adjust_build_kwargs()
    self.validate_arrangement_version()
    self.adjust_config_kwargs()
    self.reactor_config = get_config(self.workflow)

    self.find_cluster_retry_delay = find_cluster_retry_delay
    self.failure_retry_delay = failure_retry_delay
    self.max_cluster_fails = max_cluster_fails
    self.koji_upload_dir = generate_koji_upload_dir()
    self.fs_task_id = self.get_fs_task_id()
    self.release = self.get_release()

    self.plat_des_fallback = []
    for plat, architecture in (goarch or {}).items():
        plat_dic = {'platform': plat, 'architecture': architecture}
        self.plat_des_fallback.append(plat_dic)

    self.openshift_fallback = {
        'url': url,
        'insecure': not verify_ssl,
        'auth': {'enable': use_auth}
    }

    if worker_build_image:
        self.log.warning('worker_build_image is deprecated')

    self.worker_builds = []
    self.namespace = get_build_json().get('metadata', {}).get('namespace', None)
    self.build_image_digests = {}  # by platform
    self._openshift_session = None
    self.build_image_override = get_build_image_override(workflow, {})
    self.platform_descriptors = get_platform_descriptors(self.workflow,
                                                         self.plat_des_fallback)
def combine_metadata_fragments(self) -> Dict[str, Any]:
    """Construct the CG metadata and collect the output files for upload later."""
    def add_buildroot_id(output: Output, buildroot_id: str) -> Output:
        output.metadata.update({'buildroot_id': buildroot_id})
        return Output(filename=output.filename, metadata=output.metadata)

    def add_log_type(output: Output) -> Output:
        output.metadata.update({'type': 'log', 'arch': 'noarch'})
        return Output(filename=output.filename, metadata=output.metadata)

    build = self.get_build()
    buildroot = self.get_buildroot()
    buildroot_id = buildroot[0]['id']

    # Collect the output files, which will be uploaded later.
    koji_upload_files = self.workflow.data.koji_upload_files

    output: List[Dict[str, Any]]  # List of metadatas
    # The corresponding output file, only has one for source build
    output_file: Optional[Output]

    output, output_file = self.get_output(buildroot_id)
    if output_file:
        koji_upload_files.append({
            "local_filename": output_file.filename,
            "dest_filename": output[0]["filename"],
        })

    # Collect log files
    osbs_logs = OSBSLogs(self.log, get_platforms(self.workflow.data))
    log_files_output = [
        add_log_type(add_buildroot_id(md, buildroot_id))
        for md in osbs_logs.get_log_files(self.osbs, self.workflow.pipeline_run_name)
    ]
    for log_file_output in log_files_output:
        output.append(log_file_output.metadata)
        koji_upload_files.append({
            "local_filename": log_file_output.filename,
            "dest_filename": log_file_output.metadata["filename"],
        })

    remote_source_file_outputs, kojifile_components = get_maven_metadata(self.workflow.data)

    # add maven components alongside RPM components
    for metadata in output:
        if metadata['type'] == 'docker-image':
            metadata['components'] += kojifile_components

    # add remote sources tarballs and remote sources json files to output
    for remote_source_output in [
        *get_source_tarballs_output(self.workflow),
        *get_remote_sources_json_output(self.workflow)
    ]:
        add_custom_type(remote_source_output, KOJI_BTYPE_REMOTE_SOURCES)
        remote_source = add_buildroot_id(remote_source_output, buildroot_id)
        output.append(remote_source.metadata)
        koji_upload_files.append({
            "local_filename": remote_source.filename,
            "dest_filename": remote_source.metadata["filename"],
        })

    for remote_source_file_output in remote_source_file_outputs:
        remote_source_file = add_buildroot_id(remote_source_file_output, buildroot_id)
        output.append(remote_source_file.metadata)
        koji_upload_files.append({
            "local_filename": remote_source_file_output.filename,
            "dest_filename": remote_source_file_output.metadata["filename"],
        })

    koji_metadata = {
        'metadata_version': 0,
        'build': build,
        'buildroots': buildroot,
        'output': output,
    }
    return koji_metadata
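# Top-level shape of the content-generator metadata assembled above, with
# illustrative placeholder fragments. metadata_version 0 matches the value the
# code itself sets for Koji's content generator API.
example_koji_metadata = {
    'metadata_version': 0,
    'build': {},                # from self.get_build()
    'buildroots': [{'id': 1}],  # from self.get_buildroot()
    'output': [],               # image, log, and remote-source entries collected above
}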
def __init__(self, tasker, workflow, build_kwargs, platforms=None, osbs_client_config=None,
             worker_build_image=None, config_kwargs=None,
             find_cluster_retry_delay=FIND_CLUSTER_RETRY_DELAY,
             failure_retry_delay=FAILURE_RETRY_DELAY,
             max_cluster_fails=MAX_CLUSTER_FAILS,
             url=None, verify_ssl=True, use_auth=True, goarch=None):
    """
    constructor

    :param tasker: DockerTasker instance
    :param workflow: DockerBuildWorkflow instance
    :param build_kwargs: dict, keyword arguments for starting worker builds
    :param platforms: list<str>, platforms to build
                      (used via utils.get_orchestrator_platforms())
    :param osbs_client_config: str, path to directory containing osbs.conf
    :param worker_build_image: str, the builder image to use for worker builds
                               (not used, image is inherited from the orchestrator)
    :param config_kwargs: dict, keyword arguments to override worker configuration
    :param find_cluster_retry_delay: the delay in seconds to try again reaching a cluster
    :param failure_retry_delay: the delay in seconds to try again starting a build
    :param max_cluster_fails: the maximum number of times a cluster can fail before
                              being ignored
    :param goarch: dict, keys are platform, values are go language platform names
    """
    super(OrchestrateBuildPlugin, self).__init__(tasker, workflow)
    self.platforms = get_platforms(self.workflow)

    self.build_kwargs = build_kwargs
    self.osbs_client_config_fallback = osbs_client_config
    self.config_kwargs = config_kwargs or {}

    self.adjust_build_kwargs()
    self.validate_arrangement_version()
    self.adjust_config_kwargs()
    self.reactor_config = get_config(self.workflow)

    self.find_cluster_retry_delay = find_cluster_retry_delay
    self.failure_retry_delay = failure_retry_delay
    self.max_cluster_fails = max_cluster_fails
    self.koji_upload_dir = self.get_koji_upload_dir()
    self.fs_task_id = self.get_fs_task_id()
    self.release = self.get_release()

    self.plat_des_fallback = []
    for plat, architecture in (goarch or {}).items():
        plat_dic = {'platform': plat, 'architecture': architecture}
        self.plat_des_fallback.append(plat_dic)

    self.openshift_fallback = {
        'url': url,
        'insecure': not verify_ssl,
        'auth': {'enable': use_auth}
    }

    if worker_build_image:
        self.log.warning('worker_build_image is deprecated')

    self.worker_builds = []
    self.namespace = get_build_json().get('metadata', {}).get('namespace', None)
    self.build_image_digests = {}  # by platform
    self._openshift_session = None
    self.build_image_override = get_build_image_override(workflow, {})
    self.platform_descriptors = get_platform_descriptors(self.workflow,
                                                         self.plat_des_fallback)
def _resolve_compose(self):
    odcs_config = get_config(self.workflow).get_odcs_config()
    odcs_client = get_odcs_session(self.workflow, self.odcs_fallback)
    self.read_configs_general()

    modules = self.data.get('modules', [])

    if not modules:
        raise RuntimeError('"compose" config has no modules, a module is required for '
                           'Flatpaks')

    source_spec = modules[0]
    if len(modules) > 1:
        self.log.info("compose config contains multiple modules, "
                      "using first module %s", source_spec)

    module = ModuleSpec.from_str(source_spec)
    self.log.info("Resolving module compose for name=%s, stream=%s, version=%s",
                  module.name, module.stream, module.version)

    noprofile_spec = module.to_str(include_profile=False)

    if self.compose_ids:
        if len(self.compose_ids) > 1:
            self.log.info("Multiple compose_ids, using first compose %d",
                          self.compose_ids[0])
        self.compose_id = self.compose_ids[0]

    if self.signing_intent_name is not None:
        signing_intent_name = self.signing_intent_name
    else:
        signing_intent_name = self.data.get('signing_intent',
                                            odcs_config.default_signing_intent)
    signing_intent = odcs_config.get_signing_intent_by_name(signing_intent_name)

    if self.compose_id is None:
        arches = sorted(get_platforms(self.workflow))
        self.compose_id = odcs_client.start_compose(source_type='module',
                                                    source=noprofile_spec,
                                                    sigkeys=signing_intent['keys'],
                                                    arches=arches)['id']

    compose_info = odcs_client.wait_for_compose(self.compose_id)
    if compose_info['state_name'] != "done":
        raise RuntimeError("Compose cannot be retrieved, state='%s'" %
                           compose_info['state_name'])

    compose_source = compose_info['source']
    self.log.info("Resolved list of modules: %s", compose_source)

    resolved_modules = self._resolve_modules(compose_source)
    base_module = resolved_modules[module.name]
    assert base_module.stream == module.stream
    if module.version is not None:
        assert base_module.version == module.version

    return ComposeInfo(source_spec=source_spec,
                       compose_id=self.compose_id,
                       base_module=base_module,
                       modules=resolved_modules,
                       repo_url=compose_info['result_repo'] + '/$basearch/os/',
                       signing_intent=signing_intent_name,
                       signing_intent_overridden=self.signing_intent_name is not None)
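# Module source specs passed to ModuleSpec.from_str() follow the
# NAME:STREAM[:VERSION][/PROFILE] convention, and to_str(include_profile=False)
# drops the profile. A rough stand-in parser, for illustration only; the real
# ModuleSpec handles more cases:
def parse_module_spec(spec: str):
    name_stream, _, profile = spec.partition('/')
    parts = name_stream.split(':')
    name, stream = parts[0], parts[1]
    version = parts[2] if len(parts) > 2 else None
    return name, stream, version, profile or None

assert parse_module_spec('flatpak-runtime:f36/buildroot') == (
    'flatpak-runtime', 'f36', None, 'buildroot')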
def run(self) -> Dict[str, Union[List, Dict[str, List[str]]]]:
    is_source_build = PLUGIN_FETCH_SOURCES_KEY in self.workflow.data.prebuild_results

    if not is_source_build and not is_flatpak_build(self.workflow):
        self.log.info('not a flatpak or source build, skipping plugin')
        return {'pushed_images': [],
                'repositories': self.get_repositories()}

    pushed_images = []
    wf_data = self.workflow.data
    tag_conf = wf_data.tag_conf

    images = []
    if is_source_build:
        source_image = self.source_get_unique_image()
        plugin_results = wf_data.buildstep_result[PLUGIN_SOURCE_CONTAINER_KEY]
        image = plugin_results['image_metadata']
        tag_conf.add_unique_image(source_image)
        images.append((image, source_image))
    else:
        for image_platform in get_platforms(self.workflow.data):
            plugin_results = wf_data.postbuild_results[PLUGIN_FLATPAK_CREATE_OCI]
            image = plugin_results[image_platform]['metadata']
            registry_image = tag_conf.get_unique_images_with_platform(image_platform)[0]
            images.append((image, registry_image))

    insecure = self.registry.get('insecure', False)

    docker_push_secret = self.registry.get('secret', None)
    self.log.info("Registry %s secret %s", self.registry['uri'], docker_push_secret)

    for image, registry_image in images:
        max_retries = DOCKER_PUSH_MAX_RETRIES

        for retry in range(max_retries + 1):
            self.push_with_skopeo(image, registry_image, insecure, docker_push_secret)

            if is_source_build:
                manifests_dict = get_all_manifests(registry_image, self.registry['uri'],
                                                   insecure, docker_push_secret,
                                                   versions=('v2',))
                try:
                    koji_source_manifest_response = manifests_dict['v2']
                except KeyError as exc:
                    raise RuntimeError(
                        f'Unable to fetch v2 schema 2 digest for {registry_image.to_str()}'
                    ) from exc

                wf_data.koji_source_manifest = koji_source_manifest_response.json()

            digests = get_manifest_digests(registry_image, self.registry['uri'],
                                           insecure, docker_push_secret)

            if not (digests.v2 or digests.oci) and (retry < max_retries):
                sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2 ** retry)
                self.log.info("Retrying push because V2 schema 2 or "
                              "OCI manifest not found in %is", sleep_time)
                time.sleep(sleep_time)
            else:
                break

        pushed_images.append(registry_image)

    self.log.info("All images were tagged and pushed")

    return {'pushed_images': pushed_images,
            'repositories': self.get_repositories()}
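# The retry loop above implements capped exponential backoff: sleep
# BACKOFF_FACTOR * 2**retry between attempts and stop once the digest shows up
# or the attempts run out. A standalone sketch with stand-in callbacks and
# constants (not the plugin's actual API):
import time

def push_with_backoff(push, digest_ready, max_retries=4, backoff_factor=1.0):
    for retry in range(max_retries + 1):
        push()
        if digest_ready() or retry == max_retries:
            break
        time.sleep(backoff_factor * (2 ** retry))  # 1s, 2s, 4s, 8s, ...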
def execute(self) -> None:
    """Build a container image for the platform specified in the task parameters.

    The built image will be pushed to the unique tag for this platform, which can be found
    in tag_conf.get_unique_images_with_platform() (where tag_conf comes from context data).
    """
    platform = self._params.platform
    data = self.load_workflow_data()
    enabled_platforms = util.get_platforms(data)
    flatpak = self._params.user_params.get('flatpak', False)
    squash_all = flatpak  # squash all layers for flatpak build

    if platform not in enabled_platforms:
        logger.info(
            "Platform %s is not enabled for this build (enabled platforms: %s). Exiting.",
            platform,
            enabled_platforms,
        )
        return

    config = self.load_config()
    build_dir = self.get_build_dir().platform_dir(platform)
    dest_tag = data.tag_conf.get_unique_images_with_platform(platform)[0]

    logger.info("Building for the %s platform from %s", platform, build_dir.dockerfile_path)

    with contextlib.ExitStack() as defer:
        defer.callback(logger.info, "Dockerfile used for build:\n%s",
                       build_dir.dockerfile_path.read_text())
        build_log_file = defer.enter_context(
            open(self.get_context_dir().get_platform_build_log(platform), 'w+'))
        remote_resource = self.acquire_remote_resource(config.remote_hosts)
        defer.callback(remote_resource.unlock)

        podman_remote = PodmanRemote.setup_for(
            remote_resource, registries_authfile=get_authfile_path(config.registry))

        # log the image+host for auditing purposes
        logger.info("Building image=%s on host=%s", dest_tag, remote_resource.host.hostname)

        output_lines = podman_remote.build_container(
            build_dir=build_dir,
            build_args=data.buildargs,
            dest_tag=dest_tag,
            squash_all=squash_all,
        )

        for line in output_lines:
            logger.info(line.rstrip())
            build_log_file.write(line)

        logger.info("Build finished successfully! Pushing image to %s", dest_tag)

        image_size_limit = config.image_size_limit['binary_image']
        image_size = podman_remote.get_image_size(dest_tag)
        if image_size > image_size_limit:
            raise ExceedsImageSizeError(
                'The size {} of image {} exceeds the limitation {} '
                'configured in reactor config.'.format(
                    image_size, dest_tag, image_size_limit))

        podman_remote.push_container(dest_tag, insecure=config.registry.get("insecure", False))
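# The ExitStack usage above registers cleanups as they become necessary:
# callbacks run LIFO on exit, so the remote host is unlocked and the Dockerfile
# is logged even if the build raises midway. A minimal illustration:
import contextlib

with contextlib.ExitStack() as defer:
    defer.callback(print, "runs last (registered first)")
    defer.callback(print, "runs first (registered last)")
    # ... body that may raise; both callbacks still run on exit ...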