def run(self):
    """Squash the image layers added by the build; optionally export a tar archive.

    Skips entirely when a build plugin flagged the result as not needing a
    squash. Marks the pre-squash image for removal in the exit phase.
    """
    if self.workflow.build_result.skip_layer_squash:
        # A build plugin asked us to skip squashing entirely.
        return
    archive_path = None
    export_info = None
    if self.save_archive:
        archive_path = os.path.join(self.workflow.source.workdir,
                                    EXPORTED_SQUASHED_IMAGE_NAME)
        export_info = {"path": archive_path}
    # load_image controls whether the squashed result is loaded back into
    # the Docker daemon; dont_load=True means "tarball only".
    squashed_id = Squash(log=self.log,
                         image=self.image,
                         from_layer=self.from_layer,
                         tag=self.tag,
                         output_path=archive_path,
                         load_image=not self.dont_load).run()
    # Older versions of the daemon return a bare hex ID without a prefix.
    if ':' not in squashed_id:
        squashed_id = 'sha256:{}'.format(squashed_id)
    if not self.dont_load:
        self.workflow.builder.image_id = squashed_id
    if self.save_archive:
        export_info.update(get_exported_image_metadata(archive_path,
                                                       IMAGE_TYPE_DOCKER_ARCHIVE))
        self.workflow.exported_image_sequence.append(export_info)
    defer_removal(self.workflow, self.image)
def run(self):
    """Tag the built image for each configured registry and push it.

    Digests are scraped from the push log output via extract_digest.
    Returns the list of pushed registry-qualified image names.
    """
    pushed = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = self.workflow.push_conf.add_docker_registry(
            registry, insecure=insecure)
        secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % image.registry)
            target = image.copy()
            target.registry = registry
            push_logs = self.tasker.tag_and_push_image(
                self.workflow.builder.image_id, target,
                insecure=insecure, force=True, dockercfg=secret)
            pushed.append(target)
            defer_removal(self.workflow, target)
            # The digest is parsed out of the push output, if present.
            digest = self.extract_digest(push_logs, image.tag or 'latest')
            if digest:
                push_conf_registry.digests[target.to_str(registry=False)] = digest
    self.log.info("All images were tagged and pushed")
    return pushed
def run(self):
    """Squash image layers, always exporting a tarball; optionally load it back.

    The exported archive metadata is appended to the workflow's exported
    image sequence, and the pre-squash image is marked for removal.
    """
    output_path = os.path.join(self.workflow.source.workdir,
                               EXPORTED_SQUASHED_IMAGE_NAME)
    metadata = {"path": output_path}
    # When dont_load is set we only produce the tarball; otherwise the
    # squashed image is also loaded into the Docker engine and its new ID
    # recorded on the builder.
    squashed_id = Squash(log=self.log,
                         image=self.image,
                         from_layer=self.from_layer,
                         tag=self.tag,
                         output_path=output_path,
                         load_image=not self.dont_load).run()
    if not self.dont_load:
        self.workflow.builder.image_id = squashed_id
    metadata.update(get_exported_image_metadata(output_path))
    self.workflow.exported_image_sequence.append(metadata)
    defer_removal(self.workflow, self.image)
def run(self):
    """Build a base filesystem via a Koji image-build task and use it as base image.

    Returns None when the base image is not the 'koji/image-build'
    placeholder; otherwise a dict with the imported base image ID and the
    Koji task ID.

    Raises RuntimeError when the Koji task fails.
    """
    base_image = self.workflow.builder.base_image
    # Only act on the special koji/image-build placeholder base image.
    if base_image.namespace != 'koji' or base_image.repo != 'image-build':
        self.log.info('Base image not supported: %s', base_image)
        return
    # The image tag selects the image-build configuration file; fall back
    # to the default file name when unset or 'latest'.
    image_build_conf = base_image.tag
    if not image_build_conf or image_build_conf == 'latest':
        image_build_conf = 'image-build.conf'
    self.session = create_koji_session(self.koji_hub, self.koji_auth_info)
    task_id, filesystem_regex = self.build_filesystem(image_build_conf)
    # Block until the Koji task finishes, polling at poll_interval.
    task = TaskWatcher(self.session, task_id, self.poll_interval)
    task.wait()
    if task.failed():
        raise RuntimeError(
            'Create filesystem task failed: {}'.format(task_id))
    filesystem = self.download_filesystem(task_id, filesystem_regex)
    new_base_image = self.import_base_image(filesystem)
    self.workflow.builder.set_base_image(new_base_image)
    # The imported image is temporary; remove it during the exit phase.
    defer_removal(self.workflow, new_base_image)
    return {
        'base-image-id': new_base_image,
        'filesystem-koji-task-id': task_id,
    }
def run(self):
    """Build a base filesystem via a Koji image-build task and use it as base image.

    Returns None when the base image is not the 'koji/image-build'
    placeholder; otherwise a dict with the imported base image ID and the
    Koji task ID.

    Raises RuntimeError (including the Koji task result, when it can be
    fetched) when the Koji task fails.
    """
    base_image = self.workflow.builder.base_image
    # Only act on the special koji/image-build placeholder base image.
    if base_image.namespace != 'koji' or base_image.repo != 'image-build':
        self.log.info('Base image not supported: %s', base_image)
        return
    # The image tag selects the image-build configuration file; fall back
    # to the default file name when unset or 'latest'.
    image_build_conf = base_image.tag
    if not image_build_conf or image_build_conf == 'latest':
        image_build_conf = 'image-build.conf'
    self.session = create_koji_session(self.koji_hub, self.koji_auth_info)
    task_id, filesystem_regex = self.build_filesystem(image_build_conf)
    # Block until the Koji task finishes, polling at poll_interval.
    task = TaskWatcher(self.session, task_id, self.poll_interval)
    task.wait()
    if task.failed():
        try:
            # Koji may re-raise the error that caused task to fail
            task_result = self.session.getTaskResult(task_id)
        except Exception as exc:
            # Best effort: include at least the repr of the fetch failure.
            task_result = repr(exc)
        raise RuntimeError('image task, {}, failed: {}'.format(
            task_id, task_result))
    filesystem = self.download_filesystem(task_id, filesystem_regex)
    new_base_image = self.import_base_image(filesystem)
    self.workflow.builder.set_base_image(new_base_image)
    # The imported image is temporary; remove it during the exit phase.
    defer_removal(self.workflow, new_base_image)
    return {
        'base-image-id': new_base_image,
        'filesystem-koji-task-id': task_id,
    }
def run(self):
    """Squash the image layers added by the build; optionally export a tar archive.

    Skips entirely when a build plugin flagged the result with
    skip_layer_squash. Marks the pre-squash image for removal.
    """
    if self.workflow.build_result.skip_layer_squash:
        return  # enable build plugins to prevent unnecessary squashes
    if self.save_archive:
        output_path = os.path.join(self.workflow.source.workdir,
                                   EXPORTED_SQUASHED_IMAGE_NAME)
        metadata = {"path": output_path}
    else:
        output_path = None
    # Squash the image and output tarfile
    # If the parameter dont_load is set to True squashed image won't be
    # loaded in to Docker daemon. If it's set to False it will be loaded.
    new_id = Squash(log=self.log, image=self.image, from_layer=self.from_layer,
                    tag=self.tag, output_path=output_path,
                    load_image=not self.dont_load).run()
    if ':' not in new_id:
        # Older versions of the daemon do not include the prefix
        new_id = 'sha256:{}'.format(new_id)
    if not self.dont_load:
        self.workflow.builder.image_id = new_id
    if self.save_archive:
        metadata.update(
            get_exported_image_metadata(output_path,
                                        IMAGE_TYPE_DOCKER_ARCHIVE))
        self.workflow.exported_image_sequence.append(metadata)
    defer_removal(self.workflow, self.image)
def stream_filesystem(self, task_id, filesystem_regex):
    """Download the task's filesystem artifact and install it as the base image.

    The imported image is scheduled for removal in the exit phase.
    Returns the imported base image identifier.
    """
    fs_archive = self.download_filesystem(task_id, filesystem_regex)
    imported = self.import_base_image(fs_archive)
    self.workflow.builder.set_base_image(imported)
    defer_removal(self.workflow, imported)
    return imported
def run(self):
    """Turn the flatpak build filesystem into an OCI image (and tarfile).

    Requires flatpak_create_dockerfile to have run earlier (it provides the
    flatpak source info). Exports both the OCI directory and an OCI tarball
    into the workflow's exported image sequence, replaces the builder's
    image ID with the new OCI image ID, and marks the intermediate
    filesystem image for removal.
    """
    source = get_flatpak_source_info(self.workflow)
    if source is None:
        raise RuntimeError(
            "flatpak_create_dockerfile must be run before flatpak_create_oci"
        )
    self.builder = FlatpakBuilder(source, self.workflow.source.workdir,
                                  'var/tmp/flatpak-build',
                                  parse_manifest=parse_rpm_output,
                                  flatpak_metadata=self.flatpak_metadata)
    # Propagate the Dockerfile labels onto the flatpak build.
    df_labels = df_parser(self.workflow.builder.df_path,
                          workflow=self.workflow).labels
    self.builder.add_labels(df_labels)
    tarred_filesystem, manifest = self._export_filesystem()
    self.log.info('filesystem tarfile written to %s', tarred_filesystem)
    self.log.info('manifest written to %s', manifest)
    # Record the RPM components that ended up in the image.
    image_components = self.builder.get_components(manifest)
    self.workflow.image_components = image_components
    ref_name, outfile, tarred_outfile = self.builder.build_container(
        tarred_filesystem)
    self.log.info('Marking filesystem image "%s" for removal',
                  self.workflow.builder.image_id)
    defer_removal(self.workflow, self.workflow.builder.image_id)
    image_id = self._get_oci_image_id(outfile)
    self.log.info('New OCI image ID is %s', image_id)
    self.workflow.builder.image_id = image_id
    # Derive the local-storage name/tag from the Dockerfile labels.
    labels = Labels(df_labels)
    _, image_name = labels.get_name_and_value(Labels.LABEL_TYPE_NAME)
    _, image_version = labels.get_name_and_value(Labels.LABEL_TYPE_VERSION)
    _, image_release = labels.get_name_and_value(Labels.LABEL_TYPE_RELEASE)
    name = '{}-{}'.format(self.key, image_name)
    tag = '{}-{}'.format(image_version, image_release)
    # The OCI id is tracked by the builder. The image will be removed in
    # the exit phase. No need to mark it for removal after pushing to the
    # local storage.
    self._copy_oci_to_local_storage(outfile, name, tag)
    metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
    metadata['ref_name'] = ref_name
    self.workflow.exported_image_sequence.append(metadata)
    self.log.info('OCI image is available as %s', outfile)
    metadata = get_exported_image_metadata(tarred_outfile, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = ref_name
    self.workflow.exported_image_sequence.append(metadata)
    self.log.info('OCI tarfile is available as %s', tarred_outfile)
def run(self):
    """Tag and push the built image to every configured registry.

    Pushes with skopeo or the docker tasker depending on need_skopeo_push().
    Records per-tag manifest digests on the push configuration and, when a
    v2/oci digest is available, fetches the image config from the registry.
    Returns the list of pushed registry-qualified image names.
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2-or-oci manifest digest seen; used to fetch the image config.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % image.registry)
            registry_image = image.copy()
            registry_image.registry = registry
            if self.need_skopeo_push():
                self.push_with_skopeo(registry_image, insecure,
                                      docker_push_secret)
            else:
                self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                               registry_image,
                                               insecure=insecure, force=True,
                                               dockercfg=docker_push_secret)
            defer_removal(self.workflow, registry_image)
            pushed_images.append(registry_image)
            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            # Remember the first v2/oci digest so the image config can be
            # fetched from the registry afterwards (v2 preferred).
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image
        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info("V2 schema 2 or OCI manifest is not available to get config from")
    self.log.info("All images were tagged and pushed")
    return pushed_images
def run(self):
    """Tag and push the built image to every configured registry.

    Records per-tag manifest digests on the push configuration and, when a
    v2 digest is available, fetches the image config from the registry.
    Returns the list of pushed registry-qualified image names.

    Fix: the return value of tag_and_push_image was bound to an unused
    local (`logs`); it is now discarded explicitly since digests are
    queried from the registry rather than parsed from push output.
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2 manifest digest seen; used to fetch the image config.
    first_v2_digest = None
    first_registry_image = None
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError(
                    "Image name must not contain registry: %r"
                    % image.registry)
            registry_image = image.copy()
            registry_image.registry = registry
            # Push output is not needed (digests are fetched from the
            # registry below), so the returned logs are discarded.
            self.tasker.tag_and_push_image(
                self.workflow.builder.image_id, registry_image,
                insecure=insecure, force=True, dockercfg=docker_push_secret)
            pushed_images.append(registry_image)
            defer_removal(self.workflow, registry_image)
            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            if not first_v2_digest and digests.v2:
                first_v2_digest = digests.v2
                first_registry_image = registry_image
        if first_v2_digest:
            push_conf_registry.config = get_config_from_registry(
                first_registry_image, registry, first_v2_digest, insecure,
                docker_push_secret, 'v2')
        else:
            self.log.info("V2 schema 2 digest is not available")
    self.log.info("All images were tagged and pushed")
    return pushed_images
def stream_filesystem(self, task_id, filesystem_regex):
    """Download the built filesystem and substitute it for the custom parent image.

    Replaces the first custom parent entry in dockerfile_images with the
    imported image name. Returns the imported image identifier.
    """
    fs_archive = self.download_filesystem(task_id, filesystem_regex)
    imported = self.import_base_image(fs_archive)
    imported_name = ImageName.parse(imported)
    # Find the first custom parent image, if any, and replace it.
    custom_parent = next(
        (parent for parent in self.workflow.builder.dockerfile_images
         if base_image_is_custom(parent.to_str())),
        None)
    if custom_parent is not None:
        self.workflow.builder.dockerfile_images[custom_parent] = imported_name
    defer_removal(self.workflow, imported)
    return imported
def run(self):
    """Tag and push the built image to every configured registry.

    Records per-tag manifest digests on the push configuration and, when a
    v2 digest is available, fetches the image config from the registry.
    Returns the list of pushed registry-qualified image names.

    Fix: the return value of tag_and_push_image was bound to an unused
    local (`logs`); it is now discarded explicitly since digests are
    queried from the registry rather than parsed from push output.
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2 manifest digest seen; used to fetch the image config.
    first_v2_digest = None
    first_registry_image = None
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % image.registry)
            registry_image = image.copy()
            registry_image.registry = registry
            # Push output is not needed (digests are fetched from the
            # registry below), so the returned logs are discarded.
            self.tasker.tag_and_push_image(self.workflow.builder.image_id,
                                           registry_image, insecure=insecure,
                                           force=True,
                                           dockercfg=docker_push_secret)
            pushed_images.append(registry_image)
            defer_removal(self.workflow, registry_image)
            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            if not first_v2_digest and digests.v2:
                first_v2_digest = digests.v2
                first_registry_image = registry_image
        if first_v2_digest:
            push_conf_registry.config = get_config_from_registry(
                first_registry_image, registry, first_v2_digest, insecure,
                docker_push_secret, 'v2')
        else:
            self.log.info("V2 schema 2 digest is not available")
    self.log.info("All images were tagged and pushed")
    return pushed_images
def run(self):
    """Determine the image ID after the image is available from Crane.

    Currently short-circuited: the ID is read from the last exported image
    archive and returned immediately (temporary pub compatibility). The
    Crane pull/inspect loop below is intentionally unreachable until that
    workaround is removed.
    """
    # TODO: Temporary backwards compatibility with pub
    image_id = imgutils.get_id(
        self.workflow.exported_image_sequence[-1].get('path'))
    self.workflow.builder.image_id = image_id
    return image_id
    # End of workaround
    # NOTE: everything below is dead code while the workaround is in place.
    start = time()
    # Work out the name of the image to pull
    assert self.workflow.tag_conf.unique_images  # must be set
    image = self.workflow.tag_conf.unique_images[0]
    assert self.workflow.push_conf.pulp_registries  # must be configured
    registry = self.workflow.push_conf.pulp_registries[0]
    pullspec = image.copy()
    pullspec.registry = registry.uri  # the image on Crane
    while True:
        # Pull the image from Crane
        name = self.tasker.pull_image(pullspec)
        # Inspect it
        try:
            metadata = self.tasker.inspect_image(name)
        except NotFound:
            # Keep retrying until the image appears or we hit the timeout.
            if time() - start > self.timeout:
                raise CraneTimeoutError("{} seconds exceeded"
                                        .format(self.timeout))
            self.log.info("will try again in %ss", self.retry_delay)
            sleep(self.retry_delay)
            continue
        defer_removal(self.workflow, name)
        break
    # Adjust our idea of the image ID
    image_id = metadata['Id']
    self.log.debug("image ID changed from %s to %s",
                   self.workflow.builder.image_id, image_id)
    self.workflow.builder.image_id = image_id
    return image_id
def stream_filesystem(self, task_id, filesystem_regex):
    """Download the built filesystem and wire it in as the new parent image.

    When the build uses a custom base image, the builder's base image is
    updated too. The first custom entry in parent_images is replaced by the
    imported image name. Returns the imported image identifier.
    """
    fs_archive = self.download_filesystem(task_id, filesystem_regex)
    imported = self.import_base_image(fs_archive)
    imported_name = ImageName.parse(imported)
    if self.workflow.builder.custom_base_image:
        self.workflow.builder.set_base_image(imported_name)
    # Find the first custom parent image, if any, and replace it.
    custom_parent = next(
        (parent for parent in self.workflow.builder.parent_images
         if base_image_is_custom(parent.to_str())),
        None)
    if custom_parent is not None:
        self.workflow.builder.parent_images[custom_parent] = imported_name
    defer_removal(self.workflow, imported)
    return imported
def run(self):
    """Squash image layers, always writing a tar archive in the source workdir.

    The archive metadata is appended to the workflow's exported image
    sequence; the pre-squash image is marked for removal.
    """
    archive = os.path.join(self.workflow.source.workdir,
                           EXPORTED_SQUASHED_IMAGE_NAME)
    export_meta = {"path": archive}
    squash_args = dict(log=self.log, image=self.image,
                       from_layer=self.from_layer, tag=self.tag,
                       output_path=archive)
    if self.dont_load:
        # Tarball only: do not load the squashed image into Docker.
        Squash(load_image=False, **squash_args).run()
    else:
        # Produce the tarball and load the squashed image into Docker,
        # recording its new ID on the builder.
        new_image_id = Squash(load_image=True, **squash_args).run()
        self.workflow.builder.image_id = new_image_id
    export_meta.update(get_exported_image_metadata(archive))
    self.workflow.exported_image_sequence.append(export_meta)
    defer_removal(self.workflow, self.image)
def run(self):
    """Determine the image ID after the image is available from Crane.

    Currently short-circuited: the ID is read from the last exported image
    archive and returned immediately (temporary pub compatibility). The
    Crane pull/inspect loop below is intentionally unreachable until that
    workaround is removed.
    """
    # TODO: Temporary backwards compatibility with pub
    image_id = imgutils.get_id(
        self.workflow.exported_image_sequence[-1].get('path'))
    self.workflow.builder.image_id = image_id
    return image_id
    # End of workaround
    # NOTE: everything below is dead code while the workaround is in place.
    start = time()
    # Work out the name of the image to pull
    assert self.workflow.tag_conf.unique_images  # must be set
    image = self.workflow.tag_conf.unique_images[0]
    assert self.workflow.push_conf.pulp_registries  # must be configured
    registry = self.workflow.push_conf.pulp_registries[0]
    pullspec = image.copy()
    pullspec.registry = registry.uri  # the image on Crane
    while True:
        # Pull the image from Crane
        name = self.tasker.pull_image(pullspec)
        # Inspect it
        try:
            metadata = self.tasker.inspect_image(name)
        except NotFound:
            # Keep retrying until the image appears or we hit the timeout.
            if time() - start > self.timeout:
                raise CraneTimeoutError("{} seconds exceeded".format(
                    self.timeout))
            self.log.info("will try again in %ss", self.retry_delay)
            sleep(self.retry_delay)
            continue
        defer_removal(self.workflow, name)
        break
    # Adjust our idea of the image ID
    image_id = metadata['Id']
    self.log.debug("image ID changed from %s to %s",
                   self.workflow.builder.image_id, image_id)
    self.workflow.builder.image_id = image_id
    return image_id
def run(self):
    """Pull the synced image back from Crane and adopt its image ID.

    Polls until the image can be inspected or the timeout elapses
    (raising CraneTimeoutError). Returns the image ID reported by inspect.
    """
    started_at = time()
    # Work out the name of the image to pull: the first unique tag on the
    # first configured pulp registry (the image on Crane).
    assert self.workflow.tag_conf.unique_images  # must be set
    unique_image = self.workflow.tag_conf.unique_images[0]
    assert self.workflow.push_conf.pulp_registries  # must be configured
    pulp_registry = self.workflow.push_conf.pulp_registries[0]
    pullspec = unique_image.copy()
    pullspec.registry = pulp_registry.uri
    while True:
        pulled_name = self.tasker.pull_image(pullspec, insecure=self.insecure)
        try:
            inspect_data = self.tasker.inspect_image(pulled_name)
        except NotFound:
            # Not visible yet: back off and retry until the deadline.
            if time() - started_at > self.timeout:
                raise CraneTimeoutError("{} seconds exceeded"
                                        .format(self.timeout))
            self.log.info("will try again in %ss", self.retry_delay)
            sleep(self.retry_delay)
        else:
            defer_removal(self.workflow, pulled_name)
            break
    # Adopt the ID of the image we just pulled.
    new_image_id = inspect_data['Id']
    self.log.debug("image ID changed from %s to %s",
                   self.workflow.builder.image_id, new_image_id)
    self.workflow.builder.image_id = new_image_id
    return new_image_id
def test_remove_built_image_plugin(self, remove_base, deferred, expected):
    """Run the garbage-collection plugin and verify exactly the expected
    images are removed (each at most once).

    Fix: the runner's return value was bound to an unused local
    (`output`); it is now discarded.
    """
    tasker, workflow = mock_environment()
    runner = PostBuildPluginsRunner(
        tasker, workflow,
        [{
            'name': GarbageCollectionPlugin.key,
            'args': {'remove_pulled_base_image': remove_base},
        }]
    )
    removed_images = []

    def spy_remove_image(image_id, force=None):
        # Record which images the plugin asks the tasker to remove.
        removed_images.append(image_id)

    flexmock(tasker, remove_image=spy_remove_image)
    for image in deferred:
        defer_removal(workflow, image)
    runner.run()
    image_set = set(removed_images)
    # No image may be removed twice, and the removed set must match.
    assert len(image_set) == len(removed_images)
    assert image_set == expected
def test_remove_built_image_plugin(self, remove_base, deferred, expected):
    """Run the garbage-collection plugin and verify exactly the expected
    images are removed (each at most once)."""
    tasker, workflow = mock_environment()
    plugin_conf = [{
        'name': GarbageCollectionPlugin.key,
        'args': {'remove_pulled_base_image': remove_base},
    }]
    runner = PostBuildPluginsRunner(tasker, workflow, plugin_conf)
    captured = []

    def record_removal(image_id, force=None):
        # Record which images the plugin asks the tasker to remove.
        captured.append(image_id)

    flexmock(tasker, remove_image=record_removal)
    for image in deferred:
        defer_removal(workflow, image)
    runner.run()
    # No image may be removed twice, and the removed set must match.
    assert len(set(captured)) == len(captured)
    assert set(captured) == expected
def run(self):
    """Tag and push the built image to every configured registry.

    Pushes with skopeo or the docker tasker depending on need_skopeo_push().
    Records per-tag manifest digests on the push configuration and, when a
    v2/oci digest is available, fetches the image config from the registry.
    Returns the list of pushed registry-qualified image names.
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2-or-oci manifest digest seen; used to fetch the image config.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError(
                    "Image name must not contain registry: %r"
                    % image.registry)
            registry_image = image.copy()
            registry_image.registry = registry
            if self.need_skopeo_push():
                self.push_with_skopeo(registry_image, insecure,
                                      docker_push_secret)
            else:
                self.tasker.tag_and_push_image(
                    self.workflow.builder.image_id, registry_image,
                    insecure=insecure, force=True,
                    dockercfg=docker_push_secret)
            defer_removal(self.workflow, registry_image)
            pushed_images.append(registry_image)
            digests = get_manifest_digests(registry_image, registry, insecure,
                                           docker_push_secret)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            # Remember the first v2/oci digest so the image config can be
            # fetched from the registry afterwards (v2 preferred).
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image
        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info(
                "V2 schema 2 or OCI manifest is not available to get config from"
            )
    self.log.info("All images were tagged and pushed")
    return pushed_images
def set_new_parent_image(self):
    """Install the prepared parent image as the build's base image and
    schedule it for removal in the exit phase."""
    parent = self._new_parent_image
    self.workflow.builder.set_base_image(parent)
    defer_removal(self.workflow, parent)
def run(self):
    """Work out which media types are available from Crane and adopt the
    pulled image's ID when needed.

    Returns a sorted list of media types, or [] for a failed build. When a
    v2 schema 2 digest (or, with expect_v2schema2list_only, a manifest
    list) is found server-side, the image is not pulled and the image ID is
    left unchanged.
    """
    # Only run if the build was successful
    if self.workflow.build_process_failed:
        self.log.info("Not running for failed build")
        self.workflow.builder.image_id = None
        return []
    self.set_manifest_list_expectations()
    # Work out the name of the image to pull
    assert self.workflow.tag_conf.unique_images  # must be set
    image = self.workflow.tag_conf.unique_images[0]
    assert self.workflow.push_conf.pulp_registries  # must be configured
    registry = self.workflow.push_conf.pulp_registries[0]
    pullspec = image.copy()
    pullspec.registry = registry.uri  # the image on Crane
    # Media types implied by which pulp plugins ran in this workflow.
    media_types = []
    for plugin in self.workflow.postbuild_plugins_conf:
        if plugin['name'] == PLUGIN_PULP_SYNC_KEY:
            media_types.append(MEDIA_TYPE_DOCKER_V2_SCHEMA1)
        if plugin['name'] == PLUGIN_PULP_PUSH_KEY:
            media_types.append(MEDIA_TYPE_DOCKER_V1)
    # We only expect to find a v2 digest from Crane if the
    # pulp_sync plugin was used. If we do find a v2 digest, there
    # is no need to pull the image.
    if registry.server_side_sync:
        digests = self.retry_if_not_found(get_manifest_digests,
                                          pullspec, registry.uri,
                                          self.insecure, self.secret,
                                          require_digest=False)
        if digests:
            if digests.v2_list:
                self.log.info("Manifest list found")
                media_types.append(MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
                if self.expect_v2schema2list_only:
                    self.log.info(
                        "Only V2 schema 2 manifest list is expected, "
                        "leaving image ID unchanged %s",
                        self.workflow.builder.image_id)
                    return [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
            if digests.v2:
                self.log.info(
                    "V2 schema 2 digest found, leaving image ID unchanged %s",
                    self.workflow.builder.image_id)
                media_types.append(MEDIA_TYPE_DOCKER_V2_SCHEMA2)
                # No need to pull the image to work out the image ID as
                # we already know it.
                return sorted(media_types)
        else:
            self.log.info("No digests were found")
    # Pull the image from Crane to find out the image ID for the
    # v2 schema 1 manifest (which we have not seen before).
    self.tasker.pull_image(pullspec, insecure=self.insecure)
    name = pullspec.to_str()
    # Inspect it
    metadata = self.tasker.inspect_image(name)
    defer_removal(self.workflow, name)
    # Adjust our idea of the image ID
    image_id = metadata['Id']
    self.log.debug("image ID changed from %s to %s",
                   self.workflow.builder.image_id, image_id)
    self.workflow.builder.image_id = image_id
    return sorted(media_types)
def run(self):
    """Tag and push the built image (binary or source OCI) to all registries.

    Enforces the configured binary image size limit for non-source builds,
    pushes with retries until a V2 schema 2 / OCI manifest is visible,
    records per-tag digests, and fetches the image config from the registry
    via the first available v2/oci digest. For source builds the v2
    manifest response is stored as workflow.koji_source_manifest.
    Returns the list of pushed registry-qualified image names.
    """
    pushed_images = []
    # Non-empty when this is a source-container build exported as OCI.
    source_oci_image_path = self.workflow.build_result.oci_image_path
    if source_oci_image_path:
        source_unique_image = self.source_get_unique_image()
    if not self.workflow.tag_conf.unique_images:
        if source_oci_image_path:
            self.workflow.tag_conf.add_unique_image(source_unique_image)
        else:
            self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2-or-oci manifest digest seen; used to fetch the image config.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None
    image_size_limit = get_image_size_limit(self.workflow)
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError(
                    "Image name must not contain registry: %r"
                    % image.registry)
            if not source_oci_image_path:
                # Total of all layer sizes produced by this build.
                image_size = sum(item['size']
                                 for item in self.workflow.layer_sizes)
                config_image_size = image_size_limit['binary_image']
                # Only handle the case when size is set > 0 in config
                if config_image_size and image_size > config_image_size:
                    raise ExceedsImageSizeError(
                        'The size {} of image {} exceeds the limitation {} '
                        'configured in reactor config.'.format(
                            image_size, image, image_size_limit))
            registry_image = image.copy()
            registry_image.registry = registry
            max_retries = DOCKER_PUSH_MAX_RETRIES
            for retry in range(max_retries + 1):
                if self.need_skopeo_push() or source_oci_image_path:
                    self.push_with_skopeo(registry_image, insecure,
                                          docker_push_secret,
                                          source_oci_image_path)
                else:
                    self.tasker.tag_and_push_image(
                        self.workflow.builder.image_id, registry_image,
                        insecure=insecure, force=True,
                        dockercfg=docker_push_secret)
                if source_oci_image_path:
                    # Source builds: keep the v2 manifest response for the
                    # Koji import later on.
                    manifests_dict = get_all_manifests(registry_image,
                                                       registry, insecure,
                                                       docker_push_secret,
                                                       versions=('v2', ))
                    try:
                        koji_source_manifest_response = manifests_dict['v2']
                    except KeyError as exc:
                        raise RuntimeError(
                            f'Unable to fetch v2 schema 2 digest for {registry_image.to_str()}'
                        ) from exc
                    self.workflow.koji_source_manifest = \
                        koji_source_manifest_response.json()
                digests = get_manifest_digests(registry_image, registry,
                                               insecure, docker_push_secret)
                if (not (digests.v2 or digests.oci)
                        and (retry < max_retries)):
                    # Exponential backoff before re-checking the registry.
                    sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2**retry)
                    self.log.info(
                        "Retrying push because V2 schema 2 or "
                        "OCI manifest not found in %is", sleep_time)
                    time.sleep(sleep_time)
                else:
                    if not self.need_skopeo_push():
                        defer_removal(self.workflow, registry_image)
                    break
            pushed_images.append(registry_image)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            # Remember the first v2/oci digest so the image config can be
            # fetched from the registry afterwards (v2 preferred).
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image
        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info(
                "V2 schema 2 or OCI manifest is not available to get config from"
            )
    self.log.info("All images were tagged and pushed")
    return pushed_images
def run(self):
    """Tag and push the built image to every configured registry, retrying
    until a V2 schema 2 / OCI manifest is visible when one is expected.

    Records per-tag manifest digests on the push configuration and fetches
    the image config from the registry via the first v2/oci digest found.
    Returns the list of pushed registry-qualified image names.

    Fix: the 'expected_media_types' scan previously ran inside the push
    loop and reused the loop variable `registry`, clobbering the registry
    currently being pushed to (so digests/config were then queried against
    the wrong registry). The scan depends only on static configuration, so
    it is hoisted out of the loops and uses its own variable.
    """
    pushed_images = []
    if not self.workflow.tag_conf.unique_images:
        self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2-or-oci manifest digest seen; used to fetch the image config.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None
    # Whether any registry expects a V2 schema 2 manifest; loop-invariant.
    expect_v2s2 = False
    for reg_name in self.registries:
        media_types = self.registries[reg_name].get('expected_media_types', [])
        if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in media_types:
            expect_v2s2 = True
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % image.registry)
            registry_image = image.copy()
            registry_image.registry = registry
            max_retries = DOCKER_PUSH_MAX_RETRIES
            if not (self.group or expect_v2s2):
                # No manifest is expected to appear later; don't retry.
                max_retries = 0
            for retry in range(max_retries + 1):
                if self.need_skopeo_push():
                    self.push_with_skopeo(registry_image, insecure,
                                          docker_push_secret)
                else:
                    self.tasker.tag_and_push_image(
                        self.workflow.builder.image_id, registry_image,
                        insecure=insecure, force=True,
                        dockercfg=docker_push_secret)
                digests = get_manifest_digests(registry_image, registry,
                                               insecure, docker_push_secret)
                if not (digests.v2 or digests.oci) and retry < max_retries:
                    # Exponential backoff before re-checking the registry.
                    sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2 ** retry)
                    self.log.info("Retrying push because V2 schema 2 or "
                                  "OCI manifest not found in %is", sleep_time)
                    time.sleep(sleep_time)
                else:
                    if not self.need_skopeo_push():
                        defer_removal(self.workflow, registry_image)
                    break
            pushed_images.append(registry_image)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            # Remember the first v2/oci digest (v2 preferred).
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image
        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info("V2 schema 2 or OCI manifest is not available to get config from")
    self.log.info("All images were tagged and pushed")
    return pushed_images
def run(self):
    """Tag and push the built image (binary or source OCI) to all registries,
    retrying until a V2 schema 2 / OCI manifest is visible when expected.

    For source builds the v2 manifest response is stored as
    workflow.koji_source_manifest. Records per-tag digests and fetches the
    image config from the registry via the first v2/oci digest found.
    Returns the list of pushed registry-qualified image names.

    Fixes: (1) the 'expected_media_types' scan previously ran inside the
    push loop and reused the loop variable `registry`, clobbering the
    registry currently being pushed to; it is hoisted out of the loops with
    its own variable. (2) the RuntimeError raised on a missing v2 manifest
    now chains the original KeyError (`from exc`), matching the newer
    variant of this plugin elsewhere in this file.
    """
    pushed_images = []
    # Non-empty when this is a source-container build exported as OCI.
    source_oci_image_path = self.workflow.build_result.oci_image_path
    if source_oci_image_path:
        source_unique_image = self.source_get_unique_image()
    if not self.workflow.tag_conf.unique_images:
        if source_oci_image_path:
            self.workflow.tag_conf.add_unique_image(source_unique_image)
        else:
            self.workflow.tag_conf.add_unique_image(self.workflow.image)
    # First v2-or-oci manifest digest seen; used to fetch the image config.
    config_manifest_digest = None
    config_manifest_type = None
    config_registry_image = None
    # Whether any registry expects a V2 schema 2 manifest; loop-invariant.
    expect_v2s2 = False
    for reg_name in self.registries:
        media_types = self.registries[reg_name].get('expected_media_types', [])
        if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in media_types:
            expect_v2s2 = True
    for registry, registry_conf in self.registries.items():
        insecure = registry_conf.get('insecure', False)
        push_conf_registry = \
            self.workflow.push_conf.add_docker_registry(registry,
                                                        insecure=insecure)
        docker_push_secret = registry_conf.get('secret', None)
        self.log.info("Registry %s secret %s", registry, docker_push_secret)
        for image in self.workflow.tag_conf.images:
            if image.registry:
                raise RuntimeError("Image name must not contain registry: %r"
                                   % image.registry)
            registry_image = image.copy()
            registry_image.registry = registry
            max_retries = DOCKER_PUSH_MAX_RETRIES
            if not (self.group or expect_v2s2):
                # No manifest is expected to appear later; don't retry.
                max_retries = 0
            for retry in range(max_retries + 1):
                if self.need_skopeo_push() or source_oci_image_path:
                    self.push_with_skopeo(registry_image, insecure,
                                          docker_push_secret,
                                          source_oci_image_path)
                else:
                    self.tasker.tag_and_push_image(
                        self.workflow.builder.image_id, registry_image,
                        insecure=insecure, force=True,
                        dockercfg=docker_push_secret)
                if source_oci_image_path:
                    # Source builds: keep the v2 manifest response for the
                    # Koji import later on.
                    manifests_dict = get_all_manifests(registry_image,
                                                       registry, insecure,
                                                       docker_push_secret,
                                                       versions=('v2', ))
                    try:
                        koji_source_manifest_response = manifests_dict['v2']
                    except KeyError as exc:
                        raise RuntimeError(
                            'Unable to fetch v2 schema 2 digest for {}'.format(
                                registry_image.to_str())) from exc
                    self.workflow.koji_source_manifest = \
                        koji_source_manifest_response.json()
                digests = get_manifest_digests(registry_image, registry,
                                               insecure, docker_push_secret)
                if not (digests.v2 or digests.oci) and retry < max_retries:
                    # Exponential backoff before re-checking the registry.
                    sleep_time = DOCKER_PUSH_BACKOFF_FACTOR * (2 ** retry)
                    self.log.info("Retrying push because V2 schema 2 or "
                                  "OCI manifest not found in %is", sleep_time)
                    time.sleep(sleep_time)
                else:
                    if not self.need_skopeo_push():
                        defer_removal(self.workflow, registry_image)
                    break
            pushed_images.append(registry_image)
            tag = registry_image.to_str(registry=False)
            push_conf_registry.digests[tag] = digests
            # Remember the first v2/oci digest (v2 preferred).
            if not config_manifest_digest and (digests.v2 or digests.oci):
                if digests.v2:
                    config_manifest_digest = digests.v2
                    config_manifest_type = 'v2'
                else:
                    config_manifest_digest = digests.oci
                    config_manifest_type = 'oci'
                config_registry_image = registry_image
        if config_manifest_digest:
            push_conf_registry.config = get_config_from_registry(
                config_registry_image, registry, config_manifest_digest,
                insecure, docker_push_secret, config_manifest_type)
        else:
            self.log.info(
                "V2 schema 2 or OCI manifest is not available to get config from"
            )
    self.log.info("All images were tagged and pushed")
    return pushed_images
def run(self):
    """Work out which media types are available from Crane and adopt the
    pulled image's ID when needed.

    Returns a sorted list of media types, or [] for a failed build. When a
    v2 schema 2 digest (or, with expect_v2schema2list_only, a manifest
    list) is found server-side, the image is not pulled and the image ID is
    left unchanged.
    """
    # Only run if the build was successful
    if self.workflow.build_process_failed:
        self.log.info("Not running for failed build")
        self.workflow.builder.image_id = None
        return []
    self.set_manifest_list_expectations()
    # Work out the name of the image to pull
    assert self.workflow.tag_conf.unique_images  # must be set
    image = self.workflow.tag_conf.unique_images[0]
    assert self.workflow.push_conf.pulp_registries  # must be configured
    registry = self.workflow.push_conf.pulp_registries[0]
    pullspec = image.copy()
    pullspec.registry = registry.uri  # the image on Crane
    # Media types implied by which pulp plugins ran in this workflow.
    media_types = []
    for plugin in self.workflow.postbuild_plugins_conf:
        if plugin['name'] == PLUGIN_PULP_SYNC_KEY:
            media_types.append(MEDIA_TYPE_DOCKER_V2_SCHEMA1)
        if plugin['name'] == PLUGIN_PULP_PUSH_KEY:
            media_types.append(MEDIA_TYPE_DOCKER_V1)
    # We only expect to find a v2 digest from Crane if the
    # pulp_sync plugin was used. If we do find a v2 digest, there
    # is no need to pull the image.
    if registry.server_side_sync:
        digests = self.retry_if_not_found(get_manifest_digests,
                                          pullspec, registry.uri,
                                          self.insecure, self.secret,
                                          require_digest=False)
        if digests:
            if digests.v2_list:
                self.log.info("Manifest list found")
                media_types.append(MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
                if self.expect_v2schema2list_only:
                    self.log.info("Only V2 schema 2 manifest list is expected, "
                                  "leaving image ID unchanged %s",
                                  self.workflow.builder.image_id)
                    return [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
            if digests.v2:
                self.log.info("V2 schema 2 digest found, leaving image ID unchanged %s",
                              self.workflow.builder.image_id)
                media_types.append(MEDIA_TYPE_DOCKER_V2_SCHEMA2)
                # No need to pull the image to work out the image ID as
                # we already know it.
                return sorted(media_types)
        else:
            self.log.info("No digests were found")
    # Pull the image from Crane to find out the image ID for the
    # v2 schema 1 manifest (which we have not seen before).
    self.tasker.pull_image(pullspec, insecure=self.insecure)
    name = pullspec.to_str()
    # Inspect it
    metadata = self.tasker.inspect_image(name)
    defer_removal(self.workflow, name)
    # Adjust our idea of the image ID
    image_id = metadata['Id']
    self.log.debug("image ID changed from %s to %s",
                   self.workflow.builder.image_id, image_id)
    self.workflow.builder.image_id = image_id
    return sorted(media_types)
def set_new_parent_image(self):
    """Point the Dockerfile's base-image entry at the prepared parent image
    and schedule it for removal in the exit phase."""
    parent = self._new_parent_image
    images = self.workflow.builder.dockerfile_images
    images[images.base_image_key] = parent
    defer_removal(self.workflow, parent)