def run(self):
    """Import this build's floating tags into the OpenShift imagestream.

    Silently skips (with an info log) for failed builds, scratch builds,
    missing imagestream configuration, or when there are no floating tags.
    """
    # Nothing to import when the build itself failed.
    if self.workflow.build_process_failed:
        self.log.info("Not importing failed build")
        return

    # Scratch builds and builds without a configured imagestream are skipped.
    if is_scratch_build(self.workflow):
        self.log.info('scratch build, skipping plugin')
        return
    if not self.imagestream_name:
        self.log.info('no imagestream provided, skipping plugin')
        return

    floating = get_floating_images(self.workflow)
    self.floating_images = floating
    if not floating:
        self.log.info('No floating tags to import, skipping import_image')
        return

    # Work out the repository to import from, open an OpenShift session,
    # and make sure the target imagestream exists before importing.
    self.resolve_docker_image_repo()
    self.osbs = get_openshift_session(self.workflow, self.openshift_fallback)
    self.get_or_create_imagestream()

    trackable = self.get_trackable_tags()
    self.osbs.import_image_tags(self.imagestream_name,
                                trackable,
                                self.docker_image_repo,
                                insecure=self.insecure_registry)
def run(self):
    """
    Run the plugin.

    Adds the configured floating tags to the group manifest in the
    registry and returns a mapping of repository -> manifest digest,
    or None when there is nothing to do.
    """
    tags = get_floating_images(self.workflow)
    if not tags:
        self.log.info('No floating images to tag, skipping %s',
                      PLUGIN_PUSH_FLOATING_TAGS_KEY)
        return

    group_result = self.workflow.data.plugins_results.get(
        PLUGIN_GROUP_MANIFESTS_KEY)
    # A manifest digest from group_manifests is required to know what to tag.
    if not group_result or not group_result.get("manifest_digest"):
        self.log.info('No manifest digest available, skipping %s',
                      PLUGIN_PUSH_FLOATING_TAGS_KEY)
        return

    session = self.manifest_util.get_registry_session()
    repo, digest = self.add_floating_tags(session, group_result, tags)
    return {repo: digest}
def run(self):
    """
    Run the plugin.

    Applies floating tags to the group manifest in every configured
    registry and returns a mapping of repository -> manifest digest,
    or None when preconditions are not met.
    """
    if self.workflow.build_process_failed:
        self.log.info('Build failed, skipping %s', PLUGIN_PUSH_FLOATING_TAGS_KEY)
        return

    floating_tags = get_floating_images(self.workflow)
    if not floating_tags:
        self.log.info('No floating images to tag, skipping %s',
                      PLUGIN_PUSH_FLOATING_TAGS_KEY)
        return

    # can't run in the worker build
    if not self.workflow.is_orchestrator_build():
        self.log.warning('%s cannot be used by a worker builder',
                         PLUGIN_PUSH_FLOATING_TAGS_KEY)
        return

    manifest_data = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY)
    if not manifest_data or not manifest_data.get("manifest_digest"):
        self.log.info('No manifest digest available, skipping %s',
                      PLUGIN_PUSH_FLOATING_TAGS_KEY)
        return

    # add_floating_tags yields one (repo, digest) pair per registry.
    return dict(
        self.add_floating_tags(
            self.manifest_util.get_registry_session(registry),
            manifest_data,
            floating_tags,
        )
        for registry in self.manifest_util.registries
    )
def run(self):
    """Import this build's tags into the OpenShift imagestream.

    Raises RuntimeError when no floating images are found; falls back to
    the older ``import_image`` API when ``import_image_tags`` is missing.
    """
    # Only run if the build was successful
    if self.workflow.build_process_failed:
        self.log.info("Not importing failed build")
        return
    self.floating_images = get_floating_images(self.workflow)
    if not self.floating_images:
        # NOTE(review): sibling variants of this plugin log and return here
        # instead of raising — confirm the hard failure is intentional.
        raise RuntimeError('Could not find floating images in workflow')
    # Resolve the source repository, open an OpenShift session, and make
    # sure the target imagestream exists before importing tags.
    self.resolve_docker_image_repo()
    self.osbs = get_openshift_session(self.workflow, self.openshift_fallback)
    self.get_or_create_imagestream()
    try:
        self.osbs.import_image_tags(self.imagestream_name,
                                    self.get_trackable_tags(),
                                    self.docker_image_repo,
                                    insecure=self.insecure_registry)
    except AttributeError:
        # Older osbs-client without import_image_tags: tag manually and
        # use the legacy whole-imagestream import instead.
        self.log.info('Falling back to calling import_image instead of import_image_tags')
        self.process_tags()
        self.osbs.import_image(self.imagestream_name, tags=self.get_trackable_tags())
def run(self):
    """Import this build's floating tags into the OpenShift imagestream.

    Skips failed builds and builds without floating tags; falls back to
    the legacy ``import_image`` API when ``import_image_tags`` is absent.
    """
    # A failed build has nothing worth importing.
    if self.workflow.build_process_failed:
        self.log.info("Not importing failed build")
        return

    floating = get_floating_images(self.workflow)
    self.floating_images = floating
    if not floating:
        self.log.info('No floating tags to import, skipping import_image')
        return

    # Prepare repository, session and imagestream before the import call.
    self.resolve_docker_image_repo()
    self.osbs = get_openshift_session(self.workflow, self.openshift_fallback)
    self.get_or_create_imagestream()

    try:
        self.osbs.import_image_tags(self.imagestream_name,
                                    self.get_trackable_tags(),
                                    self.docker_image_repo,
                                    insecure=self.insecure_registry)
    except AttributeError:
        # Older osbs-client without import_image_tags: tag explicitly,
        # then import the whole imagestream the legacy way.
        self.log.info('Falling back to calling import_image instead of import_image_tags')
        self.process_tags()
        self.osbs.import_image(self.imagestream_name, tags=self.get_trackable_tags())
def set_group_manifest_info(self, extra, worker_metadatas):
    """Record group-manifest (manifest list) info in the Koji metadata.

    When group_manifests produced a manifest list, fills
    ``extra['image']['index']`` with tags, pullspecs and digests.
    Otherwise patches the x86_64 worker's docker-image output metadata
    in ``worker_metadatas`` in place.
    """
    version_release = None
    primary_images = get_primary_images(self.workflow)
    floating_images = get_floating_images(self.workflow)
    unique_images = get_unique_images(self.workflow)
    if primary_images:
        version_release = primary_images[0].tag
    if is_scratch_build(self.workflow):
        # Scratch builds have no primary images; use the first configured tag.
        tags = [image.tag for image in self.workflow.tag_conf.images]
        version_release = tags[0]
    else:
        assert version_release is not None, 'Unable to find version-release image'
        tags = [image.tag for image in primary_images]
    floating_tags = [image.tag for image in floating_images]
    unique_tags = [image.tag for image in unique_images]
    manifest_data = self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY, {})
    if manifest_data and is_manifest_list(manifest_data.get("media_type")):
        manifest_digest = manifest_data.get("manifest_digest")
        index = {}
        index['tags'] = tags
        index['floating_tags'] = floating_tags
        index['unique_tags'] = unique_tags
        build_image = get_unique_images(self.workflow)[0]
        repo = ImageName.parse(build_image).to_str(registry=False, tag=False)
        # group_manifests added the registry, so this should be valid
        registries = self.workflow.push_conf.all_registries
        digest_version = get_manifest_media_version(manifest_digest)
        digest = manifest_digest.default
        # Only the first registry is used for pullspec/digest info.
        for registry in registries:
            pullspec = "{0}/{1}@{2}".format(registry.uri, repo, digest)
            index['pull'] = [pullspec]
            pullspec = "{0}/{1}:{2}".format(registry.uri, repo, version_release)
            index['pull'].append(pullspec)
            # Store each digest with according media type
            index['digests'] = {}
            media_type = get_manifest_media_type(digest_version)
            index['digests'][media_type] = digest
            break
        extra['image']['index'] = index
    # group_manifests returns None if didn't run, {} if group=False
    else:
        for platform in worker_metadatas:
            if platform == "x86_64":
                for instance in worker_metadatas[platform]['output']:
                    if instance['type'] == 'docker-image':
                        # koji_upload, running in the worker, doesn't have the full
                        # tags so set them here
                        instance['extra']['docker']['tags'] = tags
                        instance['extra']['docker']['floating_tags'] = floating_tags
                        instance['extra']['docker']['unique_tags'] = unique_tags
                        repositories = []
                        for pullspec in instance['extra']['docker']['repositories']:
                            # Re-tag only by-tag pullspecs; by-digest ones
                            # (containing '@') are kept unchanged.
                            if '@' not in pullspec:
                                image = ImageName.parse(pullspec)
                                image.tag = version_release
                                pullspec = image.to_str()
                            repositories.append(pullspec)
                        instance['extra']['docker']['repositories'] = repositories
                        self.log.debug("reset tags to so that docker is %s",
                                       instance['extra']['docker'])
                        annotations = get_worker_build_info(self.workflow, platform).\
                            build.get_annotations()
                        digests = {}
                        if 'digests' in annotations:
                            digests = get_digests_map_from_annotations(annotations['digests'])
                        instance['extra']['docker']['digests'] = digests
def test_group_manifests(workflow, source_dir, schema_version, test_name, group,
                         foreign_layers, per_platform_images, expected_exception,
                         user_params):
    """Exercise GroupManifestsPlugin against mocked registries.

    Verifies either that a sorted manifest list is pushed (group=True),
    that per-platform manifests are re-tagged (group=False), or that the
    expected exception is raised.
    """
    test_images = ['namespace/httpd:2.4', 'namespace/httpd:latest']
    goarch = {
        'ppc64le': 'powerpc',
        'x86_64': 'amd64',
    }
    registry_conf = {REGISTRY_V2: {'version': 'v2', 'insecure': True}}
    temp_dir = mkdtemp(dir=str(source_dir))
    with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
        dockerconfig_contents = {
            REGISTRY_V2: {
                "username": "******", "password": DOCKER0_REGISTRY
            }
        }
        dockerconfig.write(json.dumps(dockerconfig_contents))
        dockerconfig.flush()
        registry_conf[REGISTRY_V2]['secret'] = temp_dir
    registry_images_conf = {
        platform: {REGISTRY_V2: images}
        for platform, images in per_platform_images.items()
    }
    mocked_registries, platform_digests = mock_registries(
        registry_conf, registry_images_conf,
        schema_version=schema_version, foreign_layers=foreign_layers)
    some_per_platform_image = next(
        image for images in per_platform_images.values() for image in images
    )
    # NOTE: this test assumes that all the images in per_platform_images follow
    # the format of {noarch_image}-{platform}. If they don't, this test will
    # fail with cryptic errors
    noarch_image, *_ = some_per_platform_image.rsplit("-", 1)
    mock_environment(workflow, unique_image=noarch_image, primary_images=test_images)
    platform_descriptors_list = []
    for platform, arch in goarch.items():
        new_plat = {
            'platform': platform,
            'architecture': arch,
        }
        platform_descriptors_list.append(new_plat)
    runner = (
        MockEnv(workflow)
        .for_plugin(GroupManifestsPlugin.key)
        .set_check_platforms_result(list(per_platform_images.keys()))
        .set_reactor_config(
            {
                'version': 1,
                'group_manifests': group,
                'registry': {
                    'url': f'https://{REGISTRY_V2}/{registry_conf[REGISTRY_V2]["version"]}',
                    'auth': True,
                },
                'registries_cfg_path': str(temp_dir),
                'platform_descriptors': platform_descriptors_list,
            }
        )
        .create_runner()
    )
    if expected_exception is None:
        results = runner.run()
        manifest_type, list_type = {
            'v2': (
                'application/vnd.docker.distribution.manifest.v2+json',
                'application/vnd.docker.distribution.manifest.list.v2+json',
            ),
            'oci': (
                'application/vnd.oci.image.manifest.v1+json',
                'application/vnd.oci.image.index.v1+json',
            ),
        }[schema_version]

        def verify_manifest_in_repository(registry, repo, manifest, platform, tag=None):
            # Blobs, the manifest itself, and (optionally) the tag must
            # all be present in the target repository.
            config = 'config-' + platform
            assert registry.get_blob(repo, make_digest(config)) == config
            layer = 'layer-' + platform
            assert registry.get_blob(repo, make_digest(layer)) == layer
            assert registry.get_manifest(repo, make_digest(manifest)) == manifest
            if tag is not None:
                assert registry.get_manifest(repo, tag) == manifest

        if group:
            source_builds = {}
            source_manifests = {}
            for platform in per_platform_images:
                build = platform_digests[platform]['digests'][0]
                source_builds[platform] = build
                source_registry = mocked_registries[build['registry']]
                source_manifests[platform] = source_registry.get_manifest(
                    build['repository'], build['digest'])
            for registry, conf in registry_conf.items():
                target_registry = mocked_registries[registry]
                for image in test_images:
                    name, tag = image.split(':')
                    if tag not in target_registry.get_repo(name)['tags']:
                        continue
                    raw_manifest_list = to_text(target_registry.get_manifest(name, tag))
                    manifest_list = json.loads(raw_manifest_list,
                                               object_pairs_hook=OrderedDict)
                    # Check if the manifest list is sorted
                    assert json.dumps(manifest_list, indent=4, sort_keys=True,
                                      separators=(',', ': ')) == raw_manifest_list
                    arch_list = [m['platform']['architecture']
                                 for m in manifest_list['manifests']]
                    assert arch_list == sorted(arch_list)
                    assert manifest_list['mediaType'] == list_type
                    assert manifest_list['schemaVersion'] == 2
                    manifests = manifest_list['manifests']
                    assert all(d['mediaType'] == manifest_type for d in manifests)
                    assert all(d['platform']['os'] == 'linux' for d in manifests)
                    for platform in platform_digests:
                        descs = [d for d in manifests
                                 if d['platform']['architecture'] == goarch[platform]]
                        assert len(descs) == 1
                        assert descs[0]['digest'] == source_builds[platform]['digest']
                        verify_manifest_in_repository(target_registry, name,
                                                      source_manifests[platform],
                                                      platform)
        else:
            platforms = list(platform_digests)
            assert len(platforms) == 1
            platform = platforms[0]
            source_build = platform_digests[platform]['digests'][0]
            source_registry = mocked_registries[source_build['registry']]
            source_manifest = source_registry.get_manifest(source_build['repository'],
                                                           source_build['digest'])
            for registry, conf in registry_conf.items():
                if conf['version'] == 'v1':
                    continue
                target_registry = mocked_registries[registry]
                for image in get_primary_images(workflow):
                    repo = image.to_str(registry=False, tag=False)
                    if image.tag not in target_registry.get_repo(repo)['tags']:
                        continue
                    verify_manifest_in_repository(target_registry, repo,
                                                  source_manifest, platform,
                                                  image.tag)
                # Floating tags are not pushed in the non-grouped case.
                for image in get_floating_images(workflow):
                    repo = image.to_str(registry=False, tag=False)
                    assert image.tag not in target_registry.get_repo(repo)['tags']
        # Check that plugin returns ManifestDigest object
        plugin_results = results[GroupManifestsPlugin.key]
        result_digest = plugin_results["manifest_digest"]
        assert isinstance(result_digest, ManifestDigest)
        assert plugin_results["media_type"]
        assert plugin_results["manifest"]
    else:
        with pytest.raises(PluginFailedException) as ex:
            runner.run()
        assert expected_exception in str(ex.value)
def set_group_manifest_info(self, extra):
    """Record group-manifest (manifest list) info in the Koji metadata.

    When group_manifests produced a manifest list, fills
    ``extra['image']['index']`` with tags, pullspecs and digests.
    Otherwise patches the x86_64 docker-image build output metadata
    in place.
    """
    version_release = None
    primary_images = get_primary_images(self.workflow)
    if primary_images:
        version_release = primary_images[0].tag
    if is_scratch_build(self.workflow):
        # Scratch builds have no primary images; use the first configured tag.
        tags = [image.tag for image in self.workflow.data.tag_conf.images]
        version_release = tags[0]
    else:
        assert version_release is not None, 'Unable to find version-release image'
        tags = [image.tag for image in primary_images]
    floating_tags = [
        image.tag for image in get_floating_images(self.workflow)
    ]
    unique_images = get_unique_images(self.workflow)
    unique_tags = [image.tag for image in unique_images]
    manifest_data = self.workflow.data.postbuild_results.get(
        PLUGIN_GROUP_MANIFESTS_KEY, {})
    if manifest_data and is_manifest_list(manifest_data.get("media_type")):
        manifest_digest = manifest_data["manifest_digest"]
        digest = manifest_digest.default
        build_image = unique_images[0]
        repo = ImageName.parse(build_image).to_str(registry=False, tag=False)
        # group_manifests added the registry, so this should be valid
        registry_uri = self.workflow.conf.registry['uri']
        digest_version = get_manifest_media_version(manifest_digest)
        media_type = get_manifest_media_type(digest_version)
        extra['image']['index'] = {
            'tags': tags,
            'floating_tags': floating_tags,
            'unique_tags': unique_tags,
            # Pullspecs both by digest and by the version-release tag.
            'pull': [
                f'{registry_uri}/{repo}@{digest}',
                f'{registry_uri}/{repo}:{version_release}',
            ],
            'digests': {
                media_type: digest
            },
        }
    # group_manifests returns None if didn't run, {} if group=False
    else:
        platform = "x86_64"
        # Take the first docker-image output for x86_64, if any.
        _, instance = next(
            self._iter_build_metadata_outputs(platform, {"type": "docker-image"}),
            (None, None),
        )
        if instance:
            # koji_upload, running in the worker, doesn't have the full tags
            # so set them here
            instance['extra']['docker']['tags'] = tags
            instance['extra']['docker']['floating_tags'] = floating_tags
            instance['extra']['docker']['unique_tags'] = unique_tags
            repositories = []
            for pullspec in instance['extra']['docker']['repositories']:
                # Re-tag only by-tag pullspecs; by-digest ones
                # (containing '@') are kept unchanged.
                if '@' not in pullspec:
                    image = ImageName.parse(pullspec)
                    image.tag = version_release
                    pullspec = image.to_str()
                repositories.append(pullspec)
            instance['extra']['docker']['repositories'] = repositories
            self.log.debug("reset tags to so that docker is %s",
                           instance['extra']['docker'])
def test_group_manifests(tmpdir, schema_version, test_name, group, foreign_layers,
                         registries, workers, expected_exception,
                         reactor_config_map, user_params):
    """Exercise GroupManifestsPlugin against mocked registries.

    Verifies either that a sorted manifest list is pushed (group=True),
    that per-worker manifests are re-tagged (group=False), or that the
    expected exception is raised.
    """
    if MOCK:
        mock_docker()
    test_images = ['namespace/httpd:2.4', 'namespace/httpd:latest']
    goarch = {
        'ppc64le': 'powerpc',
        'x86_64': 'amd64',
    }
    all_registry_conf = {
        REGISTRY_V2: {'version': 'v2', 'insecure': True},
        OTHER_V2: {'version': 'v2', 'insecure': False},
    }
    temp_dir = mkdtemp(dir=str(tmpdir))
    with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
        dockerconfig_contents = {
            REGISTRY_V2: {
                "username": "******", "password": DOCKER0_REGISTRY
            }
        }
        dockerconfig.write(json.dumps(dockerconfig_contents))
        dockerconfig.flush()
        all_registry_conf[REGISTRY_V2]['secret'] = temp_dir
    registry_conf = {
        k: v for k, v in all_registry_conf.items() if k in registries
    }
    plugins_conf = [{
        'name': GroupManifestsPlugin.key,
        'args': {
            'registries': registry_conf,
            'group': group,
            'goarch': goarch,
        },
    }]
    mocked_registries, annotations = mock_registries(
        registry_conf, workers,
        schema_version=schema_version, foreign_layers=foreign_layers)
    tasker, workflow = mock_environment(tmpdir, primary_images=test_images,
                                        annotations=annotations)
    if reactor_config_map:
        # Mirror the plugin args into a reactor config workspace entry.
        registries_list = []
        for docker_uri in registry_conf:
            reg_ver = registry_conf[docker_uri]['version']
            reg_secret = None
            if 'secret' in registry_conf[docker_uri]:
                reg_secret = registry_conf[docker_uri]['secret']
            new_reg = {}
            if reg_secret:
                new_reg['auth'] = {'cfg_path': reg_secret}
            else:
                new_reg['auth'] = {'cfg_path': str(temp_dir)}
            new_reg['url'] = 'https://' + docker_uri + '/' + reg_ver
            registries_list.append(new_reg)
        platform_descriptors_list = []
        for platform in goarch:
            new_plat = {
                'platform': platform,
                'architecture': goarch[platform],
            }
            platform_descriptors_list.append(new_plat)
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'group_manifests': group,
                           'registries': registries_list,
                           'platform_descriptors': platform_descriptors_list})
    runner = PostBuildPluginsRunner(tasker, workflow, plugins_conf)
    if expected_exception is None:
        results = runner.run()
        manifest_type, list_type = {
            'v2': (
                'application/vnd.docker.distribution.manifest.v2+json',
                'application/vnd.docker.distribution.manifest.list.v2+json',
            ),
            'oci': (
                'application/vnd.oci.image.manifest.v1+json',
                'application/vnd.oci.image.index.v1+json',
            ),
        }[schema_version]

        def verify_manifest_in_repository(registry, repo, manifest, platform, tag=None):
            # Blobs, the manifest itself, and (optionally) the tag must
            # all be present in the target repository.
            config = 'config-' + platform
            assert registry.get_blob(repo, make_digest(config)) == config
            layer = 'layer-' + platform
            assert registry.get_blob(repo, make_digest(layer)) == layer
            assert registry.get_manifest(repo, make_digest(manifest)) == manifest
            if tag is not None:
                assert registry.get_manifest(repo, tag) == manifest

        if group:
            source_builds = {}
            source_manifests = {}
            for platform in workers:
                build = annotations['worker-builds'][platform]['digests'][0]
                source_builds[platform] = build
                source_registry = mocked_registries[build['registry']]
                source_manifests[platform] = source_registry.get_manifest(
                    build['repository'], build['digest'])
            for registry, conf in registry_conf.items():
                target_registry = mocked_registries[registry]
                for image in test_images:
                    name, tag = image.split(':')
                    if tag not in target_registry.get_repo(name)['tags']:
                        continue
                    raw_manifest_list = to_text(target_registry.get_manifest(name, tag))
                    manifest_list = json.loads(raw_manifest_list,
                                               object_pairs_hook=OrderedDict)
                    # Check if the manifest list is sorted
                    assert json.dumps(manifest_list, indent=4, sort_keys=True,
                                      separators=(',', ': ')) == raw_manifest_list
                    arch_list = [m['platform']['architecture']
                                 for m in manifest_list['manifests']]
                    assert arch_list == sorted(arch_list)
                    assert manifest_list['mediaType'] == list_type
                    assert manifest_list['schemaVersion'] == 2
                    manifests = manifest_list['manifests']
                    assert all(d['mediaType'] == manifest_type for d in manifests)
                    assert all(d['platform']['os'] == 'linux' for d in manifests)
                    for platform in annotations['worker-builds']:
                        descs = [d for d in manifests
                                 if d['platform']['architecture'] == goarch[platform]]
                        assert len(descs) == 1
                        assert descs[0]['digest'] == source_builds[platform]['digest']
                        verify_manifest_in_repository(target_registry, name,
                                                      source_manifests[platform],
                                                      platform)
        else:
            platforms = annotations['worker-builds']
            assert len(platforms) == 1
            platform = list(platforms.keys())[0]
            source_build = annotations['worker-builds'][platform]['digests'][0]
            source_registry = mocked_registries[source_build['registry']]
            source_manifest = source_registry.get_manifest(source_build['repository'],
                                                           source_build['digest'])
            for registry, conf in registry_conf.items():
                if conf['version'] == 'v1':
                    continue
                target_registry = mocked_registries[registry]
                for image in get_primary_images(workflow):
                    repo = image.to_str(registry=False, tag=False)
                    if image.tag not in target_registry.get_repo(repo)['tags']:
                        continue
                    verify_manifest_in_repository(target_registry, repo,
                                                  source_manifest, platform,
                                                  image.tag)
                # Floating tags are not pushed in the non-grouped case.
                for image in get_floating_images(workflow):
                    repo = image.to_str(registry=False, tag=False)
                    assert image.tag not in target_registry.get_repo(repo)['tags']
        # Check that plugin returns ManifestDigest object
        plugin_results = results[GroupManifestsPlugin.key]
        result_digest = plugin_results["manifest_digest"]
        assert isinstance(result_digest, ManifestDigest)
        assert plugin_results["media_type"]
        assert plugin_results["manifest"]
    else:
        with pytest.raises(PluginFailedException) as ex:
            runner.run()
        assert expected_exception in str(ex.value)