def run_in_worker(self):
    """
    Run plugin in worker.

    Replace image pullspecs based on replacements computed in orchestrator,
    then create relatedImages sections in CSVs.

    Exclude CSVs which already have a relatedImages section.
    """
    operator_manifest = self._get_operator_manifest()
    # orchestrator computed plain-string replacements; convert both sides
    # to ImageName objects for the CSV replacement API
    replacement_pullspecs = {
        ImageName.parse(old): ImageName.parse(new)
        for old, new in self.replacement_pullspecs.items()
    }

    self.log.info("Updating operator CSV files")

    for operator_csv in operator_manifest.files:
        if not operator_csv.has_related_images():
            self.log.info("Replacing pullspecs in %s", operator_csv.path)
            # Replace pullspecs everywhere, not just in locations in which they
            # are expected to be found - OCP 4.4 workaround
            operator_csv.replace_pullspecs_everywhere(replacement_pullspecs)

            self.log.info("Creating relatedImages section in %s", operator_csv.path)
            operator_csv.set_related_images()

            operator_csv.dump()
        else:
            self.log.warning("%s has a relatedImages section, skipping", operator_csv.path)
def test_get_primary_images(tag_conf, tag_annotation, expected):
    """get_primary_images must gather primary tags from tag_conf and annotations."""
    template = ImageName.parse('registry.example.com/fedora')
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')

    def tagged(tag):
        # fresh copy of the template image carrying the requested tag
        name = ImageName.parse(str(template))
        name.tag = tag
        return str(name)

    for tag in tag_conf:
        workflow.tag_conf.add_primary_image(tagged(tag))

    annotations = {}
    if tag_annotation:
        annotations['repositories'] = {
            'primary': [tagged(tag) for tag in tag_annotation],
        }

    workflow.build_result = BuildResult(annotations=annotations, image_id='foo')

    actual = get_primary_images(workflow)
    assert len(actual) == len(expected)
    for primary_image, expected_tag in zip(actual, expected):
        # everything except the tag comes from the template image
        assert primary_image.registry == template.registry
        assert primary_image.namespace == template.namespace
        assert primary_image.repo == template.repo
        assert primary_image.tag == expected_tag
def test_update_base_image(organization, tmpdir, reactor_config_map, docker_tasker):
    """The plugin should rewrite the Dockerfile FROM line to the resolved parent id."""
    df_content = dedent("""\
        FROM {}
        LABEL horses=coconuts
        CMD whoah
        """)
    dfp = df_parser(str(tmpdir))
    image_str = "base:image"
    dfp.content = df_content.format(image_str)
    base_str = "base@sha256:1234"
    base_image_name = ImageName.parse("base@sha256:1234")

    enclosed_parent = ImageName.parse(image_str)
    if organization and reactor_config_map:
        enclosed_parent.enclose(organization)

    workflow = mock_workflow()
    workflow.builder.set_df_path(dfp.dockerfile_path)
    # maps the Dockerfile pullspec to its resolved digest-pinned image
    workflow.builder.parent_images = {enclosed_parent: base_image_name}
    workflow.builder.base_image = base_image_name
    workflow.builder.set_parent_inspection_data(base_str, dict(Id=base_str))
    workflow.builder.tasker.inspect_image = lambda *_: dict(Id=base_str)

    run_plugin(workflow, reactor_config_map, docker_tasker, organization=organization)

    expected_df = df_content.format(base_str)
    assert dfp.content == expected_df
def test_pull_parent_images(organization, reactor_config_map, inspect_only):
    """Both the base image and the extra builder stage must be pulled."""
    builder_image = 'builder:image'
    parent_images = {BASE_IMAGE_NAME.copy(): None, ImageName.parse(builder_image): None}

    enclosed_base_image = BASE_IMAGE_W_REGISTRY
    enclosed_builder_image = LOCALHOST_REGISTRY + '/' + builder_image

    if organization and reactor_config_map:
        def enclose(pullspec):
            # rewrite the pullspec under the organization namespace
            name = ImageName.parse(pullspec)
            name.enclose(organization)
            return name.to_str()

        enclosed_base_image = enclose(enclosed_base_image)
        enclosed_builder_image = enclose(enclosed_builder_image)

    test_pull_base_image_plugin(
        LOCALHOST_REGISTRY, BASE_IMAGE,
        [  # expected to pull
            enclosed_base_image,
            enclosed_builder_image,
        ],
        [],  # should not be pulled
        reactor_config_map=reactor_config_map,
        inspect_only=inspect_only,
        parent_images=parent_images,
        organization=organization)
def test_pull_parent_images(organization, reactor_config_map, inspect_only):
    """Both the base image and the extra builder stage should be pulled."""
    builder_image = 'builder:image'
    parent_images = {BASE_IMAGE_NAME.copy(): None, ImageName.parse(builder_image): None}

    enclosed_base_image = BASE_IMAGE_W_REGISTRY
    enclosed_builder_image = LOCALHOST_REGISTRY + '/' + builder_image
    if organization and reactor_config_map:
        # enclosure rewrites both pullspecs under the organization namespace
        base_image_name = ImageName.parse(enclosed_base_image)
        base_image_name.enclose(organization)
        enclosed_base_image = base_image_name.to_str()
        builder_image_name = ImageName.parse(enclosed_builder_image)
        builder_image_name.enclose(organization)
        enclosed_builder_image = builder_image_name.to_str()

    test_pull_base_image_plugin(
        LOCALHOST_REGISTRY, BASE_IMAGE,
        [  # expected to pull
            enclosed_base_image,
            enclosed_builder_image,
        ],
        [],  # should not be pulled
        reactor_config_map=reactor_config_map,
        inspect_only=inspect_only,
        parent_images=parent_images,
        organization=organization)
def __init__(self, source, image, **kwargs):
    """
    :param source: build source object (provides the Dockerfile) — assumed to
                   implement get_dockerfile_path(); TODO confirm against callers
    :param image: str, name of the image being built
    """
    LastLogger.__init__(self)
    BuilderStateMachine.__init__(self)

    print_version_of_tools()

    self.tasker = DockerTasker()

    info, version = self.tasker.get_info(), self.tasker.get_version()
    logger.debug(json.dumps(info, indent=2))
    logger.info(json.dumps(version, indent=2))

    # arguments for build
    self.source = source
    self.base_image_id = None
    self.image_id = None
    self.built_image_info = None
    self.image = ImageName.parse(image)

    # get info about base image from dockerfile
    self.df_path, self.df_dir = self.source.get_dockerfile_path()
    self.base_image = ImageName.parse(DockerfileParser(self.df_path).baseimage)
    logger.debug("base image specified in dockerfile = '%s'", self.base_image)
    # default to :latest when the Dockerfile FROM carries no tag
    if not self.base_image.tag:
        self.base_image.tag = 'latest'
def __init__(self, source, image, **kwargs):
    """
    :param source: build source object providing the Dockerfile path
                   (presumably an atomic-reactor source; verify against callers)
    :param image: str, name of the image to build
    """
    LastLogger.__init__(self)
    BuilderStateMachine.__init__(self)

    print_version_of_tools()

    self.tasker = DockerTasker()

    info, version = self.tasker.get_info(), self.tasker.get_version()
    logger.debug(json.dumps(info, indent=2))
    logger.info(json.dumps(version, indent=2))

    # arguments for build
    self.source = source
    self.base_image_id = None
    self.image_id = None
    self.built_image_info = None
    self.image = ImageName.parse(image)

    # get info about base image from dockerfile
    self.df_path, self.df_dir = self.source.get_dockerfile_path()
    self.base_image = ImageName.parse(
        DockerfileParser(self.df_path).baseimage)
    logger.debug("base image specified in dockerfile = '%s'", self.base_image)
    # fall back to :latest when the FROM line has no explicit tag
    if not self.base_image.tag:
        self.base_image.tag = 'latest'
def test_update_base_image(organization, tmpdir, reactor_config_map, docker_tasker):
    """FROM must end up pointing at the resolved, digest-pinned parent."""
    dockerfile_template = dedent("""\
        FROM {}
        LABEL horses=coconuts
        CMD whoah
        """)
    parser = df_parser(str(tmpdir))
    original_base = "base:image"
    parser.content = dockerfile_template.format(original_base)

    resolved = "base@sha256:1234"
    resolved_name = ImageName.parse("base@sha256:1234")

    parent_key = ImageName.parse(original_base)
    if organization and reactor_config_map:
        parent_key.enclose(organization)

    workflow = mock_workflow()
    builder = workflow.builder
    builder.set_df_path(parser.dockerfile_path)
    builder.parent_images = {parent_key: resolved_name}
    builder.base_image = resolved_name
    builder.set_parent_inspection_data(resolved, dict(Id=resolved))
    builder.tasker.inspect_image = lambda *_: dict(Id=resolved)

    run_plugin(workflow, reactor_config_map, docker_tasker, organization=organization)

    assert parser.content == dockerfile_template.format(resolved)
def test_get_primary_images(tag_conf, tag_annotation, expected):
    """get_primary_images should collect primary tags from tag_conf and annotations."""
    template_image = ImageName.parse('registry.example.com/fedora')
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')

    for tag in tag_conf:
        image_name = ImageName.parse(str(template_image))
        image_name.tag = tag
        workflow.tag_conf.add_primary_image(str(image_name))

    annotations = {}
    for tag in tag_annotation:
        annotations.setdefault('repositories', {}).setdefault('primary', [])
        image_name = ImageName.parse(str(template_image))
        image_name.tag = tag
        annotations['repositories']['primary'].append(str(image_name))

    build_result = BuildResult(annotations=annotations, image_id='foo')
    workflow.build_result = build_result

    actual = get_primary_images(workflow)
    assert len(actual) == len(expected)
    for index, primary_image in enumerate(actual):
        # everything except the tag comes from the template image
        assert primary_image.registry == template_image.registry
        assert primary_image.namespace == template_image.namespace
        assert primary_image.repo == template_image.repo
        assert primary_image.tag == expected[index]
def test_replace_repo(self, image, site_replacements, user_replacements,
                      replaced, should_query, tmpdir, caplog):
    """replace_repo applies site/user repo replacements, querying image labels only when needed."""
    image = ImageName.parse(image)
    replaced = ImageName.parse(replaced)

    mock_package_mapping_files(site_replacements)
    # the label query must happen exactly once iff a query is expected
    mock_inspect_query(image,
                       {PKG_LABEL: '{}-package'.format(image.repo)},
                       times=1 if should_query else 0)

    site_config = get_site_config(repo_replacements=site_replacements)
    user_config = get_user_config(manifests_dir=str(tmpdir),
                                  repo_replacements=user_replacements)

    replacer = PullspecReplacer(user_config=user_config, site_config=site_config)
    assert replacer.replace_repo(image) == replaced

    if site_replacements and image.registry in site_replacements:
        assert "Downloading mapping file for {}".format(image.registry) in caplog.text

    if should_query:
        assert "Querying {} for image labels".format(image.registry) in caplog.text
        assert "Resolved package name" in caplog.text
        assert "Replacement for package" in caplog.text
    else:
        assert "repo_replacements not configured for {}".format(image.registry) in caplog.text
def test_image_download(tmpdir, docker_tasker, parents, skip_plugin, architecture,
                        architectures, download_filesystem, reactor_config_map, caplog):
    """add_filesystem should import the koji filesystem only for custom base images."""
    if MOCK:
        mock_docker()
    workflow = mock_workflow(tmpdir, for_orchestrator=architectures is not None)
    if not skip_plugin:
        mock_koji_session(download_filesystem=download_filesystem)
    mock_image_build_file(str(tmpdir))

    workflow.builder.base_image = ImageName.parse(parents[-1])
    workflow.builder.parents_ordered = parents
    # custom parent/base flags are derived from the 'koji/image-build' marker
    workflow.builder.custom_parent_image = 'koji/image-build' in parents
    workflow.builder.custom_base_image = 'koji/image-build' == parents[-1]
    workflow.builder.parent_images = {}
    for image in parents:
        if image == 'scratch':
            continue
        workflow.builder.parent_images[ImageName.parse(image)] = None

    if architectures:
        workflow.prebuild_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = set(architectures)

    if reactor_config_map:
        make_and_store_reactor_config_map(workflow, {'root_url': '', 'auth': {}})

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': PLUGIN_ADD_FILESYSTEM_KEY,
            'args': {
                'koji_hub': KOJI_HUB,
                'architecture': architecture,
            }
        }]
    )

    results = runner.run()
    plugin_result = results[PLUGIN_ADD_FILESYSTEM_KEY]

    if skip_plugin:
        # plugin refuses to run for a non-custom base image
        message = 'Nothing to do for non-custom base images'
        assert message in caplog.text
        assert plugin_result is None
        return

    assert 'base-image-id' in plugin_result
    assert 'filesystem-koji-task-id' in plugin_result

    if download_filesystem:
        assert plugin_result['base-image-id'] == IMPORTED_IMAGE_ID
        assert plugin_result['filesystem-koji-task-id'] == FILESYSTEM_TASK_ID
    else:
        assert plugin_result['base-image-id'] is None
        assert plugin_result['filesystem-koji-task-id'] is None
def test_image_download(tmpdir, docker_tasker, parents, skip_plugin, architecture,
                        architectures, download_filesystem, reactor_config_map, caplog):
    """add_filesystem must import the koji-built filesystem only for custom base images."""
    if MOCK:
        mock_docker()
    workflow = mock_workflow(tmpdir)
    if not skip_plugin:
        mock_koji_session(download_filesystem=download_filesystem)
    mock_image_build_file(str(tmpdir))

    workflow.builder.base_image = ImageName.parse(parents[-1])
    workflow.builder.parents_ordered = parents
    # custom flags hinge on the 'koji/image-build' marker in the FROM chain
    workflow.builder.custom_parent_image = 'koji/image-build' in parents
    workflow.builder.custom_base_image = 'koji/image-build' == parents[-1]
    workflow.builder.parent_images = {}
    for image in parents:
        if image == 'scratch':
            continue
        workflow.builder.parent_images[ImageName.parse(image)] = None

    if architectures:
        workflow.prebuild_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = set(architectures)

    if reactor_config_map:
        make_and_store_reactor_config_map(workflow, {'root_url': '', 'auth': {}})

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': PLUGIN_ADD_FILESYSTEM_KEY,
            'args': {
                'koji_hub': KOJI_HUB,
                'architecture': architecture,
            }
        }]
    )

    results = runner.run()
    plugin_result = results[PLUGIN_ADD_FILESYSTEM_KEY]

    if skip_plugin:
        message = 'Nothing to do for non-custom base images'
        assert message in caplog.text
        assert plugin_result is None
        return

    assert 'base-image-id' in plugin_result
    assert 'filesystem-koji-task-id' in plugin_result

    if download_filesystem:
        assert plugin_result['base-image-id'] == IMPORTED_IMAGE_ID
        assert plugin_result['filesystem-koji-task-id'] == FILESYSTEM_TASK_ID
    else:
        assert plugin_result['base-image-id'] is None
        assert plugin_result['filesystem-koji-task-id'] is None
def test_try_with_library_pull_base_image(library, reactor_config_map):
    """A failed pull must surface the registry-qualified image name in the error."""
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()

    if library:
        base_image = 'library/parent-image'
    else:
        base_image = 'parent-image'

    workflow.builder.base_image = ImageName.parse(base_image)
    workflow.builder.parent_images = {ImageName.parse(base_image): None}

    # NOTE(review): MockResponse appears unused in this test — confirm before removing
    class MockResponse(object):
        content = ''

    cr = CommandResult()
    cr._error = "cmd_error"
    cr._error_detail = {"message": "error_detail"}

    # one pull attempt for library/ images, two otherwise — presumably the
    # second attempt retries under the library/ namespace; inferred from counts
    if library:
        call_wait = 1
    else:
        call_wait = 2

    (flexmock(atomic_reactor.util)
        .should_receive('wait_for_command')
        .times(call_wait)
        .and_return(cr))

    error_message = 'registry.example.com/' + base_image

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'source_registry': {'url': 'registry.example.com',
                                               'insecure': True}})

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': 'registry.example.com',
                     'parent_registry_insecure': True},
        }],
    )

    with pytest.raises(PluginFailedException) as exc:
        runner.run()

    assert error_message in exc.value.args[0]
def test_try_with_library_pull_base_image(library, reactor_config_map):
    """A failed pull must surface the registry-qualified image name in the error.

    wait_for_command is expected once for library/ images and twice otherwise
    (inferred from the call counts below — presumably the second attempt
    retries under the library/ namespace).
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()

    base_image = 'library/parent-image' if library else 'parent-image'
    workflow.builder.base_image = ImageName.parse(base_image)
    workflow.builder.parent_images = {ImageName.parse(base_image): None}

    # (removed an unused MockResponse helper class — dead code)
    cr = CommandResult()
    cr._error = "cmd_error"
    cr._error_detail = {"message": "error_detail"}

    call_wait = 1 if library else 2
    (flexmock(atomic_reactor.util)
        .should_receive('wait_for_command')
        .times(call_wait)
        .and_return(cr))

    error_message = 'registry.example.com/' + base_image

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {
            WORKSPACE_CONF_KEY: ReactorConfig(
                {'version': 1,
                 'source_registry': {'url': 'registry.example.com',
                                     'insecure': True}}),
        }

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': 'registry.example.com',
                     'parent_registry_insecure': True},
        }],
    )

    with pytest.raises(PluginFailedException) as exc:
        runner.run()

    assert error_message in exc.value.args[0]
def _find_image(img, ignore_registry=False):
    """Return the mock image record whose first RepoTag matches *img*, else None.

    With ignore_registry=True a record also matches when its name equals *img*
    after the registry component is stripped.
    """
    wanted = ImageName.parse(img).to_str(explicit_tag=True)

    def matches(record):
        name = record['RepoTags'][0]
        if name == wanted:
            return True
        return (ignore_registry and
                ImageName.parse(name).to_str(registry=False) == wanted)

    return next((record for record in mock_images if matches(record)), None)
def workflow_callback(workflow):
    """Prepare the workflow and set flexmock expectations for manifest-list lookups."""
    workflow = self.prepare(workflow)
    release = 'rel1'
    version = 'ver1'
    config_blob = {'config': {'Labels': {'release': release, 'version': version}}}
    # config blob is fetched only when the sha does NOT resolve to a manifest list
    (flexmock(atomic_reactor.util)
     .should_receive('get_config_from_registry')
     .and_return(config_blob)
     .times(0 if sha_is_manifest_list else 1))

    manifest_list = {
        'manifests': [
            {'platform': {'architecture': 'amd64'}, 'digest': 'sha256:123456'},
            {'platform': {'architecture': 'ppc64le'}, 'digest': 'sha256:654321'},
        ]
    }

    manifest_tag = 'registry.example.com' + '/' + BASE_IMAGE_W_SHA
    base_image_result = ImageName.parse(manifest_tag)
    manifest_image = base_image_result.copy()

    if sha_is_manifest_list:
        (flexmock(atomic_reactor.util)
         .should_receive('get_manifest_list')
         .with_args(image=manifest_image, registry=manifest_image.registry,
                    insecure=True, dockercfg_path=None)
         .and_return(flexmock(json=lambda: manifest_list,
                              content=json.dumps(manifest_list).encode('utf-8')))
         .once())
    else:
        # first lookup by digest returns nothing ...
        (flexmock(atomic_reactor.util)
         .should_receive('get_manifest_list')
         .with_args(image=manifest_image, registry=manifest_image.registry,
                    insecure=True, dockercfg_path=None)
         .and_return(None)
         .once()
         .ordered())

        # ... then a second, ordered lookup by the version-release tag succeeds
        docker_tag = "%s-%s" % (version, release)
        manifest_tag = 'registry.example.com' + '/' +\
                       BASE_IMAGE_W_SHA[:BASE_IMAGE_W_SHA.find('@sha256')] +\
                       ':' + docker_tag
        base_image_result = ImageName.parse(manifest_tag)
        manifest_image = base_image_result.copy()
        (flexmock(atomic_reactor.util)
         .should_receive('get_manifest_list')
         .with_args(image=manifest_image, registry=manifest_image.registry,
                    insecure=True, dockercfg_path=None)
         .and_return(flexmock(json=lambda: manifest_list))
         .once()
         .ordered())
    return workflow
def test_parent_images_mismatch_base_image(tmpdir, docker_tasker):
    """test when base_image has been updated differently from parent_images."""
    parser = df_parser(str(tmpdir))
    parser.content = "FROM base:image"

    workflow = mock_workflow()
    builder = workflow.builder
    builder.set_df_path(parser.dockerfile_path)
    builder.base_image = ImageName.parse("base:image")
    # resolved parent disagrees with builder.base_image -> plugin must raise
    builder.parent_images = {
        ImageName.parse("base:image"): ImageName.parse("different-parent-tag"),
    }

    plugin = ChangeFromPlugin(docker_tasker, workflow)
    with pytest.raises(BaseImageMismatch):
        plugin.run()
def test_parent_images_mismatch_base_image(tmpdir, docker_tasker):
    """test when base_image has been updated differently from parent_images."""
    dfp = df_parser(str(tmpdir))
    dfp.content = "FROM base:image"

    workflow = mock_workflow()
    workflow.builder.set_df_path(dfp.dockerfile_path)
    workflow.builder.base_image = ImageName.parse("base:image")
    # resolved parent disagrees with builder.base_image, so the plugin must raise
    workflow.builder.parent_images = {
        ImageName.parse("base:image"): ImageName.parse("different-parent-tag")
    }

    with pytest.raises(BaseImageMismatch):
        ChangeFromPlugin(docker_tasker, workflow).run()
def __init__(self):
    self.tasker = flexmock()
    # base image plus a pristine copy kept for later comparison
    self.base_image = ImageName(repo='Fedora', tag='22')
    self.original_base_image = ImageName(repo='Fedora', tag='22')
    self.base_from_scratch = False
    self.custom_base_image = False
    # maps the Dockerfile pullspec to its resolved (digest-pinned) image
    self.parent_images = {ImageName.parse('base'): ImageName.parse('base:stubDigest')}
    base_inspect = {INSPECT_CONFIG: {'Labels': BASE_IMAGE_LABELS.copy()}}
    self._parent_images_inspect = {ImageName.parse('base:stubDigest'): base_inspect}
    self.parent_images_digests = {'base:latest': {V2_LIST: 'stubDigest'}}
    self.image_id = 'image_id'
    self.image = 'image'
    self._df_path = 'df_path'
    self.df_dir = 'df_dir'
def test_pull_parent_wrong_registry(reactor_config_map, inspect_only):  # noqa: F811
    """A parent from a non-matching registry fails the plugin with a clear message."""
    parent_images = {
        ImageName.parse("base:image"): None,
        ImageName.parse("some.registry:8888/builder:image"): None,
    }
    with pytest.raises(PluginFailedException) as exc:
        test_pull_base_image_plugin(
            'different.registry:5000', "base:image", [], [],
            reactor_config_map=reactor_config_map,
            inspect_only=inspect_only,
            parent_images=parent_images
        )

    message = str(exc.value)
    assert "Dockerfile: 'some.registry:8888/builder:image'" in message
    assert "expected registry: 'different.registry:5000'" in message
    # the image from the expected registry must not be reported as a problem
    assert "base:image" not in message
def test_pull_parent_wrong_registry(reactor_config_map, inspect_only):  # noqa: F811
    """A parent image from an unexpected registry should fail the plugin."""
    parent_images = {
        ImageName.parse("base:image"): None,
        ImageName.parse("some.registry:8888/builder:image"): None}
    with pytest.raises(PluginFailedException) as exc:
        test_pull_base_image_plugin(
            'different.registry:5000', "base:image", [], [],
            reactor_config_map=reactor_config_map,
            inspect_only=inspect_only,
            parent_images=parent_images
        )

    assert "Dockerfile: 'some.registry:8888/builder:image'" in str(exc.value)
    assert "expected registry: 'different.registry:5000'" in str(exc.value)
    # the image from the expected registry must not be flagged
    assert "base:image" not in str(exc.value)
def _find_image(img, ignore_registry=False):
    """Return the mocked image record matching img, optionally ignoring the registry."""
    global mock_images

    tagged_img = ImageName.parse(img).to_str(explicit_tag=True)
    for im in mock_images:
        im_name = im['RepoTags'][0]
        if im_name == tagged_img:
            return im
        if ignore_registry:
            # also accept a match once the registry prefix is stripped
            im_name_wo_reg = ImageName.parse(im_name).to_str(registry=False)
            if im_name_wo_reg == tagged_img:
                return im

    return None
def test_multiple_parent_images(self, workflow, koji_session, reactor_config_map,
                                special_base):
    """Koji builds should be resolved for every parent (base may be scratch/custom)."""
    parent_images = {
        ImageName.parse('somebuilder'): ImageName.parse('b1tag'),
        ImageName.parse('otherbuilder'): ImageName.parse('b2tag'),
        ImageName.parse('base'): ImageName.parse('basetag'),
    }
    koji_builds = dict(
        somebuilder=dict(nvr='somebuilder-1.0-1', id=42),
        otherbuilder=dict(nvr='otherbuilder-2.0-1', id=43),
        base=dict(nvr='base-16.0-1', id=16),
        unresolved=None,
    )
    image_inspects = {}
    koji_expects = {}

    # need to load up our mock objects with expected responses for the parents
    for img, build in koji_builds.items():
        if build is None:
            continue
        name, version, release = koji_builds[img]['nvr'].split('-')
        labels = {'com.redhat.component': name, 'version': version, 'release': release}
        image_inspects[img] = {INSPECT_CONFIG: dict(Labels=labels)}

        (workflow.builder.tasker
            .should_receive('inspect_image')
            .with_args(parent_images[ImageName.parse(img)])
            .and_return(image_inspects[img]))
        (koji_session.should_receive('getBuild')
            .with_args(koji_builds[img]['nvr'])
            .and_return(koji_builds[img]))
        koji_expects[ImageName.parse(img)] = build

    if special_base == 'scratch':
        workflow.builder.set_base_image(SCRATCH_FROM)
    elif special_base == 'custom':
        workflow.builder.set_base_image('koji/image-build')
        parent_images[ImageName.parse('koji/image-build')] = None
    else:
        workflow.builder.set_base_image('basetag')
        workflow.builder.base_image_inspect.update(image_inspects['base'])
    workflow.builder.parent_images = parent_images

    expected = {
        BASE_IMAGE_KOJI_BUILD: koji_builds['base'],
        PARENT_IMAGES_KOJI_BUILDS: koji_expects,
    }
    # scratch/custom bases have no koji build of their own
    if special_base:
        del expected[BASE_IMAGE_KOJI_BUILD]

    self.run_plugin_with_args(workflow, expect_result=expected,
                              reactor_config_map=reactor_config_map)
def test_replace_registry(self, image, replacement_registries, replaced, caplog):
    """Registry replacement applies when configured; otherwise it logs and no-ops."""
    source = ImageName.parse(image)
    expected = ImageName.parse(replaced)

    site_config = get_site_config(registry_post_replace=replacement_registries)
    replacer = PullspecReplacer(user_config={}, site_config=site_config)

    assert replacer.replace_registry(source) == expected

    if source.registry not in replacement_registries:
        msg = "registry_post_replace not configured for {}".format(source.registry)
        assert msg in caplog.text
def workflow_callback(workflow):
    """Prepare the workflow and set flexmock expectations for manifest-list lookups."""
    workflow = self.prepare(workflow)
    release = 'rel1'
    version = 'ver1'
    config_blob = {'config': {'Labels': {'release': release, 'version': version}}}
    # config blob is fetched only when the sha is NOT a manifest list
    (flexmock(atomic_reactor.util)
     .should_receive('get_config_from_registry')
     .and_return(config_blob)
     .times(0 if sha_is_manifest_list else 1))

    manifest_list = {
        'manifests': [
            {'platform': {'architecture': 'amd64'}, 'digest': 'sha256:123456'},
            {'platform': {'architecture': 'ppc64le'}, 'digest': 'sha256:654321'},
        ]
    }

    manifest_tag = 'registry.example.com' + '/' + BASE_IMAGE_W_SHA
    base_image_result = ImageName.parse(manifest_tag)
    manifest_image = base_image_result.copy()

    if sha_is_manifest_list:
        (flexmock(atomic_reactor.util)
         .should_receive('get_manifest_list')
         .with_args(image=manifest_image, registry=manifest_image.registry,
                    insecure=True)
         .and_return(flexmock(json=lambda: manifest_list))
         .once())
    else:
        # first, ordered lookup by digest returns nothing ...
        (flexmock(atomic_reactor.util)
         .should_receive('get_manifest_list')
         .with_args(image=manifest_image, registry=manifest_image.registry,
                    insecure=True)
         .and_return(None)
         .once()
         .ordered())

        # ... then the fallback lookup by the version-release tag succeeds
        docker_tag = "%s-%s" % (version, release)
        manifest_tag = 'registry.example.com' + '/' +\
                       BASE_IMAGE_W_SHA[:BASE_IMAGE_W_SHA.find('@sha256')] +\
                       ':' + docker_tag
        base_image_result = ImageName.parse(manifest_tag)
        manifest_image = base_image_result.copy()
        (flexmock(atomic_reactor.util)
         .should_receive('get_manifest_list')
         .with_args(image=manifest_image, registry=manifest_image.registry,
                    insecure=True)
         .and_return(flexmock(json=lambda: manifest_list))
         .once()
         .ordered())
    return workflow
def test_pull_base_image_plugin(df_base, parent_registry, expected_w_reg, expected_wo_reg):
    """Run the pull_base_image plugin and check which image names exist afterwards.

    :param df_base: str, base image named in the Dockerfile
    :param parent_registry: registry passed to the plugin (may be None)
    :param expected_w_reg: bool, registry-qualified name expected to exist
    :param expected_wo_reg: bool, unqualified name expected to exist
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # sanity: neither form of the image is present before the plugin runs
    assert not tasker.image_exists(BASE_IMAGE)
    assert not tasker.image_exists(BASE_IMAGE_W_REGISTRY)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': parent_registry,
                     'parent_registry_insecure': True}
        }]
    )

    runner.run()

    assert tasker.image_exists(BASE_IMAGE) == expected_wo_reg
    assert tasker.image_exists(BASE_IMAGE_W_REGISTRY) == expected_w_reg

    # Best-effort cleanup. Fixes two defects in the original: a bare `except:`
    # (which also swallowed KeyboardInterrupt/SystemExit), and a single try
    # block where a failure removing the first image skipped the second removal.
    for image in (BASE_IMAGE, BASE_IMAGE_W_REGISTRY):
        try:
            tasker.remove_image(image)
        except Exception:
            pass
def test_push(tmpdir):
    """
    this is an integration test which should be run against real pulp
    """
    client = docker.AutoVersionClient()

    try:
        client.inspect_image("busybox:latest")
    except APIError:
        client.pull("busybox", tag="latest")
    image = client.get_image("busybox:latest")

    image_tar_path = os.path.join(str(tmpdir), "busybox.tar")
    # Write the tarball in binary mode and let the context manager close the
    # handle. The original opened the file in text mode ("w") and never closed
    # it, which fails on Python 3 (image.data is a bytes payload) and can leave
    # the file incompletely flushed before the upload below.
    with open(image_tar_path, "wb") as image_file:
        image_file.write(image.data)

    registry_name = os.environ.get("PULP_INSTANCE", None) or "dev"
    secret_path = os.path.expanduser("~/.pulp/")

    image_names = [ImageName.parse("test/busybox-test")]
    workflow = DockerBuildWorkflow(SOURCE, "test/busybox-test")
    uploader = PulpUploader(workflow, registry_name, image_tar_path, logger,
                            pulp_secret_path=secret_path)
    uploader.push_tarball_to_pulp(image_names)
def workflow_callback(workflow):
    """Prepare a ppc64le-only workflow and expect exactly one manifest-list query."""
    workflow = self.prepare(workflow)
    # restrict the build to a single platform
    workflow.prebuild_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = set(['ppc64le'])
    release = 'rel1'
    version = 'ver1'
    config_blob = {'config': {'Labels': {'release': release, 'version': version}}}
    # the config blob must never be fetched in this scenario
    (flexmock(atomic_reactor.util)
     .should_receive('get_config_from_registry')
     .and_return(config_blob)
     .times(0))

    manifest_list = {
        'manifests': [
            {'platform': {'architecture': 'ppc64le'}, 'digest': 'sha256:654321'},
        ]
    }

    manifest_tag = 'registry.example.com' + '/' + BASE_IMAGE_W_SHA
    base_image_result = ImageName.parse(manifest_tag)
    manifest_image = base_image_result.copy()
    (flexmock(atomic_reactor.util)
     .should_receive('get_manifest_list')
     .with_args(image=manifest_image, registry=manifest_image.registry,
                insecure=True)
     .and_return(flexmock(json=lambda: manifest_list))
     .once())
    return workflow
def test_parent_images_missing(tmpdir, docker_tasker):
    """test when parent_images has been mangled and lacks parents compared to dockerfile."""
    parser = df_parser(str(tmpdir))
    parser.content = dedent("""\
        FROM first:parent AS builder1
        FROM second:parent AS builder2
        FROM monty
        """)

    workflow = mock_workflow()
    builder = workflow.builder
    builder.set_df_path(parser.dockerfile_path)
    # only the final stage is mapped; builder1/builder2 parents are absent
    builder.parent_images = {ImageName.parse("monty"): ImageName.parse("build-name:3")}
    builder.base_image = ImageName.parse("build-name:3")

    with pytest.raises(ParentImageMissing):
        ChangeFromPlugin(docker_tasker, workflow).run()
def test_parent_images_unresolved(tmpdir, docker_tasker):
    """test when parent_images hasn't been filled in with unique tags."""
    parser = df_parser(str(tmpdir))
    parser.content = "FROM spam"

    workflow = mock_workflow()
    builder = workflow.builder
    builder.set_df_path(parser.dockerfile_path)
    builder.base_image = ImageName.parse('eggs')
    # we want to fail because some img besides base was not resolved
    builder.parent_images = {
        ImageName.parse('spam'): ImageName.parse('eggs'),
        ImageName.parse('extra:image'): None,
    }

    with pytest.raises(ParentImageUnresolved):
        ChangeFromPlugin(docker_tasker, workflow).run()
def build_image_privileged_container(self, build_image, json_args_path):
    """
    Build image inside privileged container: this will run another docker instance inside

    This operation is asynchronous and you should wait for container to finish.

    :param build_image: str, name of image where build is performed
    :param json_args_path: str, this dir is mounted inside build container
                           and used as a way to transport data between host and buildroot; there
                           has to be a file inside this dir with name
                           atomic_reactor.BUILD_JSON which is used to feed build
    :return: container id as returned by tasker.run (the original docstring
             claimed a dict with keys container_id and stream, which did not
             match the actual return value)
    """
    logger.info("building image '%s' inside privileged container", build_image)

    self._check_build_input(build_image, json_args_path)
    self._obtain_source_from_path_if_needed(json_args_path, CONTAINER_SHARE_PATH)

    volume_bindings = {
        json_args_path: {"bind": CONTAINER_SHARE_PATH},
    }

    if self._volume_bind_understands_mode():
        volume_bindings[json_args_path]["mode"] = "rw,Z"
    else:
        volume_bindings[json_args_path]["rw"] = True

    # Close the build-json handle deterministically; the original used a bare
    # open(...).read() inside the log call, leaking the descriptor until GC.
    with open(os.path.join(json_args_path, BUILD_JSON)) as build_json_file:
        logger.debug('build json mounted in container: %s', build_json_file.read())

    container_id = self.tasker.run(
        ImageName.parse(build_image),
        create_kwargs={"volumes": [json_args_path]},
        start_kwargs={"binds": volume_bindings, "privileged": True},
    )

    return container_id
def create_image(self, df_dir_path, image, use_cache=False):
    """
    create image: get atomic-reactor sdist tarball, build image and tag it

    :param df_dir_path: str, path to the directory with the Dockerfile to build
    :param image: str, name (tag) for the built image
    :param use_cache: bool, use the docker build cache
    :return: None
    """
    logger.debug("creating build image: df_dir_path = '%s', image = '%s'", df_dir_path, image)

    if not os.path.isdir(df_dir_path):
        raise RuntimeError("Directory '%s' does not exist." % df_dir_path)

    tmpdir = tempfile.mkdtemp()
    df_tmpdir = os.path.join(tmpdir, 'df-%s' % uuid.uuid4())
    git_tmpdir = os.path.join(tmpdir, 'git-%s' % uuid.uuid4())
    os.mkdir(df_tmpdir)
    logger.debug("tmp dir with dockerfile '%s' created", df_tmpdir)
    os.mkdir(git_tmpdir)
    logger.debug("tmp dir with atomic-reactor '%s' created", git_tmpdir)
    try:
        # work on a copy of the Dockerfile dir so the tarball can be dropped
        # next to it without touching the caller's files
        for f in glob(os.path.join(df_dir_path, '*')):
            shutil.copy(f, df_tmpdir)
            logger.debug("cp '%s' -> '%s'", f, df_tmpdir)
        logger.debug("df dir: %s", os.listdir(df_tmpdir))

        reactor_tarball = self.get_reactor_tarball_path(tmpdir=git_tmpdir)
        reactor_tb_path = os.path.join(df_tmpdir, DOCKERFILE_REACTOR_TARBALL_NAME)
        shutil.copy(reactor_tarball, reactor_tb_path)

        image_name = ImageName.parse(image)
        logs_gen = self.tasker.build_image_from_path(df_tmpdir, image_name, use_cache=use_cache)
        wait_for_command(logs_gen)
    finally:
        # always drop the scratch space, even when the build fails
        shutil.rmtree(tmpdir)
def __init__(self, source, image, **kwargs):
    """
    :param source: build source object providing the build file
                   (a Dockerfile, or e.g. flatpak.json)
    :param image: str, name of the image to build
    """
    LastLogger.__init__(self)
    BuilderStateMachine.__init__(self)

    print_version_of_tools()

    self.tasker = DockerTasker()

    info, version = self.tasker.get_info(), self.tasker.get_version()
    logger.debug(json.dumps(info, indent=2))
    logger.info(json.dumps(version, indent=2))

    # arguments for build
    self.source = source
    self.base_image = None
    self.image_id = None
    self.built_image_info = None
    self.image = ImageName.parse(image)

    # get info about base image from dockerfile
    build_file_path, build_file_dir = self.source.get_build_file_path()

    self.df_dir = build_file_dir
    self._df_path = None

    # If the build file isn't a Dockerfile, but say, a flatpak.json then a
    # plugin needs to create the Dockerfile and set the base image
    if build_file_path.endswith(DOCKERFILE_FILENAME):
        self.set_df_path(build_file_path)
def run(self): image_names = self.workflow.tag_conf.images[:] # Add in additional image names, if any if self.image_names: self.log.info("extending image names: %s", self.image_names) image_names += [ImageName.parse(x) for x in self.image_names] if self.load_exported_image: if len(self.workflow.exported_image_sequence) == 0: raise RuntimeError('no exported image to push to pulp') export_path = self.workflow.exported_image_sequence[-1].get("path") top_layer, crane_repos = self.push_tar(export_path, image_names) else: # Work out image ID image = self.workflow.image self.log.info("fetching image %s from docker", image) with tempfile.NamedTemporaryFile(prefix='docker-image-', suffix='.tar') as image_file: image_file.write(self.tasker.d.get_image(image).data) # This file will be referenced by its filename, not file # descriptor - must ensure contents are written to disk image_file.flush() top_layer, crane_repos = self.push_tar(image_file.name, image_names) if self.publish: for image_name in crane_repos: self.log.info("image available at %s", str(image_name)) return top_layer, crane_repos
def mock_environment(tmpdir, workflow, primary_images=None,
                     floating_images=None, manifest_results=None,
                     annotations=None):
    """Populate a workflow with stub build state and return it with a tasker."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    parent_id = '123456parent-id'
    workflow._base_image_inspect = {'Id': parent_id}
    workflow.builder = StubInsideBuilder()
    workflow.builder.image_id = '123456imageid'
    workflow.builder.base_image = ImageName(repo='Fedora', tag='22')
    workflow.builder.source = StubInsideBuilder()
    workflow.builder.built_image_info = {'ParentId': parent_id}
    workflow.builder.source.dockerfile_path = None
    workflow.builder.source.path = None

    if primary_images:
        # only dash-containing tags count as primary; first image is unique
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    if floating_images:
        workflow.tag_conf.add_floating_images(floating_images)

    workflow.build_result = BuildResult(image_id='123456',
                                        annotations=annotations or {})
    workflow.postbuild_results = {}
    if manifest_results:
        workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_results

    return tasker, workflow
class X(object):
    # Minimal stand-in for an InsideBuilder, used by the tests below.
    image_id = INPUT_IMAGE
    source = Y()
    source.dockerfile_path = None
    source.path = None
    base_image = ImageName(repo="qwe", tag="asd")
    image = ImageName.parse("test-image:unique_tag_123")
def test_repository_selection(self, workflow, organization, archive_registry,
                              repositories, selected, reactor_config_map):
    """The plugin picks the base image repo from koji archive metadata,
    enclosing it in the organization when reactor config is in use."""
    archive_repo_template = archive_registry + '/fedora{}'
    archives = [{
        'id': 1,
        'extra': {
            'docker': {
                'repositories': [
                    archive_repo_template.format(repo) for repo in repositories
                ]
            }
        }
    }]
    enclosed_repo_template = 'spam.com/{}/fedora{}'
    repo_template = 'spam.com/fedora{}'
    koji_session(archives=archives)

    workflow.builder.base_image = ImageName.parse('spam.com/fedora:some_tag')

    self.run_plugin_with_args(workflow,
                              reactor_config_map=reactor_config_map,
                              organization=organization)

    # organization enclosure only happens with a reactor config map
    if organization and reactor_config_map:
        selected_repo = enclosed_repo_template.format(organization, selected)
    else:
        selected_repo = repo_template.format(selected)

    assert str(workflow.builder.base_image) == selected_repo
def test_repository_from_koji_build(self, workflow, repositories, selected,
                                    reactor_config_map):
    """Repositories from the koji build's extra.image.index.pull metadata
    take precedence over archive metadata when selecting the base image."""
    # Populate archives to ensure koji build takes precedence
    archives = [{
        'id': 1,
        'extra': {
            'docker': {
                'repositories': [
                    'spam.com/notselected/fedora{}'.format(repo)
                    for repo in repositories
                ]
            }
        }
    }]

    repo_template = 'spam.com/fedora{}'
    koji_build_info = copy.deepcopy(KOJI_BUILD_INFO)
    koji_build_info['extra'] = {
        'image': {
            'index': {
                'pull': [repo_template.format(repo) for repo in repositories]
            }
        }
    }

    koji_session(archives=archives, koji_build_info=koji_build_info)

    workflow.builder.base_image = ImageName.parse('spam.com/fedora:some_tag')

    self.run_plugin_with_args(workflow, reactor_config_map=reactor_config_map)

    assert str(workflow.builder.base_image) == repo_template.format(selected)
def run(self): image_names = self.workflow.tag_conf.images[:] # Add in additional image names, if any if self.image_names: self.log.info("extending image names: %s", self.image_names) image_names += [ImageName.parse(x) for x in self.image_names] if self.load_exported_image: if len(self.workflow.exported_image_sequence) == 0: raise RuntimeError('no exported image to push to pulp') crane_repos = self.push_tar( self.workflow.exported_image_sequence[-1].get("path"), image_names) else: # Work out image ID image = self.workflow.image self.log.info("fetching image %s from docker", image) with tempfile.NamedTemporaryFile(prefix='docker-image-', suffix='.tar') as image_file: image_file.write(self.tasker.d.get_image(image).data) # This file will be referenced by its filename, not file # descriptor - must ensure contents are written to disk image_file.flush() crane_repos = self.push_tar(image_file.name, image_names) if self.publish: for image_name in crane_repos: self.log.info("image available at %s", str(image_name)) return crane_repos
def test_parent_images_missing(tmpdir, docker_tasker):
    """test when parent_images has been mangled and lacks parents compared to dockerfile."""
    dfp = df_parser(str(tmpdir))
    dfp.content = dedent("""\
        FROM first:parent AS builder1
        FROM second:parent AS builder2
        FROM monty
        """)

    workflow = mock_workflow()
    workflow.builder.set_df_path(dfp.dockerfile_path)
    # only the final stage is present; the two builder stages are missing
    workflow.builder.parent_images = {ImageName.parse("monty"): ImageName.parse("build-name:3")}
    workflow.builder.base_image = ImageName.parse("build-name:3")

    with pytest.raises(ParentImageMissing):
        ChangeFromPlugin(docker_tasker, workflow).run()
def mock_environment(tmpdir, primary_images=None, annotations=None):
    """
    Prepare a tasker and a workflow populated with stub build state.

    :param tmpdir: unused here; kept for fixture-call compatibility
    :param primary_images: list of str or None; images whose tag contains
                           '-' are registered as primary, and the first
                           entry becomes the unique image
    :param annotations: dict or None, annotations for the BuildResult
    :return: (DockerTasker, DockerBuildWorkflow) tuple
    """
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    if primary_images:
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    # BUG FIX: the default was a shared mutable `annotations={}`; a None
    # sentinel with `annotations or {}` gives each call a fresh dict while
    # behaving identically for all existing callers.
    workflow.build_result = BuildResult(image_id='123456',
                                        annotations=annotations or {})
    return tasker, workflow
def test_retry_pull_base_image(exc, failures, should_succeed):
    """tag_image raises `exc` `failures` times before succeeding; verify the
    plugin retries enough times (or eventually fails) accordingly."""
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse('parent-image')

    class MockResponse(object):
        # stand-in for the HTTP response attached to the raised exception
        content = ''

    # fail `failures` times, then succeed twice
    expectation = flexmock(tasker).should_receive('tag_image')
    for _ in range(failures):
        expectation = expectation.and_raise(exc('', MockResponse()))
    expectation.and_return('foo')
    expectation.and_return('parent-image')

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': 'registry.example.com',
                     'parent_registry_insecure': True},
        }],
    )

    if should_succeed:
        runner.run()
    else:
        with pytest.raises(Exception):
            runner.run()
def run(self):
    """
    Pull parent images and retag them uniquely for this build.

    Side effects: rewrites workflow.builder.parent_images values to the
    new unique tags and updates the base image via set_base_image().
    """
    build_json = get_build_json()
    base_image_str = str(self.workflow.builder.original_base_image)
    current_platform = platform.processor() or 'x86_64'
    self.manifest_list_cache = {}
    # nonce keeps the per-parent unique tags distinct within this build
    for nonce, parent in enumerate(
            sorted(self.workflow.builder.parent_images.keys())):
        image = ImageName.parse(parent)
        if parent == base_image_str:
            # the base image may be overridden via the build json
            image = self._resolve_base_image(build_json)
        image = self._ensure_image_registry(image)

        if self.check_platforms:
            self._validate_platforms_in_image(image)
            # prefer a manifest-list digest matching the current arch, if any
            new_arch_image = self._get_image_for_different_arch(
                image, current_platform)
            if new_arch_image:
                image = new_arch_image

        new_image = self._pull_and_tag_image(image, build_json, str(nonce))
        self.workflow.builder.parent_images[parent] = str(new_image)

        if parent == base_image_str:
            self.workflow.builder.set_base_image(str(new_image))
def _get_image_for_different_arch(self, image, platform):
    """
    Inspect `image`'s manifest list (if any) and return an ImageName
    pinned by digest, or None when no manifest list is available.

    NOTE(review): the parameter name `platform` shadows the stdlib
    `platform` module used elsewhere in this class — rename candidate.
    """
    manifest_list = self._get_manifest_list(image)
    new_image = None
    if manifest_list:
        manifest_list_dict = manifest_list.json()
        arch_digests = {}
        build_image_digests = {}
        image_name = image.to_str(tag=False)
        # map each architecture in the manifest list to a digest pullspec
        for manifest in manifest_list_dict['manifests']:
            arch = manifest['platform']['architecture']
            arch_digests[arch] = image_name + '@' + manifest['digest']
        present_platform = None
        try:
            arch_to_platform = get_goarch_to_platform_mapping(
                self.workflow)
            for arch, digest in arch_digests.items():
                present_platform = arch_to_platform[arch]
                build_image_digests[present_platform] = digest
            # NOTE(review): when `platform` itself is absent, this picks
            # the digest of the LAST platform iterated (present_platform) —
            # appears to be an order-dependent fallback; confirm against
            # the caller's expectations before changing.
            if platform not in build_image_digests:
                new_image = ImageName.parse(
                    build_image_digests[present_platform])
        except KeyError:
            # arch missing from the platform mapping, or mapping undefined
            self.log.info(
                'Cannot validate available platforms for base image '
                'because platform descriptors are not defined')
    return new_image
def run(self):
    """Remove the built image (and optionally pulled base images) from docker,
    ignoring client-side errors such as the image already being gone."""
    image = self.workflow.builder.image_id
    if not image:
        self.log.error("no built image, nothing to remove")
        return

    try:
        self.tasker.remove_image(image, force=True)
    except APIError as ex:
        if not ex.is_client_error():
            raise
        self.log.warning("failed to remove built image %s (%s: %s), ignoring",
                         image, ex.response.status_code, ex.response.reason)

    if not (self.remove_base_image and self.workflow.pulled_base_images):
        return

    # FIXME: we may need to add force here, let's try it like this for now
    # FIXME: when ID of pulled img matches an ID of an image already present, don't remove
    for base_image_tag in self.workflow.pulled_base_images:
        try:
            self.tasker.remove_image(ImageName.parse(base_image_tag))
        except APIError as ex:
            if not ex.is_client_error():
                raise
            self.log.warning("failed to remove base image %s (%s: %s), ignoring",
                             base_image_tag,
                             ex.response.status_code,
                             ex.response.reason)
def test_create_image(tmpdir, insecure_registry, namespace, organization,
                      monkeypatch, reactor_config_map):
    """
    Test that an ImageStream is created if not found
    """
    runner = prepare(tmpdir, insecure_registry=insecure_registry,
                     namespace=namespace, organization=organization,
                     reactor_config_map=reactor_config_map)

    kwargs = {}
    build_json = {"metadata": {}}
    if namespace is not None:
        build_json['metadata']['namespace'] = namespace
    monkeypatch.setenv("BUILD", json.dumps(build_json))

    # simulate the ImageStream not existing yet
    (flexmock(OSBS).should_receive('get_image_stream').once().with_args(
        TEST_IMAGESTREAM).and_raise(OsbsResponseException('none', 404)))

    if insecure_registry is not None:
        kwargs['insecure_registry'] = insecure_registry

    # with a reactor config map, the repo is enclosed in the organization
    enclose_repo = ImageName.parse(TEST_REPO_WITH_REGISTRY)
    if reactor_config_map and organization:
        enclose_repo.enclose(organization)
    (flexmock(OSBS).should_receive('create_image_stream').once().with_args(
        TEST_IMAGESTREAM, enclose_repo.to_str(registry=True, tag=False),
        **kwargs).and_return(ImageStreamResponse()))
    (flexmock(OSBS).should_receive('import_image_tags').once().and_return(True))
    runner.run()
def test_parent_images_unresolved(tmpdir, docker_tasker):
    """test when parent_images hasn't been filled in with unique tags."""
    parser = df_parser(str(tmpdir))
    parser.content = "FROM spam"

    workflow = mock_workflow()
    workflow.builder.set_df_path(parser.dockerfile_path)
    workflow.builder.base_image = ImageName.parse('eggs')
    # base image resolved, but a second parent deliberately left unresolved
    workflow.builder.parent_images = {
        ImageName.parse('spam'): ImageName.parse('eggs'),
        ImageName.parse('extra:image'): None,
    }

    with pytest.raises(ParentImageUnresolved):
        ChangeFromPlugin(docker_tasker, workflow).run()
def tag_image(self, image, target_image, force=False):
    """
    tag provided image with specified image_name, registry and tag

    :param image: str or ImageName, image to tag
    :param target_image: ImageName, new name for the image
    :param force: bool, force tag the image?
    :return: str, image (reg.om/img:v1)
    :raises RuntimeError: if docker refuses to tag the image
    """
    logger.info("tagging image '%s' as '%s'", image, target_image)
    logger.debug("image = '%s', target_image_name = '%s'", image, target_image)
    if not isinstance(image, ImageName):
        image = ImageName.parse(image)

    if image != target_image:
        response = self.d.tag(image.to_str(),
                              target_image.to_str(tag=False),
                              tag=target_image.tag,
                              force=force)  # returns True/False
        if not response:
            logger.error("failed to tag image")
            # BUG FIX: the format args must be a tuple; previously '%' bound
            # only to image.to_str(), so raising produced a TypeError rather
            # than the intended error message
            raise RuntimeError(
                "Failed to tag image '%s': target_image = '%s'"
                % (image.to_str(), target_image))
    else:
        logger.debug('image already tagged correctly, nothing to do')

    return target_image.to_str()  # this will be the proper name, not just repo/img
def run(self):
    """
    When the Dockerfile's base image is the custom 'koji/image-build'
    marker, build a filesystem via a koji image-build task, import it as
    the real base image, and rewrite the Dockerfile to use it.

    :return: str, new base image name, or None when the base image is not
             a koji image-build marker
    :raises RuntimeError: if the koji create-filesystem task fails
    """
    dockerfile = DockerfileParser(self.workflow.builder.df_path)
    image_name = ImageName.parse(dockerfile.baseimage)
    if image_name.namespace != 'koji' or image_name.repo != 'image-build':
        self.log.info('Base image not supported: %s', dockerfile.baseimage)
        return
    # the tag, if present, names an alternate image-build config file
    image_build_conf = image_name.tag or 'image-build.conf'

    self.session = create_koji_session(self.koji_hub, self.koji_auth_info)

    task_id, filesystem_regex = self.build_filesystem(image_build_conf)

    task = TaskWatcher(self.session, task_id, self.poll_interval)
    task.wait()
    if task.failed():
        raise RuntimeError('Create filesystem task failed: {}'
                           .format(task_id))

    filesystem = self.download_filesystem(task_id, filesystem_regex)

    base_image = self.import_base_image(filesystem)
    dockerfile.baseimage = base_image

    return base_image
def create_image(self, df_dir_path, image, use_cache=False):
    """
    Create a build image: stage the Dockerfile directory and an
    atomic-reactor sdist tarball in a temp dir, then build and tag it.

    :param df_dir_path: str, directory containing the Dockerfile
    :param image: str, name (repo[:tag]) for the resulting image
    :param use_cache: bool, allow docker to reuse cached layers
    :return: None
    :raises RuntimeError: if df_dir_path is not an existing directory
    """
    logger.debug("creating build image: df_dir_path = '%s', image = '%s'",
                 df_dir_path, image)

    if not os.path.isdir(df_dir_path):
        # BUG FIX: previously RuntimeError("...%s...", df_dir_path) passed
        # the path as a second exception argument instead of interpolating
        # it into the message
        raise RuntimeError("Directory '%s' does not exist." % df_dir_path)

    tmpdir = tempfile.mkdtemp()
    # separate subdirs so the sdist staging never pollutes the build context
    df_tmpdir = os.path.join(tmpdir, 'df-%s' % uuid.uuid4())
    git_tmpdir = os.path.join(tmpdir, 'git-%s' % uuid.uuid4())
    os.mkdir(df_tmpdir)
    logger.debug("tmp dir with dockerfile '%s' created", df_tmpdir)
    os.mkdir(git_tmpdir)
    logger.debug("tmp dir with atomic-reactor '%s' created", git_tmpdir)
    try:
        for f in glob(os.path.join(df_dir_path, '*')):
            shutil.copy(f, df_tmpdir)
            logger.debug("cp '%s' -> '%s'", f, df_tmpdir)
        logger.debug("df dir: %s", os.listdir(df_tmpdir))

        reactor_tarball = self.get_reactor_tarball_path(tmpdir=git_tmpdir)
        reactor_tb_path = os.path.join(df_tmpdir, DOCKERFILE_REACTOR_TARBALL_NAME)
        shutil.copy(reactor_tarball, reactor_tb_path)

        image_name = ImageName.parse(image)
        logs_gen = self.tasker.build_image_from_path(df_tmpdir, image_name,
                                                     stream=True,
                                                     use_cache=use_cache)
        wait_for_command(logs_gen)
    finally:
        # always clean up the temp tree, even if the build fails
        shutil.rmtree(tmpdir)
def tag_image(self, image, target_image, force=False):
    """
    tag provided image with specified image_name, registry and tag

    :param image: str or ImageName, image to tag
    :param target_image: ImageName, new name for the image
    :param force: bool, force tag the image?
    :return: str, image (reg.om/img:v1)
    :raises RuntimeError: if docker refuses to tag the image
    """
    logger.info("tagging image '%s' as '%s'", image, target_image)
    logger.debug("image = '%s', target_image_name = '%s'", image, target_image)
    if not isinstance(image, ImageName):
        image = ImageName.parse(image)

    if image != target_image:
        response = self.d.tag(
            image.to_str(),
            target_image.to_str(tag=False),
            tag=target_image.tag,
            force=force)  # returns True/False
        if not response:
            logger.error("failed to tag image")
            # BUG FIX: the format args must be a tuple; previously '%' bound
            # only to image.to_str(), so raising produced a TypeError rather
            # than the intended error message
            raise RuntimeError(
                "Failed to tag image '%s': target_image = '%s'"
                % (image.to_str(), target_image))
    else:
        logger.debug('image already tagged correctly, nothing to do')

    return target_image.to_str()  # this will be the proper name, not just repo/img
def __init__(self, source, image, **kwargs):
    """
    Set up the builder: docker tasker, build arguments, and base-image
    info taken from the source's build file.

    :param source: build source object; must provide get_build_file_path()
    :param image: str, name to tag the resulting image with
    :param kwargs: accepted for interface compatibility; unused here
    """
    LastLogger.__init__(self)
    BuilderStateMachine.__init__(self)

    print_version_of_tools()

    self.tasker = DockerTasker()

    info, version = self.tasker.get_info(), self.tasker.get_version()
    logger.debug(json.dumps(info, indent=2))
    logger.info(json.dumps(version, indent=2))

    # arguments for build
    self.source = source
    self.base_image = None
    self.image_id = None
    self.built_image_info = None
    self.image = ImageName.parse(image)

    # get info about base image from dockerfile
    build_file_path, build_file_dir = self.source.get_build_file_path()
    self.df_dir = build_file_dir
    self._df_path = None

    # If the Dockerfile will be entirely generated from the container.yaml
    # (in the Flatpak case, say), then a plugin needs to create the Dockerfile
    # and set the base image
    if build_file_path.endswith(DOCKERFILE_FILENAME):
        self.set_df_path(build_file_path)
def test_pull_base_image_plugin(df_base, parent_registry, expected_w_reg, expected_wo_reg):
    """Run PullBaseImagePlugin and verify which image names end up pulled,
    with and without an explicit parent registry."""
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # sanity: neither variant is present before the plugin runs
    assert not tasker.image_exists(BASE_IMAGE)
    assert not tasker.image_exists(BASE_IMAGE_W_REGISTRY)

    runner = PreBuildPluginsRunner(tasker, workflow, [{
        'name': PullBaseImagePlugin.key,
        'args': {
            'parent_registry': parent_registry,
            'parent_registry_insecure': True
        }
    }])

    runner.run()

    assert tasker.image_exists(BASE_IMAGE) == expected_wo_reg
    assert tasker.image_exists(BASE_IMAGE_W_REGISTRY) == expected_w_reg

    # best-effort cleanup of pulled test images
    try:
        tasker.remove_image(BASE_IMAGE)
        tasker.remove_image(BASE_IMAGE_W_REGISTRY)
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt during cleanup
    except Exception:
        pass
def __init__(self):
    # Stub builder for custom-base-image (koji image-build) tests.
    self.image_id = "xxx"
    self.base_image = ImageName.parse("koji/image-build")
    # base image not yet resolved to a unique tag
    self.parent_images = {self.base_image: None}
    self.parents_ordered = "koji/image-build"
    self.custom_base_image = True
    self.custom_parent_image = True
    # plugin calls set_base_image(); flexmock records the call
    self.set_base_image = flexmock()
def test_get_image_info_by_name_tag_in_name_library():
    """Looking up 'library/busybox' by name yields exactly one match."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    parsed_name = ImageName.parse("library/busybox")
    matches = tasker.get_image_info_by_image_name(parsed_name)
    assert len(matches) == 1
def mock_workflow():
    """
    Provide just enough structure that workflow can be used to run the plugin.
    Defaults below are solely to enable that purpose; tests where those values
    matter should provide their own.
    """
    workflow = DockerBuildWorkflow(SOURCE, "mock:default_built")
    workflow.source = StubSource()

    builder = StubInsideBuilder().for_workflow(workflow)
    builder.set_df_path('/mock-path')
    resolved_base = ImageName.parse("mock:tag")
    builder.parent_images[ImageName.parse("mock:base")] = resolved_base
    builder.base_image = resolved_base
    builder.tasker = flexmock()
    workflow.builder = flexmock(builder)

    return workflow
def test_update_base_image_inspect_broken(tmpdir, caplog, docker_tasker):
    """exercise code branch where the base image inspect comes back without an Id"""
    df_content = "FROM base:image"
    dfp = df_parser(str(tmpdir))
    dfp.content = df_content
    image_str = "base@sha256:1234"
    image_name = ImageName.parse(image_str)
    workflow = mock_workflow()
    workflow.builder.set_df_path(dfp.dockerfile_path)
    workflow.builder.parent_images = {ImageName.parse("base:image"): image_name}
    workflow.builder.base_image = image_name
    # inspection data deliberately lacks the 'Id' key
    workflow.builder.set_parent_inspection_data(image_str, dict(no_id="here"))

    with pytest.raises(NoIdInspection):
        ChangeFromPlugin(docker_tasker, workflow).run()
    assert dfp.content == df_content  # nothing changed
    assert "missing in inspection" in caplog.text
def set_base_image(self, base_image, parents_pulled=True, insecure=False, dockercfg_path=None):
    """
    Set the builder's base image and keep parent-image bookkeeping in sync.

    :param base_image: str, new base image pullspec (may be 'scratch' or a
                       custom-image marker)
    :param parents_pulled: bool, accepted but not used in this method —
                           presumably consumed by callers/overrides; TODO confirm
    :param insecure: bool, accepted but not used here — TODO confirm
    :param dockercfg_path: str or None, accepted but not used here — TODO confirm
    """
    self.base_from_scratch = base_image_is_scratch(base_image)
    # once flagged as custom, stays custom even if replaced by a normal image
    if not self.custom_base_image:
        self.custom_base_image = base_image_is_custom(base_image)
    self.base_image = ImageName.parse(base_image)
    # remember the first base image ever set; must happen before the
    # parent-image map is rebuilt below
    self.original_base_image = self.original_base_image or self.base_image
    self.recreate_parent_images()
    if not self.base_from_scratch:
        self.parent_images[self.original_base_image] = self.base_image
def test_ensure_primary(tmpdir, monkeypatch, osbs_error, tag_conf, annotations,
                        tag_prefix, reactor_config_map):
    """
    Test that primary image tags are ensured
    """
    runner = prepare(tmpdir,
                     primary_images_annotations=annotations,
                     primary_images_tag_conf=tag_conf,
                     reactor_config_map=reactor_config_map)

    monkeypatch.setenv("BUILD", json.dumps({
        "metadata": {}
    }))
    tags = []
    # floating images come from tag_conf, falling back to build annotations
    floating_images = runner.workflow.tag_conf.floating_images
    if not floating_images:
        floating_images = [
            ImageName.parse(floating) for floating in
            runner.workflow.build_result.annotations['repositories']['floating']]

    for floating_image in floating_images:
        tag = floating_image.tag
        tags.append(tag)

    (flexmock(OSBS)
     .should_receive('get_image_stream')
     .once()
     .with_args(TEST_IMAGESTREAM)
     .and_return(ImageStreamResponse()))

    # By using a combination of ordered and once, we verify that
    # ensure_image_stream_tag is not called with version-release tag
    for x in range(DEFAULT_TAGS_AMOUNT):
        expectation = (
            flexmock(OSBS)
            .should_receive('ensure_image_stream_tag')
            .with_args(dict, tag_prefix + str(x))
            .once()
            .ordered()
        )
        if osbs_error:
            expectation.and_raise(OsbsResponseException('None', 500))

    # import_image_tags failing forces the fallback to import_image
    (flexmock(OSBS)
     .should_receive('import_image_tags')
     .once()
     .and_raise(AttributeError))
    (flexmock(OSBS)
     .should_receive('import_image')
     .with_args(TEST_IMAGESTREAM, tags=tags)
     .times(0 if osbs_error else 1)
     .and_return(True))

    if osbs_error:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        runner.run()
def add_unique_image(self, image):
    """
    add image with unpredictable name used by tag_by_labels plugin

    :param image: str, name of image (e.g. "namespace/httpd:2.4")
    :return: None
    """
    parsed = ImageName.parse(image)
    self._unique_images.append(parsed)