def test_distgit_fetch_artefacts_plugin(tmpdir, docker_tasker):  # noqa
    """DistgitFetchArtefactsPlugin runs the sources command inside the source dir."""
    command = 'fedpkg sources'
    expected_command = ['fedpkg', 'sources']

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X()
    workflow.source = flexmock(path=str(tmpdir))

    # Remember where we started so we can verify the plugin chdirs back.
    initial_dir = os.getcwd()
    assert initial_dir != str(tmpdir)

    def assert_tmpdir(*args, **kwargs):
        # Replacement for check_call: the command must run with the source
        # directory as the current working directory.
        assert os.getcwd() == str(tmpdir)

    (flexmock(pre_pyrpkg_fetch_artefacts.subprocess)
        .should_receive('check_call')
        .with_args(expected_command)
        .replace_with(assert_tmpdir)
        .once())

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': DistgitFetchArtefactsPlugin.key,
            'args': {'command': command}
        }]
    )
    runner.run()

    # The plugin must restore the original working directory when done.
    assert os.getcwd() == initial_dir
def test_assertlabels_plugin(tmpdir, df_content, req_labels, expected):
    """AssertLabelsPlugin fails the build iff required labels are missing."""
    parser = DockerfileParser(str(tmpdir))
    parser.content = df_content

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    plugin_conf = [{
        'name': AssertLabelsPlugin.key,
        'args': {'required_labels': req_labels},
    }]
    runner = PreBuildPluginsRunner(DockerTasker(), workflow, plugin_conf)

    assert AssertLabelsPlugin.key is not None

    if not isinstance(expected, PluginFailedException):
        runner.run()
    else:
        with pytest.raises(PluginFailedException):
            runner.run()
def test_distgit_fetch_artefacts_failure(tmpdir, docker_tasker):  # noqa
    """If the sources command fails, the plugin fails but still restores the cwd."""
    command = 'fedpkg sources'
    expected_command = ['fedpkg', 'sources']

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X()
    workflow.source = flexmock(path=str(tmpdir))

    # Remember where we started so we can verify the plugin chdirs back.
    initial_dir = os.getcwd()
    assert initial_dir != str(tmpdir)

    # Make the fetch command blow up when it is invoked.
    (flexmock(pre_pyrpkg_fetch_artefacts.subprocess)
        .should_receive('check_call')
        .with_args(expected_command)
        .and_raise(RuntimeError)
        .once())

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': DistgitFetchArtefactsPlugin.key,
            'args': {'command': command}
        }]
    )
    with pytest.raises(PluginFailedException):
        runner.run()

    # Even on failure the working directory must be restored.
    assert os.getcwd() == initial_dir
def prepare(pulp_registries=None, docker_registries=None, before_dockerfile=False):
    """
    Build a DockerBuildWorkflow wired up with mocked OSBS calls, registries,
    tag configuration and build logs for store-metadata tests.

    :param pulp_registries: iterable of (name, crane_uri) pairs
    :param docker_registries: iterable of docker registry URIs
    :param before_dockerfile: bool, use a builder stub without base-image data
    :return: DockerBuildWorkflow
    """
    if pulp_registries is None:
        pulp_registries = (
            ("test", LOCALHOST_REGISTRY),
        )

    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY,)

    def set_annotations_on_build(build_id, annotations):
        # No-op replacement for the OSBS API call.
        pass

    def update_labels_on_build(build_id, labels):
        # No-op replacement for the OSBS API call.
        pass

    # Fake the BUILD environment variable that osbs reads build metadata from.
    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    (flexmock(osbs.conf)
        .should_call("Configuration")
        .with_args(namespace="namespace", conf_file=None, verify_ssl=True,
                   openshift_url="http://example.com/",
                   openshift_uri="http://example.com/",
                   use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")

    for name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(name, crane_uri)

    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    for docker_registry in docker_registries:
        r = workflow.push_conf.add_docker_registry(docker_registry)
        r.digests[TEST_IMAGE] = ManifestDigest(v1=DIGEST_NOT_USED, v2=DIGEST1)
        r.digests["namespace/image:asd123"] = ManifestDigest(v1=DIGEST_NOT_USED,
                                                             v2=DIGEST2)

    if before_dockerfile:
        setattr(workflow, 'builder', XBeforeDockerfile())
    else:
        setattr(workflow, 'builder', X)
        # Base image is only known once the Dockerfile has been processed.
        setattr(workflow, '_base_image_inspect', {'Id': '01234567'})

    workflow.build_logs = [
        "a", "b",
    ]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")

    return workflow
def test_retry_pull_base_image(exc, failures, should_succeed):
    """PullBaseImagePlugin retries tag_image; it succeeds iff failures stay within the retry budget."""
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse('parent-image')

    class MockResponse(object):
        # Minimal stand-in for the HTTP response some docker exceptions carry.
        content = ''

    # Queue up `failures` raises before the call finally succeeds.
    expectation = flexmock(tasker).should_receive('tag_image')
    for _ in range(failures):
        expectation = expectation.and_raise(exc('', MockResponse()))

    expectation.and_return('foo')
    expectation.and_return('parent-image')

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': 'registry.example.com',
                     'parent_registry_insecure': True},
        }],
    )

    if should_succeed:
        runner.run()
    else:
        with pytest.raises(Exception):
            runner.run()
def test_returndockerfile_plugin(tmpdir):
    """CpDockerfilePlugin stores the Dockerfile content in prebuild_results."""
    dockerfile_text = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    parser = DockerfileParser(str(tmpdir))
    parser.content = dockerfile_text

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    PreBuildPluginsRunner(
        DockerTasker(),
        workflow,
        [{'name': CpDockerfilePlugin.key}],
    ).run()

    assert CpDockerfilePlugin.key is not None
    assert workflow.prebuild_results.get(CpDockerfilePlugin.key, "") == dockerfile_text
def prepare(self, df_path, inherited_user='', hide_files=None, parent_images=None):
    """Create a (tasker, workflow) pair set up for hide-files plugin tests."""
    if MOCK:
        mock_docker()

    docker_tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    workflow.source = MockSource(df_path)

    stub = StubInsideBuilder().for_workflow(workflow)
    workflow.builder = stub.set_df_path(df_path)

    # Each parent gets its own inspection dict so later mutation is isolated.
    for parent in (parent_images or []):
        workflow.builder.set_parent_inspection_data(
            parent,
            {INSPECT_CONFIG: {'User': inherited_user}},
        )

    if hide_files is not None:
        conf = ReactorConfig({'version': 1, 'hide_files': hide_files})
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {
            WORKSPACE_CONF_KEY: conf,
        }

    return docker_tasker, workflow
def test_workflow():
    """Run a fully mocked build and check that every phase's watcher fired."""
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    flexmock(InsideBuilder).new_instances(MockInsideBuilder())

    watchers = {phase: Watcher() for phase in ('pre', 'prepub', 'post', 'exit')}
    workflow = DockerBuildWorkflow(
        MOCK_SOURCE, "test-image",
        prebuild_plugins=[{"name": "pre_watched",
                           "args": {"watcher": watchers['pre']}}],
        prepublish_plugins=[{"name": "prepub_watched",
                             "args": {"watcher": watchers['prepub']}}],
        postbuild_plugins=[{"name": "post_watched",
                            "args": {"watcher": watchers['post']}}],
        exit_plugins=[{"name": "exit_watched",
                       "args": {"watcher": watchers['exit']}}],
        plugin_files=[this_file],
    )
    workflow.build_docker_image()

    for watcher in watchers.values():
        assert watcher.was_called()
def test_autorebuild_stop_prevents_build():
    """
    A prebuild plugin raising AutoRebuildCanceledException must abort the
    build: later phases are skipped, exit plugins still run, and the
    workflow records that the autorebuild was canceled.
    """
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_prepub = Watcher()
    watch_post = Watcher()
    watch_exit = Watcher()
    workflow = DockerBuildWorkflow(
        MOCK_SOURCE, "test-image",
        prebuild_plugins=[{"name": "stopstopstop", "args": {}}],
        prepublish_plugins=[{"name": "prepub_watched",
                             "args": {"watcher": watch_prepub}}],
        postbuild_plugins=[{"name": "post_watched",
                            "args": {"watcher": watch_post}}],
        exit_plugins=[{"name": "exit_watched",
                       "args": {"watcher": watch_exit}}],
        plugin_files=[this_file],
    )

    with pytest.raises(AutoRebuildCanceledException):
        workflow.build_docker_image()

    # Phases after prebuild must be skipped; only exit plugins run.
    assert not watch_prepub.was_called()
    assert not watch_post.was_called()
    assert watch_exit.was_called()
    # Fix: PEP 8 (E712) — use identity comparison with True, not '=='.
    assert workflow.autorebuild_canceled is True
def test_add_labels_plugin(tmpdir, labels_conf_base, labels_conf, dont_overwrite,
                           expected_output):
    """AddLabelsPlugin either rewrites the Dockerfile labels or raises."""
    parser = DockerfileParser(str(tmpdir))
    parser.content = DF_CONTENT

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    workflow.base_image_inspect = labels_conf_base
    setattr(workflow.builder, 'df_path', parser.dockerfile_path)

    runner = PreBuildPluginsRunner(
        DockerTasker(),
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': labels_conf, "dont_overwrite": dont_overwrite},
        }],
    )

    if isinstance(expected_output, RuntimeError):
        with pytest.raises(RuntimeError):
            runner.run()
    else:
        runner.run()
        assert AddLabelsPlugin.key is not None
        assert parser.content in expected_output
def prepare():
    """Build a minimal workflow fixture with mocked OSBS annotations and environment."""
    def set_annotations_on_build(build_id, labels, namespace='default'):
        # The code under test must target the namespace from the BUILD json.
        assert namespace == 'namespace'

    # Fake the BUILD environment variable that osbs reads build metadata from.
    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = '''
{
  "metadata": {
    "name": "asd",
    "namespace": "namespace"
  }
}
'''
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")
    workflow.push_conf.add_pulp_registry("test", LOCALHOST_REGISTRY)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    r = workflow.push_conf.add_docker_registry(DOCKER0_REGISTRY)
    r.digests[TEST_IMAGE] = DIGEST1
    r.digests["namespace/image:asd123"] = DIGEST2

    setattr(workflow, 'builder', X)
    setattr(workflow, '_base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_compress(self, tmpdir, method, load_exported_image, extension):
    """CompressPlugin must place a compressed image archive in the source tmpdir."""
    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow({'provider': 'git', 'uri': 'asd'}, 'test-image')
    workflow.builder = X()

    exported_path = os.path.join(str(tmpdir), 'img.tar')
    if load_exported_image:
        # Provide a pre-exported (empty) tarball for the plugin to load.
        tarfile.open(exported_path, mode='w').close()
        workflow.exported_image_sequence.append({'path': exported_path})

    PostBuildPluginsRunner(
        DockerTasker(),
        workflow,
        [{
            'name': CompressPlugin.key,
            'args': {
                'method': method,
                'load_exported_image': load_exported_image,
            },
        }],
    ).run()

    compressed_path = os.path.join(
        workflow.source.tmpdir,
        EXPORTED_COMPRESSED_IMAGE_NAME_TEMPLATE.format(extension))
    assert os.path.exists(compressed_path)
    assert workflow.exported_image_sequence[-1]['path'] == compressed_path
def test_build(is_failed, image_id):
    """
    tests docker build api plugin working
    """
    flexmock(DockerfileParser, content='df_content')
    mock_docker()
    fake_builder = MockInsideBuilder(image_id=image_id)
    flexmock(InsideBuilder).new_instances(fake_builder)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    # Force the build result status reported by the docker stream parser.
    flexmock(CommandResult).should_receive('is_failed').and_return(is_failed)
    error = "error message"
    error_detail = "{u'message': u\"%s\"}" % error
    if is_failed:
        flexmock(CommandResult, error=error, error_detail=error_detail)
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()
    else:
        workflow.build_docker_image()

    # The buildstep plugin must record a BuildResult either way.
    assert isinstance(workflow.buildstep_result['docker_api'], BuildResult)
    assert workflow.build_result == workflow.buildstep_result['docker_api']
    assert workflow.build_result.is_failed() == is_failed
    if is_failed:
        assert workflow.build_result.fail_reason == error
        # The error text must be reported unescaped (no stray backslashes).
        assert '\\' not in workflow.plugins_errors['docker_api']
        assert error in workflow.plugins_errors['docker_api']
    else:
        assert workflow.build_result.image_id.startswith('sha256:')
        assert workflow.build_result.image_id.count(':') == 1
def test_syntax_error():
    """
    tests reporting of syntax errors
    """
    flexmock(DockerfileParser, content='df_content')
    mock_docker()
    fake_builder = MockInsideBuilder()

    def raise_exc(*args, **kwargs):
        # Replacement for the docker build stream: raises an APIError carrying
        # a Dockerfile syntax-error explanation.
        explanation = ("Syntax error - can't find = in \"CMD\". "
                       "Must be of the form: name=value")
        http_error = requests.HTTPError('500 Server Error')
        raise docker.errors.APIError(message='foo',
                                     response=http_error,
                                     explanation=explanation)
        # Unreachable, but the yield makes this a generator function so it
        # matches the streaming API shape of build_image_from_path.
        yield {}

    fake_builder.tasker.build_image_from_path = raise_exc
    flexmock(InsideBuilder).new_instances(fake_builder)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')

    with pytest.raises(PluginFailedException):
        workflow.build_docker_image()

    assert isinstance(workflow.buildstep_result['docker_api'], BuildResult)
    assert workflow.build_result == workflow.buildstep_result['docker_api']
    assert workflow.build_result.is_failed()
    # The explanation text must surface in the recorded failure reason.
    assert "Syntax error" in workflow.build_result.fail_reason
def test_plugin_errors(plugins, should_fail, should_log):
    """
    Try bad plugin configuration.
    """
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    # Swap in a capturing logger so error output can be asserted on.
    fake_logger = FakeLogger()
    atomic_reactor.plugin.logger = fake_logger

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   plugin_files=[this_file],
                                   **plugins)

    # Find the 'watcher' parameter
    watchers = [conf.get('args', {}).get('watcher')
                for plugin in plugins.values()
                for conf in plugin]
    watcher = [x for x in watchers if x][0]

    if should_fail:
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()

        assert not watcher.was_called()
    else:
        workflow.build_docker_image()
        assert watcher.was_called()

    if should_log:
        assert len(fake_logger.errors) > 0
    else:
        assert len(fake_logger.errors) == 0
def mock_environment(tmpdir, primary_images=None, annotations=None):
    """
    Build a (tasker, workflow) fixture with a fully mocked docker build.

    :param tmpdir: scratch directory fixture (unused directly)
    :param primary_images: list of str; tags containing '-' become primary
                           images, and the first entry becomes the unique image
    :param annotations: dict or None, annotations for the BuildResult
    :return: tuple of (DockerTasker, DockerBuildWorkflow)
    """
    # Fix: 'annotations={}' was a mutable default argument shared across
    # calls; use None as the sentinel and create a fresh dict per call.
    if annotations is None:
        annotations = {}
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    if primary_images:
        for image in primary_images:
            # Only tags that look like NVRs (contain a dash) become primary.
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])
    workflow.build_result = BuildResult(image_id='123456', annotations=annotations)
    return tasker, workflow
def mock_environment(tmpdir, session=None, build_process_failed=False,
                     koji_build_id=None, use_import=False):
    """
    Build a (tasker, workflow) fixture with a mocked koji client session.

    :param session: koji session mock (default: fresh MockedClientSession)
    :param build_process_failed: bool, record a failed BuildResult
    :param koji_build_id: value stored in exit_results for the koji plugin
    :param use_import: bool, store the id under KojiImportPlugin instead of
                       KojiPromotePlugin
    :return: tuple of (DockerTasker, DockerBuildWorkflow)
    """
    if session is None:
        session = MockedClientSession('')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image')
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    # Any koji connection attempt gets the mocked session.
    flexmock(koji, ClientSession=lambda hub, opts: session)

    if build_process_failed:
        workflow.build_result = BuildResult(fail_reason="not built")
    else:
        workflow.build_result = BuildResult(image_id="id1234")

    if koji_build_id:
        if use_import:
            workflow.exit_results[KojiImportPlugin.key] = koji_build_id
        else:
            workflow.exit_results[KojiPromotePlugin.key] = koji_build_id

    # Avoid real delays from retry loops in the code under test.
    (flexmock(time)
        .should_receive('sleep')
        .and_return(None))

    return tasker, workflow
def test_layer_sizes():
    """A mocked build must populate workflow.layer_sizes, newest layer last-first."""
    flexmock(DockerfileParser, content='df_content')
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    watch_buildstep = Watcher()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image',
                                   exit_plugins=[{'name': 'uses_source',
                                                  'args': {
                                                      'watcher': watch_exit,
                                                  }}],
                                   buildstep_plugins=[{'name': 'buildstep_watched',
                                                       'args': {
                                                           'watcher': watch_buildstep,
                                                       }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()

    expected = [
        {'diff_id': u'sha256:diff_id1-oldest', 'size': 4},
        {'diff_id': u'sha256:diff_id2', 'size': 3},
        {'diff_id': u'sha256:diff_id3', 'size': 2},
        {'diff_id': u'sha256:diff_id4-newest', 'size': 1}
    ]
    assert workflow.layer_sizes == expected
def test_workflow_docker_build_error():
    """
    When the docker build fails, pre-publish and post-build phases are
    skipped but exit plugins still run.
    """
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    flexmock(InsideBuilder).new_instances(MockInsideBuilder(failed=True))

    watchers = {'prepub': Watcher(), 'post': Watcher(), 'exit': Watcher()}
    workflow = DockerBuildWorkflow(
        MOCK_SOURCE, 'test-image',
        prepublish_plugins=[{'name': 'prepub_watched',
                             'args': {'watcher': watchers['prepub']}}],
        postbuild_plugins=[{'name': 'post_watched',
                            'args': {'watcher': watchers['post']}}],
        exit_plugins=[{'name': 'exit_watched',
                       'args': {'watcher': watchers['exit']}}],
        plugin_files=[this_file])

    assert workflow.build_docker_image().is_failed()

    # No subsequent build phases should have run except 'exit'
    assert not watchers['prepub'].was_called()
    assert not watchers['post'].was_called()
    assert watchers['exit'].was_called()
def test_workflow_plugin_results():
    """
    Verifies the results of plugins in different phases are stored properly.
    """
    this_file = inspect.getfile(PreRaises)
    mock_docker()
    flexmock(InsideBuilder).new_instances(MockInsideBuilder())

    phase_plugins = {
        'prebuild_plugins': [{'name': 'pre_build_value'}],
        'prepublish_plugins': [{'name': 'pre_publish_value'}],
        'postbuild_plugins': [{'name': 'post_build_value'}],
        'exit_plugins': [{'name': 'exit_value'}],
    }
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   plugin_files=[this_file],
                                   **phase_plugins)
    workflow.build_docker_image()

    assert workflow.prebuild_results == {'pre_build_value': 'pre_build_value_result'}
    assert workflow.postbuild_results == {'post_build_value': 'post_build_value_result'}
    assert workflow.prepub_results == {'pre_publish_value': 'pre_publish_value_result'}
    assert workflow.exit_results == {'exit_value': 'exit_value_result'}
def test_adddockerfile_todest(tmpdir, docker_tasker):
    """AddDockerfilePlugin honours a custom 'destdir' for the ADD line."""
    dockerfile_text = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    parser = df_parser(str(tmpdir))
    parser.content = dockerfile_text

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key,
            'args': {'nvr': 'jboss-eap-6-docker-6.4-77',
                     'destdir': '/usr/share/doc/'},
        }],
    ).run()

    assert AddDockerfilePlugin.key is not None

    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile-jboss-eap-6-docker-6.4-77 /usr/share/doc/Dockerfile-jboss-eap-6-docker-6.4-77
CMD blabla"""
    assert parser.content == expected_output
def test_workflow_compat(request):
    """
    Some of our plugins have changed from being run post-build to being run
    at exit. Let's test what happens when we try running an exit
    plugin as a post-build plugin.
    """
    this_file = inspect.getfile(PreWatched)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    watch_exit = Watcher()
    fake_logger = FakeLogger()
    # Swap in a capturing logger; restore the real one when the test ends.
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   postbuild_plugins=[{'name': 'store_logs_to_file',
                                                       'args': {
                                                           'watcher': watch_exit
                                                       }}],
                                   plugin_files=[this_file])
    workflow.build_docker_image()
    # The plugin still runs, but the misplacement must be logged as an error.
    assert watch_exit.was_called()
    assert len(fake_logger.errors) > 0
def test_adddockerfile_nvr_from_labels2(tmpdir, docker_tasker):
    """AddDockerfilePlugin derives the NVR from labels added by AddLabelsPlugin."""
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = df_parser(str(tmpdir))
    df.content = df_content

    if MOCK:
        mock_docker()

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    # Base image carries no labels, so the NVR must come from AddLabelsPlugin.
    flexmock(workflow, base_image_inspect={INSPECT_CONFIG: {"Labels": {}}})
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': {'Name': 'jboss-eap-6-docker',
                                'Version': '6.4',
                                'Release': '77'},
                     'auto_labels': []}
        },
        {
            'name': AddDockerfilePlugin.key
        }]
    )
    runner.run()

    assert AddDockerfilePlugin.key is not None
    assert "ADD Dockerfile-jboss-eap-6-docker-6.4-77 /root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77" in df.content
def test_adddockerfile_plugin(tmpdir, docker_tasker):
    """AddDockerfilePlugin inserts an ADD line with the default destination."""
    dockerfile_text = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    parser = df_parser(str(tmpdir))
    parser.content = dockerfile_text

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key,
            'args': {'nvr': 'rhel-server-docker-7.1-20'},
        }],
    ).run()

    assert AddDockerfilePlugin.key is not None

    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile-rhel-server-docker-7.1-20 /root/buildinfo/Dockerfile-rhel-server-docker-7.1-20
CMD blabla"""
    assert parser.content == expected_output
def test_cp_built_image_to_nfs(tmpdir, docker_tasker, dest_dir):
    """CopyBuiltImageToNFSPlugin mounts the NFS export and copies the image there."""
    mountpoint = tmpdir.join("mountpoint")

    def fake_check_call(cmd):
        # The plugin must mount the NFS export with exactly these arguments.
        assert cmd == [
            "mount",
            "-t", "nfs",
            "-o", "nolock",
            NFS_SERVER_PATH,
            mountpoint,
        ]
    flexmock(subprocess, check_call=fake_check_call)
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")
    workflow.builder = X()
    workflow.exported_image_sequence.append(
        {"path": os.path.join(str(tmpdir), EXPORTED_SQUASHED_IMAGE_NAME)})
    # Create an empty file standing in for the exported image archive.
    open(workflow.exported_image_sequence[-1].get("path"), 'a').close()

    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': CopyBuiltImageToNFSPlugin.key,
            'args': {
                "nfs_server_path": NFS_SERVER_PATH,
                "dest_dir": dest_dir,
                "mountpoint": str(mountpoint),
            }
        }]
    )
    runner.run()
    if dest_dir is None:
        assert os.path.isfile(os.path.join(str(mountpoint),
                                           EXPORTED_SQUASHED_IMAGE_NAME))
    else:
        assert os.path.isfile(os.path.join(str(mountpoint), dest_dir,
                                           EXPORTED_SQUASHED_IMAGE_NAME))
def test_adddockerfile_nvr_from_labels(tmpdir, docker_tasker):
    """The plugin derives the NVR from LABEL values already in the Dockerfile."""
    dockerfile_text = """
FROM fedora
RUN yum install -y python-django
LABEL Name="jboss-eap-6-docker" "Version"="6.4" "Release"=77
CMD blabla"""
    parser = df_parser(str(tmpdir))
    parser.content = dockerfile_text

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': AddDockerfilePlugin.key}],
    ).run()

    assert AddDockerfilePlugin.key is not None
    expected_line = ("ADD Dockerfile-jboss-eap-6-docker-6.4-77 "
                     "/root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77")
    assert expected_line in parser.content
def test_pull_base_image_plugin(df_base, parent_registry, expected_w_reg, expected_wo_reg):
    """
    PullBaseImagePlugin pulls the base image and tags it with/without the
    registry prefix according to the parent_registry setting.
    """
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = MockBuilder()
    workflow.builder.base_image = ImageName.parse(df_base)

    # Sanity: neither tag exists before the plugin runs.
    assert not tasker.image_exists(BASE_IMAGE)
    assert not tasker.image_exists(BASE_IMAGE_W_REGISTRY)

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {'parent_registry': parent_registry,
                     'parent_registry_insecure': True}
        }]
    )
    runner.run()

    assert tasker.image_exists(BASE_IMAGE) == expected_wo_reg
    assert tasker.image_exists(BASE_IMAGE_W_REGISTRY) == expected_w_reg

    # Best-effort cleanup. Fix: the bare 'except:' also swallowed
    # SystemExit/KeyboardInterrupt; catch Exception instead.
    try:
        tasker.remove_image(BASE_IMAGE)
        tasker.remove_image(BASE_IMAGE_W_REGISTRY)
    except Exception:
        pass
def test_get_primary_images(tag_conf, tag_annotation, expected):
    """get_primary_images collects primary tags from tag_conf and annotations."""
    template_image = ImageName.parse('registry.example.com/fedora')
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')

    def tagged(tag):
        # Fresh copy of the template image with the given tag applied.
        name = ImageName.parse(str(template_image))
        name.tag = tag
        return str(name)

    for tag in tag_conf:
        workflow.tag_conf.add_primary_image(tagged(tag))

    annotations = {}
    for tag in tag_annotation:
        primary = annotations.setdefault('repositories', {}).setdefault('primary', [])
        primary.append(tagged(tag))

    workflow.build_result = BuildResult(annotations=annotations, image_id='foo')

    actual = get_primary_images(workflow)
    assert len(actual) == len(expected)
    for primary_image, expected_tag in zip(actual, expected):
        assert primary_image.registry == template_image.registry
        assert primary_image.namespace == template_image.namespace
        assert primary_image.repo == template_image.repo
        assert primary_image.tag == expected_tag
def build_image_here(source, image, parent_registry=None, target_registries=None,
                     parent_registry_insecure=False, target_registries_insecure=False,
                     dont_pull_base_image=False, **kwargs):
    """
    build image from provided dockerfile (specified by `source`) in current environment

    :param source: dict, where/how to get source code to put in image
    :param image: str, tag for built image ([registry/]image_name[:tag])
    :param parent_registry: str, registry to pull base image from
    :param target_registries: list of str, list of registries to push image to
                              (might change in future)
    :param parent_registry_insecure: bool, allow connecting to parent registry
                                     over plain http
    :param target_registries_insecure: bool, allow connecting to target
                                       registries over plain http
    :param dont_pull_base_image: bool, don't pull or update base image
                                 specified in dockerfile
    :return: BuildResults
    """
    explicit_args = {
        "image": image,
        "source": source,
        "parent_registry": parent_registry,
        "target_registries": target_registries,
        "parent_registry_insecure": parent_registry_insecure,
        "target_registries_insecure": target_registries_insecure,
        "dont_pull_base_image": dont_pull_base_image,
    }
    # Extra keyword arguments take precedence over the explicit ones.
    workflow = DockerBuildWorkflow(**dict(explicit_args, **kwargs))
    return workflow.build_docker_image()
def test_tag_by_labels_plugin(tmpdir, args):
    """TagByLabelsPlugin builds primary/unique tags from name/version/release labels."""
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")
    version = "1.0"
    release = "1"
    workflow.built_image_inspect = {
        INSPECT_CONFIG: {
            "Labels": {
                "name": TEST_IMAGE,
                "version": version,
                "release": release
            }
        }
    }
    workflow.push_conf.add_docker_registry(LOCALHOST_REGISTRY, insecure=True)
    image = ImageName(repo=TEST_IMAGE,
                      tag="%s_%s" % (version, release),
                      registry=LOCALHOST_REGISTRY)
    setattr(workflow, 'builder', X)

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagByLabelsPlugin.key,
            'args': args,
        }]
    )
    output = runner.run()
    assert TagByLabelsPlugin.key in output.keys()

    images = [i.to_str() for i in workflow.tag_conf.images]
    primary_images = [i.to_str() for i in workflow.tag_conf.primary_images]
    unique_images = [i.to_str() for i in workflow.tag_conf.unique_images]

    if args.get('unique_tag_only'):
        # Only the unique tag is produced.
        assert len(workflow.tag_conf.images) == 1
        assert len(primary_images) == 0
    else:
        # N-V-R, N-V and latest primary tags plus the unique tag.
        assert len(workflow.tag_conf.images) == 4
        assert len(primary_images) == 3
        assert ("%s:%s-%s" % (TEST_IMAGE, version, release)) in images
        assert ("%s:%s" % (TEST_IMAGE, version)) in images
        assert ("%s:latest" % (TEST_IMAGE, )) in images
        assert ("%s:%s-%s" % (TEST_IMAGE, version, release)) in primary_images
        assert ("%s:%s" % (TEST_IMAGE, version)) in primary_images
        assert ("%s:latest" % (TEST_IMAGE, )) in primary_images

    # The unique tag is always present.
    assert len(unique_images) == 1
    assert ("%s:%s" % (TEST_IMAGE, "unique_tag_123")) in images
    assert ("%s:%s" % (TEST_IMAGE, "unique_tag_123")) in unique_images
    tasker.remove_image(image)
def test_tag_and_push_plugin_oci(tmpdir, monkeypatch, use_secret, fail_push, caplog,
                                 reactor_config_map):
    """End-to-end check of TagAndPushPlugin pushing an OCI image via (mocked) skopeo."""
    # For now, we don't want to require having a skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write a throwaway .dockercfg so the plugin can read push credentials.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    manifest_json = {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [{
            "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
            "digest": "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
            "size": 1863477
        }],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref": "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body": "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata": "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size": "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit": "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata": "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type": "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Also register a tarred variant of the same stub OCI layout.
    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            for f in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, f), f)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo
    def check_check_output(args, **kwargs):
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1, cmd=args,
                                                output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--dest-creds=user:mypassword' in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE
        return ''

    (flexmock(subprocess).should_receive("check_output").once().replace_with(
        check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded
    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': MEDIA_TYPE,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response,
              raise_for_status=lambda: None,
              json=config_json))

    def custom_get(method, url, headers, **kwargs):
        # Route registry requests to the canned responses above; the manifest
        # is only acceptable when the OCI media type is requested.
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response
        if url == manifest_url:
            return manifest_response
        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(requests.Session)
        .should_receive('request')
        .replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'registries': [{
                               'url': LOCALHOST_REGISTRY,
                               'insecure': True,
                               'auth': {'cfg_path': secret_path},
                           }]})

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagAndPushPlugin.key,
        'args': {
            'registries': {
                LOCALHOST_REGISTRY: {
                    'insecure': True,
                    'secret': secret_path
                }
            }
        },
    }])

    with caplog.atLevel(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    # Credentials must never leak into the logs.
    for r in caplog.records():
        assert 'mypassword' not in r.getMessage()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        # Only the OCI digest is set for a skopeo push; v1/v2 stay empty.
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE].oci == DIGEST_OCI
        assert workflow.push_conf.docker_registries[0].config is config_json
def test_add_help_md2man_error(request, tmpdir, docker_tasker, filename, go_md2man_result, caplog):
    """
    Exercise AddHelpPlugin's handling of every go-md2man outcome.

    ``go_md2man_result`` selects the scenario being simulated:

    - ``'binary_missing'``   - Popen raises OSError(ENOENT): go-md2man absent
    - ``'other_os_error'``   - Popen raises a non-ENOENT OSError
    - ``'fail'``             - go-md2man runs but exits non-zero
    - ``'input_missing'``    - the markdown help file was never created
    - ``'result_missing'``   - go-md2man "succeeds" but produces no man file
    - ``'pass'``             - the happy path

    The first three surface as exceptions stored in the plugin result; the
    last three are checked against the plugin's structured status result.
    """
    df_content = "FROM fedora"
    df = df_parser(str(tmpdir))
    df.content = df_content

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    # Pre-create the markdown input and/or the man-page output so each
    # scenario sees exactly the files it expects on disk.
    help_markdown_path = os.path.join(workflow.builder.df_dir, filename)
    if go_md2man_result != 'input_missing':
        generate_a_file(help_markdown_path, "foo")
    help_man_path = os.path.join(workflow.builder.df_dir, AddHelpPlugin.man_filename)
    if go_md2man_result != 'result_missing':
        generate_a_file(help_man_path, "bar")

    # Exact command line the plugin is expected to run.
    cmd = ['go-md2man',
           '-in={}'.format(help_markdown_path),
           '-out={}'.format(help_man_path)]

    def check_popen_pass(*args, **kwargs):
        # Every scenario must invoke go-md2man with exactly this command.
        assert args[0] == cmd
        return MockedPopen()

    def check_popen_binary_missing(*args, **kwargs):
        check_popen_pass(*args, **kwargs)
        # errno 2 == ENOENT: the binary itself is missing
        raise OSError(2, "No such file or directory")

    def check_popen_other_os_error(*args, **kwargs):
        check_popen_pass(*args, **kwargs)
        raise OSError(0, "Other error")

    def check_popen_fail(*args, **kwargs):
        check_popen_pass(*args, **kwargs)
        raise subprocess.CalledProcessError(returncode=1, cmd=args[0])

    # Wire up exactly one Popen replacement for the selected scenario;
    # .once() also asserts the plugin calls go-md2man exactly once.
    if go_md2man_result == 'binary_missing':
        (flexmock(subprocess)
         .should_receive("Popen")
         .once()
         .replace_with(check_popen_binary_missing))
    elif go_md2man_result == 'other_os_error':
        (flexmock(subprocess)
         .should_receive("Popen")
         .once()
         .replace_with(check_popen_other_os_error))
    elif go_md2man_result == 'fail':
        (flexmock(subprocess)
         .should_receive("Popen")
         .once()
         .replace_with(check_popen_fail))
    elif go_md2man_result in ['pass', 'result_missing']:
        (flexmock(subprocess)
         .should_receive("Popen")
         .once()
         .replace_with(check_popen_pass))

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddHelpPlugin.key,
            'args': {'help_file': filename}
        }]
    )

    result = runner.run()

    if go_md2man_result == 'binary_missing':
        assert list(result.keys()) == ['add_help']
        assert isinstance(result['add_help'], RuntimeError)
        assert 'Help file is available, but go-md2man is not present in a buildroot' \
            == str(result['add_help'])

    elif go_md2man_result == 'other_os_error':
        assert list(result.keys()) == ['add_help']
        assert isinstance(result['add_help'], OSError)

    elif go_md2man_result == 'result_missing':
        assert list(result.keys()) == ['add_help']
        assert isinstance(result['add_help'], RuntimeError)
        assert 'go-md2man run complete, but man file is not found' == str(result['add_help'])

    elif go_md2man_result == 'input_missing':
        expected_result = {
            'add_help': {
                'status': AddHelpPlugin.NO_HELP_FILE_FOUND,
                'help_file': filename
            }
        }
        assert result == expected_result

    elif go_md2man_result == 'pass':
        expected_result = {
            'add_help': {
                'status': AddHelpPlugin.HELP_GENERATED,
                'help_file': filename
            }
        }
        assert result == expected_result

    elif go_md2man_result == 'fail':
        assert list(result.keys()) == ['add_help']
        assert isinstance(result['add_help'], RuntimeError)
        assert 'Error running' in str(result['add_help'])
def test_tag_and_push_plugin(tmpdir, monkeypatch, image_name, logs, should_raise, has_config,
                             use_secret, reactor_config_map, file_name, dockerconfig_contents):
    """
    End-to-end test of TagAndPushPlugin against a mocked docker daemon and a
    mocked v2 registry.

    Mocks docker push/login, optionally writes a docker config secret, and
    stubs out the registry's manifest/blob HTTP endpoints so the plugin's
    digest discovery can be verified (v1 and v2 digests, OCI absent, and the
    stored config blob).

    Parametrized by:
    - logs / should_raise: simulated docker push output and expected failure
    - has_config: whether the registry serves a config blob (None otherwise)
    - use_secret / file_name / dockerconfig_contents: registry auth secret
    - reactor_config_map: registry configured via reactor config vs plugin args
    """
    if MOCK:
        mock_docker()
        flexmock(docker.APIClient, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write the docker auth config into a temp dir the plugin can read.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    config_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))
    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))
    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': 'application/vnd.docker.distribution.manifest.list.v2+json',
              }))
    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        """Route mocked registry requests by URL and Accept header."""
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested.
            # BUGFIX: the manifest-list check used to follow an if/else in which
            # both branches returned, making it unreachable dead code; it now
            # participates in the chain so list requests get the list response.
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return manifest_response_v2
            elif headers['Accept'] == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list
            else:
                return manifest_response_v1

        if url == manifest_url:
            return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()
    (flexmock(requests.Session)
     .should_receive('request')
     .replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': TagAndPushPlugin.key,
        'args': {
            'registries': {
                LOCALHOST_REGISTRY: {
                    'insecure': True,
                    'secret': secret_path
                }
            }
        },
    }])

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2, oci=None)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
def prepare(pulp_registries=None, docker_registries=None):
    """
    Build a DockerBuildWorkflow wired with mocked OSBS/OpenShift plumbing.

    :param pulp_registries: iterable of (name, crane_uri) pairs; defaults to a
                            single "test" registry pointed at LOCALHOST_REGISTRY
    :param docker_registries: iterable of docker registry URIs; defaults to
                              (DOCKER0_REGISTRY, )
    :return: the prepared DockerBuildWorkflow instance
    """
    if pulp_registries is None:
        pulp_registries = (("test", LOCALHOST_REGISTRY), )

    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY, )

    # No-op stand-ins for the OSBS annotation/label update calls.
    def set_annotations_on_build(build_id, annotations):
        pass

    def update_labels_on_build(build_id, labels):
        pass

    # Provide the BUILD json an OpenShift build pod would see in its env.
    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    # should_call (not should_receive) lets the real Configuration run while
    # asserting it is constructed with exactly these arguments.
    (flexmock(osbs.conf).should_call("Configuration").with_args(
        namespace="namespace", conf_file=None, verify_ssl=True,
        openshift_url="http://example.com/", openshift_uri="http://example.com/",
        use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")

    for name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(name, crane_uri)

    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    # Pre-populate each docker registry with known digests for both images.
    for docker_registry in docker_registries:
        r = workflow.push_conf.add_docker_registry(docker_registry)
        r.digests[TEST_IMAGE] = ManifestDigest(v1='not-used', v2=DIGEST1)
        r.digests["namespace/image:asd123"] = ManifestDigest(v1='not-used',
                                                             v2=DIGEST2)

    setattr(workflow, 'builder', X)
    setattr(workflow, '_base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = [
        "a",
        "b",
    ]
    # Mock the lazy-git helper so no actual git repo is needed.
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")

    return workflow