def test_save_and_load(self, tmpdir):
    """Test save workflow data and then load them back properly."""
    tag_conf = TagConf()
    tag_conf.add_floating_image(ImageName.parse("registry/image:latest"))
    tag_conf.add_primary_image(ImageName.parse("registry/image:1.0"))

    wf_data = ImageBuildWorkflowData(
        dockerfile_images=DockerfileImages(["scratch", "registry/f:35"]),
        # Test object in dict values is serialized
        tag_conf=tag_conf,
        plugins_results={
            "plugin_a": {
                'parent-images-koji-builds': {
                    # ImageName used as a dict key must survive the round trip
                    ImageName(repo='base', tag='latest').to_str(): {
                        'id': 123456789,
                        'nvr': 'base-image-1.0-99',
                        'state': 1,
                    },
                },
            },
            "tag_and_push": [
                # Such object in a list should be handled properly.
                ImageName(registry="localhost:5000", repo='image', tag='latest'),
            ],
            "image_build": {"logs": ["Build succeeds."]},
        },
        koji_upload_files=[
            {
                "local_filename": "/path/to/build1.log",
                "dest_filename": "x86_64-build.log",
            },
            {
                "local_filename": "/path/to/dir1/remote-source.tar.gz",
                "dest_filename": "remote-source.tar.gz",
            },
        ])

    context_dir = ContextDir(Path(tmpdir.join("context_dir").mkdir()))
    wf_data.save(context_dir)

    assert context_dir.workflow_json.exists()

    # Verify the saved data matches the schema
    saved_data = json.loads(context_dir.workflow_json.read_bytes())
    try:
        validate_with_schema(saved_data, "schemas/workflow_data.json")
    except osbs.exceptions.OsbsValidationException as e:
        pytest.fail(
            f"The dumped workflow data does not match JSON schema: {e}")

    # Load and verify the loaded data equals what was saved (round trip)
    loaded_wf_data = ImageBuildWorkflowData.load_from_dir(context_dir)

    assert wf_data.dockerfile_images == loaded_wf_data.dockerfile_images
    assert wf_data.tag_conf == loaded_wf_data.tag_conf
    assert wf_data.plugins_results == loaded_wf_data.plugins_results
def test_image_name_comparison():
    # Both "==" and "!=" must be implemented and must agree with each other.
    left = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='1')
    same = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='1')
    assert left == same
    assert not left != same

    # A single differing component (the tag) flips both comparisons.
    other = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='2')
    assert not left == other
    assert left != other
def mock_environment(tmpdir, session=None, build_process_failed=False,
                     koji_build_id=None, scratch=None):
    """Create a mocked (tasker, workflow) pair for koji-related plugin tests."""
    if session is None:
        session = MockedClientSession('')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()

    workflow = DockerBuildWorkflow(source=SOURCE)
    workflow.builder = X()
    workflow.builder.image_id = '123456imageid'
    workflow.builder.base_image = ImageName(repo='Fedora', tag='22')
    workflow.builder.source = X()
    workflow.builder.source.dockerfile_path = None
    workflow.builder.source.path = None
    if scratch is not None:
        workflow.user_params['scratch'] = scratch

    # Any koji client session the plugin opens resolves to our mock session.
    flexmock(koji, ClientSession=lambda hub, opts: session)

    if build_process_failed:
        workflow.build_result = BuildResult(fail_reason="not built")
    else:
        workflow.build_result = BuildResult(image_id="id1234")
    workflow.exit_results[KojiImportPlugin.key] = koji_build_id

    # Avoid real sleeps in any retry loops exercised by the test.
    flexmock(time).should_receive('sleep').and_return(None)

    return tasker, workflow
def mock_environment(tmpdir, primary_images=None, annotations=None):
    """Create a mocked (tasker, workflow) pair carrying a stub build result."""
    if MOCK:
        mock_docker()
    tasker = DockerTasker()

    workflow = DockerBuildWorkflow(source=SOURCE)
    parent_id = '123456parent-id'
    workflow._base_image_inspect = {'Id': parent_id}
    workflow.builder = StubInsideBuilder()
    workflow.builder.image_id = '123456imageid'
    workflow.builder.base_image = ImageName(repo='Fedora', tag='22')
    workflow.builder.source = StubInsideBuilder()
    workflow.builder.built_image_info = {'ParentId': parent_id}
    workflow.builder.source.dockerfile_path = None
    workflow.builder.source.path = None
    workflow.tag_conf = TagConf()

    if primary_images:
        # Only tags containing '-' are registered as primary images.
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    workflow.tag_conf.add_floating_image('namespace/httpd:floating')
    workflow.build_result = BuildResult(image_id='123456',
                                        annotations=annotations or {})
    return tasker, workflow
def mock_environment(tmpdir, workflow, primary_images=None, floating_images=None,
                     manifest_results=None, annotations=None):
    """Populate the given *workflow* with stub build state and return it
    together with a mocked tasker.
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()

    parent_id = '123456parent-id'
    workflow._base_image_inspect = {'Id': parent_id}
    workflow.builder = StubInsideBuilder()
    workflow.builder.image_id = '123456imageid'
    workflow.builder.base_image = ImageName(repo='Fedora', tag='22')
    workflow.builder.source = StubInsideBuilder()
    workflow.builder.built_image_info = {'ParentId': parent_id}
    workflow.builder.source.dockerfile_path = None
    workflow.builder.source.path = None

    if primary_images:
        # Only tags containing '-' are registered as primary images.
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])
    if floating_images:
        workflow.tag_conf.add_floating_images(floating_images)

    workflow.build_result = BuildResult(image_id='123456',
                                        annotations=annotations or {})
    workflow.postbuild_results = {}
    if manifest_results:
        workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_results

    return tasker, workflow
def add_platform(image: ImageName) -> ImageName:
    """Return a copy of *image* whose tag has the platform suffix appended.

    Reads ``platform`` from the enclosing scope.
    """
    suffixed_tag = f'{image.tag}-{platform}'
    return ImageName(registry=image.registry,
                     namespace=image.namespace,
                     repo=image.repo,
                     tag=suffixed_tag)
def __init__(self, failed=False, image_id=None):
    """Builder stub with canned attribute values for tests."""
    self.tasker = None
    self.base_image = ImageName(repo='Fedora', tag='29')
    # Any falsy image_id falls back to the placeholder 'asd'.
    self.image_id = image_id if image_id else 'asd'
    self.image = ImageName.parse('image')
    self.failed = failed
    self.df_path = 'some'
    self.df_dir = 'some'
def _replace(self, image, registry=_KEEP, namespace=_KEEP, repo=_KEEP, tag=_KEEP):
    """Return a copy of *image* with the requested components replaced.

    Components left at the ``_KEEP`` sentinel are carried over from *image*
    unchanged; ``None`` is a valid explicit replacement value.
    """
    def pick(override, current):
        # _KEEP means "no override requested" for this component.
        return current if override is _KEEP else override

    return ImageName(
        registry=pick(registry, image.registry),
        namespace=pick(namespace, image.namespace),
        repo=pick(repo, image.repo),
        tag=pick(tag, image.tag),
    )
def __init__(self):
    """Stub builder backed by the docker mock; fakes build-log output."""
    mock_docker()
    self.tasker = DockerTasker()
    self.base_image = ImageName(repo='fedora', tag='25')
    self.image_id = 'image_id'
    self.image = INPUT_IMAGE
    self.df_path = 'df_path'
    self.df_dir = 'df_dir'

    def fake_build_logs(x, y):
        # One non-ASCII chunk, to exercise UTF-8 decoding of build logs.
        yield "some\u2018".encode('utf-8')

    flexmock(self.tasker, build_image_from_path=fake_build_logs)
def commit_buildroot(self):
    """
    create image from buildroot

    :return:
    """
    logger.info("committing buildroot")
    self.ensure_is_built()

    commit_message = "docker build of '%s' (%s)" % (self.image, self.uri)
    # save the time when image was built — the timestamp tag keeps each
    # buildroot commit unique
    timestamp_tag = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    self.buildroot_image_name = ImageName(
        repo="buildroot-%s" % self.image,
        tag=timestamp_tag,
    )
    self.buildroot_image_id = self.dt.commit_container(
        self.build_container_id, commit_message)
    return self.buildroot_image_id
def prepare(tmpdir, labels=None):
    """Build a (tasker, workflow) pair wired to a mock source for tests."""
    if MOCK:
        mock_docker()

    labels = labels or {}
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(source=SOURCE)
    workflow.user_params['scratch'] = labels.get('scratch', False)
    workflow.user_params['isolated'] = labels.get('isolated', False)

    workflow.builder = X()
    source = MockSource(tmpdir)
    workflow.builder.image_id = "asd123"
    workflow.builder.base_image = ImageName(repo='Fedora', tag='21')
    workflow.builder.source = source
    workflow.source = source

    return tasker, workflow
def test_prebuild_plugin_failure(docker_tasker):  # noqa
    """A plugin raising must fail the runner and mark the build as failed."""
    workflow = DockerBuildWorkflow(source=SOURCE)
    workflow.builder = X()
    workflow.builder.image_id = "asd123"
    workflow.builder.base_image = ImageName(repo='fedora', tag='21')
    workflow.builder.source = X()
    workflow.builder.source.dockerfile_path = "/non/existent"
    workflow.builder.source.path = "/non/existent"

    # 'repourls': True is deliberately invalid input for the plugin.
    plugin_conf = [{
        "name": AddYumRepoByUrlPlugin.key,
        "args": {'repourls': True},
    }]
    runner = PreBuildPluginsRunner(docker_tasker, workflow, plugin_conf)

    with pytest.raises(PluginFailedException):
        runner.run()
    assert workflow.build_process_failed is True
def _pull_and_tag_image(self, image, build_json, nonce):
    """Docker pull the image and tag it uniquely for use by this build

    :param image: ImageName, the parent image to pull
    :param build_json: dict, OpenShift build object; its metadata name is
                       used as the unique repository for the local tag
    :param nonce: str, tag component making the local name unique
    :return: ImageName, the uniquely tagged local image
    :raises RetryGeneratorException: if the pull itself fails
    :raises RuntimeError: if tagging keeps failing after 20 attempts
    """
    # Work on a copy so the caller's ImageName is not mutated.
    image = image.copy()
    reg_client = self._get_registry_client(image.registry)
    for _ in range(20):
        # retry until pull and tag is successful or definitively fails.
        # should never require 20 retries but there's a race condition at work.
        # just in case something goes wildly wrong, limit to 20 so it terminates.
        try:
            self.tasker.pull_image(image,
                                   insecure=reg_client.insecure,
                                   dockercfg_path=reg_client.dockercfg_path)
            self.workflow.pulled_base_images.add(image.to_str())
        except RetryGeneratorException:
            # Pull failures are terminal; only tag races are retried below.
            self.log.error('failed to pull image: %s', image)
            raise

        # Attempt to tag it using a unique ID. We might have to retry
        # if another build with the same parent image is finishing up
        # and removing images it pulled.

        # Use the OpenShift build name as the unique ID
        unique_id = build_json['metadata']['name']
        new_image = ImageName(repo=unique_id, tag=nonce)
        try:
            self.log.info("tagging pulled image")
            response = self.tasker.tag_image(image, new_image)
            self.workflow.pulled_base_images.add(response)
            self.log.debug("image '%s' is available as '%s'", image, new_image)
            return new_image
        except docker.errors.NotFound:
            # If we get here, some other build raced us to remove
            # the parent image, and that build won.
            # Retry the pull immediately.
            self.log.info("re-pulling removed image")
            continue

    # Failed to tag it after 20 tries
    self.log.error("giving up trying to pull image")
    raise RuntimeError("too many attempts to pull and tag image")
def test_privileged_gitrepo_build(caplog, source_params):
    """Build via PrivilegedBuildManager and verify the pushed image exists."""
    if MOCK:
        mock_docker()

    image_name = ImageName(repo="atomic-reactor-test-ssh-image")
    remote_image = image_name.copy()
    remote_image.registry = LOCALHOST_REGISTRY

    build_args = {
        "source": source_params,
        "image": remote_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    }
    manager = PrivilegedBuildManager("buildroot-fedora", build_args)
    results = manager.build()

    dt = DockerTasker()
    dt.pull_image(remote_image, insecure=True)

    assert len(results.build_logs) > 0

    # Clean up the build container and pushed image.
    dt.remove_container(results.container_id)
    dt.remove_image(remote_image)
def check_manifest_list(self, build_image, orchestrator_platform, platforms,
                        current_buildimage):
    """Validate the buildroot's manifest list and record per-arch digests.

    Fills ``self.build_image_digests`` with a platform -> pinned-digest
    pullspec mapping taken from the manifest list of *build_image*.

    :param build_image: str, buildroot pullspec ``registry/repo:tag``
    :param orchestrator_platform: str, platform the orchestrator runs on
    :param platforms: platforms requested for the build
                      (NOTE(review): not referenced in this body — verify
                      whether callers rely on it)
    :param current_buildimage: str, image the orchestrator is running from
    :raises RuntimeError: no manifest list, orchestrator platform missing
                          from it, or orchestrator digest not in it
    """
    # Split the pullspec into registry host and repo:tag.
    registry_name, image = build_image.split('/', 1)
    repo, tag = image.rsplit(':', 1)
    registry = ImageName(registry=registry_name, repo=repo, tag=tag)
    manifest_list = get_manifest_list(registry, registry_name, insecure=True)

    # we don't have manifest list, but we want to build on different platforms
    if not manifest_list:
        raise RuntimeError("Buildroot image isn't manifest list,"
                           " which is needed for specified arch")

    arch_digests = {}
    # Drop the tag; digests are referenced as repo@digest.
    image_name = build_image.rsplit(':', 1)[0]

    manifest_list_dict = manifest_list.json()
    for manifest in manifest_list_dict['manifests']:
        arch = manifest['platform']['architecture']
        arch_digests[arch] = image_name + '@' + manifest['digest']

    # Translate goarch names (e.g. amd64) to build platform names.
    arch_to_platform = get_goarch_to_platform_mapping(
        self.workflow, self.plat_des_fallback)
    for arch in arch_digests:
        self.build_image_digests[arch_to_platform[arch]] = arch_digests[arch]

    # orchestrator platform is in manifest list
    if orchestrator_platform not in self.build_image_digests:
        raise RuntimeError(
            "Platform for orchestrator '%s' isn't in manifest list"
            % orchestrator_platform)

    # If the orchestrator was started from a digest-pinned image, that digest
    # must be one of the digests in the manifest list.
    if ('@sha256:' in current_buildimage and
            self.build_image_digests[orchestrator_platform] != current_buildimage):
        raise RuntimeError(
            "Orchestrator is using image digest '%s' which isn't"
            " in manifest list" % current_buildimage)
from osbs.utils import (buildconfig_update, git_repo_humanish_part_from_uri, sanitize_strings_for_openshift, get_time_from_rfc3339, TarWriter, TarReader, make_name_from_git, wrap_name_from_git, get_instance_token_file_name, sanitize_version, has_triggers, clone_git_repo, get_repo_info, ImageName) from osbs.exceptions import OsbsException, OsbsCommitNotFound from tests.constants import (TEST_DOCKERFILE_GIT, TEST_DOCKERFILE_SHA1, TEST_DOCKERFILE_INIT_SHA1, TEST_DOCKERFILE_BRANCH) import osbs.kerberos_ccache BC_NAME_REGEX = r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' BC_LABEL_REGEX = r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?([\/\.]*[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' TEST_DATA = { "repository.com/image-name:latest": ImageName(registry="repository.com", repo="image-name"), "repository.com/prefix/image-name:1": ImageName(registry="repository.com", namespace="prefix", repo="image-name", tag="1"), "repository.com/prefix/image-name@sha256:12345": ImageName(registry="repository.com", namespace="prefix", repo="image-name", tag="sha256:12345"), "repository.com/prefix/image-name:latest": ImageName(registry="repository.com", namespace="prefix", repo="image-name"), "image-name:latest": ImageName(repo="image-name"), "registry:5000/image-name@sha256:12345": ImageName(registry="registry:5000", repo="image-name", tag="sha256:12345"), "registry:5000/image-name:latest": ImageName(registry="registry:5000", repo="image-name"),
class X(object):
    """Minimal builder stub exposing only the attributes plugins read."""

    # Placeholder built-image identifier.
    image_id = INPUT_IMAGE
    # No git-backed source in these tests.
    git_dockerfile_path = None
    git_path = None
    # Arbitrary parent image value.
    base_image = ImageName(repo="qwe", tag="asd")
def get_workflow_data_json():
    """Build sample workflow data and return it serialized-then-parsed as JSON.

    The data is dumped with WorkflowDataEncoder (so ImageName objects nested
    in dicts and lists are serialized) and read back with a plain json.load,
    yielding the wire representation tests can inspect.
    """
    tag_conf = TagConf()
    tag_conf.add_floating_image(ImageName.parse("registry/image:latest"))
    tag_conf.add_primary_image(ImageName.parse("registry/image:1.0"))

    wf_data = ImageBuildWorkflowData(
        dockerfile_images=DockerfileImages(["scratch", "registry/f:35"]),
        # Test object in dict values is serialized
        plugins_results={
            "image_build": {"logs": ["Build succeeds."]},
            "tag_and_push": [
                # Such object in a list should be handled properly.
                ImageName(registry="localhost:5000", repo='image', tag='latest'),
            ],
            "plugin_a": {
                'parent-images-koji-builds': {
                    ImageName(repo='base', tag='latest').to_str(): {
                        'id': 123456789,
                        'nvr': 'base-image-1.0-99',
                        'state': 1,
                    },
                },
            },
        },
        tag_conf=tag_conf,
        koji_upload_files=[
            {
                "local_filename": "/path/to/build1.log",
                "dest_filename": "x86_64-build.log",
            },
            {
                "local_filename": "/path/to/dir1/remote-source.tar.gz",
                "dest_filename": "remote-source.tar.gz",
            },
        ])

    # Identical rpm lists for both arches; only the platform key differs.
    wf_data.image_components = {
        'x86_64': [
            {'type': 'rpm', 'name': 'python-docker-py', 'version': '1.3.1',
             'release': '1.fc24', 'arch': 'noarch',
             'sigmd5': '7c1f60d8cde73e97a45e0c489f4a3b26',
             'signature': None, 'epoch': None},
            {'type': 'rpm', 'name': 'fedora-repos-rawhide', 'version': '24',
             'release': '0.1', 'arch': 'noarch',
             'sigmd5': 'd41df1e059544d906363605d47477e60',
             'signature': None, 'epoch': None},
            {'type': 'rpm', 'name': 'gpg-pubkey-doc', 'version': '1.0',
             'release': '1', 'arch': 'noarch',
             'sigmd5': '00000000000000000000000000000000',
             'signature': None, 'epoch': None},
        ],
        'ppc64le': [
            {'type': 'rpm', 'name': 'python-docker-py', 'version': '1.3.1',
             'release': '1.fc24', 'arch': 'noarch',
             'sigmd5': '7c1f60d8cde73e97a45e0c489f4a3b26',
             'signature': None, 'epoch': None},
            {'type': 'rpm', 'name': 'fedora-repos-rawhide', 'version': '24',
             'release': '0.1', 'arch': 'noarch',
             'sigmd5': 'd41df1e059544d906363605d47477e60',
             'signature': None, 'epoch': None},
            {'type': 'rpm', 'name': 'gpg-pubkey-doc', 'version': '1.0',
             'release': '1', 'arch': 'noarch',
             'sigmd5': '00000000000000000000000000000000',
             'signature': None, 'epoch': None},
        ],
    }

    # Round-trip through a temp file: dump with the custom encoder, then
    # reload as plain JSON.
    with TemporaryDirectory() as d:
        with open(os.path.join(d, 'workflow_data.json'), 'w') as f:
            json.dump(wf_data.as_dict(), f, cls=WorkflowDataEncoder)
        with open(os.path.join(d, 'workflow_data.json')) as f:
            workflow_json = json.load(f)
    return workflow_json
def temp_image_name():
    """Return an ImageName with a unique test repository name."""
    # The UUID suffix keeps concurrent test runs from colliding.
    unique_repo = "atomic-reactor-tests-%s" % uuid_value()
    return ImageName(repo=unique_repo)
def __init__(self):
    """Stub builder: placeholder image id, mock source, arbitrary base image."""
    self.image_id = "xxx"
    self.source = MockSource()
    self.base_image = ImageName(repo="qwe", tag="asd")
def __init__(self):
    """Stub builder: arbitrary base image, no Dockerfile path."""
    self.base_image = ImageName(repo="qwe", tag="asd")
    self.df_path = None
DOCKERFILE_MULTISTAGE_CUSTOM_BAD_PATH) from atomic_reactor.constants import CONTAINER_DOCKERPY_BUILD_METHOD from osbs.utils import ImageName from tests.util import requires_internet from flexmock import flexmock from textwrap import dedent if MOCK: from tests.docker_mock import mock_docker # This stuff is used in tests; you have to have internet connection, # running registry on port 5000 and it helps if you've pulled fedora:latest before git_base_repo = "fedora" git_base_tag = "latest" git_base_image = ImageName(registry=LOCALHOST_REGISTRY, repo="fedora", tag="latest") with_all_sources = pytest.mark.parametrize('source_params', [ SOURCE, { 'provider': 'path', 'uri': 'file://' + DOCKERFILE_OK_PATH }, { 'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_PATH }, { 'provider': 'path', 'uri': 'file://' + DOCKERFILE_MULTISTAGE_SCRATCH_PATH
sanitize_strings_for_openshift, get_time_from_rfc3339, TarWriter, TarReader, make_name_from_git, wrap_name_from_git, get_instance_token_file_name, sanitize_version, has_triggers, clone_git_repo, get_repo_info, UserWarningsStore, ImageName) from osbs.exceptions import OsbsException, OsbsCommitNotFound from tests.constants import (TEST_DOCKERFILE_GIT, TEST_DOCKERFILE_SHA1, TEST_DOCKERFILE_INIT_SHA1, TEST_DOCKERFILE_BRANCH) import osbs.kerberos_ccache BC_NAME_REGEX = r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' BC_LABEL_REGEX = r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?([\/\.]*[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' TEST_DATA = { "repository.com/image-name:latest": ImageName(registry="repository.com", repo="image-name"), "repository.com/prefix/image-name:1": ImageName(registry="repository.com", namespace="prefix", repo="image-name", tag="1"), "repository.com/prefix/image-name@sha256:12345": ImageName(registry="repository.com", namespace="prefix", repo="image-name", tag="sha256:12345"), "repository.com/prefix/image-name:latest": ImageName(registry="repository.com", namespace="prefix", repo="image-name"), "image-name:latest": ImageName(repo="image-name"),