Example #1
    def _get_manifest_list(self, image: ImageName) -> requests.Response:
        """try to figure out manifest list"""
        if image in self.manifest_list_cache:
            return self.manifest_list_cache[image]

        reg_client = self._get_registry_client(image.registry)

        manifest_list = reg_client.get_manifest_list(image)
        if '@sha256:' in str(image) and not manifest_list:
            # we want to adjust the tag only for manifest list fetching
            image = image.copy()

            try:
                config_blob = reg_client.get_config_from_registry(
                    image, image.tag)
            except (HTTPError, RetryError, Timeout) as ex:
                self.log.warning('Unable to fetch config for %s, got error %s',
                                 image, ex.response.status_code)
                raise RuntimeError(
                    'Unable to fetch config for base image') from ex

            release = config_blob['config']['Labels']['release']
            version = config_blob['config']['Labels']['version']
            docker_tag = "%s-%s" % (version, release)
            image.tag = docker_tag

            manifest_list = reg_client.get_manifest_list(image)
        self.manifest_list_cache[image] = manifest_list
        return self.manifest_list_cache[image]
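A minimal sketch of the version-release fallback tag derived above (assuming, as elsewhere in atomic-reactor, that ImageName is importable from osbs.utils; the config blob is a hypothetical stand-in for what get_config_from_registry returns):

from osbs.utils import ImageName  # assumed import path

# hypothetical config blob shaped like the registry response used above
config_blob = {'config': {'Labels': {'version': '1.0', 'release': '99'}}}

image = ImageName.parse('registry.example.com/ns/base:latest')
image = image.copy()  # the plugin copies so the caller's ImageName stays untouched
labels = config_blob['config']['Labels']
image.tag = '%s-%s' % (labels['version'], labels['release'])
print(image.to_str())  # expected: registry.example.com/ns/base:1.0-99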
Example #2
    def test_update_dockerfile_images_from_config(self, tmp_path, images_exist, organization):
        config = REQUIRED_CONFIG

        if organization:
            config += "\nregistries_organization: " + organization

        config_yaml = tmp_path / 'config.yaml'
        config_yaml.write_text(dedent(config), "utf-8")

        if images_exist:
            parent_images = ['parent:latest', 'base:latest']
            if organization:
                expect_images = [ImageName.parse('source_registry.com/organization/base:latest'),
                                 ImageName.parse('source_registry.com/organization/parent:latest')]
            else:
                expect_images = [ImageName.parse('source_registry.com/base:latest'),
                                 ImageName.parse('source_registry.com/parent:latest')]
        else:
            parent_images = []

        dockerfile_images = DockerfileImages(parent_images)

        conf = Configuration(config_path=str(config_yaml))
        conf.update_dockerfile_images_from_config(dockerfile_images)

        if images_exist:
            assert len(dockerfile_images) == 2
            assert dockerfile_images.keys() == expect_images
        else:
            assert not dockerfile_images
Example #3
    def test_base_image_missing_labels(self, workflow, koji_session, remove_labels,
                                       exp_result, external, caplog):
        base_tag = ImageName.parse('base:stubDigest')

        base_inspect = {INSPECT_CONFIG: {'Labels': BASE_IMAGE_LABELS_W_ALIASES.copy()}}
        parent_inspect = {INSPECT_CONFIG: {'Labels': BASE_IMAGE_LABELS_W_ALIASES.copy()}}
        workflow.builder.set_inspection_data(base_inspect)
        workflow.builder.set_parent_inspection_data(base_tag, parent_inspect)

        for label in remove_labels:
            del workflow.builder._inspection_data[INSPECT_CONFIG]['Labels'][label]
            del workflow.builder._parent_inspection_data[base_tag][INSPECT_CONFIG]['Labels'][label]

        if not exp_result:
            if not external:
                with pytest.raises(PluginFailedException) as exc:
                    self.run_plugin_with_args(workflow, expect_result=exp_result,
                                              external_base=external)
                assert 'Was this image built in OSBS?' in str(exc.value)
            else:
                result = {PARENT_IMAGES_KOJI_BUILDS: {ImageName.parse('base'): None}}
                self.run_plugin_with_args(workflow, expect_result=result,
                                          external_base=external)
                assert 'Was this image built in OSBS?' in caplog.text
        else:
            self.run_plugin_with_args(workflow, expect_result=exp_result)
Example #4
    def test_base_image_missing_labels(self, workflow, koji_session, remove_labels,
                                       exp_result, external, caplog):
        base_tag = ImageName.parse('base:stubDigest')

        base_inspect = {INSPECT_CONFIG: {'Labels': BASE_IMAGE_LABELS_W_ALIASES.copy()}}
        parent_inspect = {INSPECT_CONFIG: {'Labels': BASE_IMAGE_LABELS_W_ALIASES.copy()}}

        for label in remove_labels:
            del base_inspect[INSPECT_CONFIG]['Labels'][label]
            del parent_inspect[INSPECT_CONFIG]['Labels'][label]

        flexmock(workflow.imageutil).should_receive('base_image_inspect').and_return(base_inspect)
        (flexmock(workflow.imageutil)
         .should_receive('get_inspect_for_image')
         .with_args(base_tag)
         .and_return(parent_inspect))

        if not exp_result:
            if not external:
                with pytest.raises(PluginFailedException) as exc:
                    self.run_plugin_with_args(workflow, expect_result=exp_result,
                                              external_base=external)
                assert 'Was this image built in OSBS?' in str(exc.value)
            else:
                result = {
                    PARENT_IMAGES_KOJI_BUILDS: {
                        ImageName.parse('base').to_str(): None,
                    }
                }
                self.run_plugin_with_args(workflow, expect_result=result,
                                          external_base=external)
                assert 'Was this image built in OSBS?' in caplog.text
        else:
            self.run_plugin_with_args(workflow, expect_result=exp_result)
Example #5
    def test_save_and_load(self, tmpdir):
        """Test save workflow data and then load them back properly."""
        tag_conf = TagConf()
        tag_conf.add_floating_image(ImageName.parse("registry/image:latest"))
        tag_conf.add_primary_image(ImageName.parse("registry/image:1.0"))

        wf_data = ImageBuildWorkflowData(
            dockerfile_images=DockerfileImages(["scratch", "registry/f:35"]),
            # Test object in dict values is serialized
            tag_conf=tag_conf,
            plugins_results={
                "plugin_a": {
                    'parent-images-koji-builds': {
                        ImageName(repo='base', tag='latest').to_str(): {
                            'id': 123456789,
                            'nvr': 'base-image-1.0-99',
                            'state': 1,
                        },
                    },
                },
                "tag_and_push": [
                    # Such object in a list should be handled properly.
                    ImageName(registry="localhost:5000",
                              repo='image',
                              tag='latest'),
                ],
                "image_build": {
                    "logs": ["Build succeeds."]
                },
            },
            koji_upload_files=[
                {
                    "local_filename": "/path/to/build1.log",
                    "dest_filename": "x86_64-build.log",
                },
                {
                    "local_filename": "/path/to/dir1/remote-source.tar.gz",
                    "dest_filename": "remote-source.tar.gz",
                },
            ])

        context_dir = ContextDir(Path(tmpdir.join("context_dir").mkdir()))
        wf_data.save(context_dir)

        assert context_dir.workflow_json.exists()

        # Verify the saved data matches the schema
        saved_data = json.loads(context_dir.workflow_json.read_bytes())
        try:
            validate_with_schema(saved_data, "schemas/workflow_data.json")
        except osbs.exceptions.OsbsValidationException as e:
            pytest.fail(
                f"The dumped workflow data does not match JSON schema: {e}")

        # Load and verify the loaded data
        loaded_wf_data = ImageBuildWorkflowData.load_from_dir(context_dir)

        assert wf_data.dockerfile_images == loaded_wf_data.dockerfile_images
        assert wf_data.tag_conf == loaded_wf_data.tag_conf
        assert wf_data.plugins_results == loaded_wf_data.plugins_results
Example #6
    def test_known_vs_other_annotations(self):
        # All annotations must be found and replaced exactly once; the heuristic
        # must not look in keys that are known pullspec sources
        data = {
            'kind': 'ClusterServiceVersion',
            'metadata': {
                'annotations': {
                    'containerImage': 'a.b/c:1',
                    'notContainerImage': 'a.b/c:1'
                }
            },
            'spec': {
                'metadata': {
                    'annotations': {
                        'containerImage': 'a.b/c:1',
                        'notContainerImage': 'a.b/c:1'
                    }
                }
            }
        }
        replacements = {
            ImageName.parse(old): ImageName.parse(new) for old, new in [
                ('a.b/c:1', 'd.e/f:1'),
                ('d.e/f:1', 'g.h/i:1'),
            ]
        }
        self._mock_check_csv()
        csv = OperatorCSV("original.yaml", data)
        csv.replace_pullspecs(replacements)

        assert csv.data["metadata"]["annotations"]["containerImage"] == 'd.e/f:1'
        assert csv.data["metadata"]["annotations"]["notContainerImage"] == 'd.e/f:1'
        assert csv.data["spec"]["metadata"]["annotations"]["containerImage"] == 'd.e/f:1'
        assert csv.data["spec"]["metadata"]["annotations"]["notContainerImage"] == 'd.e/f:1'
Example #7
def test_image_name_parse_image_name(caplog):
    warning = 'Attempting to parse ImageName test:latest as an ImageName'
    test = ImageName.parse("test")
    assert warning not in caplog.text
    image_test = ImageName.parse(test)
    assert warning in caplog.text
    assert test is image_test
Example #8
def test_pull_raises_retry_error(workflow, caplog):
    if MOCK:
        mock_docker(remember_images=True)

    tasker = DockerTasker(retry_times=1)
    workflow.builder = MockBuilder()
    image_name = ImageName.parse(IMAGE_RAISE_RETRYGENERATOREXCEPTION)
    base_image_str = "{}/{}:{}".format(SOURCE_REGISTRY, image_name.repo,
                                       'some')
    source_registry = image_name.registry
    workflow.builder.dockerfile_images = DockerfileImages([base_image_str])
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
        ReactorConfig({'version': 1,
                       'source_registry': {'url': source_registry,
                                           'insecure': True}})

    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': PullBaseImagePlugin.key,
            'args': {},
        }],
    )

    with pytest.raises(Exception):
        runner.run()

    exp_img = ImageName.parse(base_image_str)
    exp_img.registry = source_registry
    assert 'failed to pull image: {}'.format(exp_img.to_str()) in caplog.text
Example #9
    def _store_manifest_digest(self, image: ImageName, use_original_tag: bool) -> None:
        """Store media type and digest for manifest list or v2 schema 2 manifest digest"""
        image_str = image.to_str()
        manifest_list = self._get_manifest_list(image)
        reg_client = self._get_registry_client(image.registry)
        if manifest_list:
            digest_dict = get_checksums(BytesIO(manifest_list.content), ['sha256'])
            media_type = get_manifest_media_type('v2_list')
        else:
            digests_dict = reg_client.get_all_manifests(image, versions=('v2',))
            media_type = get_manifest_media_type('v2')
            try:
                manifest_digest_response = digests_dict['v2']
            except KeyError as exc:
                raise RuntimeError(
                    'Unable to fetch manifest list or '
                    'v2 schema 2 digest for {} (Does image exist?)'.format(image_str)
                ) from exc

            digest_dict = get_checksums(BytesIO(manifest_digest_response.content), ['sha256'])

        manifest_digest = 'sha256:{}'.format(digest_dict['sha256sum'])
        parent_digests = {media_type: manifest_digest}
        if use_original_tag:
            # image tag may have been replaced with a ref for autorebuild; use original tag
            # to simplify fetching parent_images_digests data in other plugins
            image = image.copy()
            base_image_key: ImageName = self.workflow.data.dockerfile_images.base_image_key
            image.tag = base_image_key.tag
            image_str = image.to_str()

        self.workflow.data.parent_images_digests[image_str] = parent_digests
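The digest stored above is just the sha256 checksum of the raw manifest bytes; a rough standalone equivalent of that step using hashlib (illustrative only, not the plugin's get_checksums helper itself) could look like:

import hashlib

manifest_content = b'{"schemaVersion": 2, "manifests": []}'  # stand-in for response.content
manifest_digest = 'sha256:%s' % hashlib.sha256(manifest_content).hexdigest()
media_type = 'application/vnd.docker.distribution.manifest.list.v2+json'
parent_digests = {media_type: manifest_digest}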
Example #10
def test_get_built_images(workflow, manifest_version):
    MockEnv(workflow).set_check_platforms_result(["ppc64le", "x86_64"])
    workflow.data.tag_conf.add_unique_image(UNIQUE_IMAGE)

    _, platform_digests = mock_registries(
        [REGISTRY_V2],
        {
            "ppc64le": {REGISTRY_V2: ["namespace/httpd:2.4-ppc64le"]},
            "x86_64": {REGISTRY_V2: ["namespace/httpd:2.4-x86_64"]},
        },
        schema_version=manifest_version,
    )

    ppc_digest = platform_digests["ppc64le"]["digests"][0]["digest"]
    x86_digest = platform_digests["x86_64"]["digests"][0]["digest"]

    flexmock(ManifestUtil).should_receive("__init__")  # and do nothing, this test doesn't use it

    plugin = GroupManifestsPlugin(workflow)
    session = RegistrySession(REGISTRY_V2)

    assert plugin.get_built_images(session) == [
        BuiltImage(
            pullspec=ImageName.parse(f"{UNIQUE_IMAGE}-ppc64le"),
            platform="ppc64le",
            manifest_digest=ppc_digest,
            manifest_version=manifest_version,
        ),
        BuiltImage(
            pullspec=ImageName.parse(f"{UNIQUE_IMAGE}-x86_64"),
            platform="x86_64",
            manifest_digest=x86_digest,
            manifest_version=manifest_version,
        ),
    ]
Example #11
def mock_environment(tmpdir, primary_images=None,
                     annotations=None):
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(source=SOURCE)
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', StubInsideBuilder())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', StubInsideBuilder())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    if primary_images:
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    workflow.tag_conf.add_floating_image('namespace/httpd:floating')
    workflow.build_result = BuildResult(image_id='123456', annotations=annotations or {})

    return tasker, workflow
Example #12
def mock_environment(tmpdir,
                     workflow,
                     primary_images=None,
                     floating_images=None,
                     manifest_results=None,
                     annotations=None):
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', StubInsideBuilder())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', StubInsideBuilder())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    if primary_images:
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    if floating_images:
        workflow.tag_conf.add_floating_images(floating_images)

    workflow.build_result = BuildResult(image_id='123456',
                                        annotations=annotations or {})
    workflow.postbuild_results = {}
    if manifest_results:
        workflow.postbuild_results[
            PLUGIN_GROUP_MANIFESTS_KEY] = manifest_results

    return tasker, workflow
Example #13
    def __init__(self, failed=False, image_id=None):
        self.tasker = None
        self.base_image = ImageName(repo='Fedora', tag='29')
        self.image_id = image_id or 'asd'
        self.image = ImageName.parse('image')
        self.failed = failed
        self.df_path = 'some'
        self.df_dir = 'some'
Example #14
    def test_deep_digests_with_requested_arches(self, workflow, koji_session, caplog,
                                                manifest_list, requested_platforms, expected_logs,
                                                not_expected_logs):  # noqa
        registry = 'example.com'
        image_str = '{}/base:latest'.format(registry)
        extra = {'image': {'index': {'digests': {V2_LIST: 'stubDigest'}}}}
        parent_tag = 'notExpectedDigest'
        workflow.prebuild_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = requested_platforms

        koji_build = dict(nvr='base-image-1.0-99',
                          id=KOJI_BUILD_ID,
                          state=KOJI_STATE_COMPLETE,
                          extra=extra)
        (koji_session.should_receive('getBuild')
         .and_return(koji_build))
        archives = [{
            'btype': 'image',
            'extra': {
                'docker': {
                    'config': {
                        'architecture': 'amd64'
                    },
                    'digests': {
                        V2: 'stubDigest'}}}}]
        (koji_session.should_receive('listArchives')
         .and_return(archives))

        name, version, release = koji_build['nvr'].rsplit('-', 2)
        labels = {'com.redhat.component': name, 'version': version, 'release': release}

        image_inspect = {INSPECT_CONFIG: {'Labels': labels}}
        flexmock(workflow.builder, parent_image_inspect=image_inspect)

        if manifest_list:
            response = flexmock(content=json.dumps(manifest_list))
        else:
            response = {}
        (flexmock(atomic_reactor.util.RegistryClient)
         .should_receive('get_manifest')
         .and_return((response, None)))

        expected_result = {BASE_IMAGE_KOJI_BUILD: KOJI_BUILD,
                           PARENT_IMAGES_KOJI_BUILDS: {
                               ImageName.parse(image_str): KOJI_BUILD}}

        workflow.builder.parent_images_digests = {image_str: {V2_LIST: parent_tag}}
        workflow.builder.set_dockerfile_images([image_str])
        image_for_key = ImageName.parse(image_str)
        image_for_key.tag = parent_tag
        workflow.builder.dockerfile_images[image_str] = image_for_key.to_str()
        self.run_plugin_with_args(workflow, expect_result=expected_result, deep_inspection=True,
                                  pull_registries=[{'url': registry}])

        for log in expected_logs:
            assert log in caplog.text

        for log in not_expected_logs:
            assert log not in caplog.text
Example #15
def test_image_name_comparison():
    # make sure that both "==" and "!=" are implemented right on both Python major releases
    i1 = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='1')
    i2 = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='1')
    assert i1 == i2
    assert not i1 != i2

    i2 = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='2')
    assert not i1 == i2
    assert i1 != i2
Example #16
    def _pin_to_digest(self, image: ImageName,
                       digests: Dict[str, str]) -> ImageName:
        v2_list_type = get_manifest_media_type('v2_list')
        v2_type = get_manifest_media_type('v2')
        # one of v2_list, v2 *must* be present in the dict
        raw_digest = digests.get(v2_list_type) or digests[v2_type]

        digest = raw_digest.split(':', 1)[1]
        image_name = image.to_str(tag=False)
        new_image = '{}@sha256:{}'.format(image_name, digest)
        return ImageName.parse(new_image)
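A self-contained sketch of the same pinning logic (assuming ImageName comes from osbs.utils; the literal media-type strings stand in for get_manifest_media_type('v2_list') and get_manifest_media_type('v2'), and the digests dict is hypothetical):

from osbs.utils import ImageName  # assumed import path

V2_LIST = 'application/vnd.docker.distribution.manifest.list.v2+json'
V2 = 'application/vnd.docker.distribution.manifest.v2+json'

digests = {V2_LIST: 'sha256:' + 'a' * 64}  # hypothetical digests for the image
image = ImageName.parse('registry.example.com/ns/base:latest')

raw_digest = digests.get(V2_LIST) or digests[V2]  # prefer the manifest-list digest
digest = raw_digest.split(':', 1)[1]
pinned = ImageName.parse('%s@sha256:%s' % (image.to_str(tag=False), digest))
print(pinned)  # registry.example.com/ns/base@sha256:aaa...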
Example #17
def _find_image(img, ignore_registry=False):
    tagged_img = ImageName.parse(img).to_str(explicit_tag=True)
    for im in mock_images:
        im_name = im['RepoTags'][0]
        if im_name == tagged_img:
            return im
        if ignore_registry:
            im_name_wo_reg = ImageName.parse(im_name).to_str(registry=False)
            if im_name_wo_reg == tagged_img:
                return im

    return None
Example #18
    def _get_replacement_pullspecs_from_csv_modifications(self, pullspecs):
        """Replace components of pullspecs based on externally provided CSV modifications

        :param pullspecs: a list of pullspecs.
        :type pullspecs: list[ImageName]
        :return: a list of replacement result. Each of the replacement result
            is a mapping containing key/value pairs:

            * ``original``: ImageName, the original pullspec.
            * ``new``: ImageName, the replaced/non-replaced pullspec.
            * ``pinned``: bool, indicate whether the tag is replaced with a
                          specific digest.
            * ``replaced``: bool, indicate whether the new pullspec has change
                            of repository or registry.

        :rtype: list[dict[str, ImageName or bool]]
        :raises RuntimeError: if provided CSV modification doesn't contain all
                              required pullspecs or contain different ones
        """
        operator_csv_modifications = self._fetch_operator_csv_modifications()
        mod_pullspec_repl = operator_csv_modifications.get('pullspec_replacements', [])

        # check if modification data contains all required pullspecs
        pullspecs_set = set(pullspecs)
        mod_pullspecs_set = set((ImageName.parse(p['original']) for p in mod_pullspec_repl))

        missing = pullspecs_set - mod_pullspecs_set
        if missing:
            raise RuntimeError(
                f"Provided operator CSV modifications misses following pullspecs: "
                f"{', '.join(sorted(str(p) for p in missing))}"
            )

        extra = mod_pullspecs_set - pullspecs_set
        if extra:
            raise RuntimeError(
                f"Provided operator CSV modifications defines extra pullspecs: "
                f"{','.join(sorted(str(p) for p in extra))}"
            )

        # Copy replacements from provided CSV modifications file, fill missing 'replaced' filed
        replacements = [
            {
                'original': ImageName.parse(repl['original']),
                'new': ImageName.parse(repl['new']),
                'pinned': repl['pinned'],
                'replaced': repl['original'] != repl['new']
            }
            for repl in mod_pullspec_repl
        ]

        return replacements
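For context, one entry of a hypothetical pullspec_replacements list and the replacement result built from it might look like this; note that 'replaced' is simply set whenever the two pullspec strings differ:

from osbs.utils import ImageName  # assumed import path

repl = {  # hypothetical entry from operator_csv_modifications['pullspec_replacements']
    'original': 'registry.example.com/ns/app:1.0',
    'new': 'registry.example.com/ns/app@sha256:' + 'b' * 64,
    'pinned': True,
}

result = {
    'original': ImageName.parse(repl['original']),
    'new': ImageName.parse(repl['new']),
    'pinned': repl['pinned'],
    'replaced': repl['original'] != repl['new'],  # True here: the string changed
}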
Example #19
    def test_as_dict(self):
        tag_conf = TagConf()
        tag_conf.add_primary_image('r.fp.o/f:35')
        tag_conf.add_floating_image('ns/img:latest')
        tag_conf.add_floating_image('ns1/img2:devel')
        expected = {
            'primary_images': [ImageName.parse('r.fp.o/f:35')],
            'unique_images': [],
            'floating_images': [
                ImageName.parse('ns/img:latest'),
                ImageName.parse('ns1/img2:devel'),
            ],
        }
        assert expected == tag_conf.as_dict()
Example #20
        def workflow_callback(workflow):
            workflow = self.prepare(workflow, mock_get_manifest_list=False)
            release = 'rel1'
            version = 'ver1'
            config_blob = {'config': {'Labels': {'release': release, 'version': version}}}
            (flexmock(atomic_reactor.util.RegistryClient)
             .should_receive('get_config_from_registry')
             .and_return(config_blob)
             .times(0 if sha_is_manifest_list else 2))

            manifest_list = {
                'manifests': [
                    {'platform': {'architecture': 'amd64'}, 'digest': 'sha256:123456'},
                    {'platform': {'architecture': 'ppc64le'}, 'digest': 'sha256:654321'},
                ]
            }

            manifest_tag = SOURCE_REGISTRY + '/' + BASE_IMAGE_W_SHA
            base_image_result = ImageName.parse(manifest_tag)
            manifest_image_original = base_image_result.copy()

            if sha_is_manifest_list:
                (flexmock(atomic_reactor.util.RegistryClient)
                 .should_receive('get_manifest_list')
                 .with_args(manifest_image_original)
                 .and_return(flexmock(json=lambda: manifest_list,
                                      content=json.dumps(manifest_list).encode('utf-8')))
                 .once())
            else:
                (flexmock(atomic_reactor.util.RegistryClient)
                 .should_receive('get_manifest_list')
                 .with_args(manifest_image_original)
                 .and_return(None)
                 .times(2))
                docker_tag = '{}-{}'.format(version, release)
                manifest_tag = '{}/{}:{}'. \
                    format(SOURCE_REGISTRY,
                           BASE_IMAGE_W_SHA[:BASE_IMAGE_W_SHA.find('@sha256')],
                           docker_tag)
                base_image_result = ImageName.parse(manifest_tag)
                manifest_image_new = base_image_result.copy()
                (flexmock(atomic_reactor.util.RegistryClient)
                 .should_receive('get_manifest_list')
                 .with_args(manifest_image_new)
                 .and_return(flexmock(json=lambda: manifest_list,
                                      content=json.dumps(manifest_list).encode('utf-8')))
                 .times(2))
            return workflow
Example #21
    def add_unique_image(self, image: Union[str, "ImageName"]) -> None:
        """add image with unpredictable name

        :param image: str or ImageName, name of image (e.g. "namespace/httpd:2.4")
        :return: None
        """
        self._unique_images.append(ImageName.parse(image))
Example #22
        def workflow_callback(workflow):
            workflow = self.prepare(workflow, mock_get_manifest_list=False)
            workflow.prebuild_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = {
                'ppc64le'
            }
            release = 'rel1'
            version = 'ver1'
            config_blob = {
                'config': {
                    'Labels': {
                        'release': release,
                        'version': version
                    }
                }
            }
            (flexmock(atomic_reactor.util.RegistryClient).should_receive(
                'get_config_from_registry').and_return(config_blob).times(0))

            manifest_tag = SOURCE_REGISTRY + '/' + BASE_IMAGE_W_SHA
            base_image_result = ImageName.parse(manifest_tag)
            manifest_image = base_image_result.copy()

            (flexmock(atomic_reactor.util.RegistryClient).should_receive(
                'get_manifest_list').with_args(manifest_image).and_return(
                    flexmock(json=lambda: manifest_list,
                             content=json.dumps(manifest_list).encode(
                                 'utf-8'))).once())
            return workflow
Example #23
    def add_floating_image(self, image: Union[str, "ImageName"]) -> None:
        """add image with floating name

        :param image: str or ImageName, name of image (e.g. "namespace/httpd:2.4")
        :return: None
        """
        self._floating_images.append(ImageName.parse(image))
Example #24
 def add_platform(image: ImageName) -> ImageName:
     return ImageName(
         registry=image.registry,
         namespace=image.namespace,
         repo=image.repo,
         tag=f'{image.tag}-{platform}',
     )
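Since add_platform closes over a platform variable from its enclosing scope, a minimal standalone usage sketch (assuming ImageName comes from osbs.utils and using an example platform value) would be:

from osbs.utils import ImageName  # assumed import path

platform = 'x86_64'  # assumed to be provided by the enclosing scope in the real code

def add_platform(image: ImageName) -> ImageName:
    # append the platform suffix to the tag, keeping the rest of the pullspec intact
    return ImageName(
        registry=image.registry,
        namespace=image.namespace,
        repo=image.repo,
        tag=f'{image.tag}-{platform}',
    )

img = add_platform(ImageName.parse('registry.example.com/ns/httpd:2.4'))
print(img.to_str())  # expected: registry.example.com/ns/httpd:2.4-x86_64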
Example #25
    def commit_buildroot(self):
        """
        create image from buildroot

        :return: str, ID of the committed buildroot image
        """
        logger.info("committing buildroot")
        self.ensure_is_built()

        commit_message = "docker build of '%s' (%s)" % (self.image, self.uri)
        self.buildroot_image_name = ImageName(
            repo="buildroot-%s" % self.image,
            # save the time when image was built
            tag=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        self.buildroot_image_id = self.dt.commit_container(self.build_container_id, commit_message)
        return self.buildroot_image_id
Example #26
    def _replace_named_pullspec(self, pullspec, replacement_pullspecs):
        old = ImageName.parse(pullspec.image)
        new = replacement_pullspecs.get(old)
        if new is not None and old != new:
            log.debug("%s - Replaced pullspec for %s: %s -> %s",
                      self.path, pullspec.description, old, new)
            pullspec.image = new.to_str()  # `new` is an ImageName
Example #27
    def replace_repo(self, image):
        """
        Replace image repo based on OSBS site/user configuration for image registry

        Note: repo can also mean "namespace/repo"

        :param image: ImageName
        :return: ImageName
        """
        site_mapping = self._get_site_mapping(image.registry)
        if site_mapping is None and image.registry not in self.user_package_mappings:
            self.log.debug("repo_replacements not configured for %s", image.registry)
            return image

        package = self._get_component_name(image)
        mapping = self._get_final_mapping(image.registry, package)
        replacements = mapping.get(package)

        if replacements is None:
            raise RuntimeError("Replacement not configured for package {} (from {}). "
                               "Please specify replacement in {}"
                               .format(package, image, REPO_CONTAINER_CONFIG))
        elif len(replacements) > 1:
            options = ", ".join(replacements)
            raise RuntimeError("Multiple replacements for package {} (from {}): {}. "
                               "Please specify replacement in {}"
                               .format(package, image, options, REPO_CONTAINER_CONFIG))

        self.log.debug("Replacement for package %s: %s", package, replacements[0])
        replacement = ImageName.parse(replacements[0])
        return self._replace(image, namespace=replacement.namespace, repo=replacement.repo)
Example #28
    def _fetch_manifest_digest(self, image: ImageName) -> Dict[str, str]:
        """Fetch media type and digest for manifest list or v2 schema 2 manifest digest"""
        image_str = image.to_str()
        manifest_list = self._get_manifest_list(image)
        reg_client = self._get_registry_client(image.registry)
        if manifest_list:
            digest_dict = get_checksums(BytesIO(manifest_list.content),
                                        ['sha256'])
            media_type = get_manifest_media_type('v2_list')
        else:
            digests_dict = reg_client.get_all_manifests(image,
                                                        versions=('v2', ))
            media_type = get_manifest_media_type('v2')
            try:
                manifest_digest_response = digests_dict['v2']
            except KeyError as exc:
                raise RuntimeError(
                    'Unable to fetch manifest list or '
                    'v2 schema 2 digest for {} (Does image exist?)'.format(
                        image_str)) from exc

            digest_dict = get_checksums(
                BytesIO(manifest_digest_response.content), ['sha256'])

        manifest_digest = 'sha256:{}'.format(digest_dict['sha256sum'])
        parent_digest = {media_type: manifest_digest}
        return parent_digest
Example #29
def cmd_list_builds(args, osbs):
    kwargs = {}
    if args.running:
        kwargs['running'] = args.running

    if args.from_json:
        with open(args.from_json) as fp:
            builds = [BuildResponse(build, osbs) for build in json.load(fp)]
    else:
        builds = osbs.list_builds(**kwargs)

    if args.output == 'json':
        json_output = []
        for build in builds:
            json_output.append(build.json)
        print_json_nicely(json_output)
    elif args.output == 'text':
        if args.columns:
            cols_to_display = args.columns.split(",")
        else:
            cols_to_display = CLI_LIST_BUILDS_DEFAULT_COLS
        data = [{
            "base_image": "BASE IMAGE NAME",
            "base_image_id": "BASE IMAGE ID",
            "commit": "COMMIT",
            "image": "IMAGE NAME",
            "unique_image": "UNIQUE IMAGE NAME",
            "image_id": "IMAGE ID",
            "koji_build_id": "KOJI BUILD ID",
            "name": "BUILD ID",
            "status": "STATUS",
            "time_created": "TIME CREATED",
        }]
        for build in sorted(builds,
                            key=lambda x: x.get_time_created_in_seconds()):
            unique_image = build.get_image_tag()
            try:
                image = \
                    ImageName.parse(build.get_repositories()["primary"][0]).to_str(registry=False)
            except (TypeError, KeyError, IndexError):
                image = ""  # "" or unique_image? failed builds don't have that ^
            if args.FILTER and args.FILTER not in image:
                continue
            if args.running and not build.is_in_progress():
                continue
            b = {
                "base_image": build.get_base_image_name() or '',
                "base_image_id": build.get_base_image_id() or '',
                "commit": build.get_commit_id(),
                "image": image,
                "unique_image": unique_image,
                "image_id": build.get_image_id() or '',
                "koji_build_id": build.get_koji_build_id() or '',
                "name": build.get_build_name(),
                "status": build.status,
                "time_created": build.get_time_created(),
            }
            data.append(b)
        tp = TablePrinter(data, cols_to_display)
        tp.render()
Example #30
    def tag_image(self, image, target_image, force=False):
        """
        tag provided image with specified image_name, registry and tag

        :param image: str or ImageName, image to tag
        :param target_image: ImageName, new name for the image
        :param force: bool, force tag the image?
        :return: str, image (reg.com/img:v1)
        """
        logger.info("tagging image '%s' as '%s'", image, target_image)
        logger.debug("image = '%s', target_image_name = '%s'", image, target_image)
        if not isinstance(image, ImageName):
            image = ImageName.parse(image)

        if image != target_image:
            response = self.d.tag(
                image.to_str(),
                target_image.to_str(tag=False),
                tag=target_image.tag,
                force=force)  # returns True/False
            if not response:
                logger.error("failed to tag image")
                raise RuntimeError("Failed to tag image '%s': target_image = '%s'" %
                                   image.to_str(), target_image)
        else:
            logger.debug('image already tagged correctly, nothing to do')
        return target_image.to_str()  # this will be the proper name, not just repo/img