# Example #1
    def tag_manifest_into_registry(self, session, worker_digest):
        """
        Tags the manifest identified by worker_digest into session.registry with all the
        configured tags found in workflow.tag_conf.

        :param session: registry session object providing .registry and .insecure
        :param worker_digest: dict with 'digest' and 'repository' keys describing
                              the manifest a worker build pushed
        :raises RuntimeError: if the manifest media type is neither Docker
                              schema 2 nor OCI v1
        """
        self.log.info("%s: Tagging manifest", session.registry)

        digest = worker_digest['digest']
        source_repo = worker_digest['repository']

        # Fetch the raw manifest once; it is re-stored under every configured tag.
        image_manifest, _, media_type, _ = self.get_manifest(
            session, source_repo, digest)
        if media_type == MEDIA_TYPE_DOCKER_V2_SCHEMA2:
            # NOTE(review): a schema-2 digest is recorded under the v1 slot here
            # (compare the oci branch below) — presumably what downstream plugins
            # expect, but confirm this is not a typo for v2.
            digests = ManifestDigest(v1=digest)
        elif media_type == MEDIA_TYPE_OCI_V1:
            digests = ManifestDigest(oci=digest)
        else:
            raise RuntimeError(
                "Unexpected media type found in worker repository: {}".format(
                    media_type))

        # Register the target registry in push_conf so later plugins can look
        # up the digests recorded below.
        push_conf_registry = self.workflow.push_conf.add_docker_registry(
            session.registry, insecure=session.insecure)
        for image in self.workflow.tag_conf.images:
            target_repo = image.to_str(registry=False, tag=False)
            self.store_manifest_in_repository(session,
                                              image_manifest,
                                              media_type,
                                              source_repo,
                                              target_repo,
                                              tag=image.tag)

            # add a tag for any plugins running later that expect it
            push_conf_registry.digests[image.tag] = digests
def prepare(pulp_registries=None, docker_registries=None):
    """
    Build a mocked DockerBuildWorkflow for the tests.

    Registers the given pulp and docker registries on the workflow's push_conf,
    stubs out OSBS annotation/label updates, and fakes the BUILD environment.
    Returns the prepared workflow.
    """
    if pulp_registries is None:
        pulp_registries = (("test", LOCALHOST_REGISTRY),)
    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY,)

    # OSBS annotation/label updates become no-ops during the tests.
    def _noop_set_annotations(build_id, annotations):
        pass

    def _noop_update_labels(build_id, labels):
        pass

    env = deepcopy(os.environ)
    env["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, set_annotations_on_build=_noop_set_annotations)
    flexmock(OSBS, update_labels_on_build=_noop_update_labels)
    (flexmock(osbs.conf)
     .should_call("Configuration")
     .with_args(namespace="namespace", conf_file=None, verify_ssl=True,
                openshift_url="http://example.com/", openshift_uri="http://example.com/",
                use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(env)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, "test-image")

    for name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(name, crane_uri)

    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    # Pre-populate digests as the tag-and-push phase would have done.
    for registry_uri in docker_registries:
        registry = workflow.push_conf.add_docker_registry(registry_uri)
        registry.digests[TEST_IMAGE] = ManifestDigest(v1='not-used', v2=DIGEST1)
        registry.digests["namespace/image:asd123"] = ManifestDigest(v1='not-used',
                                                                    v2=DIGEST2)

    workflow.builder = X
    workflow._base_image_inspect = {'Id': '01234567'}
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")

    return workflow
# Example #3
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir):
    """
    The plugin must issue one DELETE per unique digest saved for a requested
    registry, and report exactly that set of digests as deleted.
    """
    if MOCK:
        mock_docker()

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.builder = X

    # Plugin arguments per registry; secrets get a temporary .dockercfg.
    args_registries = {}
    for reg, use_secret in req_registries.items():
        reg_args = {}
        if use_secret:
            secret_dir = mkdtemp(dir=str(tmpdir))
            creds = {reg: {"username": "******", "password": reg}}
            with open(os.path.join(secret_dir, ".dockercfg"), "w+") as cfg_file:
                cfg_file.write(json.dumps(creds))
                cfg_file.flush()
            reg_args['secret'] = secret_dir
        args_registries[reg] = reg_args

    # Record the digests the build supposedly pushed.
    for reg, digests in saved_digests.items():
        registry = DockerRegistry(reg)
        for tag, dig in digests.items():
            registry.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
        workflow.push_conf._registries['docker'].append(registry)

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {'registries': args_registries},
        }]
    )

    # Expect exactly one DELETE per unique digest in a requested registry.
    expected_deleted = set()
    for reg, digests in saved_digests.items():
        if reg not in req_registries:
            continue
        for tag, dig in digests.items():
            if dig in expected_deleted:
                continue
            repo = tag.split(":")[0]
            url = f"https://{reg}/v2/{repo}/manifests/{dig}"
            auth_type = requests.auth.HTTPBasicAuth if req_registries[reg] else None
            (flexmock(requests)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202)))
            expected_deleted.add(dig)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == expected_deleted
# Example #4
def test_manifest_digest(v1, v2, v2_list, oci, oci_index, default):
    """Each version is readable as an attribute; unknown versions raise."""
    digests = ManifestDigest(v1=v1, v2=v2, v2_list=v2_list, oci=oci, oci_index=oci_index)
    for version, expected in (('v1', v1), ('v2', v2), ('v2_list', v2_list), ('oci', oci)):
        assert getattr(digests, version) == expected
    assert digests.default == default
    with pytest.raises(AttributeError):
        assert digests.no_such_version
# Example #5
    def tag_manifest_into_registry(self, session, digest, source_repo, configured_tags):
        """
        Tag the manifest referenced by ``digest`` (found in ``source_repo``)
        into session.registry under each tag in ``configured_tags``.

        Returns a (manifest, media_type, ManifestDigest) triple.
        Raises RuntimeError for an unsupported manifest media type.
        """
        self.log.info("%s: Tagging manifest", session.registry)

        manifest, _, media_type, _ = self.get_manifest(session, source_repo, digest)
        if media_type == MEDIA_TYPE_DOCKER_V2_SCHEMA2:
            manifest_digests = ManifestDigest(v1=digest)
        elif media_type == MEDIA_TYPE_OCI_V1:
            manifest_digests = ManifestDigest(oci=digest)
        else:
            raise RuntimeError("Unexpected media type {} found in source_repo: {}"
                               .format(media_type, source_repo))

        self.add_tag_and_manifest(session, manifest, media_type, manifest_digests,
                                  source_repo, configured_tags)
        return manifest, media_type, manifest_digests
# Example #6
def test_exceed_binary_image_size(image_size_limit, workflow):
    """
    With a configured binary_image size limit the plugin must raise
    ExceedsImageSizeError; without one (or with a zero limit) it must
    skip the size check and push normally.
    """
    config = {'version': 1, 'registries': [{'url': LOCALHOST_REGISTRY}]}
    if image_size_limit is not None:
        config['image_size_limit'] = image_size_limit

    workflow.plugin_workspace[ReactorConfigPlugin.key] = {
        WORKSPACE_CONF_KEY: ReactorConfig(config)
    }
    workflow.builder = StubInsideBuilder()
    workflow.builder.image_id = INPUT_IMAGE
    # fake layer sizes of the test image (totals 6000 bytes)
    workflow.layer_sizes = [
        {'diff_id': diff_id, 'size': size}
        for diff_id, size in (('12345', 1000), ('23456', 2000), ('34567', 3000))
    ]

    mock_docker()

    plugin = TagAndPushPlugin(DockerTasker(), workflow)

    size_check_disabled = image_size_limit is None or image_size_limit['binary_image'] == 0
    if size_check_disabled:
        # The plugin should skip the check on image size; mock the registry
        # interactions so the push path completes.
        (flexmock(atomic_reactor.plugins.post_tag_and_push)
         .should_receive('get_manifest_digests')
         .and_return(ManifestDigest({
             'v2': 'application/vnd.docker.distribution.manifest.list.v2+json',
         })))

        (flexmock(atomic_reactor.plugins.post_tag_and_push)
         .should_receive('get_config_from_registry'))

        assert workflow.image == plugin.run()[0].repo
    else:
        with pytest.raises(ExceedsImageSizeError):
            plugin.run()
# Example #7
def test_get_built_images_multiple_manifest_types(workflow):
    """get_built_images must fail when one platform image has several digests."""
    MockEnv(workflow).set_check_platforms_result(["x86_64"])
    workflow.data.tag_conf.add_unique_image(UNIQUE_IMAGE)

    # ManifestUtil is not exercised by this test; neuter its constructor.
    flexmock(ManifestUtil).should_receive("__init__")

    per_arch_image = ImageName.parse(f"{UNIQUE_IMAGE}-x86_64")
    (
        flexmock(RegistryClient)
        .should_receive("get_manifest_digests")
        .with_args(per_arch_image, versions=("v2", "oci"))
        .and_return(ManifestDigest({"v2": make_digest("foo"), "oci": make_digest("bar")}))
    )

    plugin = GroupManifestsPlugin(workflow)
    session = RegistrySession(REGISTRY_V2)

    expected_error = (
        f"Expected to find a single manifest digest for {UNIQUE_IMAGE}-x86_64, "
        "but found multiple: {'v2': .*, 'oci': .*}"
    )

    with pytest.raises(RuntimeError, match=expected_error):
        plugin.get_built_images(session)
# Example #8
def test_manifest_digest(v1, v2, default):
    """v1/v2 values round-trip as attributes and the right default is chosen."""
    digests = ManifestDigest(v1=v1, v2=v2)
    assert (digests.v1, digests.v2, digests.default) == (v1, v2, default)
# Example #9
def prepare(workflow, registry=None, no_dockerfile=True):
    """
    Configure *workflow* for the tests: stub OSBS annotation updates, wire a
    minimal reactor config with the given registry, register the standard
    floating/primary/unique images, and mock RegistryClient digest lookups.
    """
    registry = registry or LOCALHOST_REGISTRY

    # OSBS annotation updates become a no-op for the tests.
    def _noop_update_annotations(build_id, annotations):
        pass

    flexmock(OSBS, update_annotations_on_build=_noop_update_annotations)
    if no_dockerfile:
        os.remove(os.path.join(workflow.source.path, 'Dockerfile'))
    workflow.build_dir.init_build_dirs(["x86_64"], workflow.source)

    (flexmock(osbs.conf.Configuration)
     .should_call("__init__")
     .with_args(namespace=workflow.namespace, verify_ssl=True,
                openshift_url='http://example.com/', use_auth=True,
                conf_file=None))

    workflow.conf.conf = {
        'version': 1,
        'openshift': {
            'url': 'http://example.com/',
            'insecure': False,
            'auth': {'enable': True},
        },
        'registries': [{'url': registry, 'insecure': True}],
    }
    add_koji_map_in_workflow(workflow, hub_url='/', root_url='')

    tag_conf = workflow.data.tag_conf
    tag_conf.add_floating_image(f'{registry}/{TEST_IMAGE}')
    tag_conf.add_primary_image(f'{registry}/namespace/image:version-release')
    tag_conf.add_unique_image(f'{registry}/namespace/image:asd123')

    # get_manifest_digests answers per pullspec:
    # floating -> DIGEST1, primary -> None, unique -> DIGEST2
    digest_responses = [
        (f'{registry}/{TEST_IMAGE_NAME}',
         ManifestDigest(v1=DIGEST_NOT_USED, v2=DIGEST1)),
        (f'{registry}/namespace/image:version-release', None),
        (f'{registry}/namespace/image:asd123',
         ManifestDigest(v1=DIGEST_NOT_USED, v2=DIGEST2)),
    ]
    for pullspec, response in digest_responses:
        (flexmock(RegistryClient)
         .should_receive('get_manifest_digests')
         .with_args(image=ImageName.parse(pullspec),
                    versions=('v1', 'v2', 'v2_list', 'oci', 'oci_index'),
                    require_digest=True)
         .and_return(response))

    flexmock(workflow.imageutil).should_receive('base_image_inspect').and_return({'Id': '01234567'})
    workflow.build_logs = ["a", "b"]

    workflow.source = GitSource('git', 'git://fake-url.com/repo')
    flexmock(workflow.source).should_receive('commit_id').and_return('commit')
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator,
                                     manifest_list_digests, reactor_config_map):
    """
    Verify DeleteFromRegistryPlugin deletes exactly the digests recorded for
    the requested registries, in both worker mode (digests in push_conf) and
    orchestrator mode (digests in build-result annotations), optionally
    including group-manifest (manifest list) digests.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    # In orchestrator mode the digests come from worker-build annotations
    # rather than push_conf, so an OrchestrateBuildPlugin step is configured.
    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
        }]

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE,
                                   buildstep_plugins=buildstep_plugin, )
    setattr(workflow, 'builder', X)

    # Build per-registry plugin args and reactor-config entries; registries
    # that need auth get a temporary .dockercfg secret.
    args_registries = {}
    config_map_regiestries = []
    for reg, use_secret in req_registries.items():
        cm_reg = {'url': reg}
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
                cm_reg['auth'] = {'cfg_path': temp_dir}
        else:
            args_registries[reg] = {}
        config_map_regiestries.append(cm_reg)

    # Record the saved digests where the plugin will look for them:
    # annotations (orchestrator) or push_conf registries (worker).
    for reg, digests in saved_digests.items():
        if orchestrator:
            for tag, dig in digests.items():
                repo = tag.split(':')[0]
                t = tag.split(':')[1]
                ann_digests.append({
                    'digest': dig,
                    'tag': t,
                    'repository': repo,
                    'registry': reg,
                })
        else:
            r = DockerRegistry(reg)
            for tag, dig in digests.items():
                r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
            workflow.push_conf._registries['docker'].append(r)

    group_manifest_digests = {}
    if orchestrator:
        build_annotations = {'digests': ann_digests}
        annotations = {'worker-builds': {'x86_64': build_annotations}}
        setattr(workflow, 'build_result', Y)
        setattr(workflow.build_result, 'annotations', annotations)

        # group_manifest digest should be added only
        # if there are worker builds and images are pushed to one registry
        if len(req_registries) == 1 and len(saved_digests.keys()) == 1 and \
           all(saved_digests.values()):
            workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_list_digests
            for ml_repo, ml_digest in manifest_list_digests.items():
                for reg in req_registries:
                    if reg in saved_digests:
                        group_manifest_digests.setdefault(reg, {})
                        group_manifest_digests[reg] = saved_digests[reg].copy()
                        group_manifest_digests[reg][ml_repo] = ml_digest.default

    # Expected deletions: saved digests plus any group-manifest digests.
    result_digests = saved_digests.copy()
    result_digests.update(group_manifest_digests)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': config_map_regiestries})

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect exactly one DELETE per unique digest in a requested registry.
    deleted_digests = set()
    for reg, digests in result_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = HTTPRegistryAuth
            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202, ok=True, raise_for_status=lambda: None)))
            deleted_digests.add(dig)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == deleted_digests
def test_delete_from_registry_failures(tmpdir, status_code, reactor_config_map):
    """
    Exercise DeleteFromRegistryPlugin handling of HTTP status codes returned
    by the registry's DELETE endpoint:

    - 202 (ACCEPTED): the digests are reported as deleted
    - 520: the plugin fails and the runner raises PluginFailedException
    - anything else: the plugin succeeds but reports no deletions
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    req_registries = {DOCKER0_REGISTRY: True}
    saved_digests = {DOCKER0_REGISTRY: {'foo/bar:latest': DIGEST1}}

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    # Build per-registry plugin args and reactor-config entries; registries
    # that need auth get a temporary .dockercfg secret.
    args_registries = {}
    config_map_registries = []
    for reg, use_secret in req_registries.items():
        cm_reg = {'url': reg}
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
                cm_reg['auth'] = {'cfg_path': temp_dir}
        else:
            args_registries[reg] = {}
        # Fix: append inside the loop (as in the sibling test above) so every
        # registry — not only the last one — reaches the reactor config map,
        # and an empty req_registries no longer raises NameError on cm_reg.
        config_map_registries.append(cm_reg)

    # Record the digests the build supposedly pushed.
    for reg, digests in saved_digests.items():
        r = DockerRegistry(reg)
        for tag, dig in digests.items():
            r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
        workflow.push_conf._registries['docker'].append(r)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': config_map_registries})

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Mock each expected DELETE to return the parametrized status code.
    deleted_digests = set()
    for reg, digests in saved_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = HTTPRegistryAuth

            response = requests.Response()
            response.status_code = status_code

            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .and_return(response))

            deleted_digests.add(dig)

    if status_code == 520:
        with pytest.raises(PluginFailedException):
            result = runner.run()
            assert result[DeleteFromRegistryPlugin.key] == set([])
    else:
        result = runner.run()

        if status_code == requests.codes.ACCEPTED:
            assert result[DeleteFromRegistryPlugin.key] == deleted_digests
        else:
            assert result[DeleteFromRegistryPlugin.key] == set([])
# Example #12
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, image_name, logs, should_raise, has_config, use_secret):
    """
    End-to-end test of TagAndPushPlugin against a mocked docker client and a
    mocked v2 registry: the push either raises (should_raise) or records
    v1/v2 manifest digests — and optionally the image config — in push_conf.
    """
    if MOCK:
        mock_docker()
        # docker push streams the parametrized logs; login always succeeds.
        flexmock(docker.Client, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    # Optional registry secret: a throwaway .dockercfg with fake credentials.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******", "email": "*****@*****.**", "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Fake schema-2 manifest returned for manifest GETs.
    response_config_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    # Fake image config blob returned for the blob GET.
    response_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # When has_config is False the blob GET yields no config.
    if not has_config:
        response_json = None

    config_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE,)
    config_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # Canned responses: v1 and v2 manifest variants plus the config blob.
    config_response_config_v1 = requests.Response()
    (flexmock(config_response_config_v1,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                'Docker-Content-Digest': DIGEST_V1
              }))

    config_response_config_v2 = requests.Response()
    (flexmock(config_response_config_v2,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                'Docker-Content-Digest': DIGEST_V2
              }))

    blob_config = requests.Response()
    (flexmock(blob_config, raise_for_status=lambda: None, json=response_json))

    def custom_get(url, headers, **kwargs):
        # Route registry GETs to the canned responses, selecting the manifest
        # variant by the Accept header.
        if url == config_latest_url:
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v1+json':
                return config_response_config_v1

            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return config_response_config_v2

        if url == config_url:
            return config_response_config_v2

        if url == blob_url:
            return blob_config

    (flexmock(requests)
        .should_receive('get')
        .replace_with(custom_get))

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
# Example #13
def test_binary_build_get_output(no_v2_digest: bool, from_scratch: bool,
                                 is_flatpak: bool,
                                 workflow: DockerBuildWorkflow, tmpdir):
    """
    Verify get_output() builds the expected Koji image-archive metadata for a
    binary build: components, repositories (tag + digest pullspecs), layer
    sizes, tags, config blob and digests — with variations for from-scratch
    builds, flatpak builds, and images that only have an OCI digest.
    """
    platform = "x86_64"
    buildroot_id = f'{platform}-1'
    image_pullspec = ImageName.parse("ns/image:latest")
    expected_components = []

    # Non-scratch builds carry an RPM component list.
    if not from_scratch:
        package_list = [
            'python-docker-py;1.3.1;1.fc24;noarch;(none);191456;'
            '7c1f60d8cde73e97a45e0c489f4a3b26;1438058212;(none);(none);(none);(none)',
            'fedora-repos-rawhide;24;0.1;noarch;(none);2149;'
            'd41df1e059544d906363605d47477e60;1436940126;(none);(none);(none);(none)',
            'gpg-pubkey-doc;1.0;1;noarch;(none);1000;'
            '00000000000000000000000000000000;1436940126;(none);(none);(none);(none)'
        ]
        expected_components = parse_rpm_output(package_list)

    if is_flatpak:
        workflow.user_params['flatpak'] = True

    workflow.data.image_components = {platform: expected_components}

    if from_scratch:
        workflow.data.dockerfile_images = DockerfileImages(['scratch'])
        parent_id = None
    else:
        workflow.data.dockerfile_images = DockerfileImages(['fedora:35'])
        parent_id = 'parent-id'
        (flexmock(workflow.imageutil).should_receive(
            'base_image_inspect').with_args(platform).and_return(
                {'Id': parent_id}))

    # For verifying the tags in final metadata
    workflow.data.tag_conf.add_unique_image("ns/image:1")
    workflow.data.tag_conf.add_unique_image("ns/image:2")
    # This primary image is noise. For binary build, this should not be
    # included in the metadata.
    workflow.data.tag_conf.add_primary_image("ns/image:1-2")

    # Mock for ImageUtil.get_uncompressed_layer_sizes
    layer_sizes = [
        {
            "diff_id": 1,
            "size": 100
        },
        {
            "diff_id": 2,
            "size": 200
        },
    ]
    workflow.build_dir.init_build_dirs([platform], workflow.source)
    platform_dir = workflow.build_dir.platform_dir(platform)
    (flexmock(workflow.imageutil).should_receive(
        'get_uncompressed_image_layer_sizes').with_args(
            str(platform_dir.exported_squashed_image)).and_return(layer_sizes))

    workflow.conf.conf = {
        'registries': [
            {
                'url': 'https://registry.host/',
                'insecure': False
            },
        ],
    }

    # Mock get_inspect_for_image
    image_id = 'image-id-1234'
    (flexmock(
        workflow.imageutil).should_receive('get_inspect_for_image').with_args(
            image_pullspec, platform=platform).and_return({'Id': image_id}))

    # Mock get manifest digests
    image_manifest_digest = ManifestDigest(
        {'oci': 'oci-1234'} if no_v2_digest else {'v2': '1234'})
    (flexmock(RegistryClient).should_receive(
        'get_manifest_digests').and_return(image_manifest_digest))
    # Mock getting image config
    blob_config = {'oci': 'oci-1234'} if no_v2_digest else {'v2': '1234'}
    (flexmock(RegistryClient).should_receive(
        'get_config_and_id_from_registry').and_return((blob_config, None)))

    # Assume FetchDockerArchivePlugin has run and metadata of the
    # platform-specific built image archive has been saved.
    workflow.data.plugins_results[FetchDockerArchivePlugin.key] = {
        platform: {
            'type': IMAGE_TYPE_DOCKER_ARCHIVE
        }
    }

    output, output_file = get_output(workflow,
                                     buildroot_id,
                                     image_pullspec,
                                     platform,
                                     source_build=False)

    # Prepare expected metadata

    expected_repositories = sorted([
        # Pull image with a specific tag
        image_pullspec.to_str(),
        # Pull image with a specific digest
        f'{image_pullspec.to_str(tag=False)}@{image_manifest_digest.oci}'
        if no_v2_digest else
        f'{image_pullspec.to_str(tag=False)}@{image_manifest_digest.v2}',
    ])
    per_platform_image_tags = sorted(
        image.tag
        for image in workflow.data.tag_conf.get_unique_images_with_platform(
            platform))
    expected_metadata: Dict[str, Any] = {
        'buildroot_id': buildroot_id,
        'checksum_type': 'md5',
        'arch': platform,
        'type': 'docker-image',
        'components': expected_components,
        'extra': {
            'image': {
                'arch': platform
            },
            'docker': {
                'id': image_id,
                'repositories': expected_repositories,
                'layer_sizes': layer_sizes,
                'tags': per_platform_image_tags,
                'config': blob_config,
                'digests': None,  # Set later below
            },
        },
    }

    extra_docker = expected_metadata['extra']['docker']
    if not from_scratch:
        extra_docker['parent_id'] = parent_id

    # Digests are keyed by their media content type.
    extra_docker['digests'] = (
        {
            ManifestDigest.content_type['oci']: image_manifest_digest.oci
        } if no_v2_digest else {
            ManifestDigest.content_type['v2']: image_manifest_digest.v2
        })

    # Start assertions
    assert output_file is None
    assert len(output) == 1

    image_metadata = output[0].metadata

    # Assert these image metadata firstly, then remove them and assert the
    # rest. So, no need to mock anything for get_image_output.
    assert f'docker-image-{image_id}.x86_64.tar.gz' == image_metadata.pop(
        'filename')
    assert image_metadata.pop('filesize') > 0
    assert re.match(r'^[0-9a-f]+$', image_metadata.pop('checksum'))

    # Make it easier for comparison below
    extra_docker = image_metadata['extra']['docker']
    extra_docker['repositories'] = sorted(extra_docker['repositories'])

    assert expected_metadata == image_metadata
# Example #14
def prepare(docker_registries=None, before_dockerfile=False):
    """Build a DockerBuildWorkflow wired up with mocked OSBS interactions,
    tag configuration and per-registry manifest digests.

    :param docker_registries: iterable of registry URIs to register on the
        workflow's push_conf; defaults to the two local test registries
    :param before_dockerfile: when True, simulate the state before the
        Dockerfile exists (empty base-image inspection, no build logs)
    :return: the prepared workflow instance
    """
    if docker_registries is None:
        docker_registries = (LOCALHOST_REGISTRY, DOCKER0_REGISTRY)

    # OSBS build-update calls become no-ops for the duration of the test.
    flexmock(OSBS, update_annotations_on_build=lambda build_id, annotations: None)
    flexmock(OSBS, update_labels_on_build=lambda build_id, labels: None)

    env_copy = deepcopy(os.environ)
    env_copy["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')

    # Expect the osbs Configuration constructor to be called with exactly
    # these keyword arguments.
    expected_conf_kwargs = dict(
        namespace='namespace',
        verify_ssl=True,
        openshift_url='http://example.com/',
        use_auth=True,
        conf_file=None,
        build_json_dir=None,
    )
    (flexmock(osbs.conf.Configuration)
        .should_call("__init__")
        .with_args(**expected_conf_kwargs))

    flexmock(os)
    os.should_receive("environ").and_return(env_copy)  # pylint: disable=no-member

    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)

    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
        ReactorConfig({
            'version': 1,
            'openshift': {
                'url': 'http://example.com/',
                'insecure': False,
                'auth': {'enable': True},
            },
        })
    add_koji_map_in_workflow(workflow, hub_url='/', root_url='')

    workflow.tag_conf.add_floating_image(TEST_IMAGE)
    workflow.tag_conf.add_primary_image("namespace/image:version-release")
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    # Every requested registry gets fixed v1/v2 digests for both tags.
    for registry_uri in docker_registries:
        registry_conf = workflow.push_conf.add_docker_registry(registry_uri)
        registry_conf.digests[TEST_IMAGE_NAME] = ManifestDigest(v1=DIGEST_NOT_USED,
                                                                v2=DIGEST1)
        registry_conf.digests["namespace/image:asd123"] = ManifestDigest(
            v1=DIGEST_NOT_USED, v2=DIGEST2)

    if before_dockerfile:
        workflow.builder = XBeforeDockerfile()
        workflow.builder.base_image_inspect = {}
    else:
        workflow.builder = X()
        workflow.builder.base_image_inspect = {'Id': '01234567'}
        workflow.build_logs = ["a", "b"]

    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    # pylint: disable=no-member
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    # pylint: enable=no-member

    return workflow
Пример #15
0
def mock_environment(tmpdir,
                     session=None,
                     name=None,
                     component=None,
                     version=None,
                     release=None,
                     source=None,
                     build_process_failed=False,
                     docker_registry=True,
                     pulp_registries=0,
                     blocksize=None,
                     task_states=None,
                     additional_tags=None,
                     has_config=None,
                     prefer_schema1_digest=True):
    """Create a mocked (tasker, workflow) pair for koji-promote style tests.

    :param tmpdir: directory for the generated Dockerfile and image tarball
    :param session: Koji client session; a MockedClientSession is created when None
    :param name: image name used for primary/unique tags
    :param component: value for the BZComponent/com.redhat.component labels
    :param version: value for the Version/version labels
    :param release: value for the Release/release labels
    :param source: build source; defaults to a git source
    :param build_process_failed: when True, the workflow's build_result is a failure
    :param docker_registry: when True, register docker.example.com with digests
    :param pulp_registries: number of pulp registries to add
    :param blocksize: accepted for signature compatibility; unused here
    :param task_states: forwarded to MockedClientSession when one is created
    :param additional_tags: extra primary tags to add for ``name``
    :param has_config: when True, attach a config blob to the docker registry
    :param prefer_schema1_digest: store the fake digest as v1 (True) or v2 (False)
    :return: (tasker, workflow) tuple
    """
    if session is None:
        # BUG FIX: task_states was previously hard-coded to None here,
        # silently ignoring the caller-supplied value.
        session = MockedClientSession('', task_states=task_states)
    if source is None:
        source = GitSource('git', 'git://hostname/path')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    workflow.source = StubSource()
    workflow.builder = StubInsideBuilder().for_workflow(workflow)
    workflow.builder.image_id = '123456imageid'
    workflow.builder.set_inspection_data({'Id': base_image_id})
    setattr(workflow, 'tag_conf', TagConf())
    with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
        df.write(
            'FROM base\n'
            'LABEL BZComponent={component} com.redhat.component={component}\n'
            'LABEL Version={version} version={version}\n'
            'LABEL Release={release} release={release}\n'.format(
                component=component, version=version, release=release))
        workflow.builder.set_df_path(df.name)
    if name and version:
        workflow.tag_conf.add_unique_image(
            'user/test-image:{v}-timestamp'.format(v=version))
    if name and version and release:
        workflow.tag_conf.add_primary_images([
            "{0}:{1}-{2}".format(name, version, release),
            "{0}:{1}".format(name, version), "{0}:latest".format(name)
        ])

    if additional_tags:
        workflow.tag_conf.add_primary_images(
            ["{0}:{1}".format(name, tag) for tag in additional_tags])

    flexmock(subprocess, Popen=fake_Popen)
    flexmock(koji, ClientSession=lambda hub, opts: session)
    flexmock(GitSource)
    setattr(workflow, 'source', source)
    setattr(workflow.source, 'lg', X())
    setattr(workflow.source.lg, 'commit_id', '123456')
    setattr(workflow, 'push_conf', PushConf())
    if docker_registry:
        docker_reg = workflow.push_conf.add_docker_registry(
            'docker.example.com')

        for image in workflow.tag_conf.images:
            tag = image.to_str(registry=False)

            # With pulp in play (and schema1 preferred) the meaningful digest
            # is the v1 one; otherwise it is the v2 one.
            if pulp_registries and prefer_schema1_digest:
                docker_reg.digests[tag] = ManifestDigest(v1=fake_digest(image),
                                                         v2='sha256:not-used')
            else:
                docker_reg.digests[tag] = ManifestDigest(v1='sha256:not-used',
                                                         v2=fake_digest(image))

            if has_config:
                docker_reg.config = {
                    'config': {
                        'architecture': LOCAL_ARCH
                    },
                    'container_config': {}
                }

    # Only the count matters; the registry details are fixed.
    for _ in range(pulp_registries):
        workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')

    with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
        fp.write('x' * 2**12)
        setattr(workflow, 'exported_image_sequence',
                [{
                    'path': fp.name,
                    'type': IMAGE_TYPE_DOCKER_ARCHIVE
                }])

    if build_process_failed:
        workflow.build_result = BuildResult(
            logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
            fail_reason="not built")
    else:
        workflow.build_result = BuildResult(
            logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
            image_id="id1234")
    workflow.prebuild_plugins_conf = {}

    # NOTE(review): the second entry has ";24000" without the trailing ';'
    # that the first entry (";23000;") has — confirm this is intentional.
    workflow.image_components = parse_rpm_output([
        "name1;1.0;1;" + LOCAL_ARCH + ";0;2000;" + FAKE_SIGMD5.decode() +
        ";23000;"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
        "name2;2.0;1;" + LOCAL_ARCH + ";0;3000;" + FAKE_SIGMD5.decode() +
        ";24000"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
    ])

    return tasker, workflow
Пример #16
0
                wf_data.tag_conf.add_primary_image(image)
        wf_data.tag_conf.add_unique_image(primary_images[0])

    if floating_images:
        image: str
        for image in floating_images:
            wf_data.tag_conf.add_floating_image(image)

    return env


# Registry hostname used by tests exercising the Docker Registry v2 API.
REGISTRY_V2 = 'registry_v2.example.com'


GROUPED_V2_RESULTS = {
    "manifest_digest": ManifestDigest(v2_list="sha256:11c3ecdbfa"),
    "media_type": "application/vnd.docker.distribution.manifest.list.v2+json",
    "manifest": json.dumps({
        "manifests": [
            {
                "digest": "sha256:9dc3bbcd6c",
                "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
                "platform": {
                    "architecture": "amd64",
                    "os": "linux"
                },
                "size": 306
            },
            {
                "digest": "sha256:cd619643ae",
                "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
Пример #17
0
def mock_environment(tmpdir, session=None, name=None,
                     component=None, version=None, release=None,
                     source=None, build_process_failed=False,
                     is_rebuild=True, docker_registry=True,
                     pulp_registries=0, blocksize=None,
                     task_states=None, additional_tags=None,
                     has_config=None):
    """Create a mocked (tasker, workflow) pair for koji-import style tests.

    :param tmpdir: directory for the generated Dockerfile and image tarball
    :param session: Koji client session; a MockedClientSession is created when None
    :param name: image name used for primary/unique tags
    :param component: value for the BZComponent/com.redhat.component labels
    :param version: value for the Version/version labels
    :param release: value for the Release/release labels
    :param source: build source; defaults to a git source
    :param build_process_failed: when True, the workflow's build_result is a failure
    :param is_rebuild: stored as the CheckAndSetRebuildPlugin result
    :param docker_registry: when True, register docker.example.com with digests
    :param pulp_registries: number of pulp registries to add
    :param blocksize: accepted for signature compatibility; unused here
    :param task_states: forwarded to MockedClientSession when one is created
    :param additional_tags: extra primary tags to add for ``name``
    :param has_config: when True, attach a config blob to the docker registry
    :return: (tasker, workflow) tuple
    """
    if session is None:
        # BUG FIX: task_states was previously hard-coded to None here,
        # silently ignoring the caller-supplied value.
        session = MockedClientSession('', task_states=task_states)
    if source is None:
        source = GitSource('git', 'git://hostname/path')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
        df.write('FROM base\n'
                 'LABEL BZComponent={component} com.redhat.component={component}\n'
                 'LABEL Version={version} version={version}\n'
                 'LABEL Release={release} release={release}\n'
                 .format(component=component, version=version, release=release))
        setattr(workflow.builder, 'df_path', df.name)
    if name and version:
        workflow.tag_conf.add_unique_image('user/test-image:{v}-timestamp'
                                           .format(v=version))
    if name and version and release:
        workflow.tag_conf.add_primary_images(["{0}:{1}-{2}".format(name,
                                                                   version,
                                                                   release),
                                              "{0}:{1}".format(name, version),
                                              "{0}:latest".format(name)])

    if additional_tags:
        workflow.tag_conf.add_primary_images(["{0}:{1}".format(name, tag)
                                              for tag in additional_tags])

    flexmock(subprocess, Popen=fake_Popen)
    flexmock(koji, ClientSession=lambda hub, opts: session)
    flexmock(GitSource)
    (flexmock(OSBS)
        .should_receive('get_build_logs')
        .with_args(BUILD_ID)
        .and_return('build logs - \u2018 \u2017 \u2019'))
    (flexmock(OSBS)
        .should_receive('get_pod_for_build')
        .with_args(BUILD_ID)
        .and_return(MockedPodResponse()))
    setattr(workflow, 'source', source)
    setattr(workflow.source, 'lg', X())
    setattr(workflow.source.lg, 'commit_id', '123456')
    setattr(workflow, 'push_conf', PushConf())
    if docker_registry:
        docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')

        for image in workflow.tag_conf.images:
            tag = image.to_str(registry=False)
            # With pulp in play the meaningful digest is the v1 one;
            # otherwise it is the v2 one.
            if pulp_registries:
                docker_reg.digests[tag] = ManifestDigest(v1=fake_digest(image),
                                                         v2='sha256:not-used')
            else:
                docker_reg.digests[tag] = ManifestDigest(v1='sha256:not-used',
                                                         v2=fake_digest(image))

            if has_config:
                docker_reg.config = {
                    'config': {'architecture': 'x86_64'},
                    'container_config': {}
                }

    # Only the count matters; the registry details are fixed.
    for _ in range(pulp_registries):
        workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')

    with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
        fp.write('x' * 2**12)
        setattr(workflow, 'exported_image_sequence', [{'path': fp.name}])

    if build_process_failed:
        workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
                                            fail_reason="not built")
    else:
        workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
                                            image_id="id1234")
    workflow.prebuild_plugins_conf = {}
    workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_rebuild
    # NOTE(review): the second entry has ";24000" without the trailing ';'
    # that the first entry (";23000;") has — confirm this is intentional.
    workflow.postbuild_results[PostBuildRPMqaPlugin.key] = [
        "name1;1.0;1;x86_64;0;2000;" + FAKE_SIGMD5.decode() + ";23000;"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
        "name2;2.0;1;x86_64;0;3000;" + FAKE_SIGMD5.decode() + ";24000"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
    ]

    return tasker, workflow
Пример #18
0
def prepare(
        pulp_registries=None,
        docker_registries=None,
        before_dockerfile=False,
        reactor_config_map=False):
    """Assemble a mocked DockerBuildWorkflow with pulp and docker registries,
    tag configuration and per-registry manifest digests.

    :param pulp_registries: iterable of (name, crane_uri) pairs; defaults to
        a single local test registry
    :param docker_registries: iterable of registry URIs; defaults to the
        local docker registry
    :param before_dockerfile: when True, simulate the state before the
        Dockerfile exists (no base-image inspection, no build logs)
    :param reactor_config_map: when True, store a reactor config with
        openshift settings in the plugin workspace
    :return: the prepared workflow instance
    """
    if pulp_registries is None:
        pulp_registries = (("test", LOCALHOST_REGISTRY),)
    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY,)

    # OSBS build-update calls become no-ops for the duration of the test.
    flexmock(OSBS, update_annotations_on_build=lambda build_id, annotations: None)
    flexmock(OSBS, update_labels_on_build=lambda build_id, labels: None)

    env_copy = deepcopy(os.environ)
    env_copy["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')

    # Expect the osbs Configuration constructor to be called with exactly
    # these keyword arguments.
    expected_conf_kwargs = dict(
        namespace='namespace',
        verify_ssl=True,
        openshift_url='http://example.com/',
        use_auth=True,
        conf_file=None,
        build_json_dir=None,
    )
    (flexmock(osbs.conf.Configuration)
        .should_call("__init__")
        .with_args(**expected_conf_kwargs))

    flexmock(os)
    os.should_receive("environ").and_return(env_copy)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
            ReactorConfig({
                'version': 1,
                'openshift': {
                    'url': 'http://example.com/',
                    'insecure': False,
                    'auth': {'enable': True},
                },
            })

    for pulp_name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(pulp_name, crane_uri)

    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    # Every requested registry gets fixed v1/v2 digests for both tags.
    for registry_uri in docker_registries:
        registry_conf = workflow.push_conf.add_docker_registry(registry_uri)
        registry_conf.digests[TEST_IMAGE] = ManifestDigest(v1=DIGEST_NOT_USED,
                                                           v2=DIGEST1)
        registry_conf.digests["namespace/image:asd123"] = ManifestDigest(
            v1=DIGEST_NOT_USED, v2=DIGEST2)

    if before_dockerfile:
        workflow.builder = XBeforeDockerfile()
    else:
        workflow.builder = X
        workflow._base_image_inspect = {'Id': '01234567'}
        workflow.build_logs = ["a", "b"]

    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")

    return workflow
Пример #19
0
def test_get_manifest_media_version_unknown():
    """An empty ManifestDigest carries no digest of any known media version,
    so the lookup must raise RuntimeError."""
    empty_digest = ManifestDigest()
    with pytest.raises(RuntimeError):
        get_manifest_media_version(empty_digest)
Пример #20
0
def test_tag_and_push_plugin(tmpdir, monkeypatch, image_name, logs,
                             should_raise, has_config, use_secret,
                             reactor_config_map, file_name,
                             dockerconfig_contents):
    """Run TagAndPushPlugin against a mocked docker registry and verify that
    the v1/v2 digests and (optionally) the config blob end up recorded on
    workflow.push_conf.

    The registry is mocked at the requests.Session level: custom_get answers
    manifest and config-blob URLs with canned responses whose media type is
    chosen from the request's Accept header.
    """
    if MOCK:
        mock_docker()
        flexmock(docker.APIClient,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path:
                 {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    # Optionally write the docker config secret to a temp dir.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [{
            'digest':
            'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 71907148
        }, {
            'digest':
            'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 3945724
        }],
        'mediaType':
        media_type,
        'schemaVersion':
        2
    }

    config_json = {
        'config': {
            'Size':
            12509448,
            'architecture':
            'amd64',
            'author':
            'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image':
                'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created':
            '2016-10-07T10:20:05.38595Z',
            'docker_version':
            '1.9.1',
            'id':
            '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os':
            'linux',
            'parent':
            '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id':
        '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id':
        'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))
    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.list.v2+json',
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults
            # to returning a v1 manifest unless a newer media type is
            # explicitly requested via the Accept header.
            # BUG FIX: the manifest-list check used to sit after an if/else in
            # which both branches return, making it unreachable dead code;
            # check the most specific media type first.
            if headers[
                    'Accept'] == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list

            if headers[
                    'Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return manifest_response_v2

            return manifest_response_v1

        if url == manifest_url:
            return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': TagAndPushPlugin.key,
                                        'args': {
                                            'registries': {
                                                LOCALHOST_REGISTRY: {
                                                    'insecure': True,
                                                    'secret': secret_path
                                                }
                                            }
                                        },
                                    }])

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1,
                                             v2=DIGEST_V2,
                                             oci=None)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(
                    workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
    {DOCKER0_REGISTRY: {'foo/bar:latest': DIGEST1}},
    {DOCKER0_REGISTRY: {'foo/bar:latest': DIGEST1, 'foo/bar:1.0-1': DIGEST1}},
    {DOCKER0_REGISTRY: {'foo/bar:1.0-1': DIGEST1, 'foo/bar:1.0': DIGEST2}},
    {DOCKER0_REGISTRY: {'foo/bar:1.0-1': DIGEST1}, LOCALHOST_REGISTRY: {'foo/bar:1.0-1': DIGEST2}},
])
@pytest.mark.parametrize("req_registries", [
    {},
    {LOCALHOST_REGISTRY: True},
    {DOCKER0_REGISTRY: False},
    {DOCKER0_REGISTRY: True, LOCALHOST_REGISTRY: True},
    {DOCKER0_REGISTRY: False, LOCALHOST_REGISTRY: True},
])
@pytest.mark.parametrize("orchestrator", [True, False])
@pytest.mark.parametrize("manifest_list_digests", [
    {},
    {'foo/bar': ManifestDigest(v2_list=DIGEST_LIST)}
])
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator,
                                     manifest_list_digests, reactor_config_map):
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
Пример #22
0
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator):
    """Verify DeleteFromRegistryPlugin issues exactly one DELETE per unique
    digest, but only for registries listed in req_registries.

    :param saved_digests: mapping registry -> {tag: digest} of what was pushed
    :param req_registries: mapping registry -> bool (use a secret for auth?)
    :param tmpdir: temp dir for generated .dockercfg files
    :param orchestrator: when True, digests come from orchestrate-build
        annotations instead of push_conf
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    # In orchestrator mode the plugin reads digests from build annotations,
    # so an OrchestrateBuildPlugin buildstep must be configured.
    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
        }]

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE,
                                   buildstep_plugins=buildstep_plugin, )
    setattr(workflow, 'builder', X)

    # Build the plugin's 'registries' argument; registries flagged True get a
    # .dockercfg secret written to a fresh temp dir.
    args_registries = {}
    for reg, use_secret in req_registries.items():
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
        else:
            args_registries[reg] = {}

    # Record the saved digests where the plugin will look for them:
    # orchestrator -> worker-build annotations; otherwise -> push_conf.
    for reg, digests in saved_digests.items():
        if orchestrator:
            for tag, dig in digests.items():
                repo = tag.split(':')[0]
                t = tag.split(':')[1]
                ann_digests.append({
                    'digest': dig,
                    'tag': t,
                    'repository': repo,
                    'registry': reg,
                })
        else:
            # NOTE(review): reaches into the private _registries dict rather
            # than a public PushConf API — confirm no public alternative.
            r = DockerRegistry(reg)
            for tag, dig in digests.items():
                r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
            workflow.push_conf._registries['docker'].append(r)

    if orchestrator:
        build_annotations = {'digests': ann_digests}
        annotations = {'worker-builds': {'x86_64': build_annotations}}
        setattr(workflow, 'build_result', Y)
        setattr(workflow.build_result, 'annotations', annotations)

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect exactly one DELETE per unique digest on each requested registry;
    # duplicate digests (same image under several tags) must be deleted once.
    deleted_digests = set()
    for reg, digests in saved_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = requests.auth.HTTPBasicAuth if req_registries[reg] else None
            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202, ok=True, raise_for_status=lambda: None)))
            deleted_digests.add(dig)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == deleted_digests
Пример #23
0
    def group_manifests_and_tag(self, session, worker_digests):
        """Build a manifest list (or OCI image index) from the per-platform
        manifests in worker_digests, then tag it in session.registry with
        every non-floating tag configured in workflow.tag_conf.

        :param session: registry session used for all pushes and queries
        :param worker_digests: mapping of platform -> worker image info dict
        :return: dict with keys 'manifest', 'media_type', 'manifest_digest'
        """
        self.log.info("%s: Creating manifest list", session.registry)

        # Gather the size and content type of each per-platform manifest by
        # querying the registry; skip media types we cannot group.
        grouped_manifests = []
        for platform, worker_image in worker_digests.items():
            source_repo = worker_image['repository']
            worker_digest = worker_image['digest']
            media_type = get_manifest_media_type(worker_image['version'])
            if media_type not in self.manifest_util.manifest_media_types:
                continue
            content, _, media_type, size = self.manifest_util.get_manifest(
                session, source_repo, worker_digest)
            grouped_manifests.append({
                'content': content,
                'repository': source_repo,
                'digest': worker_digest,
                'size': size,
                'media_type': media_type,
                'architecture': self.goarch.get(platform, platform),
            })

        list_type, list_json = self.manifest_util.build_list(grouped_manifests)
        self.log.info("%s: Created manifest, Content-Type=%s\n%s",
                      session.registry, list_type, list_json)

        # Push the manifest list to the registry once per configured tag.
        self.log.info("%s: Tagging manifest list", session.registry)
        for image in self.non_floating_images:
            target_repo = image.to_str(registry=False, tag=False)
            # The referenced manifests may live in different source repos, so
            # each one is copied into the target repo individually before the
            # list itself is stored under the image tag.
            for manifest in grouped_manifests:
                self.manifest_util.store_manifest_in_repository(
                    session,
                    manifest['content'],
                    manifest['media_type'],
                    manifest['repository'],
                    target_repo,
                    ref=manifest['digest'])
            self.manifest_util.store_manifest_in_repository(
                session,
                list_json,
                list_type,
                target_repo,
                target_repo,
                ref=image.tag)

        # Resolve the digest of the pushed list via one of the unique tags.
        registry_image = get_unique_images(self.workflow)[0]
        _, digest_str, _, _ = self.manifest_util.get_manifest(
            session, registry_image.to_str(registry=False, tag=False),
            registry_image.tag)

        if list_type == MEDIA_TYPE_OCI_V1_INDEX:
            digest = ManifestDigest(oci_index=digest_str)
        else:
            digest = ManifestDigest(v2_list=digest_str)

        # Record the grouped digest for every non-floating tag in push_conf.
        push_conf_registry = self.workflow.push_conf.add_docker_registry(
            session.registry, insecure=session.insecure)
        tags = [image.tag for image in self.non_floating_images]
        for tag in tags:
            push_conf_registry.digests[tag] = digest

        self.log.info("%s: Manifest list digest is %s", session.registry,
                      digest_str)
        self.log.debug("tags: %s digest: %s", tags, digest)

        return {
            'manifest': list_json,
            'media_type': list_type,
            'manifest_digest': digest
        }
Пример #24
0
def mock_environment(tmpdir, session=None, name=None,
                     component=None, version=None, release=None,
                     source=None, build_process_failed=False,
                     is_rebuild=True, docker_registry=True,
                     pulp_registries=0, blocksize=None,
                     task_states=None, additional_tags=None,
                     has_config=None,
                     logs_return_bytes=True):
    """
    Build a mocked (tasker, workflow) pair for exercising koji-related plugins.

    Creates a DockerBuildWorkflow populated with a fake builder, a Dockerfile
    written under *tmpdir*, tag/push configuration, an exported image tarball,
    worker-build annotations and plugin results, and flexmock stubs for
    subprocess, koji.ClientSession, GitSource and OSBS.

    :param tmpdir: directory in which the fake Dockerfile and image tarball
                   are created (converted with str(), so py.path works)
    :param session: koji client session to use; when None a
                    MockedClientSession is created with *task_states*
    :param name, component, version, release: values substituted into the
                   generated Dockerfile labels and the configured image tags
    :param source: workflow source; defaults to a git GitSource
    :param build_process_failed: when True, workflow.build_result carries a
                   fail_reason instead of an image_id/annotations
    :param is_rebuild: stored as the CheckAndSetRebuildPlugin result
    :param docker_registry: when True, add a docker registry to push_conf and
                   populate per-tag manifest digests
    :param pulp_registries: number of pulp registries to add; also switches
                   the populated digest from v2 to v1 for docker registries
    :param blocksize: accepted for interface compatibility; not used in the
                   environment construction itself
    :param task_states: forwarded to MockedClientSession when *session* is None
    :param additional_tags: extra primary image tags "<name>:<tag>" to add
    :param has_config: when truthy, attach a fake config blob to the docker
                   registry entry
    :param logs_return_bytes: whether the stubbed OSBS.get_build_logs returns
                   bytes (UTF-8 encoded) or text
    :return: tuple of (tasker, workflow)
    """
    if session is None:
        # Fix: task_states was previously ignored (hard-coded to None);
        # forward the caller-supplied value to the mocked session.
        session = MockedClientSession('', task_states=task_states)
    if source is None:
        source = GitSource('git', 'git://hostname/path')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    # Fake builder state: image ids, base image, source paths.
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    # Write a minimal Dockerfile carrying the component/version/release labels
    # the plugins under test read back.
    with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
        df.write('FROM base\n'
                 'LABEL BZComponent={component} com.redhat.component={component}\n'
                 'LABEL Version={version} version={version}\n'
                 'LABEL Release={release} release={release}\n'
                 .format(component=component, version=version, release=release))
        setattr(workflow.builder, 'df_path', df.name)
    if name and version:
        workflow.tag_conf.add_unique_image('user/test-image:{v}-timestamp'
                                           .format(v=version))
    if name and version and release:
        workflow.tag_conf.add_primary_images(["{0}:{1}-{2}".format(name,
                                                                   version,
                                                                   release),
                                              "{0}:{1}".format(name, version),
                                              "{0}:latest".format(name)])

    if additional_tags:
        workflow.tag_conf.add_primary_images(["{0}:{1}".format(name, tag)
                                              for tag in additional_tags])

    flexmock(subprocess, Popen=fake_Popen)
    flexmock(koji, ClientSession=lambda hub, opts: session)
    flexmock(GitSource)
    # Build logs contain non-ASCII quotation marks to exercise encoding
    # handling in the consumers.
    if logs_return_bytes:
        logs = b'build logs - \xe2\x80\x98 \xe2\x80\x97 \xe2\x80\x99'
    else:
        logs = 'build logs - \u2018 \u2017 \u2019'
    (flexmock(OSBS)
        .should_receive('get_build_logs')
        .with_args(BUILD_ID)
        .and_return(logs))
    (flexmock(OSBS)
        .should_receive('get_pod_for_build')
        .with_args(BUILD_ID)
        .and_return(MockedPodResponse()))
    setattr(workflow, 'source', source)
    setattr(workflow.source, 'lg', X())
    setattr(workflow.source.lg, 'commit_id', '123456')
    setattr(workflow, 'push_conf', PushConf())
    if docker_registry:
        docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')

        for image in workflow.tag_conf.images:
            tag = image.to_str(registry=False)
            # With pulp in play the fake digest goes into the v1 slot,
            # otherwise into v2 (mirrors the content type each path serves).
            if pulp_registries:
                docker_reg.digests[tag] = ManifestDigest(v1=fake_digest(image),
                                                         v2='sha256:not-used')
            else:
                docker_reg.digests[tag] = ManifestDigest(v1='sha256:not-used',
                                                         v2=fake_digest(image))

            if has_config:
                docker_reg.config = {
                    'config': {'architecture': 'x86_64'},
                    'container_config': {}
                }

    for pulp_registry in range(pulp_registries):
        workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')

    # Fake exported image tarball (4 KiB of filler).
    with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
        fp.write('x' * 2**12)
        setattr(workflow, 'exported_image_sequence', [{'path': fp.name}])

    # Orchestrator-style worker-build annotations for two architectures.
    annotations = {
        'worker-builds': {
            'x86_64': {
                'build': {
                    'build-name': 'build-1-x64_64',
                },
                'metadata_fragment': 'configmap/build-1-x86_64-md',
                'metadata_fragment_key': 'metadata.json',
            },
            'ppc64le': {
                'build': {
                    'build-name': 'build-1-ppc64le',
                },
                'metadata_fragment': 'configmap/build-1-ppc64le-md',
                'metadata_fragment_key': 'metadata.json',
            }
        }
    }

    if build_process_failed:
        workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
                                            fail_reason="not built")
    else:
        workflow.build_result = BuildResult(logs=["docker build log - \u2018 \u2017 \u2019 \n'"],
                                            image_id="id1234",
                                            annotations=annotations)
    workflow.prebuild_plugins_conf = {}
    workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_rebuild
    # NOTE(review): the second fixture line appears to be missing a ';'
    # between "24000" and "RSA/SHA256" (compare ";23000;" above). Left
    # unchanged since downstream assertions may depend on the exact string.
    workflow.postbuild_results[PostBuildRPMqaPlugin.key] = [
        "name1;1.0;1;x86_64;0;2000;" + FAKE_SIGMD5.decode() + ";23000;"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abc;(none)",
        "name2;2.0;1;x86_64;0;3000;" + FAKE_SIGMD5.decode() + ";24000"
        "RSA/SHA256, Tue 30 Aug 2016 00:00:00, Key ID 01234567890abd;(none)",
    ]

    # Canned koji content-generator metadata for the x86_64 worker.
    workflow.postbuild_results[FetchWorkerMetadataPlugin.key] = {
        'x86_64': {
            'buildroots': [
                {
                    'container': {
                        'type': 'docker',
                        'arch': 'x86_64'
                    },
                    'extra': {
                        'osbs': {
                            'build_id': '12345',
                            'builder_image_id': '67890'
                        }
                    },
                    'content_generator': {
                        'version': '1.6.23',
                        'name': 'atomic-reactor'
                    },
                    'host': {
                        'os': 'Red Hat Enterprise Linux Server 7.3 (Maipo)',
                        'arch': 'x86_64'
                    },
                    'components': [
                        {
                            'name': 'perl-Net-LibIDN',
                            'sigmd5': '1dba38d073ea8f3e6c99cfe32785c81e',
                            'arch': 'x86_64',
                            'epoch': None,
                            'version': '0.12',
                            'signature': '199e2f91fd431d51',
                            'release': '15.el7',
                            'type': 'rpm'
                        },
                        {
                            'name': 'tzdata',
                            'sigmd5': '2255a5807ca7e4d7274995db18e52bea',
                            'arch': 'noarch',
                            'epoch': None,
                            'version': '2017b',
                            'signature': '199e2f91fd431d51',
                            'release': '1.el7',
                            'type': 'rpm'
                        },
                    ],
                    'tools': [
                        {
                            'version': '1.12.6',
                            'name': 'docker'
                        }
                    ],
                    'id': 1
                }
            ],
            'metadata_version': 0,
            'output': [
                {
                    'type': 'log',
                    'arch': 'noarch',
                    'filename': 'openshift-final.log',
                    'filesize': 106690,
                    'checksum': '2efa754467c0d2ea1a98fb8bfe435955',
                    'checksum_type': 'md5',
                    'buildroot_id': 1
                },
                {
                    'type': 'log',
                    'arch': 'noarch',
                    'filename': 'build.log',
                    'filesize': 1660,
                    'checksum': '8198de09fc5940cf7495e2657039ee72',
                    'checksum_type': 'md5',
                    'buildroot_id': 1
                },
                {
                    'extra': {
                        'image': {
                            'arch': 'x86_64'
                        },
                        'docker': {
                            'repositories': [
                                'brew-pulp-docker:8888/myproject/hello-world:0.0.1-9',
                            ],
                            'parent_id': 'sha256:bf203442',
                            'id': '123456',
                        }
                    },
                    'checksum': '58a52e6f3ed52818603c2744b4e2b0a2',
                    'filename': 'test.x86_64.tar.gz',
                    'buildroot_id': 1,
                    'components': [
                        {
                            'name': 'tzdata',
                            'sigmd5': 'd9dc4e4f205428bc08a52e602747c1e9',
                            'arch': 'noarch',
                            'epoch': None,
                            'version': '2016d',
                            'signature': '199e2f91fd431d51',
                            'release': '1.el7',
                            'type': 'rpm'
                        },
                        {
                            'name': 'setup',
                            'sigmd5': 'b1e5ca72c71f94112cd9fb785b95d4da',
                            'arch': 'noarch',
                            'epoch': None,
                            'version': '2.8.71',
                            'signature': '199e2f91fd431d51',
                            'release': '6.el7',
                            'type': 'rpm'
                        },

                    ],
                    'type': 'docker-image',
                    'checksum_type': 'md5',
                    'arch': 'x86_64',
                    'filesize': 71268781
                }
            ]
        }
    }
    workflow.plugin_workspace = {
        OrchestrateBuildPlugin.key: {
            WORKSPACE_KEY_UPLOAD_DIR: 'test-dir',
            WORKSPACE_KEY_BUILD_INFO: {
               'x86_64': BuildInfo('help.md')
            }
        }
    }

    return tasker, workflow