def workflow(self, push=True, sync=True, build_process_failed=False,
                 postbuild_results=None, prebuild_results=None, expectv2schema2=False,
                 platform_descriptors=False):
        """Return a flexmock workflow stub with pulp registries and a reactor config.

        ``push``/``sync`` select which pulp transports are registered;
        ``expectv2schema2`` flips the schema-1 digest preference.
        """
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)

        push_conf = PushConf()
        # One pulp registry per requested transport: plain push (client
        # side) and/or server-side sync.
        for enabled, server_side in ((push, False), (sync, True)):
            if enabled:
                push_conf.add_pulp_registry('pulp', crane_uri=self.CRANE_URI,
                                            server_side_sync=server_side)

        conf = {
            ReactorConfigKeys.VERSION_KEY: 1,
            'prefer_schema1_digest': not expectv2schema2,
        }
        if platform_descriptors:
            conf['platform_descriptors'] = [
                {'platform': 'x86_64', 'architecture': 'amd64'},
            ]

        plugin_workspace = {
            ReactorConfigPlugin.key: {WORKSPACE_CONF_KEY: ReactorConfig(conf)},
        }

        mock_get_retry_session()

        builder = flexmock()
        builder.image_id = 'sha256:(old)'

        return flexmock(tag_conf=tag_conf,
                        push_conf=push_conf,
                        builder=builder,
                        build_process_failed=build_process_failed,
                        plugin_workspace=plugin_workspace,
                        postbuild_results=postbuild_results or {},
                        prebuild_results=prebuild_results or {})
    def workflow(self, push=True, sync=True, build_process_failed=False,
                 postbuild_results=None, prebuild_results=None, expectv2schema2=False,
                 platform_descriptors=False):
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)
        push_conf = PushConf()
        if push:
            push_conf.add_pulp_registry('pulp', crane_uri=self.CRANE_URI, server_side_sync=False)
        if sync:
            push_conf.add_pulp_registry('pulp', crane_uri=self.CRANE_URI, server_side_sync=True)

        conf = {
            ReactorConfigKeys.VERSION_KEY: 1,
            'prefer_schema1_digest': not expectv2schema2
        }
        if platform_descriptors:
            conf['platform_descriptors'] = [
                {'platform': 'x86_64', 'architecture': 'amd64'},
            ]
        plugin_workspace = {
            ReactorConfigPlugin.key: {
                WORKSPACE_CONF_KEY: ReactorConfig(conf)
            }
        }

        mock_get_retry_session()
        builder = flexmock()
        setattr(builder, 'image_id', 'sha256:(old)')
        return flexmock(tag_conf=tag_conf,
                        push_conf=push_conf,
                        builder=builder,
                        build_process_failed=build_process_failed,
                        plugin_workspace=plugin_workspace,
                        postbuild_results=postbuild_results or {},
                        prebuild_results=prebuild_results or {})
def mock_workflow(tmpdir):
    """Build a DockerBuildWorkflow whose source is mocked under *tmpdir*."""
    wf = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    source = MockSource(tmpdir)
    wf.builder = X
    wf.builder.source = source
    flexmock(wf, source=source)
    mock_get_retry_session()
    return wf
def mock_workflow(tmpdir):
    """Prepare a minimal DockerBuildWorkflow for plugin tests.

    The workflow's builder is the shared X stub and its source is a
    MockSource rooted at *tmpdir*.
    """
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    src = MockSource(tmpdir)
    workflow.builder = X
    workflow.builder.source = src
    flexmock(workflow, source=src)
    mock_get_retry_session()
    return workflow
def mock_workflow(tmpdir, dockerfile):
    """Create a workflow whose builder points at *dockerfile* written in *tmpdir*."""
    wf = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    src = MockSource(tmpdir)
    wf.builder = X
    wf.builder.source = src
    flexmock(wf, source=src)

    # Materialize the Dockerfile on disk and point the builder at it.
    parser = df_parser(str(tmpdir))
    parser.content = dockerfile
    wf.builder.df_path = parser.dockerfile_path

    mock_get_retry_session()
    return wf
def mock_workflow(tmpdir, dockerfile=DEFAULT_DOCKERFILE):
    """Return a test workflow with *dockerfile* written under *tmpdir*."""
    mock_source = MockSource(tmpdir)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    workflow.builder.source = mock_source
    flexmock(workflow, source=mock_source)

    # Write the Dockerfile content and record its path on the builder.
    df = df_parser(str(tmpdir))
    df.content = dockerfile
    workflow.builder.df_path = df.dockerfile_path

    mock_get_retry_session()
    return workflow
def mock_workflow(tmpdir, dockerfile=DEFAULT_DOCKERFILE, scratch=False):
    """Build a workflow stub; *scratch* controls util.is_scratch_build()."""
    flexmock(util).should_receive('is_scratch_build').and_return(scratch)

    wf = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    src = MockSource(tmpdir)
    wf.builder = X()
    wf.builder.source = src
    flexmock(wf, source=src)

    # Write the Dockerfile and wire its path to the builder.
    parser = df_parser(str(tmpdir))
    parser.content = dockerfile
    wf.builder.df_path = parser.dockerfile_path

    mock_get_retry_session()
    return wf
def mock_workflow(tmpdir, dockerfile=DEFAULT_DOCKERFILE, scratch=False):
    """Create a DockerBuildWorkflow for tests.

    is_scratch_build() is stubbed to return *scratch*; *dockerfile* is
    written into *tmpdir* and its path stored on the builder.
    """
    flexmock(util).should_receive('is_scratch_build').and_return(scratch)
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    source = MockSource(tmpdir)
    setattr(workflow, 'builder', X())
    workflow.builder.source = source
    flexmock(workflow, source=source)

    dfp = df_parser(str(tmpdir))
    dfp.content = dockerfile
    setattr(workflow.builder, 'df_path', dfp.dockerfile_path)
    mock_get_retry_session()

    return workflow
def prepare():
    """Return a (tasker, workflow) pair with registry HTTP traffic mocked."""
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    source_params = {"provider": "git", "uri": DOCKERFILE_GIT}
    workflow = DockerBuildWorkflow(source_params, "test-image")
    workflow.source = StubSource()
    workflow.builder = StubInsideBuilder().for_workflow(workflow)

    # Any HTTP GET yields a canned successful response carrying repocontent.
    flexmock(requests.Response, content=repocontent) \
        .should_receive('raise_for_status').and_return(None)
    flexmock(requests.Session, get=lambda *_: requests.Response())
    mock_get_retry_session()

    return tasker, workflow
def prepare(scratch=False):
    """Set up a (tasker, workflow) pair; *scratch* marks a scratch build."""
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(source={"provider": "git", "uri": DOCKERFILE_GIT})
    workflow.source = StubSource()
    workflow.builder = StubInsideBuilder().for_workflow(workflow)
    workflow.builder.set_dockerfile_images([])
    workflow.user_params['scratch'] = scratch

    # Stub registry responses: every GET succeeds and returns repocontent.
    flexmock(requests.Response, content=repocontent) \
        .should_receive('raise_for_status').and_return(None)
    flexmock(requests.Session, get=lambda *_: requests.Response())
    mock_get_retry_session()

    return tasker, workflow
Example #11
0
    def workflow(self, push=True, sync=True, build_process_failed=False):
        """Return a flexmock workflow stub with pulp push/sync registries."""
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)

        push_conf = PushConf()
        # Register a pulp registry for each requested transport mode.
        for wanted, server_side in ((push, False), (sync, True)):
            if wanted:
                push_conf.add_pulp_registry('pulp', crane_uri=self.CRANE_URI,
                                            server_side_sync=server_side)

        mock_get_retry_session()
        builder = flexmock()
        builder.image_id = 'sha256:(old)'
        return flexmock(tag_conf=tag_conf,
                        push_conf=push_conf,
                        builder=builder,
                        build_process_failed=build_process_failed,
                        plugin_workspace={})
Example #12
0
def mock_workflow(workflow,
                  build_dir: Path,
                  dockerfile=DEFAULT_DOCKERFILE,
                  platforms=None,
                  scratch=False):
    """Populate *workflow* in place with a mocked source and build dirs."""
    workflow.user_params['scratch'] = scratch
    workflow.source = MockSource(build_dir)

    platforms = platforms or ['x86_64']
    workflow.data.plugins_results[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] = set(platforms)

    # Write the Dockerfile into the mocked source checkout.
    with open(workflow.source.dockerfile_path, 'w') as dockerfile_file:
        dockerfile_file.write(dockerfile)

    workflow.build_dir.init_build_dirs(platforms, workflow.source)

    parsed = DockerfileParser(str(build_dir))
    workflow.data.dockerfile_images = DockerfileImages(parsed.parent_images)
    mock_get_retry_session()
def odcs_client(tmpdir, request):
    """Fixture: construct an ODCSClient and verify its session settings."""
    insecure, token, cert = request.param

    mock_get_retry_session()

    client = ODCSClient(ODCS_URL, insecure=insecure, token=token, cert=cert)

    # TLS verification is the inverse of the insecure flag.
    assert client.session.verify == (not insecure)
    assert client.session.cert == cert

    if token:
        # The OIDC token must be forwarded as a Bearer header.
        assert client.session.headers[ODCSClient.OIDC_TOKEN_HEADER] == \
            'Bearer {}'.format(token)
    else:
        assert ODCSClient.OIDC_TOKEN_HEADER not in client.session.headers

    return client
Example #14
0
def odcs_client(tmpdir, request):
    """Fixture returning an ODCSClient built from (insecure, token, cert)."""
    insecure, token, cert = request.param

    mock_get_retry_session()

    odcs_client = ODCSClient(ODCS_URL, insecure=insecure, token=token, cert=cert)

    # Session must reflect the requested TLS/auth configuration.
    assert odcs_client.session.verify == (not insecure)
    assert odcs_client.session.cert == cert

    if token:
        expected = 'Bearer {}'.format(token)
        assert odcs_client.session.headers[ODCSClient.OIDC_TOKEN_HEADER] == expected
    else:
        assert ODCSClient.OIDC_TOKEN_HEADER not in odcs_client.session.headers

    return odcs_client
Example #15
0
def prepare():
    """Return a (tasker, workflow) pair with builder attributes stubbed."""
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": DOCKERFILE_GIT}, "test-image")
    workflow.builder = X()

    builder = workflow.builder
    builder.image_id = "asd123"
    builder.base_image = ImageName(repo='Fedora', tag='21')
    builder.source = X()
    builder.source.dockerfile_path = None
    builder.source.path = None

    # Mock every registry GET to succeed and return repocontent.
    flexmock(requests.Response, content=repocontent) \
        .should_receive('raise_for_status').and_return(None)
    flexmock(requests.Session, get=lambda *_: requests.Response())
    mock_get_retry_session()

    return tasker, workflow
Example #16
0
def prepare(scratch=False):
    """Create (tasker, workflow); *scratch* is reflected in the build metadata."""
    if MOCK:
        mock_docker()

    # get_build_json() reports the scratch label from the build metadata.
    build_json = {'metadata': {'labels': {'scratch': scratch}}}
    flexmock(util).should_receive('get_build_json').and_return(build_json)

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow("test-image",
                                   source={"provider": "git", "uri": DOCKERFILE_GIT})
    workflow.source = StubSource()
    workflow.builder = StubInsideBuilder().for_workflow(workflow)

    flexmock(requests.Response, content=repocontent) \
        .should_receive('raise_for_status').and_return(None)
    flexmock(requests.Session, get=lambda *_: requests.Response())
    mock_get_retry_session()

    return tasker, workflow
def mock_workflow(tmpdir,
                  dockerfile=DEFAULT_DOCKERFILE,
                  scratch=False,
                  for_orchestrator=False):
    """Build a workflow stub; optionally configure it for the orchestrator."""
    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
    workflow.user_params['scratch'] = scratch

    source = MockSource(tmpdir)
    dfp = df_parser(str(tmpdir))
    dfp.content = dockerfile

    # The builder is seeded with the Dockerfile's parent images.
    workflow.builder = X(dfp.parent_images)
    workflow.builder.source = source
    flexmock(workflow, source=source)

    workflow.builder.df_path = dfp.dockerfile_path
    mock_get_retry_session()

    if for_orchestrator:
        workflow.buildstep_plugins_conf = [{'name': PLUGIN_BUILD_ORCHESTRATE_KEY}]

    return workflow
Example #18
0
def mock_workflow(tmpdir,
                  dockerfile=DEFAULT_DOCKERFILE,
                  scratch=False,
                  for_orchestrator=False):
    """Return a workflow for tests; is_scratch_build() is stubbed to *scratch*."""
    flexmock(util).should_receive('is_scratch_build').and_return(scratch)
    workflow = DockerBuildWorkflow('test-image', source=MOCK_SOURCE)
    src = MockSource(tmpdir)
    workflow.builder = X()
    workflow.builder.source = src
    flexmock(workflow, source=src)

    # Write the Dockerfile and record its location on the builder.
    parser = df_parser(str(tmpdir))
    parser.content = dockerfile
    workflow.builder.df_path = parser.dockerfile_path
    mock_get_retry_session()

    if for_orchestrator:
        # Orchestrator builds carry the orchestrate buildstep plugin.
        workflow.buildstep_plugins_conf = [
            {'name': PLUGIN_BUILD_ORCHESTRATE_KEY},
        ]

    return workflow
Example #19
0
def test_tag_and_push_plugin_oci(tmpdir, monkeypatch, use_secret, fail_push,
                                 caplog, reactor_config_map):
    """Exercise TagAndPushPlugin for an OCI image pushed via skopeo.

    Builds a stub OCI layout on disk, mocks the skopeo subprocess call and
    the registry HTTP responses, then verifies that only the OCI digest is
    recorded on the push configuration and that the registry password never
    appears in the logs.

    Fixes over the previous version:
    - the tar-building loop reused ``f`` as its loop variable, shadowing the
      open tar file handle (still referenced by TarFile via ``fileobj=f``);
    - the unused ``output =`` assignment inside ``pytest.raises`` is gone.
    """

    # For now, we don't want to require having a skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    # Optionally provide registry credentials via a .dockercfg secret dir.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    # Canned OCI manifest returned by the mocked registry.
    manifest_json = {
        "schemaVersion":
        2,
        "mediaType":
        "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [{
            "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
            "digest":
            "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
            "size": 1863477
        }],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref":
            "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body":
            "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata":
            "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size":
            "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit":
            "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata":
            "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    # Canned OCI image config blob.
    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type":
            "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            # Use a distinct loop variable: the original reused 'f', shadowing
            # the open tar file handle while TarFile was still writing to it.
            for entry in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, entry), entry)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo

    def check_check_output(args, **kwargs):
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1,
                                                cmd=args,
                                                output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--dest-creds=user:mypassword' in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[
            -1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE_NAME
        return ''

    (flexmock(subprocess).should_receive("check_output").once().replace_with(
        check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': MEDIA_TYPE,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    # 404 returned when the OCI media type is not requested.
    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response,
              raise_for_status=lambda: None,
              json=config_json))

    def custom_get(method, url, headers, **kwargs):
        # Dispatch mocked registry responses by URL (returns None otherwise).
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url:
            return manifest_response

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': TagAndPushPlugin.key,
                                        'args': {
                                            'registries': {
                                                LOCALHOST_REGISTRY: {
                                                    'insecure': True,
                                                    'secret': secret_path
                                                }
                                            }
                                        },
                                    }])

    with caplog.at_level(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                runner.run()
        else:
            output = runner.run()

    # The registry password must never be logged.
    for r in caplog.records:
        assert 'mypassword' not in r.getMessage()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        # Only the OCI digest should be recorded for an OCI push.
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE_NAME].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE_NAME].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[
            TEST_IMAGE_NAME].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
Example #20
0
def test_tag_and_push_plugin(tmpdir, monkeypatch, image_name, logs,
                             should_raise, has_config, use_secret,
                             reactor_config_map, file_name,
                             dockerconfig_contents):
    """Exercise TagAndPushPlugin against a mocked docker v2 registry.

    Mocks docker push/login and the registry manifest/blob endpoints, then
    verifies the v1/v2 digests and the config blob recorded on the push
    configuration.

    Fix over the previous version: in ``custom_get`` the check for the
    manifest-list media type was unreachable (placed after an if/else where
    both branches return), so ``manifest_response_v2_list`` was never
    served; the Accept-header dispatch now checks the list type first.
    """

    if MOCK:
        mock_docker()
        flexmock(docker.APIClient,
                 push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path:
                 {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    # Optionally provide registry credentials via a docker config secret dir.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Canned v2 manifest returned by the mocked registry.
    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [{
            'digest':
            'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 71907148
        }, {
            'digest':
            'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
            'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
            'size': 3945724
        }],
        'mediaType':
        media_type,
        'schemaVersion':
        2
    }

    # Canned image config blob.
    config_json = {
        'config': {
            'Size':
            12509448,
            'architecture':
            'amd64',
            'author':
            'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image':
                'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created':
            '2016-10-07T10:20:05.38595Z',
            'docker_version':
            '1.9.1',
            'id':
            '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os':
            'linux',
            'parent':
            '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id':
        '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id':
        'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))
    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.list.v2+json',
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested.
            # Check the richer media types first; previously the manifest-list
            # branch was dead code placed after an if/else that always returned.
            accept = headers['Accept']
            if accept == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list
            if accept == 'application/vnd.docker.distribution.manifest.v2+json':
                return manifest_response_v2
            return manifest_response_v1

        if url == manifest_url:
            return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': TagAndPushPlugin.key,
                                        'args': {
                                            'registries': {
                                                LOCALHOST_REGISTRY: {
                                                    'insecure': True,
                                                    'secret': secret_path
                                                }
                                            }
                                        },
                                    }])

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1,
                                             v2=DIGEST_V2,
                                             oci=None)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                expected_digest.v2
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(
                    workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
Example #21
0
def test_resolve_module_compose(tmpdir, docker_tasker, compose_ids, modules,
                                signing_intent, signing_intent_source,
                                sigkeys):
    """Exercise ResolveModuleComposePlugin against a mocked ODCS service.

    Depending on the parametrized ``modules`` / ``signing_intent*`` values the
    plugin is expected either to fail with a descriptive error (missing or
    empty "compose" section in container.yaml) or to request an ODCS module
    compose, poll it to completion, and record the resulting compose info.
    """
    # The ODCS client authenticates with an OpenID Connect bearer token read
    # from a file named "token" inside the secrets directory.
    secrets_path = os.path.join(str(tmpdir), "secret")
    os.mkdir(secrets_path)
    with open(os.path.join(secrets_path, "token"), "w") as f:
        f.write("green_eggs_and_ham")

    if modules is not None:
        # Write a minimal container.yaml listing the requested modules and,
        # when parametrized that way, an inline signing_intent.
        data = "compose:\n"
        data += "    modules:\n"
        for mod in modules:
            data += "    - {}\n".format(mod)
        if signing_intent_source == 'container_yaml':
            data += '    signing_intent: ' + signing_intent
        tmpdir.join(REPO_CONTAINER_CONFIG).write(data)

    # The compose request is expected to be made for the first module only.
    module = None
    if modules:
        module = modules[0]

    workflow = mock_workflow(tmpdir)
    mock_get_retry_session()
    mock_koji_session()

    def handle_composes_post(request):
        # New-compose request: verify the auth header and the JSON payload
        # the plugin submits to ODCS.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        if isinstance(request.body, six.text_type):
            body = request.body
        else:
            body = request.body.decode()
        body_json = json.loads(body)
        assert body_json['source']['type'] == 'module'
        assert body_json['source']['source'] == module
        assert body_json['source']['sigkeys'] == sigkeys
        assert body_json['arches'] == ['ppc64le', 'x86_64']
        return (200, {}, compose_json(0, 'wait'))

    responses.add_callback(responses.POST,
                           ODCS_URL + '/composes/',
                           content_type='application/json',
                           callback=handle_composes_post)

    # Stateful poll counter: the first status request reports the compose as
    # still 'generating', every later one reports 'done', so the plugin's
    # wait-and-retry loop gets exercised.
    state = {'count': 1}

    def handle_composes_get(request):
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        if state['count'] == 1:
            response_json = compose_json(1, 'generating')
        else:
            response_json = compose_json(2, 'done')
        state['count'] += 1

        return (200, {}, response_json)

    responses.add_callback(responses.GET,
                           ODCS_URL + '/composes/84',
                           content_type='application/json',
                           callback=handle_composes_get)

    args = {
        'odcs_url': ODCS_URL,
        'odcs_openidc_secret_path': secrets_path,
        'compose_ids': compose_ids
    }

    if signing_intent_source == 'command_line':
        args['signing_intent'] = signing_intent

    # Reactor config maps each signing-intent name to the keys it accepts;
    # 'unsigned' is the default when nothing else is specified.
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
        ReactorConfig({'version': 1,
                       'odcs': {'api_url': ODCS_URL,
                                'auth': {'openidc_dir': secrets_path},
                                'signing_intents': [
                                    {
                                        'name': 'unsigned',
                                        'keys': [],
                                    },
                                    {
                                        'name': 'release',
                                        'keys': ['R123', 'R234'],
                                    },
                                    {
                                        'name': 'beta',
                                        'keys': ['R123', 'B456', 'B457'],
                                    },
                                ],
                                'default_signing_intent': 'unsigned'},
                       'koji':  {'auth': {},
                                 'hub_url': 'https://koji.example.com/hub'}})

    runner = PreBuildPluginsRunner(docker_tasker, workflow,
                                   [{
                                       'name': ResolveModuleComposePlugin.key,
                                       'args': args
                                   }])

    if modules is None:
        # No "compose" section at all -> plugin must raise with this message.
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        assert '"compose" config in container.yaml is required ' in str(
            exc_info.value)
    elif not modules:
        # "compose" present but with an empty module list.
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        assert '"compose" config has no modules' in str(exc_info.value)
    else:
        runner.run()

        # Success path: the compose info recorded on the workflow must match
        # what the mocked ODCS endpoints returned (compose id 84 is the one
        # the GET callback above is registered for).
        compose_info = get_compose_info(workflow)

        assert compose_info.compose_id == 84
        assert compose_info.base_module.name == MODULE_NAME
        assert compose_info.base_module.stream == MODULE_STREAM
        assert compose_info.base_module.version == MODULE_VERSION
        assert compose_info.base_module.mmd.props.summary == 'Eye of GNOME Application Module'
        assert compose_info.base_module.rpms == [
            'eog-0:3.28.3-1.module_2123+73a9ef6f.src.rpm',
            'eog-0:3.28.3-1.module_2123+73a9ef6f.x86_64.rpm',
            'eog-0:3.28.3-1.module_2123+73a9ef6f.ppc64le.rpm',
        ]
def test_tag_and_push_plugin_oci(
        tmpdir, monkeypatch, use_secret, fail_push, caplog, reactor_config_map):
    """Tag-and-push an OCI (flatpak) image through a mocked skopeo + registry.

    Builds a stub OCI layout on disk, mocks the skopeo subprocess call and the
    registry HTTP responses, then verifies the exact skopeo command line and -
    on success - the OCI digest and config blob recorded in
    ``workflow.push_conf``.
    """
    # For now, we don't want to require having a skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    # Optional .dockercfg secret; when present, the plugin is expected to pass
    # it to skopeo via --authfile (asserted in check_check_output below).
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******", "email": "*****@*****.**", "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    # Canned OCI manifest the mocked registry will return after the push.
    manifest_json = {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "digest": "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
                "size": 1863477
            }
        ],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref": "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body": "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata": "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size": "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit": "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata": "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    # Canned image config blob for the manifest's config digest.
    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type": "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Also register a tarball flavor of the same stub image.
    # NOTE(review): the loop variable 'f' below shadows the open tar file
    # object; the 'with' block still closes the correct object, but the two
    # names collide - worth renaming at some point.
    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            for f in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, f), f)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo

    def check_check_output(args, **kwargs):
        # Stand-in for subprocess.check_output: either simulate a push failure
        # or assert the exact skopeo command line the plugin built.
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1, cmd=args, output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--authfile=' + os.path.join(secret_path, '.dockercfg') in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE_NAME
        return ''

    (flexmock(subprocess)
     .should_receive("check_output")
     .once()
     .replace_with(check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                'Content-Type': MEDIA_TYPE,
                'Docker-Content-Digest': DIGEST_OCI
              }))

    # Returned when the client asks for a media type the registry doesn't have.
    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={
                  "errors": [{"code": "MANIFEST_UNKNOWN"}]
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, raise_for_status=lambda: None, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        # Route mocked registry requests by URL (and Accept header for the
        # 'latest' manifest). Unknown URLs fall through to an implicit None.
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url:
            return manifest_response

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(requests.Session)
        .should_receive('request')
        .replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    with caplog.at_level(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        # OCI pushes record only the OCI digest; v1/v2 docker digests stay unset.
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
    def workflow(self,
                 build_process_failed=False,
                 registries=None,
                 registry_types=None,
                 platforms=None,
                 platform_descriptors=None,
                 group=True,
                 fail=False,
                 limit_media_types=None):
        """Build a mock workflow whose registries serve canned manifest responses.

        :param build_process_failed: value exposed on the returned mock workflow
        :param registries: reactor-config registry dicts; defaults to a single
            v2 registry expecting all known media types
        :param registry_types: expected media types used when ``registries`` is
            defaulted; also the response types unless overridden below
        :param platforms: platforms "built"; defaults to the platforms named in
            ``platform_descriptors``
        :param platform_descriptors: platform->architecture mappings for the
            reactor config; defaults to x86_64/ppc64le/s390x
        :param group: when True, group_manifests results advertise a manifest
            list; otherwise a plain v2 schema 2 manifest
        :param fail: "bad_results" makes every registry return empty/invalid
            responses so callers can test failure handling
        :param limit_media_types: optional source_container.limit_media_types
            reactor-config entry
        :return: a flexmock standing in for a DockerBuildWorkflow
        """
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)

        if platform_descriptors is None:
            platform_descriptors = [
                {'platform': 'x86_64', 'architecture': 'amd64'},
                {'platform': 'ppc64le', 'architecture': 'ppc64le'},
                {'platform': 's390x', 'architecture': 's390x'},
            ]

        if platforms is None:
            platforms = [
                descriptor['platform'] for descriptor in platform_descriptors
            ]
        # Without an amd64 build only a manifest list is expected from the
        # registry (see response_types below).
        no_amd64 = 'x86_64' not in platforms

        # When the caller pins registries or media types explicitly, don't
        # second-guess the response types below.
        keep_types = bool(registries or registry_types)

        if registries is None and registry_types is None:
            registry_types = [
                MEDIA_TYPE_DOCKER_V2_SCHEMA1, MEDIA_TYPE_DOCKER_V2_SCHEMA2,
                MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST, MEDIA_TYPE_OCI_V1,
                MEDIA_TYPE_OCI_V1_INDEX
            ]

        if registries is None:
            registries = [{
                'url': 'https://container-registry.example.com/v2',
                'version': 'v2',
                'insecure': True,
                'expected_media_types': registry_types
            }]
        conf = {
            'version': 1,
            'registries': registries,
        }

        if limit_media_types is not None:
            conf['source_container'] = {
                'limit_media_types': limit_media_types,
            }

        if platform_descriptors:
            conf['platform_descriptors'] = platform_descriptors

        for registry in registries:

            def get_manifest(request):
                # Echo the first requested media type back with an empty body.
                media_types = request.headers.get('Accept', '').split(',')
                content_type = media_types[0]

                return 200, {'Content-Type': content_type}, '{}'

            # BUG FIX: the pattern used to be built as "r'" + url + "...'",
            # embedding a literal r-prefix and quote characters into the
            # compiled regex, so it could never match a real request URL.
            # Build the pattern directly instead.
            url_regex = registry['url'] + ".*/manifests/.*"
            url = re.compile(url_regex)
            responses.add_callback(responses.GET, url, callback=get_manifest)

            expected_types = registry.get('expected_media_types',
                                          registry_types or [])
            if fail == "bad_results":
                response_types = []
            elif not keep_types and no_amd64:
                response_types = [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
            else:
                response_types = expected_types

            # Normalize the registry URI to an https base URL.
            reguri = RegistryURI(registry['url']).docker_uri
            if re.match('http(s)?://', reguri):
                urlbase = reguri
            else:
                urlbase = 'https://{0}'.format(reguri)

            actual_v2_url = urlbase + "/v2/foo/manifests/unique-tag"

            if fail == "bad_results":
                # A 200 response with the wrong content type / empty body for
                # every media type, to exercise error handling.
                response = requests.Response()
                (flexmock(response,
                          raise_for_status=lambda: None,
                          status_code=requests.codes.ok,
                          json={},
                          headers={'Content-Type': 'application/json'}))
                v1_response = response
                v1_oci_response = response
                v1_oci_index_response = response
                v2_response = response
                v2_list_response = response
            else:
                # Start from "not found" and upgrade the media types that are
                # supposed to exist on this registry.
                v1_response = self.config_response_none
                v1_oci_response = self.config_response_none
                v1_oci_index_response = self.config_response_none
                v2_response = self.config_response_none
                v2_list_response = self.config_response_none

            if MEDIA_TYPE_DOCKER_V2_SCHEMA1 in response_types:
                v1_response = self.config_response_config_v1
            if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in response_types:
                v2_response = self.config_response_config_v2
            if MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in response_types:
                v2_list_response = self.config_response_config_v2_list
            if MEDIA_TYPE_OCI_V1 in response_types:
                v1_oci_response = self.config_response_config_oci_v1
            if MEDIA_TYPE_OCI_V1_INDEX in response_types:
                v1_oci_index_response = self.config_response_config_oci_v1_index

            v2_header_v1 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA1}
            v2_header_v2 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA2}
            manifest_header = {'Accept': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST}

            # One flexmock expectation per Accept header the plugin may send.
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers=v2_header_v1,
                auth=HTTPRegistryAuth,
                verify=False).and_return(v1_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers=v2_header_v2,
                auth=HTTPRegistryAuth,
                verify=False).and_return(v2_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers={
                    'Accept': MEDIA_TYPE_OCI_V1
                },
                auth=HTTPRegistryAuth,
                verify=False).and_return(v1_oci_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers={
                    'Accept': MEDIA_TYPE_OCI_V1_INDEX
                },
                auth=HTTPRegistryAuth,
                verify=False).and_return(v1_oci_index_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers=manifest_header,
                auth=HTTPRegistryAuth,
                verify=False).and_return(v2_list_response))

        digests = {'media_type': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST}
        if not group:
            digests = {'media_type': MEDIA_TYPE_DOCKER_V2_SCHEMA2}
        plugins_results = {
            PLUGIN_CHECK_AND_SET_PLATFORMS_KEY: platforms,
            PLUGIN_GROUP_MANIFESTS_KEY: digests,
        }

        mock_get_retry_session()
        builder = flexmock()
        setattr(builder, 'image_id', 'sha256:(old)')

        # (A stray no-op ``flexmock(tag_conf=tag_conf)`` call was removed here.)
        wf_data = ImageBuildWorkflowData()
        wf_data.tag_conf = tag_conf
        wf_data.plugins_results = plugins_results

        return flexmock(data=wf_data,
                        builder=builder,
                        conf=Configuration(raw_config=conf),
                        build_process_failed=build_process_failed)
def test_tag_and_push_plugin(workflow, monkeypatch, caplog, image_name,
                             should_raise, missing_v2, use_secret, file_name,
                             dockerconfig_contents):
    """Push per-platform flatpak OCI images through TagAndPushPlugin.

    The skopeo invocation is mocked via ``retries.run_cmd`` and the registry
    via a custom ``requests.Session.request`` replacement; the parametrized
    ``should_raise`` / ``missing_v2`` flags cover the failure and
    schema-2-retry paths.
    """
    workflow.user_params['flatpak'] = True
    platforms = ['x86_64', 'ppc64le', 's390x', 'aarch64']
    workflow.data.tag_conf.add_unique_image(ImageName.parse(image_name))
    workflow.build_dir.init_build_dirs(platforms, workflow.source)

    # Optional docker config secret written to a temp dir; its path is fed to
    # the reactor config below.
    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    # Add a mock OCI image to 'flatpak_create_oci' results; this forces the tag_and_push
    # plugin to push with skopeo
    flatpak_create_oci_result: Dict[str, Any] = {}
    # Since we are always mocking the push for now, we can get away with a stub image
    for current_platform in platforms:
        metadata = deepcopy(IMAGE_METADATA_OCI)
        metadata['ref_name'] = f'app/org.gnome.eog/{current_platform}/master'
        flatpak_create_oci_result[current_platform] = metadata

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v1+json',
                  'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.v2+json',
                  'Docker-Content-Digest': DIGEST_V2
              }))
    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              headers={
                  'Content-Type':
                  'application/vnd.docker.distribution.manifest.list.v2+json',
              }))
    # Mock the skopeo invocation: fail with CalledProcessError when the test
    # expects a push failure, otherwise succeed.
    if should_raise:
        (flexmock(retries).should_receive('run_cmd').and_raise(
            subprocess.CalledProcessError(1,
                                          'echo',
                                          output=b'something went wrong')))
    else:
        (flexmock(retries).should_receive('run_cmd').and_return(0))

    manifest_unknown_response = requests.Response()
    (flexmock(manifest_unknown_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    def custom_get(method, url, headers, **kwargs):
        # Route mocked registry requests by URL and Accept header; the
        # ``missing_v2`` flag simulates a registry without schema 2 support.
        if url.startswith(manifest_latest_url):
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested
            if headers[
                    'Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                if missing_v2:
                    return manifest_unknown_response
                else:
                    return manifest_response_v2
            elif headers[
                    'Accept'] == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list
            else:
                return manifest_response_v1

        if url == manifest_url:
            if missing_v2:
                return manifest_unknown_response
            else:
                return manifest_response_v2

    mock_get_retry_session()
    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))
    # Skip real sleeps inside the plugin's retry loop.
    (flexmock(time).should_receive('sleep').and_return(None))

    reactor_config = {
        'registries': [{
            'url': LOCALHOST_REGISTRY,
            'insecure': True,
            'auth': {
                'cfg_path': secret_path
            },
        }]
    }
    runner = (MockEnv(workflow).for_plugin(
        TagAndPushPlugin.key).set_reactor_config(
            reactor_config).set_plugin_result(
                CheckAndSetPlatformsPlugin.key, platforms).set_plugin_result(
                    FlatpakCreateOciPlugin.key,
                    flatpak_create_oci_result).create_runner())
    add_koji_map_in_workflow(workflow, hub_url='', root_url='')

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        runner.run()
        # On success the plugin records repository annotations on the workflow.
        assert workflow.conf.registry
        repos_annotations = get_repositories_annotations(
            workflow.data.tag_conf)
        assert workflow.data.annotations['repositories'] == repos_annotations

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            if missing_v2:
                assert "Retrying push because V2 schema 2" in caplog.text
# Exemple #25 / 0 -- pagination artifact from the code-sharing site this file
# was scraped from; commented out so the module parses.
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator):
    """Run DeleteFromRegistryPlugin and check each saved digest is deleted once.

    Digests reach the plugin either through per-registry push_conf entries
    (worker builds) or through orchestrator build annotations, depending on
    the ``orchestrator`` flag.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
        }]

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE,
                                   buildstep_plugins=buildstep_plugin, )
    workflow.builder = X

    # Translate the requested registries into plugin args, writing a
    # .dockercfg secret for those that need authentication.
    args_registries = {}
    for registry, needs_secret in req_registries.items():
        if not needs_secret:
            args_registries[registry] = {}
            continue
        secret_dir = mkdtemp(dir=str(tmpdir))
        creds = {
            registry: {
                "username": "******", "password": registry
            }
        }
        with open(os.path.join(secret_dir, ".dockercfg"), "w+") as cfg_file:
            cfg_file.write(json.dumps(creds))
            cfg_file.flush()
            args_registries[registry] = {'secret': secret_dir}

    # Feed the saved digests to the workflow, via annotations (orchestrator)
    # or via push_conf registry entries (worker).
    for registry, digests in saved_digests.items():
        if orchestrator:
            for tag, digest in digests.items():
                tag_parts = tag.split(':')
                ann_digests.append({
                    'digest': digest,
                    'tag': tag_parts[1],
                    'repository': tag_parts[0],
                    'registry': registry,
                })
        else:
            docker_registry = DockerRegistry(registry)
            for tag, digest in digests.items():
                docker_registry.digests[tag] = ManifestDigest(v1='not-used', v2=digest)
            workflow.push_conf._registries['docker'].append(docker_registry)

    if orchestrator:
        annotations = {'worker-builds': {'x86_64': {'digests': ann_digests}}}
        workflow.build_result = Y
        workflow.build_result.annotations = annotations

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect exactly one DELETE per unique digest on each requested registry.
    deleted_digests = set()
    for registry, digests in saved_digests.items():
        if registry not in req_registries:
            continue

        auth_type = requests.auth.HTTPBasicAuth if req_registries[registry] else None
        for tag, digest in digests.items():
            if digest in deleted_digests:
                continue
            url = "https://" + registry + "/v2/" + tag.split(":")[0] + "/manifests/" + digest
            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202, ok=True, raise_for_status=lambda: None)))
            deleted_digests.add(digest)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == deleted_digests
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator,
                                     manifest_list_digests):
    """Delete saved digests - including group manifest-list digests - via the plugin.

    Variant of the preceding test that additionally registers group_manifests
    results so manifest-list digests are expected to be deleted as well.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
        }]

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE,
                                   buildstep_plugins=buildstep_plugin, )
    setattr(workflow, 'builder', X)

    # Build plugin args per registry, writing a .dockercfg secret where needed.
    args_registries = {}
    for reg, use_secret in req_registries.items():
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
        else:
            args_registries[reg] = {}

    # Feed saved digests to the workflow, via annotations (orchestrator) or
    # push_conf registry entries (worker).
    for reg, digests in saved_digests.items():
        if orchestrator:
            for tag, dig in digests.items():
                repo = tag.split(':')[0]
                t = tag.split(':')[1]
                ann_digests.append({
                    'digest': dig,
                    'tag': t,
                    'repository': repo,
                    'registry': reg,
                })
        else:
            r = DockerRegistry(reg)
            for tag, dig in digests.items():
                r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
            workflow.push_conf._registries['docker'].append(r)

    group_manifest_digests = {}
    if orchestrator:
        build_annotations = {'digests': ann_digests}
        annotations = {'worker-builds': {'x86_64': build_annotations}}
        setattr(workflow, 'build_result', Y)
        setattr(workflow.build_result, 'annotations', annotations)

        # group_manifest digest should be added only
        # if there are worker builds and images are pushed to one registry
        if len(req_registries) == 1 and len(saved_digests.keys()) == 1 and \
           all(saved_digests.values()):
            workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_list_digests
            for ml_repo, ml_digest in manifest_list_digests.items():
                for reg in req_registries:
                    if reg in saved_digests:
                        group_manifest_digests.setdefault(reg, {})
                        group_manifest_digests[reg] = saved_digests[reg].copy()
                        group_manifest_digests[reg][ml_repo] = ml_digest.default

    # Per-registry digests plus any manifest-list digests merged on top.
    result_digests = saved_digests.copy()
    result_digests.update(group_manifest_digests)

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect exactly one DELETE per unique digest on each requested registry.
    deleted_digests = set()
    for reg, digests in result_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = requests.auth.HTTPBasicAuth if req_registries[reg] else None
            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202, ok=True, raise_for_status=lambda: None)))
            deleted_digests.add(dig)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == deleted_digests
# Exemple #27 / 0 -- pagination artifact from the code-sharing site this file
# was scraped from; commented out so the module parses.
def test_get_manifest_digests_missing(tmpdir, has_content_type_header, has_content_digest,
                                      manifest_type, can_convert_v2_v1):
    """Verify get_manifest_digests() when the registry stores one manifest type.

    The non-fixture parameters presumably come from a pytest.mark.parametrize
    decorator above this view (TODO confirm):
      * has_content_type_header -- mocked registry sets Content-Type
      * has_content_digest -- mocked registry sets Docker-Content-Digest
      * manifest_type -- flavor actually stored ('v1', 'v2', 'oci', 'oci_index')
      * can_convert_v2_v1 -- whether a stored v2 manifest can be served as v1
    """
    kwargs = {}

    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image

    kwargs['registry'] = 'https://example.com'

    expected_url = 'https://example.com/v2/spam/manifests/latest'

    mock_get_retry_session()

    def custom_get(url, headers, **kwargs):
        # Stands in for requests.Session.get: answer each Accept header the
        # way a registry holding only `manifest_type` would.
        assert url == expected_url

        media_type = headers['Accept']
        media_type_prefix = media_type.split('+')[0]

        assert media_type.endswith('+json')

        # Attempt to simulate how a docker registry behaves:
        #  * If the stored digest is v1, return it
        #  * If the stored digest is v2, and v2 is requested, return it
        #  * If the stored digest is v2, and v1 is requested, try
        #    to convert and return v1 or an error.
        if manifest_type == 'v1':
            digest = 'v1-digest'
            media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'v2':
            if media_type_prefix == 'application/vnd.docker.distribution.manifest.v2':
                digest = 'v2-digest'
            else:
                if not can_convert_v2_v1:
                    # Registry cannot down-convert: 400 MANIFEST_INVALID.
                    response_json = {"errors": [{"code": "MANIFEST_INVALID"}]}
                    response = requests.Response()
                    flexmock(response,
                             status_code=400,
                             content=json.dumps(response_json).encode("utf-8"),
                             headers=headers)

                    return response

                digest = 'v1-converted-digest'
                media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'oci':
            if media_type_prefix == 'application/vnd.oci.image.manifest.v1':
                digest = 'oci-digest'
            else:
                # Any other requested type is simply unknown (404).
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)

                return response
        elif manifest_type == 'oci_index':
            if media_type_prefix == 'application/vnd.oci.image.index.v1':
                digest = 'oci-index-digest'
            else:
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)

                return response

        # Successful response: advertise Content-Type / Docker-Content-Digest
        # only when the corresponding test parameter says so.
        headers = {}
        if has_content_type_header:
            headers['Content-Type'] = '{}+jsonish'.format(media_type_prefix)
        if has_content_digest:
            headers['Docker-Content-Digest'] = digest

        if media_type_prefix == 'application/vnd.docker.distribution.manifest.v1':
            response_json = {'schemaVersion': 1}
        else:
            response_json = {'schemaVersion': 2,
                             'mediaType': media_type_prefix + '+json'}

        response = requests.Response()
        flexmock(response,
                 status_code=200,
                 content=json.dumps(response_json).encode("utf-8"),
                 headers=headers)

        return response

    (flexmock(requests.Session)
        .should_receive('get')
        .replace_with(custom_get))

    if manifest_type == 'v1' and not has_content_type_header:
        # v1 manifests don't have a mediaType field, so we can't fall back
        # to looking at the returned manifest to detect the type.
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)
        return
    else:
        actual_digests = get_manifest_digests(**kwargs)

    # Expectation pattern: absent types are None; a present type whose digest
    # value could not be learned (no Docker-Content-Digest) is reported True.
    if manifest_type == 'v1':
        if has_content_digest:
            assert actual_digests.v1 == 'v1-digest'
        else:
            assert actual_digests.v1 is True
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'v2':
        if can_convert_v2_v1:
            if has_content_type_header:
                if has_content_digest:
                    assert actual_digests.v1 == 'v1-converted-digest'
                else:
                    assert actual_digests.v1 is True
            else:  # don't even know the response is v1 without Content-Type
                assert actual_digests.v1 is None
        else:
            assert actual_digests.v1 is None
        if has_content_digest:
            assert actual_digests.v2 == 'v2-digest'
        else:
            assert actual_digests.v2 is True
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        if has_content_digest:
            assert actual_digests.oci == 'oci-digest'
        else:
            assert actual_digests.oci is True
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci_index':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        if has_content_digest:
            assert actual_digests.oci_index == 'oci-index-digest'
        else:
            assert actual_digests.oci_index is True
def test_resolve_module_compose(tmpdir, docker_tasker, compose_ids, modules,
                                signing_intent, signing_intent_source, sigkeys):
    """Run ResolveModuleComposePlugin against mocked ODCS and koji services.

    The non-fixture parameters presumably come from a parametrize decorator
    above this view (TODO confirm): the modules written into container.yaml,
    where the signing intent comes from (container.yaml vs command line) and
    which sigkeys the ODCS compose request is expected to carry.
    """
    secrets_path = os.path.join(str(tmpdir), "secret")
    os.mkdir(secrets_path)
    with open(os.path.join(secrets_path, "token"), "w") as f:
        f.write("green_eggs_and_ham")

    if modules is not None:
        # Build a minimal container.yaml listing the requested modules.
        data = "compose:\n"
        data += "    modules:\n"
        for mod in modules:
            data += "    - {}\n".format(mod)
        if signing_intent_source == 'container_yaml':
            data += '    signing_intent: ' + signing_intent
        tmpdir.join(REPO_CONTAINER_CONFIG).write(data)

    module = None
    if modules:
        module = modules[0]

    workflow = mock_workflow(tmpdir)
    mock_get_retry_session()
    mock_koji_session()

    def handle_composes_post(request):
        # ODCS compose creation: verify auth token, source and sigkeys.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        if isinstance(request.body, six.text_type):
            body = request.body
        else:
            body = request.body.decode()
        body_json = json.loads(body)
        assert body_json['source']['type'] == 'module'
        assert body_json['source']['source'] == module
        assert body_json['source']['sigkeys'] == sigkeys
        assert body_json['arches'] == ['ppc64le', 'x86_64']
        return (200, {}, compose_json(0, 'wait'))

    responses.add_callback(responses.POST, ODCS_URL + '/composes/',
                           content_type='application/json',
                           callback=handle_composes_post)

    state = {'count': 1}

    def handle_composes_get(request):
        # First poll reports the compose still generating; later polls: done.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        if state['count'] == 1:
            response_json = compose_json(1, 'generating')
        else:
            response_json = compose_json(2, 'done')
        state['count'] += 1

        return (200, {}, response_json)

    responses.add_callback(responses.GET, ODCS_URL + '/composes/84',
                           content_type='application/json',
                           callback=handle_composes_get)

    args = {
        'odcs_url': ODCS_URL,
        'odcs_openidc_secret_path': secrets_path,
        'compose_ids': compose_ids
    }

    if signing_intent_source == 'command_line':
        args['signing_intent'] = signing_intent

    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
        ReactorConfig({'version': 1,
                       'odcs': {'api_url': ODCS_URL,
                                'auth': {'openidc_dir': secrets_path},
                                'signing_intents': [
                                    {
                                        'name': 'unsigned',
                                        'keys': [],
                                    },
                                    {
                                        'name': 'release',
                                        'keys': ['R123', 'R234'],
                                    },
                                    {
                                        'name': 'beta',
                                        'keys': ['R123', 'B456', 'B457'],
                                    },
                                ],
                                'default_signing_intent': 'unsigned'},
                       'koji':  {'auth': {},
                                 'hub_url': 'https://koji.example.com/hub'}})

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': ResolveModuleComposePlugin.key,
            'args': args
        }]
    )

    # Missing or empty module configuration must fail with a clear message;
    # otherwise check the resolved compose info.
    if modules is None:
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        assert '"compose" config in container.yaml is required ' in str(exc_info.value)
    elif not modules:
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        assert '"compose" config has no modules' in str(exc_info.value)
    else:
        runner.run()

        compose_info = get_compose_info(workflow)

        assert compose_info.compose_id == 84
        assert compose_info.base_module.name == MODULE_NAME
        assert compose_info.base_module.stream == MODULE_STREAM
        assert compose_info.base_module.version == MODULE_VERSION
        assert compose_info.base_module.mmd.props.summary == 'Eye of GNOME Application Module'
        assert compose_info.base_module.rpms == [
            'eog-0:3.28.3-1.module_2123+73a9ef6f.src.rpm',
            'eog-0:3.28.3-1.module_2123+73a9ef6f.x86_64.rpm',
            'eog-0:3.28.3-1.module_2123+73a9ef6f.ppc64le.rpm',
        ]
def test_delete_from_registry_failures(tmpdir, status_code, reactor_config_map):
    """Check DeleteFromRegistryPlugin behaviour for non-success DELETE codes.

    A single registry holding one saved digest is mocked and its DELETE
    endpoint answers with `status_code` (presumably parametrized above this
    view -- TODO confirm): 202 reports the digest deleted, other recognized
    codes yield an empty result, and an unexpected code such as 520 makes the
    plugin fail the runner.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    req_registries = {DOCKER0_REGISTRY: True}
    saved_digests = {DOCKER0_REGISTRY: {'foo/bar:latest': DIGEST1}}

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    args_registries = {}
    config_map_registries = []
    for reg, use_secret in req_registries.items():
        cm_reg = {'url': reg}
        if use_secret:
            # Write a .dockercfg secret and point both the plugin args and
            # the reactor-config entry at it.
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
                cm_reg['auth'] = {'cfg_path': temp_dir}
        else:
            args_registries[reg] = {}
        # BUG FIX: append inside the loop. Previously this line ran after the
        # loop, recording only the last registry entry (and raising NameError
        # for an empty req_registries).
        config_map_registries.append(cm_reg)

    for reg, digests in saved_digests.items():
        r = DockerRegistry(reg)
        for tag, dig in digests.items():
            r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
        workflow.push_conf._registries['docker'].append(r)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': config_map_registries})

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect one DELETE per unique digest, answered with `status_code`.
    deleted_digests = set()
    for reg, digests in saved_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = HTTPRegistryAuth

            response = requests.Response()
            response.status_code = status_code

            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .and_return(response))

            deleted_digests.add(dig)

    if status_code == 520:
        # BUG FIX: the original asserted on `result` inside this block after
        # runner.run(), but run() raises first, so that assertion could never
        # execute; the raise itself is the whole expectation here.
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        result = runner.run()

        if status_code == requests.codes.ACCEPTED:
            assert result[DeleteFromRegistryPlugin.key] == deleted_digests
        else:
            assert result[DeleteFromRegistryPlugin.key] == set([])
    def workflow(self, build_process_failed=False, registries=None, registry_types=None,
                 platforms=None, platform_descriptors=None, group=True, no_amd64=False,
                 fail=False):
        """Build a mock workflow plus per-registry HTTP expectations.

        :param build_process_failed: exposed on the returned workflow mock
        :param registries: reactor-config registry list; a default single v2
            registry is synthesized when None
        :param registry_types: expected media types for the default registry;
            defaults to all four docker types when both this and `registries`
            are None
        :param platforms: platform names; derived from platform_descriptors
            when None
        :param platform_descriptors: platform -> architecture mapping entries
        :param group: when True, the group-manifests result carries a digest
        :param no_amd64: registry answers only with a manifest list
        :param fail: "bad_results" makes the registry answer with v1 only
        :return: flexmock standing in for a DockerBuildWorkflow
        """
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)

        push_conf = PushConf()

        if platform_descriptors is None:
            platform_descriptors = [
                {'platform': 'x86_64', 'architecture': 'amd64'},
                {'platform': 'ppc64le', 'architecture': 'ppc64le'},
                {'platform': 's390x', 'architecture': 's390x'},
            ]

        if platforms is None:
            platforms = [descriptor['platform'] for descriptor in platform_descriptors]

        if registries is None and registry_types is None:
            registry_types = [MEDIA_TYPE_DOCKER_V1, MEDIA_TYPE_DOCKER_V2_SCHEMA1,
                              MEDIA_TYPE_DOCKER_V2_SCHEMA2, MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]

        if registries is None:
            registries = [{
                'url': 'https://container-registry.example.com/v2',
                'version': 'v2',
                'insecure': True,
                'expected_media_types': registry_types
            }]
        conf = {
            ReactorConfigKeys.VERSION_KEY: 1,
            'registries': registries,
        }
        if platform_descriptors:
            conf['platform_descriptors'] = platform_descriptors

        plugin_workspace = {
            ReactorConfigPlugin.key: {
                WORKSPACE_CONF_KEY: ReactorConfig(conf)
            }
        }

        flexmock(HTTPRegistryAuth).should_receive('__new__').and_return(None)
        mock_auth = None
        for registry in registries:
            def get_manifest(request):
                # Echo back the first requested media type as Content-Type.
                media_types = request.headers.get('Accept', '').split(',')
                content_type = media_types[0]

                return (200, {'Content-Type': content_type}, '{}')

            # BUG FIX: the pattern was previously built as
            # "r'" + registry['url'] + ".*/manifests/.*'", which embedded a
            # literal r' prefix and a trailing quote into the compiled regex,
            # so it could never match a real manifest URL. The raw-string
            # marker is source syntax, not pattern content.
            url_regex = registry['url'] + ".*/manifests/.*"
            url = re.compile(url_regex)
            responses.add_callback(responses.GET, url, callback=get_manifest)

            expected_types = registry.get('expected_media_types', [])
            if fail == "bad_results":
                response_types = [MEDIA_TYPE_DOCKER_V1]
            elif no_amd64:
                response_types = [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
            else:
                response_types = expected_types

            reguri = RegistryURI(registry['url']).docker_uri
            if re.match('http(s)?://', reguri):
                urlbase = reguri
            else:
                urlbase = 'https://{0}'.format(reguri)

            actual_v2_url = urlbase + "/v2/foo/manifests/unique-tag"
            actual_v1_url = urlbase + "/v1/repositories/foo/tags/unique-tag"

            # Map each Accept header to its canned response, or to the empty
            # response when that media type should be absent.
            v1_response = self.config_response_none
            v2_response = self.config_response_none
            v2_list_response = self.config_response_none
            if MEDIA_TYPE_DOCKER_V2_SCHEMA1 in response_types:
                v1_response = self.config_response_config_v1
            if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in response_types:
                v2_response = self.config_response_config_v2
            if MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in response_types:
                v2_list_response = self.config_response_config_v2_list
            v2_header_v1 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA1}
            v2_header_v2 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA2}
            manifest_header = {'Accept': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST}

            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers=v2_header_v1,
                           auth=mock_auth, verify=False)
                .and_return(v1_response))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers=v2_header_v2,
                           auth=mock_auth, verify=False)
                .and_return(v2_response))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers={'Accept': MEDIA_TYPE_OCI_V1},
                           auth=mock_auth, verify=False)
                .and_return(self.config_response_none))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers={'Accept': MEDIA_TYPE_OCI_V1_INDEX},
                           auth=mock_auth, verify=False)
                .and_return(self.config_response_none))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers=manifest_header,
                           auth=mock_auth, verify=False)
                .and_return(v2_list_response))

            if MEDIA_TYPE_DOCKER_V1 in response_types:
                (flexmock(requests.Session)
                    .should_receive('get')
                    .with_args(actual_v1_url, headers={'Accept': MEDIA_TYPE_DOCKER_V1},
                               auth=mock_auth, verify=False)
                    .and_return(self.config_response_v1))

        digests = {'digest': None} if group else {}
        prebuild_results = {PLUGIN_CHECK_AND_SET_PLATFORMS_KEY: platforms}
        postbuild_results = {PLUGIN_GROUP_MANIFESTS_KEY: digests}

        mock_get_retry_session()
        builder = flexmock()
        setattr(builder, 'image_id', 'sha256:(old)')
        return flexmock(tag_conf=tag_conf,
                        push_conf=push_conf,
                        builder=builder,
                        build_process_failed=build_process_failed,
                        plugin_workspace=plugin_workspace,
                        prebuild_results=prebuild_results,
                        postbuild_results=postbuild_results)
# Example #31
def test_resolve_module_compose(tmpdir, docker_tasker, specify_version):
    """Run ResolveModuleComposePlugin against mocked ODCS and PDC endpoints.

    With specify_version the plugin must request the full N:S:V module
    source; otherwise only name:stream is sent.  Either way the resolved
    compose info must describe the expected module build.
    """
    secrets_path = os.path.join(str(tmpdir), "secret")
    os.mkdir(secrets_path)
    with open(os.path.join(secrets_path, "token"), "w") as f:
        f.write("green_eggs_and_ham")

    workflow = mock_workflow(tmpdir)
    mock_get_retry_session()

    def on_compose_create(request):
        # Compose creation must carry the OpenIDC token written above.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        raw = request.body if isinstance(request.body, six.text_type) else request.body.decode()
        payload = json.loads(raw)
        assert payload['source']['type'] == 'module'
        expected_source = MODULE_NSV if specify_version else MODULE_NS
        assert payload['source']['source'] == expected_source
        return (200, {}, compose_json(0, 'wait'))

    responses.add_callback(responses.POST, ODCS_URL + '/composes/',
                           content_type='application/json',
                           callback=on_compose_create)

    poll = {'count': 1}

    def on_compose_poll(request):
        # First poll: still generating; every later poll: done.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        still_generating = poll['count'] == 1
        poll['count'] += 1
        body = compose_json(1, 'generating') if still_generating else compose_json(2, 'done')
        return (200, {}, body)

    responses.add_callback(responses.GET, ODCS_URL + '/composes/84',
                           content_type='application/json',
                           callback=on_compose_poll)

    def on_pdc_query(request):
        # PDC lookup must query exactly the expected module coordinates.
        query = parse_qs(urlparse(request.url).query)

        assert query['variant_id'] == [MODULE_NAME]
        assert query['variant_version'] == [MODULE_STREAM]
        assert query['variant_release'] == [MODULE_VERSION]

        return (200, {}, json.dumps(LATEST_VERSION_JSON))

    responses.add_callback(responses.GET, PDC_URL + '/unreleasedvariants/',
                           content_type='application/json',
                           callback=on_pdc_query)

    plugin_args = {
        'module_name': 'eog',
        'module_stream': 'f26',
        'base_image': "registry.fedoraproject.org/fedora:latest",
        'odcs_url': ODCS_URL,
        'odcs_openidc_secret_path': secrets_path,
        'pdc_url': PDC_URL,
    }
    if specify_version:
        plugin_args['module_version'] = MODULE_VERSION

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': ResolveModuleComposePlugin.key, 'args': plugin_args}],
    )

    runner.run()

    compose_info = get_compose_info(workflow)

    assert compose_info.compose_id == 84
    assert compose_info.base_module.name == MODULE_NAME
    assert compose_info.base_module.stream == MODULE_STREAM
    assert compose_info.base_module.version == MODULE_VERSION
    assert compose_info.base_module.mmd.summary == 'Eye of GNOME Application Module'
# Example #32
def test_resolve_module_compose(
        tmpdir,
        docker_tasker,
        compose_ids,
        modules,  # noqa
        reactor_config_map):
    """Run ResolveModuleComposePlugin against mocked ODCS and PDC services.

    `compose_ids`, `modules` and `reactor_config_map` are presumably
    parametrized above this view (TODO confirm).  Missing or empty module
    configuration in container.yaml must fail the plugin with a descriptive
    error; otherwise the resolved compose info is checked.
    """
    secrets_path = os.path.join(str(tmpdir), "secret")
    os.mkdir(secrets_path)
    with open(os.path.join(secrets_path, "token"), "w") as f:
        f.write("green_eggs_and_ham")

    if modules is not None:
        # Build a minimal container.yaml listing the requested modules.
        data = "compose:\n"
        data += "    modules:\n"
        for mod in modules:
            data += "    - {}\n".format(mod)
        tmpdir.join(REPO_CONTAINER_CONFIG).write(data)

    module = None
    if modules:
        module = modules[0]

    workflow = mock_workflow(tmpdir)
    mock_get_retry_session()

    def handle_composes_post(request):
        # ODCS compose creation: verify auth token and requested source.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        if isinstance(request.body, six.text_type):
            body = request.body
        else:
            body = request.body.decode()
        body_json = json.loads(body)
        assert body_json['source']['type'] == 'module'
        assert body_json['source']['source'] == module
        return (200, {}, compose_json(0, 'wait'))

    responses.add_callback(responses.POST,
                           ODCS_URL + '/composes/',
                           content_type='application/json',
                           callback=handle_composes_post)

    state = {'count': 1}

    def handle_composes_get(request):
        # First poll: still generating; later polls: done.
        assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'

        if state['count'] == 1:
            response_json = compose_json(1, 'generating')
        else:
            response_json = compose_json(2, 'done')
        state['count'] += 1

        return (200, {}, response_json)

    responses.add_callback(responses.GET,
                           ODCS_URL + '/composes/84',
                           content_type='application/json',
                           callback=handle_composes_get)

    def handle_unreleasedvariants(request):
        # PDC lookup must query the expected module coordinates.
        query = parse_qs(urlparse(request.url).query)

        assert query['variant_id'] == [MODULE_NAME]
        assert query['variant_version'] == [MODULE_STREAM]
        assert query['variant_release'] == [MODULE_VERSION]

        return (200, {}, json.dumps(LATEST_VERSION_JSON))

    responses.add_callback(responses.GET,
                           PDC_URL + '/unreleasedvariants/',
                           content_type='application/json',
                           callback=handle_unreleasedvariants)

    args = {
        'odcs_url': ODCS_URL,
        'odcs_openidc_secret_path': secrets_path,
        'pdc_url': PDC_URL,
        'compose_ids': compose_ids
    }

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1,
                           'odcs': {'api_url': ODCS_URL,
                                    'auth': {'openidc_dir': secrets_path}},
                           'pdc': {'api_url': PDC_URL}})

    runner = PreBuildPluginsRunner(docker_tasker, workflow,
                                   [{
                                       'name': ResolveModuleComposePlugin.key,
                                       'args': args
                                   }])

    if modules is None:
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        assert '"compose" config in container.yaml is required ' in str(
            exc_info.value)
    elif not modules:
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        assert '"compose" config has no modules' in str(exc_info.value)
    else:
        runner.run()

        compose_info = get_compose_info(workflow)

        assert compose_info.compose_id == 84
        assert compose_info.base_module.name == MODULE_NAME
        assert compose_info.base_module.stream == MODULE_STREAM
        assert compose_info.base_module.version == MODULE_VERSION
        assert compose_info.base_module.mmd.props.summary == 'Eye of GNOME Application Module'
def test_tag_and_push_plugin_oci(workflow, monkeypatch, is_source_build, v2s2,
                                 unsupported_image_type, use_secret, fail_push,
                                 caplog):
    """Exercise TagAndPushPlugin's skopeo push path for OCI/source builds.

    Parametrized (presumably via a decorator above this view -- TODO confirm)
    over: source-container vs flatpak/OCI build, v2s2 availability on the
    registry, an unsupported image type in the flatpak results, docker-secret
    usage, and a failing skopeo push.
    """
    sources_koji_id = '123456'
    sources_koji_target = 'source_target'
    sources_koji_repo = 'namespace/container_build_image'
    sources_koji_pull_spec = 'registry_url/{}@sha256:987654321'.format(
        sources_koji_repo)
    sources_random_number = 1234567890
    sources_timestamp = datetime(year=2019, month=12, day=12)
    current_platform = platform.processor() or 'x86_64'
    # Tag format for source containers: target-random-timestamp-arch.
    sources_tagname = '{}-{}-{}-{}'.format(
        sources_koji_target, sources_random_number,
        sources_timestamp.strftime('%Y%m%d%H%M%S'), current_platform)

    secret_path = None
    if use_secret:
        # Write a .dockercfg for the local registry; referenced from the
        # reactor config below and checked in the skopeo arguments.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    reactor_config = {
        'registries': [
            {
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {
                    'cfg_path': secret_path
                },
            },
        ],
    }
    env = (MockEnv(workflow).for_plugin(TagAndPushPlugin.key).set_plugin_args({
        'koji_target':
        sources_koji_target
    }).set_reactor_config(reactor_config))

    add_koji_map_in_workflow(workflow, hub_url='', root_url='')

    wf_data = workflow.data
    if is_source_build:
        # Source build: feed the plugin the results FetchSources and
        # SourceContainer plugins would have produced.
        platforms = ['x86_64']
        workflow.build_dir.init_build_dirs(platforms, workflow.source)

        env.set_plugin_result(
            FetchSourcesPlugin.key,
            {'sources_for_koji_build_id': sources_koji_id},
        )
        env.set_plugin_result(
            SourceContainerPlugin.key,
            {'image_metadata': deepcopy(IMAGE_METADATA_DOCKER_ARCHIVE)},
        )
    else:
        platforms = ['x86_64', 'ppc64le', 's390x', 'aarch64']
        wf_data.tag_conf.add_unique_image(f'{LOCALHOST_REGISTRY}/{TEST_IMAGE}')
        workflow.user_params['flatpak'] = True
        workflow.build_dir.init_build_dirs(platforms, workflow.source)
        env.set_plugin_result(CheckAndSetPlatformsPlugin.key, platforms)

    class MockedClientSession(object):
        # Minimal koji client: only getBuild/krb_login are exercised here.
        def __init__(self, hub, opts=None):
            pass

        def getBuild(self, build_info):
            if is_source_build:
                assert build_info == sources_koji_id
                return {
                    'extra': {
                        'image': {
                            'index': {
                                'pull': [sources_koji_pull_spec]
                            }
                        }
                    }
                }

            else:
                return None

        def krb_login(self, *args, **kwargs):
            return True

    session = MockedClientSession('')
    flexmock(koji, ClientSession=session)
    # Pin randomness and time so sources_tagname above is deterministic.
    flexmock(random).should_receive('randrange').and_return(
        sources_random_number)
    flexmock(osbs.utils).should_receive('utcnow').and_return(sources_timestamp)

    if is_source_build:
        media_type = 'application/vnd.docker.distribution.manifest.v2+json'
    else:
        media_type = 'application/vnd.oci.image.manifest.v1+json'
    ref_name = "app/org.gnome.eog/x86_64/master"

    if not is_source_build:
        # Add a mock OCI image to 'flatpak_create_oci' results; this forces the tag_and_push
        # plugin to push with skopeo
        flatpak_create_oci_result: Dict[str, Any] = {}
        # No need to create image archives, just need to mock its metadata
        for current_platform in platforms:
            if unsupported_image_type:
                image_type = 'unsupported_type'
            else:
                image_type = IMAGE_TYPE_OCI
            metadata = deepcopy(IMAGE_METADATA_OCI)
            metadata['ref_name'] = ref_name.replace('x86_64', current_platform)
            metadata['type'] = image_type
            flatpak_create_oci_result[current_platform] = metadata
        env.set_plugin_result(FlatpakCreateOciPlugin.key,
                              flatpak_create_oci_result)

    # Mock the call to skopeo

    def check_run_skopeo(args):
        # Validate the skopeo command line the plugin builds; raising here
        # simulates a failed push.
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1,
                                                cmd=args,
                                                output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--authfile=' + os.path.join(secret_path,
                                                '.dockercfg') in args
        assert '--dest-tls-verify=false' in args
        if is_source_build:
            assert args[
                -2] == 'docker-archive://' + IMAGE_METADATA_DOCKER_ARCHIVE[
                    'path']
            output_image = 'docker://{}/{}:{}'.format(LOCALHOST_REGISTRY,
                                                      sources_koji_repo,
                                                      sources_tagname)
            assert args[-1] == output_image
        else:
            current_platform = args[-1].split('-')[-1]
            assert args[-2] == ('oci:' + IMAGE_METADATA_OCI['path'] + ':' +
                                ref_name.replace('x86_64', current_platform))
            assert args[-1].startswith('docker://' + LOCALHOST_REGISTRY +
                                       f'/{TEST_IMAGE_NAME}')
            assert '--format=v2s2' in args
        return ''

    (flexmock(retries).should_receive("run_cmd").replace_with(check_run_skopeo)
     )

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    manifest_source_tag_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, sources_koji_repo, sources_tagname)
    manifest_source_digest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, sources_koji_repo, DIGEST_OCI)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json={},
              headers={
                  'Content-Type': media_type,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    def custom_get(method, url, headers, **kwargs):
        # Serve the pushed manifest by tag or digest; anything the registry
        # "doesn't hold" gets the 404 MANIFEST_UNKNOWN response.
        if url.startswith(
                manifest_latest_url) or url == manifest_source_tag_url:
            if headers['Accept'] == media_type:
                if is_source_build and not v2s2:
                    return manifest_unacceptable_response
                else:
                    return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url or url == manifest_source_digest_url:
            return manifest_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if fail_push or unsupported_image_type or (is_source_build and not v2s2):
        with pytest.raises(PluginFailedException):
            env.create_runner().run()

        if not fail_push and is_source_build and not v2s2:
            assert "Unable to fetch v2 schema 2 digest for" in caplog.text

        if unsupported_image_type and not fail_push:
            assert (
                'Attempt to push unsupported image type unsupported_type with skopeo'
                in caplog.text)
    else:
        env.create_runner().run()

        assert workflow.conf.registry
        repos_annotations = get_repositories_annotations(wf_data.tag_conf)
        assert wf_data.annotations['repositories'] == repos_annotations
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, caplog, image_name, logs, should_raise, has_config, missing_v2,
        use_secret, reactor_config_map, file_name, dockerconfig_contents):
    """Run TagAndPushPlugin against a mocked docker registry and verify digests.

    The registry is simulated by replacing ``requests.Session.request`` with
    ``custom_get`` below; canned v1 / v2 / manifest-list responses are served
    depending on the request's Accept header and the ``missing_v2`` parameter.
    """
    if MOCK:
        mock_docker()
        flexmock(docker.APIClient, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker(retry_times=0)
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write the registry auth config into a temp dir acting as the secret.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, file_name), "w+") as dockerconfig:
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    manifest_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    config_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    # To test out the lack of a config, we really should be testing what happens
    # when we only return a v1 response and not a v2 response at all; what are
    # doing now is simply testing that if we return a None instead of json for the
    # config blob, that None is stored rather than json
    if not has_config:
        config_json = None

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    # We return our v2 manifest in the mocked v1 response as a placeholder - only the
    # digest matters anyways
    manifest_response_v1 = requests.Response()
    (flexmock(manifest_response_v1,
              status_code=200,
              json=manifest_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                'Docker-Content-Digest': DIGEST_V1
              }))

    manifest_response_v2 = requests.Response()
    (flexmock(manifest_response_v2,
              status_code=200,
              json=manifest_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                'Docker-Content-Digest': DIGEST_V2
              }))
    manifest_response_v2_list = requests.Response()
    (flexmock(manifest_response_v2_list,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.list.v2+json',
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, status_code=200, json=config_json))

    manifest_unknown_response = requests.Response()
    (flexmock(manifest_unknown_response,
              status_code=404,
              json={
                  "errors": [{"code": "MANIFEST_UNKNOWN"}]
              }))

    def custom_get(method, url, headers, **kwargs):
        """Dispatch canned registry responses by URL and Accept header."""
        if url == manifest_latest_url:
            # For a manifest stored as v2 or v1, the docker registry defaults to
            # returning a v1 manifest if a v2 manifest is not explicitly requested.
            # BUGFIX: the manifest-list check previously sat after an
            # unconditional return and was unreachable dead code, so a
            # manifest-list request was incorrectly answered with the v1
            # response; test it first so manifest_response_v2_list is served.
            accept = headers['Accept']
            if accept == 'application/vnd.docker.distribution.manifest.list.v2+json':
                return manifest_response_v2_list
            if accept == 'application/vnd.docker.distribution.manifest.v2+json':
                if missing_v2:
                    return manifest_unknown_response
                return manifest_response_v2
            return manifest_response_v1

        if url == manifest_url:
            if missing_v2:
                return manifest_unknown_response
            return manifest_response_v2

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()
    (flexmock(requests.Session)
        .should_receive('request')
        .replace_with(custom_get))
    # Avoid real waiting between push retries.
    (flexmock(time)
        .should_receive('sleep')
        .and_return(None))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path}}],
                           'group_manifests': missing_v2})

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            if missing_v2:
                expected_digest = ManifestDigest(v1=DIGEST_V1, v2=None, oci=None)
                if reactor_config_map:
                    assert "Retrying push because V2 schema 2" in caplog.text
                else:
                    assert "Retrying push because V2 schema 2" not in caplog.text
            else:
                expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2, oci=None)
                assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == \
                    expected_digest.v2

            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == \
                expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].oci == \
                expected_digest.oci

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
def test_delete_from_registry_failures(tmpdir, status_code):
    """Check DeleteFromRegistryPlugin behaviour for various DELETE status codes.

    A 520 response must make the plugin (and hence the runner) fail; 202
    (ACCEPTED) reports the deleted digests; any other code reports none.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    req_registries = {DOCKER0_REGISTRY: True}
    saved_digests = {DOCKER0_REGISTRY: {'foo/bar:latest': DIGEST1}}

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    # Build per-registry plugin args, writing a .dockercfg secret for
    # registries that require authentication.
    args_registries = {}
    for reg, use_secret in req_registries.items():
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
        else:
            args_registries[reg] = {}

    # Record the digests the workflow supposedly pushed.
    for reg, digests in saved_digests.items():
        r = DockerRegistry(reg)
        for tag, dig in digests.items():
            r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
        workflow.push_conf._registries['docker'].append(r)

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Mock one DELETE per unique digest, answering with the parametrized code.
    deleted_digests = set()
    for reg, digests in saved_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = requests.auth.HTTPBasicAuth if req_registries[reg] else None

            response = requests.Response()
            response.status_code = status_code

            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .and_return(response))

            deleted_digests.add(dig)

    if status_code == 520:
        # BUGFIX: runner.run() raises here, so the old result assertion that
        # followed it inside this `with` block was dead code that never
        # executed; only the raised exception is meaningful.
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        result = runner.run()

        if status_code == requests.codes.ACCEPTED:
            assert result[DeleteFromRegistryPlugin.key] == deleted_digests
        else:
            assert result[DeleteFromRegistryPlugin.key] == set([])
def test_get_manifest_digests_missing(tmpdir, has_content_type_header,
                                      has_content_digest, digest_is_v1,
                                      can_convert_v2_v1):
    """Exercise get_manifest_digests() against a registry holding either a v1
    or a v2 manifest, with and without Content-Type / Docker-Content-Digest
    headers in the responses.
    """
    kwargs = {}

    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image

    kwargs['registry'] = 'https://example.com'

    expected_url = 'https://example.com/v2/spam/manifests/latest'

    mock_get_retry_session()

    def make_response(status, payload, response_headers):
        # Build a canned requests.Response carrying the given JSON body.
        resp = requests.Response()
        flexmock(resp,
                 status_code=status,
                 content=json.dumps(payload).encode("utf-8"),
                 headers=response_headers)
        return resp

    def custom_get(url, headers, **kwargs):
        assert url == expected_url

        accept = headers['Accept']
        accept_prefix = accept.split('+')[0]

        assert accept.endswith('v2+json') or accept.endswith('v1+json')

        # Simulate docker-registry behaviour:
        #  * a stored v1 digest is always served as v1
        #  * a stored v2 digest is served as v2 when v2 is requested
        #  * a stored v2 digest requested as v1 is converted, or an error
        #    is returned when conversion is not possible
        if digest_is_v1:
            served_digest = 'v1-digest'
            accept_prefix = accept_prefix.replace('v2', 'v1', 1)
        elif accept.endswith('v2+json'):
            served_digest = 'v2-digest'
        elif not can_convert_v2_v1:
            return make_response(400,
                                 {"errors": [{"code": "MANIFEST_INVALID"}]},
                                 headers)
        else:
            served_digest = 'v1-converted-digest'

        response_headers = {}
        if has_content_type_header:
            response_headers['Content-Type'] = '{}+jsonish'.format(accept_prefix)
        if has_content_digest:
            response_headers['Docker-Content-Digest'] = served_digest

        if accept_prefix.endswith('v2'):
            payload = {
                'schemaVersion': 2,
                'mediaType':
                'application/vnd.docker.distribution.manifest.v2+json'
            }
        else:
            payload = {'schemaVersion': 1}

        return make_response(200, payload, response_headers)

    (flexmock(requests.Session).should_receive('get').replace_with(custom_get))

    if digest_is_v1 and not has_content_type_header:
        # v1 manifests carry no mediaType field, so without a Content-Type
        # header the response type cannot be determined at all.
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)
        return

    actual_digests = get_manifest_digests(**kwargs)

    if digest_is_v1:
        assert actual_digests.v2 is None
        if has_content_digest:
            assert actual_digests.v1 == 'v1-digest'
        else:
            assert actual_digests.v1 is True
    else:
        if not can_convert_v2_v1:
            assert actual_digests.v1 is None
        elif not has_content_type_header:
            # can't even tell the converted response is v1 without Content-Type
            assert actual_digests.v1 is None
        elif has_content_digest:
            assert actual_digests.v1 == 'v1-converted-digest'
        else:
            assert actual_digests.v1 is True
        if has_content_digest:
            assert actual_digests.v2 == 'v2-digest'
        else:
            assert actual_digests.v2 is True
def test_delete_from_registry_plugin(saved_digests, req_registries, tmpdir, orchestrator,
                                     manifest_list_digests, reactor_config_map):
    """Verify DeleteFromRegistryPlugin deletes each pushed digest exactly once.

    Digests come either from push_conf (worker build) or from orchestrator
    build annotations; when all images went to a single registry, the group
    manifest-list digests are expected to be deleted as well.
    """
    if MOCK:
        mock_docker()
        mock_get_retry_session()

    buildstep_plugin = None
    if orchestrator:
        ann_digests = []
        buildstep_plugin = [{
            'name': OrchestrateBuildPlugin.key,
            'args': {
                'platforms': "x86_64"
            },
        }]

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE,
                                   buildstep_plugins=buildstep_plugin, )
    setattr(workflow, 'builder', X)

    # Per-registry plugin args; registries flagged use_secret get a
    # .dockercfg written into a temp dir that acts as the secret mount.
    args_registries = {}
    config_map_regiestries = []
    for reg, use_secret in req_registries.items():
        cm_reg = {'url': reg}
        if use_secret:
            temp_dir = mkdtemp(dir=str(tmpdir))
            with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
                dockerconfig_contents = {
                    reg: {
                        "username": "******", "password": reg
                    }
                }
                dockerconfig.write(json.dumps(dockerconfig_contents))
                dockerconfig.flush()
                args_registries[reg] = {'secret': temp_dir}
                cm_reg['auth'] = {'cfg_path': temp_dir}
        else:
            args_registries[reg] = {}
        config_map_regiestries.append(cm_reg)

    # Record the pushed digests: as build annotations for an orchestrator
    # build, or directly in push_conf for a worker build.
    for reg, digests in saved_digests.items():
        if orchestrator:
            for tag, dig in digests.items():
                repo = tag.split(':')[0]
                t = tag.split(':')[1]
                ann_digests.append({
                    'digest': dig,
                    'tag': t,
                    'repository': repo,
                    'registry': reg,
                })
        else:
            r = DockerRegistry(reg)
            for tag, dig in digests.items():
                r.digests[tag] = ManifestDigest(v1='not-used', v2=dig)
            workflow.push_conf._registries['docker'].append(r)

    group_manifest_digests = {}
    if orchestrator:
        build_annotations = {'digests': ann_digests}
        annotations = {'worker-builds': {'x86_64': build_annotations}}
        setattr(workflow, 'build_result', Y)
        setattr(workflow.build_result, 'annotations', annotations)

        # group_manifest digest should be added only
        # if there are worker builds and images are pushed to one registry
        if len(req_registries) == 1 and len(saved_digests.keys()) == 1 and \
           all(saved_digests.values()):
            workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_list_digests
            for ml_repo, ml_digest in manifest_list_digests.items():
                for reg in req_registries:
                    if reg in saved_digests:
                        group_manifest_digests.setdefault(reg, {})
                        group_manifest_digests[reg] = saved_digests[reg].copy()
                        group_manifest_digests[reg][ml_repo] = ml_digest.default

    # Full set of digests the plugin is expected to delete per registry.
    result_digests = saved_digests.copy()
    result_digests.update(group_manifest_digests)

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': config_map_regiestries})

    runner = ExitPluginsRunner(
        tasker,
        workflow,
        [{
            'name': DeleteFromRegistryPlugin.key,
            'args': {
                'registries': args_registries
            },
        }]
    )

    # Expect exactly one DELETE (.once()) per unique digest on each
    # requested registry; duplicates across tags are deleted only once.
    deleted_digests = set()
    for reg, digests in result_digests.items():
        if reg not in req_registries:
            continue

        for tag, dig in digests.items():
            if dig in deleted_digests:
                continue
            url = "https://" + reg + "/v2/" + tag.split(":")[0] + "/manifests/" + dig
            auth_type = HTTPRegistryAuth
            (flexmock(requests.Session)
                .should_receive('delete')
                .with_args(url, verify=bool, auth=auth_type)
                .once()
                .and_return(flexmock(status_code=202, ok=True, raise_for_status=lambda: None)))
            deleted_digests.add(dig)

    result = runner.run()
    assert result[DeleteFromRegistryPlugin.key] == deleted_digests
# Example #38
def test_get_manifest_digests_missing(tmpdir, has_content_type_header,
                                      has_content_digest, manifest_type,
                                      can_convert_v2_v1):
    """Check get_manifest_digests() for v1, v2, OCI and OCI-index storage.

    Only the digest field matching the stored manifest type should be set;
    when the registry omits Docker-Content-Digest the matching field is
    True, and all other fields stay None.
    """
    kwargs = {}

    image = ImageName.parse('example.com/spam:latest')
    kwargs['image'] = image

    kwargs['registry'] = 'https://example.com'

    expected_url = 'https://example.com/v2/spam/manifests/latest'

    mock_get_retry_session()

    def custom_get(url, headers, **kwargs):
        # Serve a canned response according to the stored manifest_type and
        # the Accept header of the incoming request.
        assert url == expected_url

        media_type = headers['Accept']
        media_type_prefix = media_type.split('+')[0]

        assert media_type.endswith('+json')

        # Attempt to simulate how a docker registry behaves:
        #  * If the stored digest is v1, return it
        #  * If the stored digest is v2, and v2 is requested, return it
        #  * If the stored digest is v2, and v1 is requested, try
        #    to convert and return v1 or an error.
        if manifest_type == 'v1':
            digest = 'v1-digest'
            media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'v2':
            if media_type_prefix == 'application/vnd.docker.distribution.manifest.v2':
                digest = 'v2-digest'
            else:
                if not can_convert_v2_v1:
                    # Registry cannot down-convert: 400 MANIFEST_INVALID.
                    response_json = {"errors": [{"code": "MANIFEST_INVALID"}]}
                    response = requests.Response()
                    flexmock(response,
                             status_code=400,
                             content=json.dumps(response_json).encode("utf-8"),
                             headers=headers)

                    return response

                digest = 'v1-converted-digest'
                media_type_prefix = 'application/vnd.docker.distribution.manifest.v1'
        elif manifest_type == 'oci':
            if media_type_prefix == 'application/vnd.oci.image.manifest.v1':
                digest = 'oci-digest'
            else:
                # OCI content is only served for the exact OCI media type;
                # anything else gets 404 MANIFEST_UNKNOWN.
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)

                return response
        elif manifest_type == 'oci_index':
            if media_type_prefix == 'application/vnd.oci.image.index.v1':
                digest = 'oci-index-digest'
            else:
                # Same 404 behaviour for a stored OCI index.
                headers = {}
                response_json = {"errors": [{"code": "MANIFEST_UNKNOWN"}]}
                response = requests.Response()
                flexmock(response,
                         status_code=requests.codes.not_found,
                         content=json.dumps(response_json).encode("utf-8"),
                         headers=headers)

                return response

        # Response headers are controlled by the test parameters so the
        # fallback paths in get_manifest_digests() can be exercised.
        headers = {}
        if has_content_type_header:
            headers['Content-Type'] = '{}+jsonish'.format(media_type_prefix)
        if has_content_digest:
            headers['Docker-Content-Digest'] = digest

        # v1 manifests have no mediaType field; every other type embeds one.
        if media_type_prefix == 'application/vnd.docker.distribution.manifest.v1':
            response_json = {'schemaVersion': 1}
        else:
            response_json = {
                'schemaVersion': 2,
                'mediaType': media_type_prefix + '+json'
            }

        response = requests.Response()
        flexmock(response,
                 status_code=200,
                 content=json.dumps(response_json).encode("utf-8"),
                 headers=headers)

        return response

    (flexmock(requests.Session).should_receive('get').replace_with(custom_get))

    actual_digests = get_manifest_digests(**kwargs)
    # Exactly one digest field matches the stored type; the value is the
    # served digest when Docker-Content-Digest was present, else True.
    if manifest_type == 'v1':
        if has_content_digest:
            assert actual_digests.v1 == 'v1-digest'
        else:
            assert actual_digests.v1 is True
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'v2':
        if can_convert_v2_v1:
            if has_content_digest:
                assert actual_digests.v1 == 'v1-converted-digest'
            else:
                assert actual_digests.v1 is True
        else:
            assert actual_digests.v1 is None
        if has_content_digest:
            assert actual_digests.v2 == 'v2-digest'
        else:
            assert actual_digests.v2 is True
        assert actual_digests.oci is None
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        if has_content_digest:
            assert actual_digests.oci == 'oci-digest'
        else:
            assert actual_digests.oci is True
        assert actual_digests.oci_index is None
    elif manifest_type == 'oci_index':
        assert actual_digests.v1 is None
        assert actual_digests.v2 is None
        assert actual_digests.oci is None
        if has_content_digest:
            assert actual_digests.oci_index == 'oci-index-digest'
        else:
            assert actual_digests.oci_index is True