Ejemplo n.º 1
0
    def test_get_methods(self, fallback, method):
        """
        Exercise every get_* accessor of the pre_reactor_config module.

        Three fallback modes are parametrized:
        - False: full config is stored in the workflow workspace and the
          accessor is called without a fallback argument
        - True: accessor is called with an explicit fallback value taken
          from REACTOR_CONFIG_MAP
        - any other value (e.g. None): no config and no fallback, so the
          accessor is expected to raise KeyError
        """
        tasker, workflow = self.prepare()
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        if fallback is False:
            # store the full config in the workspace; accessor reads it directly
            workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = \
                 ReactorConfig(yaml.safe_load(REACTOR_CONFIG_MAP))
        else:
            if fallback:
                fall_source = ReactorConfig(yaml.safe_load(REACTOR_CONFIG_MAP))
            else:
                # minimal config: accessor will find nothing and must raise
                fall_source = ReactorConfig(yaml.safe_load("version: 1"))

        # resolve e.g. get_registries from the module under test
        method_name = 'get_' + method
        real_method = getattr(atomic_reactor.plugins.pre_reactor_config,
                              method_name)

        if fallback is True:
            output = real_method(workflow, fall_source.conf[method])
        else:
            if fallback is False:
                output = real_method(workflow)
            else:
                with pytest.raises(KeyError):
                    real_method(workflow)
                return

        expected = yaml.safe_load(REACTOR_CONFIG_MAP)[method]

        if method == 'registries':
            # get_registries returns a dict keyed by docker URI rather than
            # the raw list from the config; rebuild that mapping here
            registries_cm = {}
            for registry in expected:
                reguri = RegistryURI(registry.get('url'))
                regdict = {}
                regdict['version'] = reguri.version
                if registry.get('auth'):
                    regdict['secret'] = registry['auth']['cfg_path']
                regdict['insecure'] = registry.get('insecure', False)
                regdict['expected_media_types'] = registry.get(
                    'expected_media_types', [])

                registries_cm[reguri.docker_uri] = regdict

            if fallback:
                output = real_method(workflow, registries_cm)
            assert output == registries_cm
            return

        if method == 'source_registry':
            expect = {
                'uri': RegistryURI(expected['url']),
                'insecure': expected.get('insecure', False)
            }
            if fallback:
                output = real_method(workflow, expect)
            # RegistryURI does not implement __eq__, so compare uri strings
            assert output['insecure'] == expect['insecure']
            assert output['uri'].uri == expect['uri'].uri
            return

        assert output == expected
Ejemplo n.º 2
0
    def remove_pulp_plugins(self):
        """
        Remove pulp-related plugins that cannot run with the current config.

        Plugins are dropped depending on whether a pulp registry and a koji
        hub are configured, and on which content versions / registry
        versions are enabled.
        """
        # NOTE: the previously nested has_plugin() helper and the
        # has_pulp_pull flag were dead code and have been removed.
        phases = ('postbuild_plugins', 'exit_plugins')
        pulp_registry = self.get_value('pulp')
        koji_hub = self.get_value('koji', {}).get('hub_url')

        # pulp_pull requires both pulp and koji; drop it from every phase
        # when either is missing
        for phase in phases:
            if not (pulp_registry and koji_hub):
                self.remove_plugin(phase, PLUGIN_PULP_PULL_KEY,
                                   'no pulp or koji available')

        if not pulp_registry:
            self.remove_plugin('postbuild_plugins', PLUGIN_PULP_PUSH_KEY,
                               'no pulp available')
            self.remove_plugin('postbuild_plugins', PLUGIN_PULP_SYNC_KEY,
                               'no pulp available')
            self.remove_plugin('postbuild_plugins', PLUGIN_PULP_TAG_KEY,
                               'no pulp available')
            self.remove_plugin('exit_plugins', PLUGIN_DELETE_FROM_REG_KEY,
                               'no pulp available')
            self.remove_plugin('exit_plugins', PLUGIN_PULP_PUBLISH_KEY,
                               'no pulp available')
        else:
            docker_registry = None
            all_registries = self.get_value('registries', {})

            versions = self.get_value('content_versions', ['v1', 'v2'])

            for registry in all_registries:
                reguri = RegistryURI(registry.get('url'))
                if reguri.version == 'v2':
                    # First specified v2 registry is the one we'll tell pulp
                    # to sync from. Keep the http prefix -- pulp wants it.
                    docker_registry = registry
                    break

            if 'v1' not in versions:
                self.remove_plugin('postbuild_plugins', PLUGIN_PULP_PUSH_KEY,
                                   'v1 content not enabled')

            if docker_registry:
                # docker_registry truthy implies the loop above hit 'break',
                # so reguri still refers to that same registry here
                source_registry_str = self.get_value('source_registry',
                                                     {}).get('url')
                perform_delete = (
                    source_registry_str is None
                    or RegistryURI(source_registry_str).uri != reguri.uri)
                if not perform_delete:
                    self.remove_plugin('exit_plugins',
                                       PLUGIN_DELETE_FROM_REG_KEY,
                                       'no delete needed')
            else:
                self.remove_plugin('postbuild_plugins', PLUGIN_PULP_SYNC_KEY,
                                   'no V2 pulp available')
                self.remove_plugin('exit_plugins', PLUGIN_DELETE_FROM_REG_KEY,
                                   'no V2 pulp available')
Ejemplo n.º 3
0
    def test_get_methods(self, parse_from, method, tmpdir, caplog,
                         monkeypatch):
        """
        Check Configuration properties against REACTOR_CONFIG_MAP when the
        config is parsed from raw data, an environment variable or a file.

        The log-message check at the end is skipped for the 'registry' and
        'source_registry' methods because those branches return early.
        """
        if parse_from == 'raw':
            conf = Configuration(raw_config=yaml.safe_load(REACTOR_CONFIG_MAP))
        elif parse_from == 'env':
            monkeypatch.setenv(REACTOR_CONFIG_ENV_NAME,
                               dedent(REACTOR_CONFIG_MAP))
            conf = Configuration(env_name=REACTOR_CONFIG_ENV_NAME)
        elif parse_from == 'file':
            filename = str(tmpdir.join('config.yaml'))
            with open(filename, 'w') as fp:
                fp.write(dedent(REACTOR_CONFIG_MAP))
            conf = Configuration(config_path=filename)

        output = getattr(conf, method)
        reactor_config_map = yaml.safe_load(REACTOR_CONFIG_MAP)

        # the 'registry' special-case previously duplicated this lookup
        expected = reactor_config_map[method]

        if method == 'registry':
            # since there will only be exactly one registry
            registry = expected
            reguri = RegistryURI(registry.get('url'))
            regdict = {'uri': reguri.docker_uri, 'version': reguri.version}
            regdict['secret'] = reactor_config_map['registries_cfg_path']
            regdict['insecure'] = registry.get('insecure', False)
            regdict['expected_media_types'] = registry.get(
                'expected_media_types', [])

            assert output == regdict
            return

        if method == 'source_registry':
            expect = {
                'uri': RegistryURI(expected['url']),
                'insecure': expected.get('insecure', False)
            }
            # RegistryURI does not implement __eq__; compare uri strings
            assert output['insecure'] == expect['insecure']
            assert output['uri'].uri == expect['uri'].uri
            return

        assert output == expected
        # monkeypatch.setenv undoes itself; this pop just makes sure the env
        # var never leaks into later tests on unexpected paths
        os.environ.pop(REACTOR_CONFIG_ENV_NAME, None)

        if parse_from == 'raw':
            log_msg = "reading config from raw_config kwarg"
        elif parse_from == 'env':
            log_msg = f"reading config from {REACTOR_CONFIG_ENV_NAME} env variable"
        elif parse_from == 'file':
            # BUGFIX: the f-string had no placeholder; the logged message
            # contains the actual config file path
            log_msg = f"reading config from {filename}"
        assert log_msg in caplog.text
Ejemplo n.º 4
0
    def __init__(self,
                 tasker,
                 workflow,
                 parent_registry=None,
                 parent_registry_insecure=False,
                 check_platforms=False):
        """
        constructor

        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param parent_registry: registry to enforce pulling from
        :param parent_registry_insecure: allow connecting to the registry over plain http
        :param check_platforms: validate parent images provide all platforms expected for the build
        """
        super(PullBaseImagePlugin, self).__init__(tasker, workflow)

        self.check_platforms = check_platforms

        # the kwargs only act as a fallback; reactor config takes precedence
        fallback = {
            'uri': RegistryURI(parent_registry) if parent_registry else None,
            'insecure': parent_registry_insecure
        }
        source_registry = get_source_registry(self.workflow, fallback)

        registry_uri = source_registry.get('uri')
        if registry_uri:
            self.parent_registry = registry_uri.docker_uri
            self.parent_registry_insecure = source_registry['insecure']
        else:
            self.parent_registry = None
            self.parent_registry_insecure = False
Ejemplo n.º 5
0
def get_source_registry(workflow, fallback=NO_FALLBACK):
    """
    Return the configured source registry as a dict with 'uri' and
    'insecure' keys.

    :param workflow: DockerBuildWorkflow instance holding the reactor config
    :param fallback: value to return when 'source_registry' is not
                     configured; with the NO_FALLBACK sentinel the KeyError
                     propagates instead
    """
    try:
        registry = get_config(workflow).conf['source_registry']
    except KeyError:
        if fallback == NO_FALLBACK:
            raise
        return fallback

    insecure = registry.get('insecure', False)
    return {'uri': RegistryURI(registry['url']), 'insecure': insecure}
Ejemplo n.º 6
0
    def registry(self):
        """
        Describe the first configured registry.

        Returns a dict with uri, version, insecure and expected_media_types,
        plus 'secret' when the registry entry carries auth config.
        """
        first_registry = self._get_value(ReactorConfigKeys.REGISTRIES_KEY)[0]

        uri = RegistryURI(first_registry.get('url'))
        info = {
            'uri': uri.docker_uri,
            'version': uri.version,
            'insecure': first_registry.get('insecure', False),
            'expected_media_types': first_registry.get('expected_media_types',
                                                       []),
        }
        auth = first_registry.get('auth')
        if auth:
            info['secret'] = auth['cfg_path']

        return info
Ejemplo n.º 7
0
    def registry(self):
        """
        Describe the configured registry when it speaks the v2 API.

        NOTE(review): for a non-v2 registry this returns None (the original
        fell through implicitly) -- confirm callers expect that.
        """
        registry = self._get_value(ReactorConfigKeys.REGISTRY_KEY)

        reguri = RegistryURI(registry.get('url'))
        if reguri.version != 'v2':
            return None

        info = {
            'version': reguri.version,
            'uri': reguri.docker_uri,
            'insecure': registry.get('insecure', False),
            'expected_media_types': registry.get('expected_media_types',
                                                 []),
        }
        cfg_path = self.registries_cfg_path
        if cfg_path is not None:
            info['secret'] = cfg_path
        return info
Ejemplo n.º 8
0
    def docker_registry(self):
        """
        Return the first configured registry speaking the v2 API as a dict
        with 'url', 'insecure' and (when auth is configured) 'secret'.

        :raises RuntimeError: when no v2 registry is configured
        """
        for entry in self._get_value(ReactorConfigKeys.REGISTRIES_KEY):
            uri = RegistryURI(entry.get('url'))
            if uri.version != 'v2':
                continue

            result = {
                'url': uri.uri,
                'insecure': entry.get('insecure', False)
            }
            auth = entry.get('auth')
            if auth:
                result['secret'] = auth['cfg_path']
            return result

        raise RuntimeError("Expected V2 registry but none in REACTOR_CONFIG")
Ejemplo n.º 9
0
def get_source_registry(workflow, fallback=NO_FALLBACK):
    """
    Return the configured source registry as a dict with 'uri', 'insecure'
    and 'dockercfg_path' keys (dockercfg_path is None without auth config).

    :param workflow: DockerBuildWorkflow instance holding the reactor config
    :param fallback: value returned when 'source_registry' is missing; with
                     the NO_FALLBACK sentinel the KeyError propagates
    """
    try:
        registry = get_config(workflow).conf['source_registry']
    except KeyError:
        if fallback == NO_FALLBACK:
            raise
        return fallback

    auth = registry.get('auth')
    return {
        'uri': RegistryURI(registry['url']),
        'insecure': registry.get('insecure', False),
        'dockercfg_path': auth['cfg_path'] if auth else None,
    }
Ejemplo n.º 10
0
    def __init__(self,
                 tasker,
                 workflow,
                 parent_registry=None,
                 parent_registry_insecure=False,
                 check_platforms=False,
                 inspect_only=False,
                 parent_images_digests=None):
        """
        constructor

        :param tasker: ContainerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param parent_registry: registry to enforce pulling from
        :param parent_registry_insecure: allow connecting to the registry over plain http
        :param check_platforms: validate parent images provide all platforms expected for the build
        :param inspect_only: bool, if set to True, base images will not be pulled
        :param parent_images_digests: dict, parent images manifest digests
        """
        # call parent constructor
        super(PullBaseImagePlugin, self).__init__(tasker, workflow)

        self.check_platforms = check_platforms
        self.inspect_only = inspect_only
        # reactor config takes precedence; the kwargs only form the fallback
        source_registry = get_source_registry(
            self.workflow, {
                'uri':
                RegistryURI(parent_registry) if parent_registry else None,
                'insecure': parent_registry_insecure
            })

        if source_registry.get('uri'):
            self.parent_registry = source_registry['uri'].docker_uri
            self.parent_registry_insecure = source_registry['insecure']
            # dockercfg_path may be absent when the fallback dict was used
            self.parent_registry_dockercfg_path = source_registry.get(
                'dockercfg_path', None)
        else:
            self.parent_registry = None
            self.parent_registry_insecure = False
            self.parent_registry_dockercfg_path = None
        if parent_images_digests:
            # pre-seed the builder's digest metadata with caller-provided values
            metadata = self.workflow.builder.parent_images_digests
            metadata.update(parent_images_digests)

        # cache for manifest list lookups; presumably filled by later plugin
        # steps -- not visible from this block
        self.manifest_list_cache = {}
Ejemplo n.º 11
0
def get_registries(workflow, fallback=NO_FALLBACK):
    """
    Return the configured registries as a mapping of docker URI to a dict
    with 'version', 'insecure' and (when auth is configured) 'secret'.

    :param workflow: DockerBuildWorkflow instance holding the reactor config
    :param fallback: value returned when 'registries' is missing; with the
                     NO_FALLBACK sentinel the KeyError propagates
    """
    try:
        configured = get_config(workflow).conf['registries']
    except KeyError:
        if fallback == NO_FALLBACK:
            raise
        return fallback

    result = {}
    for entry in configured:
        uri = RegistryURI(entry.get('url'))
        info = {
            'version': uri.version,
            'insecure': entry.get('insecure', False),
        }
        auth = entry.get('auth')
        if auth:
            info['secret'] = auth['cfg_path']
        result[uri.docker_uri] = info

    return result
Ejemplo n.º 12
0
def get_docker_registry(workflow, fallback=NO_FALLBACK):
    """
    Return the first configured registry speaking the v2 API as a dict with
    'url', 'insecure' and (when auth is configured) 'secret'.

    :param workflow: DockerBuildWorkflow instance holding the reactor config
    :param fallback: value returned when 'registries' is missing; with the
                     NO_FALLBACK sentinel the KeyError propagates
    :raises RuntimeError: when no v2 registry is configured
    """
    try:
        configured = get_config(workflow).conf['registries']
    except KeyError:
        if fallback == NO_FALLBACK:
            raise
        return fallback

    for entry in configured:
        uri = RegistryURI(entry.get('url'))
        if uri.version != 'v2':
            continue

        result = {
            'url': uri.uri,
            'insecure': entry.get('insecure', False)
        }
        auth = entry.get('auth')
        if auth:
            result['secret'] = auth['cfg_path']
        return result

    raise RuntimeError("Expected V2 registry but none in REACTOR_CONFIG")
Ejemplo n.º 13
0
    def set_data_from_reactor_config(self, reactor_config_data):
        """
        Sets data from reactor config.

        :param reactor_config_data: dict, parsed reactor config (may be
                                    empty or None)
        :raises OsbsValidationException: flatpak build without a base image
        :raises RuntimeError: 'source_registry' missing from reactor config
        """
        super(BuildRequestV2,
              self).set_data_from_reactor_config(reactor_config_data)

        if not reactor_config_data:
            if self.user_params.flatpak and not self.user_params.base_image:
                # BUGFIX: error message previously read "must be be set"
                raise OsbsValidationException(
                    "Flatpak base_image must be set in container.yaml or reactor config"
                )
        else:
            self._set_flatpak(reactor_config_data)

        if not self.source_registry:
            raise RuntimeError(
                'mandatory "source_registry" is not defined in reactor_config')
        source_registry = RegistryURI(self.source_registry['url']).docker_uri
        self._update_trigger_imagestreamtag(source_registry)
        self._update_imagestream_name(source_registry)
    def adjust_config_kwargs(self):
        """
        Overwrite self.config_kwargs values with values resolved from the
        reactor config, using the current kwargs as fallbacks.
        """
        koji_fallback = {
            'hub_url': self.config_kwargs.get('koji_hub'),
            'root_url': self.config_kwargs.get('koji_root')
        }
        koji_map = get_koji(self.workflow, koji_fallback)
        self.config_kwargs['koji_hub'] = koji_map['hub_url']
        self.config_kwargs['koji_root'] = koji_map['root_url']

        odcs_fallback = {
            'api_url': self.config_kwargs.get('odcs_url'),
            'insecure': self.config_kwargs.get('odcs_insecure')
        }
        odcs_map = get_odcs(self.workflow, odcs_fallback)
        self.config_kwargs['odcs_url'] = odcs_map['api_url']
        self.config_kwargs['odcs_insecure'] = odcs_map.get('insecure', False)

        smtp_fallback = {
            'host': self.config_kwargs.get('smtp_host'),
            'from_address': self.config_kwargs.get('smtp_from'),
            'domain': self.config_kwargs.get('smtp_email_domain'),
            'send_to_submitter': self.config_kwargs.get('smtp_to_submitter'),
            'send_to_pkg_owner': self.config_kwargs.get('smtp_to_pkgowner')
        }
        # BUGFIX: these two lookups were swapped, feeding the error-address
        # list into 'additional_addresses' and vice versa
        additional_addresses = self.config_kwargs.get('smtp_additional_addresses')
        error_addresses = self.config_kwargs.get('smtp_error_addresses')

        smtp_fallback['additional_addresses'] =\
            additional_addresses.split(',') if additional_addresses else ()
        smtp_fallback['error_addresses'] = error_addresses.split(
            ',') if error_addresses else ()

        smtp_map = get_smtp(self.workflow, smtp_fallback)
        self.config_kwargs['smtp_additional_addresses'] =\
            ','.join(smtp_map.get('additional_addresses', ()))
        self.config_kwargs['smtp_email_domain'] = smtp_map.get('domain')
        self.config_kwargs['smtp_error_addresses'] = ','.join(
            smtp_map.get('error_addresses', ()))
        self.config_kwargs['smtp_from'] = smtp_map['from_address']
        self.config_kwargs['smtp_host'] = smtp_map['host']
        self.config_kwargs['smtp_to_pkgowner'] = smtp_map.get(
            'send_to_pkg_owner', False)
        self.config_kwargs['smtp_to_submitter'] = smtp_map.get(
            'send_to_submitter', False)

        source_reg = self.config_kwargs.get('source_registry_uri')
        # renamed from 'souce_registry' (typo)
        source_registry = get_source_registry(
            self.workflow,
            {'uri': RegistryURI(source_reg) if source_reg else None})['uri']
        self.config_kwargs['source_registry_uri'] = \
            source_registry.uri if source_registry else None

        artifacts = self.config_kwargs.get('artifacts_allowed_domains')
        self.config_kwargs['artifacts_allowed_domains'] =\
            ','.join(get_artifacts_allowed_domains(
                self.workflow, artifacts.split(',') if artifacts else ()))

        # fallback format: "l1:l2, l3:l4" -> [['l1', 'l2'], ['l3', 'l4']]
        equal_labels_fallback = []
        equal_labels_str = self.config_kwargs.get('equal_labels')

        if equal_labels_str:
            label_groups = [x.strip() for x in equal_labels_str.split(',')]

            for label_group in label_groups:
                equal_labels_fallback.append(
                    [label.strip() for label in label_group.split(':')])

        equal_labels = get_image_equal_labels(self.workflow,
                                              equal_labels_fallback)
        if equal_labels:
            # serialize back to the "l1:l2,l3:l4" string form
            equal_labels_sets = []
            for equal_set in equal_labels:
                equal_labels_sets.append(':'.join(equal_set))
            self.config_kwargs['equal_labels'] = ','.join(equal_labels_sets)

        self.config_kwargs['prefer_schema1_digest'] =\
            get_prefer_schema1_digest(self.workflow,
                                      self.config_kwargs.get('prefer_schema1_digest'))

        registry_api_ver = self.config_kwargs.get('registry_api_versions')
        self.config_kwargs['registry_api_versions'] =\
            ','.join(get_content_versions(self.workflow,
                                          registry_api_ver.split(',') if registry_api_ver else ()))

        self.config_kwargs['yum_proxy'] =\
            get_yum_proxy(self.workflow, self.config_kwargs.get('yum_proxy'))

        self.config_kwargs['sources_command'] =\
            get_sources_command(self.workflow, self.config_kwargs.get('sources_command'))
Ejemplo n.º 15
0
 def _as_source_registry(self, registry):
     """Convert a raw registry config entry into a source-registry dict
     with 'uri', 'insecure' and 'dockercfg_path' keys."""
     auth = registry.get('auth', {})
     return {
         'uri': RegistryURI(registry['url']),
         'insecure': registry.get('insecure', False),
         'dockercfg_path': auth.get('cfg_path', None),
     }
Ejemplo n.º 16
0
class TestConfiguration(object):
    @pytest.mark.parametrize(('config', 'exc'), [
        ("""\
registries:
- url: https://container-registry.example.com/v2
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
         """, None),
        ("""\
registries:
- url: https://container-registry.example.com/v2
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
- url: https://another-container-registry.example.com/
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/another-registry-dockercfg
         """, None),
        ("""\
registries:
- url: https://old-container-registry.example.com/v1
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/v1-registry-dockercfg
         """, OsbsValidationException),
        ("""\
registries:
- url: https://wrong-container-registry.example.com/v3
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/wrong-registry-dockercfg
         """, RuntimeError),
    ])
    def test_get_docker_registry(self, config, exc):
        required_config = """\
version: 1
koji:
  hub_url: /
  root_url: ''
  auth: {}
openshift:
  url: openshift_url
source_registry:
  url: source_registry.com
"""
        config += "\n" + required_config
        config_json = read_yaml(config, 'schemas/config.json')

        expected = {
            'url': 'https://container-registry.example.com',
            'insecure': False,
            'secret': '/var/run/secrets/atomic-reactor/v2-registry-dockercfg'
        }
        conf = Configuration(raw_config=config_json)

        if exc is None:
            docker_registry = conf.docker_registry
            assert docker_registry == expected
        else:
            with pytest.raises(exc):
                getattr(conf, 'docker_registry')

    @pytest.mark.parametrize(('config', 'expected'), [
        ("""\
pull_registries: []
         """, []),
        ("""\
pull_registries:
- url: registry.io
         """, [
            {
                "uri": RegistryURI("registry.io"),
                "insecure": False,
                "dockercfg_path": None
            },
        ]),
        ("""\
pull_registries:
- url: https://registry.io
         """, [
            {
                "uri": RegistryURI("https://registry.io"),
                "insecure": False,
                "dockercfg_path": None
            },
        ]),
        ("""\
pull_registries:
- url: registry.io
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
         """, [
            {
                "uri":
                RegistryURI("registry.io"),
                "insecure":
                False,
                "dockercfg_path":
                "/var/run/secrets/atomic-reactor/v2-registry-dockercfg"
            },
        ]),
        ("""\
pull_registries:
- url: registry.io
  insecure: true
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
         """, [
            {
                "uri":
                RegistryURI("registry.io"),
                "insecure":
                True,
                "dockercfg_path":
                "/var/run/secrets/atomic-reactor/v2-registry-dockercfg"
            },
        ]),
        ("""\
pull_registries:
- url: registry.io
  insecure: true
  auth:
    cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
- url: registry.org
         """, [
            {
                "uri":
                RegistryURI("registry.io"),
                "insecure":
                True,
                "dockercfg_path":
                "/var/run/secrets/atomic-reactor/v2-registry-dockercfg"
            },
            {
                "uri": RegistryURI("registry.org"),
                "insecure": False,
                "dockercfg_path": None
            },
        ]),
    ])
    def test_get_pull_registries(self, config, expected):
        """pull_registries parses each configured entry into a dict with
        uri, insecure and dockercfg_path keys, defaults applied."""
        config += "\n" + REQUIRED_CONFIG

        config_json = read_yaml(config, 'schemas/config.json')
        conf = Configuration(raw_config=config_json)

        pull_registries = conf.pull_registries

        # RegistryURI does not implement equality, check URI as string
        # NOTE(review): this mutates the parametrized 'expected' objects in
        # place; harmless for one run but worth confirming for test reruns
        for reg in pull_registries + expected:
            reg['uri'] = reg['uri'].uri

        assert pull_registries == expected

    @pytest.mark.parametrize(
        ('config', 'expected_slots_dir', 'expected_enabled_hosts'), [
            ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    x86_64:
      remote-host1.x86_64:
        enabled: true
        auth: foo
        username: bar
        slots: 1
        socket_path: /user/foo/podman.sock
      remote-host2.x86_64:
        enabled: false
        auth: foo
        username: bar
        slots: 2
        socket_path: /user/foo/podman.sock
    ppc64le:
      remote-host3.ppc64le:
        enabled: true
        auth: foo
        username: bar
        slots: 3
        socket_path: /user/foo/podman.sock
         """, 'path/foo', {
                'x86_64': ['remote-host1.x86_64'],
                'ppc64le': ['remote-host3.ppc64le']
            }),
        ])
    def test_get_remote_hosts(self, config, expected_slots_dir,
                              expected_enabled_hosts):
        config += "\n" + REQUIRED_CONFIG
        config_json = read_yaml(config, 'schemas/config.json')

        conf = Configuration(raw_config=config_json)

        remote_hosts = conf.remote_hosts
        assert expected_slots_dir == remote_hosts['slots_dir']

        pools = remote_hosts['pools']
        assert len(pools), 'Remote hosts do not have 2 architectures'
        assert len(
            pools['x86_64']) == 2, '2 entries expected for x86_64 architecture'
        assert sorted(pools['x86_64']) == sorted(
            ['remote-host1.x86_64', 'remote-host2.x86_64'])

        assert len(
            pools['ppc64le']) == 1, '1 entry expected for ppc64le architecture'

        host1_x86_64 = pools['x86_64']['remote-host1.x86_64']
        assert host1_x86_64['auth'] == 'foo', 'Unexpected SSH key path'
        assert host1_x86_64[
            'socket_path'] == '/user/foo/podman.sock', 'Unexpected socket path'

        host2_x86_64 = pools['x86_64']['remote-host2.x86_64']
        assert host2_x86_64['username'] == 'bar', 'Unexpected user name'
        host3_ppc64le = pools['ppc64le']['remote-host3.ppc64le']
        assert host3_ppc64le['slots'] == 3, 'Unexpected number of slots'

        for arch in ['x86_64', 'ppc64le']:
            enabled_hosts = [
                host for host, items in pools[arch].items() if items['enabled']
            ]
            assert enabled_hosts == expected_enabled_hosts[arch]

    @pytest.mark.parametrize('config, error', [
        ("""\
remote_hosts: []
         """, "is not of type {!r}".format("object")),
        ("""\
remote_hosts:
  slots_dir: path/foo
         """, "{!r} is a required property".format("pools")),
        ("""\
remote_hosts:
  pools:
    x86_64:
      remote-host1.x86_64:
        enabled: true
        auth: foo
        username: bar
        slots: 1
        socket_path: /user/foo/podman.sock
         """, "{!r} is a required property".format("slots_dir")),
        ("""\
remote_hosts:
  pools:
    amd-64:
      remote-host1:
        enabled: true
        auth: foo
        username: bar
        slots: 1
        socket_path: /user/foo/podman.sock
         """, "{!r} does not match any of the regexes".format("amd-64")),
        ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    s390x:
      remote-host1:
        auth: foo
        username: bar
        slots: 1
        socket_path: /user/foo/podman.sock
         """, "{!r} is a required property".format("enabled")),
        ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    s390x:
      remote-host1.s390x:
        enabled: true
        username: bar
        slots: 1
        socket_path: /user/foo/podman.sock
         """, "{!r} is a required property".format("auth")),
        ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    s390x:
      remote-host1.s390x:
        enabled: true
        auth: foo
        slots: 1
        socket_path: /user/foo/podman.sock
         """, "{!r} is a required property".format("username")),
        ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    s390x:
      remote-host1.s390x:
        enabled: true
        auth: foo
        username: bar
        slots: 1
         """, "{!r} is a required property".format("socket_path")),
        ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    s390x:
      remote-host1.s390x:
        enabled: true
        auth: foo
        username: bar
        socket_path: /user/foo/podman.sock
         """, "{!r} is a required property".format("slots")),
        ("""\
remote_hosts:
  slots_dir: path/foo
  pools:
    aarch64:
      remote-host1.@aarch64@@:
        enabled: true
        auth: foo
        username: bar
        socket_path: /user/foo/podman.sock
         """, "{!r} does not match any of the regexes".format(
            "remote-host1.@aarch64@@")),
    ])
    def test_get_remote_hosts_schema_validation(self, config, error):
        """Each malformed remote_hosts config must fail JSON-schema
        validation with the expected error fragment in the message."""
        config += "\n" + REQUIRED_CONFIG
        with pytest.raises(OsbsValidationException) as exc_info:
            read_yaml(config, 'schemas/config.json')
        assert error in str(exc_info.value)

    @pytest.mark.parametrize('config, error', [
        ("""\
pull_registries: {}
         """, "is not of type {!r}".format("array")),
        ("""\
pull_registries:
- insecure: false
         """, "{!r} is a required property".format("url")),
        ("""\
pull_registries:
- url: registry.io
  auth: {}
         """, "{!r} is a required property".format("cfg_path")),
    ])
    def test_get_pull_registries_schema_validation(self, config, error):
        """Each malformed pull_registries config must fail JSON-schema
        validation with the expected error fragment in the message."""
        config += "\n" + REQUIRED_CONFIG
        with pytest.raises(OsbsValidationException) as exc_info:
            read_yaml(config, 'schemas/config.json')
        assert error in str(exc_info.value)

    def test_filename(self, tmpdir):
        """Configuration loads from a config file path without raising."""
        config_path = os.path.join(str(tmpdir), 'config.yaml')
        with open(config_path, 'w') as config_file:
            config_file.write(dedent(REQUIRED_CONFIG))

        Configuration(config_path=config_path)

    def test_no_schema_resource(self, tmpdir, caplog):
        """When the packaged JSON schema cannot be read, loading the config
        must fail and the failure must be logged as an error."""
        class FakeProvider(object):
            def get_resource_stream(self, pkg, rsc):
                raise IOError

        # pkg_resources.resource_stream() cannot be mocked directly
        # Instead mock the module-level function it calls.
        (flexmock(pkg_resources).should_receive('get_provider').and_return(
            FakeProvider()))

        # an empty config file suffices; the failure happens at schema load
        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w'):
            pass

        with caplog.at_level(logging.ERROR), pytest.raises(Exception):
            Configuration(config_path=filename)

        captured_errs = [x.message for x in caplog.records]
        assert "unable to extract JSON schema, cannot validate" in captured_errs

    @pytest.mark.parametrize(
        'schema',
        [
            # Invalid JSON
            '{',

            # Invalid schema
            '{"properties": {"any": null}}',
        ])
    def test_invalid_schema_resource(self, tmpdir, caplog, schema):
        """An unparsable or invalid JSON schema must make config loading
        fail and log a 'cannot validate' error."""
        class FakeProvider(object):
            def get_resource_stream(self, pkg, rsc):
                # BUGFIX: BytesIO requires bytes, but the parametrized
                # schemas are str -- encode before wrapping
                return io.BufferedReader(io.BytesIO(schema.encode()))

        # pkg_resources.resource_stream() cannot be mocked directly
        # Instead mock the module-level function it calls.
        (flexmock(pkg_resources).should_receive('get_provider').and_return(
            FakeProvider()))

        # an empty config file suffices; the failure happens at schema load
        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w'):
            pass

        with caplog.at_level(logging.ERROR), pytest.raises(Exception):
            Configuration(config_path=filename)

        captured_errs = [x.message for x in caplog.records]
        assert any("cannot validate" in x for x in captured_errs)

    @pytest.mark.parametrize(('config', 'errors'), [
        ("""\
clusters:
  foo:
  bar: 1
  plat/form:
  - name: foo
    max_concurrent_builds: 1
        """, [
            "validation error: .clusters.foo: "
            "validating 'type' has failed "
            "(None is not of type %r)" % u'array',
            "validation error: .clusters.bar: "
            "validating 'type' has failed "
            "(1 is not of type %r)" % u'array',
            re.compile("validation error: .clusters: "
                       "validating 'additionalProperties' has failed"),
        ]),
        ("""\
clusters:
  foo:
  - name: 1
    max_concurrent_builds: 1
  - name: blah
    max_concurrent_builds: one
  - name: "2"  # quoting prevents error
    max_concurrent_builds: 2
  - name: negative
    max_concurrent_builds: -1
        """, [
            "validation error: .clusters.foo[0].name: "
            "validating 'type' has failed "
            "(1 is not of type %r)" % u'string',
            "validation error: .clusters.foo[1].max_concurrent_builds: "
            "validating 'type' has failed "
            "('one' is not of type %r)" % u'integer',
            re.compile(
                r"validation error: \.clusters\.foo\[3\]\.max_concurrent_builds: "
                r"validating 'minimum' has failed "
                r"\(-1(\.0)? is less than the minimum of 0\)"),
        ]),
        ("""\
clusters:
  foo:
  - name: blah
    max_concurrent_builds: 1
    enabled: never
        """, [
            "validation error: .clusters.foo[0].enabled: "
            "validating 'type' has failed "
            "('never' is not of type %r)" % u'boolean',
        ]),
        ("""\
clusters:
  foo:
  # missing name
  - nam: bar
    max_concurrent_builds: 1
  # missing max_concurrent_builds
  - name: baz
    max_concurrrent_builds: 2
  - name: bar
    max_concurrent_builds: 4
    extra: false
        """, [
            "validation error: .clusters.foo[0]: validating 'required' has failed "
            "(%r is a required property)" % u'name',
            "validation error: .clusters.foo[1]: validating 'required' has failed "
            "(%r is a required property)" % u'max_concurrent_builds',
            "validation error: .clusters.foo[2]: validating 'additionalProperties' has failed "
            "(Additional properties are not allowed ('extra' was unexpected))",
        ])
    ])
    def test_bad_cluster_config(self, tmpdir, caplog, config, errors):
        """Invalid cluster definitions must raise OsbsValidationException and
        log each expected validation error.

        Each expected error is either an exact string or a compiled regex
        (for messages whose wording varies across validator versions).
        """
        config += "\n" + REQUIRED_CONFIG
        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w') as fp:
            fp.write(dedent(config))

        with caplog.at_level(
                logging.DEBUG,
                logger='osbs'), pytest.raises(OsbsValidationException):
            Configuration(config_path=filename)

        captured_errs = [x.message for x in caplog.records]
        for error in errors:
            # Strings have no .match attribute, so the AttributeError from
            # building the filter distinguishes the two expectation kinds.
            try:
                # Match regexp
                assert any(filter(error.match, captured_errs))
            except AttributeError:
                # String comparison
                assert error in captured_errs

    def test_bad_version(self, tmpdir):
        """An unsupported config version (2) must be rejected with ValueError."""
        config_file = os.path.join(str(tmpdir), 'config.yaml')
        bad_version_config = dedent("""\
                version: 2
                koji:
                  hub_url: /
                  root_url: ''
                  auth: {}
                openshift:
                  url: openshift_url
                source_registry:
                  url: source_registry.com
                registries:
                  - url: registry_url
            """)
        with open(config_file, 'w') as conf_fp:
            conf_fp.write(bad_version_config)

        with pytest.raises(ValueError):
            Configuration(config_path=config_file)

    @pytest.mark.parametrize(
        ('config', 'clusters', 'defined_platforms'),
        [
            # Default config
            ("", [], []),

            # Unknown key
            ("""\
special: foo
        """, [], []),
            ("""\
clusters:
  all_disabled:
  - name: foo
    max_concurrent_builds: 2
    enabled: false
  platform:
  - name: one
    max_concurrent_builds: 4
  - name: two
    max_concurrent_builds: 8
    enabled: true
  - name: three
    max_concurrent_builds: 16
    enabled: false
        """, [
                ('one', 4),
                ('two', 8),
            ], ['all_disabled', 'platform']),
        ])
    def test_good_cluster_config(self, tmpdir, config, clusters,
                                 defined_platforms):
        """Valid cluster config must parse; only enabled clusters (enabled
        defaults to true) are returned for a platform, while a platform
        counts as 'defined' even if all of its clusters are disabled."""
        config += "\n" + REQUIRED_CONFIG

        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w') as fp:
            fp.write(dedent(config))
        conf = Configuration(config_path=filename)

        # 'three' (enabled: false) must be filtered out of this set.
        enabled = conf.get_enabled_clusters_for_platform('platform')
        assert {(x.name, x.max_concurrent_builds)
                for x in enabled} == set(clusters)

        for platform in defined_platforms:
            assert conf.cluster_defined_for_platform(platform)

    @pytest.mark.parametrize('default', (
        'release',
        'beta',
        'unsigned',
    ))
    def test_odcs_config(self, tmpdir, default):
        """ODCS signing intents get restrictiveness from their list position
        (unsigned < beta < release) and can be looked up by name or keys."""
        config = """\
odcs:
  signing_intents:
  - name: release
    keys: [R123, R234]
  - name: beta
    keys: [R123, B456, B457]
  - name: unsigned
    keys: []
  default_signing_intent: {default}
  api_url: http://odcs.example.com
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
""".format(default=default)

        config += "\n" + REQUIRED_CONFIG
        filename = str(tmpdir.join('config.yaml'))
        with open(filename, 'w') as fp:
            fp.write(dedent(config))

        conf = Configuration(config_path=filename)

        odcs_config = conf.odcs_config

        assert odcs_config.default_signing_intent == default

        # Restrictiveness is the index in the least-restrictive-first order.
        unsigned_intent = {
            'name': 'unsigned',
            'keys': [],
            'restrictiveness': 0
        }
        beta_intent = {
            'name': 'beta',
            'keys': ['R123', 'B456', 'B457'],
            'restrictiveness': 1
        }
        release_intent = {
            'name': 'release',
            'keys': ['R123', 'R234'],
            'restrictiveness': 2
        }
        assert odcs_config.signing_intents == [
            unsigned_intent, beta_intent, release_intent
        ]
        assert odcs_config.get_signing_intent_by_name(
            'release') == release_intent
        assert odcs_config.get_signing_intent_by_name('beta') == beta_intent
        assert odcs_config.get_signing_intent_by_name(
            'unsigned') == unsigned_intent

        with pytest.raises(ValueError):
            odcs_config.get_signing_intent_by_name('missing')

        # Key lookup accepts a list or a space-separated string; per these
        # cases the most restrictive intent containing all given keys wins.
        assert odcs_config.get_signing_intent_by_keys(['R123', 'R234'
                                                       ])['name'] == 'release'
        assert odcs_config.get_signing_intent_by_keys(
            'R123 R234')['name'] == 'release'
        assert odcs_config.get_signing_intent_by_keys(['R123'
                                                       ])['name'] == 'release'
        assert odcs_config.get_signing_intent_by_keys(
            'R123')['name'] == 'release'
        assert odcs_config.get_signing_intent_by_keys(['R123', 'B456'
                                                       ])['name'] == 'beta'
        assert odcs_config.get_signing_intent_by_keys(['B456', 'R123'
                                                       ])['name'] == 'beta'
        assert odcs_config.get_signing_intent_by_keys(
            'B456 R123')['name'] == 'beta'
        assert odcs_config.get_signing_intent_by_keys(
            'R123 B456 ')['name'] == 'beta'
        assert odcs_config.get_signing_intent_by_keys(['B456'
                                                       ])['name'] == 'beta'
        assert odcs_config.get_signing_intent_by_keys('B456')['name'] == 'beta'
        assert odcs_config.get_signing_intent_by_keys([])['name'] == 'unsigned'
        assert odcs_config.get_signing_intent_by_keys('')['name'] == 'unsigned'

        # Unknown keys, or key sets no single intent covers, must fail.
        with pytest.raises(ValueError):
            assert odcs_config.get_signing_intent_by_keys(['missing'])
        with pytest.raises(ValueError):
            assert odcs_config.get_signing_intent_by_keys(
                ['R123', 'R234', 'B457'])

    def test_odcs_config_invalid_default_signing_intent(self, tmpdir):
        config = """\
odcs:
  signing_intents:
  - name: release
    keys: [R123]
  - name: beta
    keys: [R123, B456]
  - name: unsigned
    keys: []
  default_signing_intent: spam
  api_url: http://odcs.example.com
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
"""
        config += "\n" + REQUIRED_CONFIG
        filename = str(tmpdir.join('config.yaml'))
        with open(filename, 'w') as fp:
            fp.write(dedent(config))

        conf = Configuration(config_path=filename)

        with pytest.raises(ValueError) as exc_info:
            getattr(conf, 'odcs_config')
        message = str(exc_info.value)
        assert message == dedent("""\
            unknown signing intent name "spam", valid names: unsigned, beta, release
            """.rstrip())

    def test_odcs_config_deprecated_signing_intent(self, tmpdir, caplog):
        config = """\
odcs:
  signing_intents:
  - name: release
    keys: [R123]
    deprecated_keys: [R122]
  default_signing_intent: release
  api_url: http://odcs.example.com
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
"""
        config += "\n" + REQUIRED_CONFIG
        filename = str(tmpdir.join('config.yaml'))
        with open(filename, 'w') as fp:
            fp.write(dedent(config))

        conf = Configuration(config_path=filename)

        odcs_config = conf.odcs_config
        signing_intent = odcs_config.get_signing_intent_by_keys(['R123'])
        assert signing_intent['name'] == 'release'
        assert 'contain deprecated entries' not in caplog.text

        signing_intent = odcs_config.get_signing_intent_by_keys(
            ['R123', 'R122'])
        assert signing_intent['name'] == 'release'
        assert 'contain deprecated entries' in caplog.text

    @pytest.mark.parametrize('parse_from', ['env', 'file', 'raw'])
    @pytest.mark.parametrize('method', [
        'odcs', 'smtp', 'artifacts_allowed_domains',
        'yum_repo_allowed_domains', 'image_labels',
        'image_label_info_url_format', 'image_equal_labels',
        'fail_on_digest_mismatch', 'openshift', 'group_manifests',
        'platform_descriptors', 'prefer_schema1_digest', 'content_versions',
        'registry', 'yum_proxy', 'source_registry', 'sources_command',
        'required_secrets', 'worker_token_secrets', 'clusters', 'hide_files',
        'skip_koji_check_for_base_image', 'deep_manifest_list_inspection'
    ])
    def test_get_methods(self, parse_from, method, tmpdir, caplog):
        """Each Configuration attribute must reproduce REACTOR_CONFIG_MAP no
        matter whether the config comes from raw kwargs, an env variable, or
        a file.

        Fixes over the previous version:
        * the env variable is removed in a ``finally`` block — the old
          cleanup at the end of the test was skipped by the early returns
          for 'registry'/'source_registry' and leaked into other tests;
        * the "reading config from ..." log assertion now runs for every
          method (it used to sit after those early returns);
        * the 'file' expectation checks for the actual config path instead
          of an f-string with no placeholders.
        """
        filename = None
        try:
            if parse_from == 'raw':
                conf = Configuration(raw_config=yaml.safe_load(REACTOR_CONFIG_MAP))
            elif parse_from == 'env':
                os.environ[REACTOR_CONFIG_ENV_NAME] = dedent(REACTOR_CONFIG_MAP)
                conf = Configuration(env_name=REACTOR_CONFIG_ENV_NAME)
            elif parse_from == 'file':
                filename = str(tmpdir.join('config.yaml'))
                with open(filename, 'w') as fp:
                    fp.write(dedent(REACTOR_CONFIG_MAP))
                conf = Configuration(config_path=filename)
        finally:
            # Must not leak into other tests, even if Configuration raises.
            os.environ.pop(REACTOR_CONFIG_ENV_NAME, None)

        if parse_from == 'raw':
            log_msg = "reading config from raw_config kwarg"
        elif parse_from == 'env':
            log_msg = f"reading config from {REACTOR_CONFIG_ENV_NAME} env variable"
        elif parse_from == 'file':
            # NOTE(review): assumes the log message contains the config
            # path — confirm exact wording against Configuration's logging.
            log_msg = f"reading config from {filename}"
        assert log_msg in caplog.text

        output = getattr(conf, method)

        if method == 'registry':
            # map `registry` to `registries` in config so that neither users
            # nor devs are confused
            expected = yaml.safe_load(REACTOR_CONFIG_MAP)['registries']
        else:
            expected = yaml.safe_load(REACTOR_CONFIG_MAP)[method]

        if method == 'registry':
            # since there will only be exactly one registry
            registry = expected[0]
            reguri = RegistryURI(registry.get('url'))
            regdict = {'uri': reguri.docker_uri, 'version': reguri.version}
            if registry.get('auth'):
                regdict['secret'] = registry['auth']['cfg_path']
            regdict['insecure'] = registry.get('insecure', False)
            regdict['expected_media_types'] = registry.get(
                'expected_media_types', [])

            assert output == regdict
            return

        if method == 'source_registry':
            expect = {
                'uri': RegistryURI(expected['url']),
                'insecure': expected.get('insecure', False)
            }
            assert output['insecure'] == expect['insecure']
            assert output['uri'].uri == expect['uri'].uri
            return

        assert output == expected

    @pytest.mark.parametrize(('config', 'expect'), [
        ("""\
platform_descriptors:
  - platform: x86_64
    architecture: amd64
         """, {
            'x86_64': 'amd64',
            'ppc64le': 'ppc64le'
        }),
    ])
    def test_get_platform_to_goarch_mapping(self, config, expect):
        config += "\n" + REQUIRED_CONFIG

        config_json = read_yaml(config, 'schemas/config.json')

        conf = Configuration(raw_config=config_json)

        platform_to_goarch = conf.platform_to_goarch_mapping
        goarch_to_platform = conf.goarch_to_platform_mapping
        for plat, goarch in expect.items():
            assert platform_to_goarch[plat] == goarch
            assert goarch_to_platform[goarch] == plat

    @pytest.mark.parametrize(('config', 'expect'), [
        ("""\
build_image_override:
  ppc64le: registry.example.com/buildroot-ppc64le:latest
  arm: registry.example.com/buildroot-arm:latest
         """, {
            'ppc64le': 'registry.example.com/buildroot-ppc64le:latest',
            'arm': 'registry.example.com/buildroot-arm:latest'
        }),
    ])
    def test_get_build_image_override(self, config, expect):
        config += "\n" + REQUIRED_CONFIG

        config_json = read_yaml(config, 'schemas/config.json')

        conf = Configuration(raw_config=config_json)

        build_image_override = conf.build_image_override
        assert build_image_override == expect

    @pytest.mark.parametrize(('config', 'expect'), [
        ("""\
flatpak:
  base_image: fedora:latest
         """, "fedora:latest"),
        ("""\
         """, None),
        ("""\
flatpak: {}
         """, None),
    ])
    def test_get_flatpak_base_image(self, config, expect):
        config += "\n" + REQUIRED_CONFIG
        config_json = read_yaml(config, 'schemas/config.json')

        conf = Configuration(raw_config=config_json)

        if expect:
            base_image = conf.flatpak_base_image
            assert base_image == expect
        else:
            with pytest.raises(KeyError):
                getattr(conf, 'flatpak_base_image')

    @pytest.mark.parametrize(('config', 'expect'), [
        ("""\
flatpak:
  metadata: labels
         """, "labels"),
        ("""\
         """, None),
        ("""\
flatpak: {}
         """, None),
    ])
    def test_get_flatpak_metadata(self, config, expect):
        config += "\n" + REQUIRED_CONFIG
        config_json = read_yaml(config, 'schemas/config.json')

        conf = Configuration(raw_config=config_json)

        if expect:
            base_image = conf.flatpak_metadata
            assert base_image == expect
        else:
            with pytest.raises(KeyError):
                getattr(conf, 'flatpak_metadata')

    @pytest.mark.parametrize(('config', 'raise_error'), [
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_principal: krb_principal
    krb_keytab_path: /tmp/krb_keytab
        """, False),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_principal: krb_principal
    krb_keytab_path: /tmp/krb_keytab
  use_fast_upload: false
        """, False),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    ssl_certs_dir: /var/certs
        """, False),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
        """, False),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
        """, True),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_principal: krb_principal
    krb_keytab_path: /tmp/krb_keytab
    ssl_certs_dir: /var/certs
        """, True),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_keytab_path: /tmp/krb_keytab
        """, True),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_principal: krb_principal
        """, True),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_principal: krb_principal
    ssl_certs_dir: /var/certs
        """, True),
        ("""\
koji:
  hub_url: https://koji.example.com/hub
  root_url: https://koji.example.com/root
  auth:
    proxyuser: proxyuser
    krb_keytab_path: /tmp/krb_keytab
    ssl_certs_dir: /var/certs
        """, True),
    ])
    def test_get_koji_session(self, config, raise_error):
        """Per these cases, koji auth accepts kerberos (principal + keytab
        together) or ssl_certs_dir, but not both and not a partial kerberos
        pair; a valid config must create a koji session with exactly the
        configured auth info and fast-upload setting."""
        required_config = """\
version: 1
source_registry:
  url: source_registry.com
registries:
  - url: registry_url
openshift:
  url: openshift_url
"""
        config += "\n" + required_config
        if raise_error:
            with pytest.raises(Exception):
                read_yaml(config, 'schemas/config.json')
            return
        config_json = read_yaml(config, 'schemas/config.json')

        # The exact auth dict get_koji_session is expected to forward.
        auth_info = {
            "proxyuser": config_json['koji']['auth'].get('proxyuser'),
            "ssl_certs_dir": config_json['koji']['auth'].get('ssl_certs_dir'),
            "krb_principal": config_json['koji']['auth'].get('krb_principal'),
            "krb_keytab": config_json['koji']['auth'].get('krb_keytab_path')
        }

        # use_fast_upload defaults to True when not configured.
        use_fast_upload = config_json['koji'].get('use_fast_upload', True)

        conf = Configuration(raw_config=config_json)

        (flexmock(atomic_reactor.utils.koji).should_receive(
            'create_koji_session').with_args(
                config_json['koji']['hub_url'], auth_info,
                use_fast_upload).once().and_return(True))

        get_koji_session(conf)

    @pytest.mark.parametrize('root_url',
                             ('https://koji.example.com/root',
                              'https://koji.example.com/root/', None))
    def test_get_koji_path_info(self, root_url):
        """koji.root_url is required by the schema; with or without a
        trailing slash it must normalize to the same PathInfo topdir."""

        config = {
            'version': 1,
            'koji': {
                'hub_url': 'https://koji.example.com/hub',
                'auth': {
                    'ssl_certs_dir': '/var/certs'
                }
            },
            'openshift': {
                'url': 'openshift_url'
            },
            'source_registry': {
                'url': 'source_registry'
            },
            'registries': [{
                'url': 'registry_url'
            }]
        }
        # Both slash variants must collapse to this topdir.
        expected_root_url = 'https://koji.example.com/root'

        if root_url:
            config['koji']['root_url'] = root_url

        config_yaml = yaml.safe_dump(config)

        # With root_url missing entirely, schema validation itself fails.
        expect_error = not root_url
        if expect_error:
            with pytest.raises(Exception):
                read_yaml(config_yaml, 'schemas/config.json')
            return

        parsed_config = read_yaml(config_yaml, 'schemas/config.json')

        conf = Configuration(raw_config=parsed_config)

        (flexmock(koji.PathInfo).should_receive('__init__').with_args(
            topdir=expected_root_url).once())
        getattr(conf, 'koji_path_info')

    @pytest.mark.parametrize(('config', 'raise_error'), [
        ("""\
odcs:
  api_url: https://odcs.example.com/api/1
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
  signing_intents:
  - name: release
    keys: [R123]
  default_signing_intent: default
  timeout: 3600
        """, False),
        ("""\
odcs:
  api_url: https://odcs.example.com/api/1
  auth:
    ssl_certs_dir: nonexistent
  signing_intents:
  - name: release
    keys: [R123]
  default_signing_intent: default
        """, False),
        ("""\
odcs:
  api_url: https://odcs.example.com/api/1
  auth:
    openidc_dir: /var/run/open_idc
  signing_intents:
  - name: release
    keys: [R123]
  default_signing_intent: default
        """, False),
        ("""\
odcs:
  api_url: https://odcs.example.com/api/1
  auth:
    openidc_dir: /var/run/open_idc
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
  signing_intents:
  - name: release
    keys: [R123]
  default_signing_intent: default
        """, True),
        ("""\
odcs:
  api_url: https://odcs.example.com/api/1
  auth:
    openidc_dir: /var/run/open_idc
  signing_intents:
  - name: release
    keys: [R123]
        """, True),
        ("""\
odcs:
  api_url: https://odcs.example.com/api/1
  auth:
    openidc_dir: /var/run/open_idc
  default_signing_intent: default
        """, True),
        ("""\
odcs:
  auth:
    openidc_dir: /var/run/open_idc
  signing_intents:
  - name: release
    keys: [R123]
  default_signing_intent: default
        """, True),
    ])
    def test_get_odcs_session(self, tmpdir, config, raise_error):
        """Per these cases, odcs needs api_url, signing_intents and
        default_signing_intent, and exactly one of openidc_dir or
        ssl_certs_dir; a valid config must build an ODCSClient with the
        matching auth info, and a missing ssl_certs_dir raises KeyError."""
        config += "\n" + REQUIRED_CONFIG

        if raise_error:
            with pytest.raises(Exception):
                read_yaml(config, 'schemas/config.json')
            return
        config_json = read_yaml(config, 'schemas/config.json')

        auth_info = {
            'insecure': config_json['odcs'].get('insecure', False),
            'timeout': config_json['odcs'].get('timeout', None),
        }
        # Redirect the configured auth dirs into tmpdir with real token/cert
        # files so get_odcs_session can read them.
        if 'openidc_dir' in config_json['odcs']['auth']:
            config_json['odcs']['auth']['openidc_dir'] = str(tmpdir)
            filename = str(tmpdir.join('token'))
            with open(filename, 'w') as fp:
                fp.write("my_token")
            auth_info['token'] = "my_token"

        ssl_dir_raise = False
        if 'ssl_certs_dir' in config_json['odcs']['auth']:
            if config_json['odcs']['auth']['ssl_certs_dir'] != "nonexistent":
                config_json['odcs']['auth']['ssl_certs_dir'] = str(tmpdir)
                filename = str(tmpdir.join('cert'))
                with open(filename, 'w') as fp:
                    fp.write("my_cert")
                auth_info['cert'] = filename
            else:
                # "nonexistent" sentinel: keep the bogus path to provoke the
                # KeyError path below.
                ssl_dir_raise = True

        conf = Configuration(raw_config=config_json)

        if not ssl_dir_raise:
            (flexmock(atomic_reactor.utils.odcs.ODCSClient).should_receive(
                '__init__').with_args(config_json['odcs']['api_url'],
                                      **auth_info).once().and_return(None))

            get_odcs_session(conf)
        else:
            with pytest.raises(KeyError):
                get_odcs_session(conf)

    @pytest.mark.parametrize(('config', 'raise_error'), [
        ("""\
smtp:
  host: smtp.example.com
  from_address: [email protected]
        """, False),
        ("""\
smtp:
  from_address: [email protected]
        """, True),
        ("""\
smtp:
  host: smtp.example.com
        """, True),
        ("""\
smtp:
        """, True),
    ])
    def test_get_smtp_session(self, config, raise_error):
        config += "\n" + REQUIRED_CONFIG

        if raise_error:
            with pytest.raises(Exception):
                read_yaml(config, 'schemas/config.json')
            return
        config_json = read_yaml(config, 'schemas/config.json')

        conf = Configuration(raw_config=config_json)

        (flexmock(smtplib.SMTP).should_receive('__init__').with_args(
            config_json['smtp']['host']).once().and_return(None))

        get_smtp_session(conf)

    @pytest.mark.parametrize(('config', 'error'), [
        ("""\
cachito:
  api_url: https://cachito.example.com
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/cachitosecret
  timeout: 1000
        """, False),
        ("""\
cachito:
  api_url: https://cachito.example.com
  insecure: true
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/cachitosecret
        """, False),
        ("""\
cachito:
  api_url: https://cachito.example.com
  auth:
        """, OsbsValidationException),
        ("""\
cachito:
  api_url: https://cachito.example.com
        """, OsbsValidationException),
        ("""\
cachito:
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/cachitosecret
        """, OsbsValidationException),
        ("""\
cachito:
  api_url: https://cachito.example.com
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/cachitosecret
  spam: ham
        """, OsbsValidationException),
        ("""\
cachito:
  api_url: https://cachito.example.com
  auth:
    ssl_certs_dir: nonexistent
        """, False),
    ])
    def test_get_cachito_session(self, tmpdir, config, error):
        """Per these cases, cachito needs api_url and a non-empty auth with
        ssl_certs_dir, and rejects unknown keys; a valid config must build a
        CachitoAPI with matching auth info, while a missing cert dir raises
        RuntimeError."""
        config += "\n" + REQUIRED_CONFIG

        if error:
            with pytest.raises(error):
                read_yaml(config, 'schemas/config.json')
            return
        config_json = read_yaml(config, 'schemas/config.json')

        auth_info = {
            'insecure': config_json['cachito'].get('insecure', False),
            'timeout': config_json['cachito'].get('timeout'),
        }

        ssl_dir_raise = False
        if 'ssl_certs_dir' in config_json['cachito']['auth']:
            if config_json['cachito']['auth']['ssl_certs_dir'] != "nonexistent":
                # Redirect the configured dir to tmpdir with a real cert file
                # so get_cachito_session can read it.
                config_json['cachito']['auth']['ssl_certs_dir'] = str(tmpdir)
                filename = str(tmpdir.join('cert'))
                with open(filename, 'w') as fp:
                    fp.write("my_cert")
                auth_info['cert'] = filename
            else:
                # "nonexistent" sentinel: keep the bogus path to provoke the
                # RuntimeError path below.
                ssl_dir_raise = True

        conf = Configuration(raw_config=config_json)

        if not ssl_dir_raise:
            (flexmock(atomic_reactor.utils.cachito.CachitoAPI).should_receive(
                '__init__').with_args(config_json['cachito']['api_url'],
                                      **auth_info).once().and_return(None))

            get_cachito_session(conf)
        else:
            with pytest.raises(RuntimeError,
                               match="Cachito ssl_certs_dir doesn't exist"):
                get_cachito_session(conf)

    @pytest.mark.parametrize(('config', 'raise_error'), [
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
        """, False),
        ("""\
openshift:
  url: https://openshift.example.com
        """, False),
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
    krb_principal: principal
    krb_keytab_path: /var/keytab
        """, False),
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
    krb_principal: principal
    krb_keytab_path: /var/keytab
    krb_cache_path: /var/krb/cache
        """, False),
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
    enable: True
        """, False),
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
    krb_keytab_path: /var/keytab
        """, True),
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
    krb_principal: principal
        """, True),
        ("""\
openshift:
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
        """, True),
        ("""\
openshift:
  auth:
    krb_principal: principal
    krb_keytab_path: /var/keytab
        """, True),
        ("""\
openshift:
  url: https://openshift.example.com
  auth:
        """, True),
        ("""\
openshift:
  auth:
    ssl_certs_dir: /var/run/secrets/atomic-reactor/odcssecret
        """, True),
    ])
    def test_get_openshift_session(self, config, raise_error):
        """Per these cases, openshift requires url, a non-empty auth when
        present, and krb_principal/krb_keytab_path only together; a valid
        config must construct osbs.conf.Configuration with the matching
        auth kwargs."""
        required_config = """\
version: 1
koji:
  hub_url: /
  root_url: ''
  auth: {}
source_registry:
  url: source_registry.com
registries:
  - url: registry_url
"""

        config += "\n" + required_config

        if raise_error:
            with pytest.raises(Exception):
                read_yaml(config, 'schemas/config.json')
            return
        config_json = read_yaml(config, 'schemas/config.json')

        # The exact kwargs osbs.conf.Configuration is expected to receive.
        auth_info = {
            'openshift_url': config_json['openshift']['url'],
            'verify_ssl': not config_json['openshift'].get('insecure', False),
            'use_auth': False,
            'conf_file': None,
            'namespace': 'namespace',
        }
        if config_json['openshift'].get('auth'):
            if config_json['openshift']['auth'].get('krb_keytab_path'):
                auth_info['kerberos_keytab'] =\
                    config_json['openshift']['auth'].get('krb_keytab_path')
            if config_json['openshift']['auth'].get('krb_principal'):
                auth_info['kerberos_principal'] =\
                    config_json['openshift']['auth'].get('krb_principal')
            if config_json['openshift']['auth'].get('krb_cache_path'):
                auth_info['kerberos_ccache'] =\
                    config_json['openshift']['auth'].get('krb_cache_path')
            if config_json['openshift']['auth'].get('ssl_certs_dir'):
                # ssl_certs_dir expands into separate cert and key paths.
                auth_info['client_cert'] =\
                    os.path.join(config_json['openshift']['auth'].get('ssl_certs_dir'), 'cert')
                auth_info['client_key'] =\
                    os.path.join(config_json['openshift']['auth'].get('ssl_certs_dir'), 'key')
            auth_info['use_auth'] = config_json['openshift']['auth'].get(
                'enable', False)

        # should_call: spy the real constructors, just assert the arguments.
        (flexmock(osbs.conf.Configuration).should_call('__init__').with_args(
            **auth_info).once())
        (flexmock(osbs.api.OSBS).should_call('__init__').once())

        conf = Configuration(raw_config=config_json)
        get_openshift_session(conf, 'namespace')

    @pytest.mark.parametrize(
        'config, valid',
        [
            ("""\
operator_manifests:
  allowed_registries: null
        """, True),  # minimal valid example, allows all registries
            ("""\
operator_manifests:
  allowed_registries:
    - foo
    - bar
  repo_replacements:
    - registry: foo
      package_mappings_url: https://somewhere.net/mapping.yaml
  registry_post_replace:
    - old: foo
      new: bar
        """, True),  # all known properties
            ("""\
operator_manifests: null
        """, False),  # has to be a dict
            ("""\
operator_manifests: {}
        """, False),  # allowed_registries is required
            ("""\
operator_manifests:
  allowed_registries: []
        """, False),  # if not null, allowed_registries must not be empty
            ("""\
operator_manifests:
  allowed_registries: null
  something_else: null
        """, False),  # additional properties not allowed
            ("""\
operator_manifests:
  allowed_registries: null
  registry_post_replace:
    - old: foo
        """, False),  # missing replacement registry
            ("""\
operator_manifests:
  allowed_registries: null
  registry_post_replace:
    - new: foo
        """, False),  # missing original registry
            ("""\
operator_manifests:
  allowed_registries: null
  repo_replacements:
    - registry: foo
        """, False),  # missing package mappings url
            ("""\
operator_manifests:
  allowed_registries: null
  repo_replacements:
    - package_mappings_url: https://somewhere.net/mapping.yaml
        """, False),  # missing registry
            ("""\
operator_manifests:
  allowed_registries: null,
  repo_replacements:
    - registry: foo
      package_mappings_url: mapping.yaml
        """, False),  # package mappings url is not a url
        ])
    def test_get_operator_manifests(self, tmpdir, config, valid):
        """Invalid operator_manifests configs must fail schema validation;
        valid ones must surface through the operator_manifests property as a
        dict containing allowed_registries."""
        config += "\n" + REQUIRED_CONFIG
        if valid:
            read_yaml(config, 'schemas/config.json')
        else:
            with pytest.raises(OsbsValidationException):
                read_yaml(config, 'schemas/config.json')
            return

        filename = os.path.join(str(tmpdir), 'config.yaml')
        with open(filename, 'w') as fp:
            fp.write(dedent(config))
        conf = Configuration(config_path=filename)

        operator_config = conf.operator_manifests
        assert isinstance(operator_config, dict)
        assert "allowed_registries" in operator_config

    @pytest.mark.parametrize(
        'config, valid',
        [
            ("""\
build_env_vars: []
        """, True),
            ("""\
build_env_vars:
- name: HTTP_PROXY
  value: example.proxy.net
- name: NO_PROXY
  value: localhost
        """, True),
            ("""\
build_env_vars:
- name: FOO
  value: 1
        """, False),  # values must be strings
            ("""\
build_env_vars:
- name: FOO
        """, False),  # values must be defined
        ])
    def test_validate_build_env_vars(self, config, valid):
        # Only test schema validation, atomic-reactor has no additional support
        # for build_env_vars (osbs-client does, however)
        config += "\n" + REQUIRED_CONFIG
        if valid:
            read_yaml(config, 'schemas/config.json')
        else:
            with pytest.raises(OsbsValidationException):
                read_yaml(config, 'schemas/config.json')

    @pytest.mark.parametrize(('images_exist', 'organization'), [
        (True, None),
        (True, 'organization'),
        (False, None),
        (False, 'organization'),
    ])
    def test_update_dockerfile_images_from_config(self, tmp_path, images_exist,
                                                  organization):
        """Check parent images get the source registry (and org) prefix."""
        config = REQUIRED_CONFIG
        if organization:
            config += "\nregistries_organization: " + organization

        config_yaml = tmp_path / 'config.yaml'
        config_yaml.write_text(dedent(config), "utf-8")

        if not images_exist:
            parent_images = []
        else:
            parent_images = ['parent:latest', 'base:latest']
            if organization:
                expect_images = [
                    ImageName.parse(
                        'source_registry.com/organization/base:latest'),
                    ImageName.parse(
                        'source_registry.com/organization/parent:latest')
                ]
            else:
                expect_images = [
                    ImageName.parse('source_registry.com/base:latest'),
                    ImageName.parse('source_registry.com/parent:latest')
                ]

        dockerfile_images = DockerfileImages(parent_images)

        conf = Configuration(config_path=str(config_yaml))
        conf.update_dockerfile_images_from_config(dockerfile_images)

        if not images_exist:
            assert not dockerfile_images
        else:
            assert len(dockerfile_images) == 2
            assert dockerfile_images.keys() == expect_images
Ejemplo n.º 17
0
 def _as_registry(self, registry):
     """Translate one raw registry config entry into the plugin's dict form."""
     entry = {'uri': RegistryURI(registry['url'])}
     entry['insecure'] = registry.get('insecure', False)
     entry['dockercfg_path'] = self.registries_cfg_path
     return entry
    def workflow(self,
                 build_process_failed=False,
                 registries=None,
                 registry_types=None,
                 platforms=None,
                 platform_descriptors=None,
                 group=True,
                 fail=False,
                 limit_media_types=None):
        """Build a mocked workflow with registry HTTP traffic stubbed out.

        Assembles a TagConf holding the test's unique image, a reactor
        config dict (registries, optional platform_descriptors and
        source_container.limit_media_types), and flexmock expectations on
        requests.Session.get so each configured registry answers manifest
        requests with canned responses.

        :param build_process_failed: forwarded onto the returned mock
        :param registries: registry config dicts; defaults to a single
            container-registry.example.com entry
        :param registry_types: expected media types used for the default
            registry when ``registries`` is not supplied
        :param platforms: platform names; defaults to the platforms listed
            in ``platform_descriptors``
        :param platform_descriptors: platform/architecture mappings;
            defaults to x86_64, ppc64le and s390x
        :param group: if False, report a schema2 digest instead of a
            manifest list in the group-manifests plugin result
        :param fail: the string "bad_results" makes every manifest query
            return an empty JSON response with no usable media type
        :param limit_media_types: optional source_container limit placed in
            the reactor config
        :return: flexmock with data, builder, conf and
            build_process_failed attributes
        """
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)

        if platform_descriptors is None:
            platform_descriptors = [
                {
                    'platform': 'x86_64',
                    'architecture': 'amd64'
                },
                {
                    'platform': 'ppc64le',
                    'architecture': 'ppc64le'
                },
                {
                    'platform': 's390x',
                    'architecture': 's390x'
                },
            ]

        if platforms is None:
            platforms = [
                descriptor['platform'] for descriptor in platform_descriptors
            ]
        no_amd64 = 'x86_64' not in platforms

        # When the caller supplies explicit registries or media types, keep
        # them as-is instead of narrowing the responses below.
        keep_types = False
        if registries or registry_types:
            keep_types = True

        if registries is None and registry_types is None:
            registry_types = [
                MEDIA_TYPE_DOCKER_V2_SCHEMA1, MEDIA_TYPE_DOCKER_V2_SCHEMA2,
                MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST, MEDIA_TYPE_OCI_V1,
                MEDIA_TYPE_OCI_V1_INDEX
            ]

        if registries is None:
            registries = [{
                'url': 'https://container-registry.example.com/v2',
                'version': 'v2',
                'insecure': True,
                'expected_media_types': registry_types
            }]
        conf = {
            'version': 1,
            'registries': registries,
        }

        if limit_media_types is not None:
            conf['source_container'] = {
                'limit_media_types': limit_media_types,
            }

        if platform_descriptors:
            conf['platform_descriptors'] = platform_descriptors

        for registry in registries:

            # Echo the first Accept media type back as the Content-Type.
            def get_manifest(request):
                media_types = request.headers.get('Accept', '').split(',')
                content_type = media_types[0]

                return 200, {'Content-Type': content_type}, '{}'

            # NOTE(review): the "r'" prefix and trailing quote are embedded
            # in the pattern string itself, so the compiled regex contains
            # literal r' characters — confirm this matcher is intended
            # (requests.Session.get is flexmocked below, so this responses
            # callback may never actually match).
            url_regex = "r'" + registry['url'] + ".*/manifests/.*'"
            url = re.compile(url_regex)
            responses.add_callback(responses.GET, url, callback=get_manifest)

            expected_types = registry.get('expected_media_types',
                                          registry_types or [])
            if fail == "bad_results":
                response_types = []
            elif not keep_types and no_amd64:
                # Without x86_64 among the platforms, only the manifest list
                # media type is reported.
                response_types = [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
            else:
                response_types = expected_types

            reguri = RegistryURI(registry['url']).docker_uri
            if re.match('http(s)?://', reguri):
                urlbase = reguri
            else:
                urlbase = 'https://{0}'.format(reguri)

            actual_v2_url = urlbase + "/v2/foo/manifests/unique-tag"

            if fail == "bad_results":
                # Every manifest query succeeds but returns an empty JSON
                # body with a generic content type, i.e. no media type info.
                response = requests.Response()
                (flexmock(response,
                          raise_for_status=lambda: None,
                          status_code=requests.codes.ok,
                          json={},
                          headers={'Content-Type': 'application/json'}))
                v1_response = response
                v1_oci_response = response
                v1_oci_index_response = response
                v2_response = response
                v2_list_response = response
            else:
                # Default all media types to "not available", then enable
                # the ones listed in response_types below.
                v1_response = self.config_response_none
                v1_oci_response = self.config_response_none
                v1_oci_index_response = self.config_response_none
                v2_response = self.config_response_none
                v2_list_response = self.config_response_none

            if MEDIA_TYPE_DOCKER_V2_SCHEMA1 in response_types:
                v1_response = self.config_response_config_v1
            if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in response_types:
                v2_response = self.config_response_config_v2
            if MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in response_types:
                v2_list_response = self.config_response_config_v2_list
            if MEDIA_TYPE_OCI_V1 in response_types:
                v1_oci_response = self.config_response_config_oci_v1
            if MEDIA_TYPE_OCI_V1_INDEX in response_types:
                v1_oci_index_response = self.config_response_config_oci_v1_index

            v2_header_v1 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA1}
            v2_header_v2 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA2}
            manifest_header = {'Accept': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST}

            # One expectation per Accept header: each media type query gets
            # its matching canned response.
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers=v2_header_v1,
                auth=HTTPRegistryAuth,
                verify=False).and_return(v1_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers=v2_header_v2,
                auth=HTTPRegistryAuth,
                verify=False).and_return(v2_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers={
                    'Accept': MEDIA_TYPE_OCI_V1
                },
                auth=HTTPRegistryAuth,
                verify=False).and_return(v1_oci_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers={
                    'Accept': MEDIA_TYPE_OCI_V1_INDEX
                },
                auth=HTTPRegistryAuth,
                verify=False).and_return(v1_oci_index_response))
            (flexmock(requests.Session).should_receive('get').with_args(
                actual_v2_url,
                headers=manifest_header,
                auth=HTTPRegistryAuth,
                verify=False).and_return(v2_list_response))

        # Grouped builds report a manifest list; ungrouped ones a schema2
        # manifest.
        digests = {'media_type': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST}
        if not group:
            digests = {'media_type': MEDIA_TYPE_DOCKER_V2_SCHEMA2}
        plugins_results = {
            PLUGIN_CHECK_AND_SET_PLATFORMS_KEY: platforms,
            PLUGIN_GROUP_MANIFESTS_KEY: digests,
        }

        mock_get_retry_session()
        builder = flexmock()
        setattr(builder, 'image_id', 'sha256:(old)')

        flexmock(tag_conf=tag_conf)
        wf_data = ImageBuildWorkflowData()
        wf_data.tag_conf = tag_conf
        wf_data.plugins_results = plugins_results

        return flexmock(data=wf_data,
                        builder=builder,
                        conf=Configuration(raw_config=conf),
                        build_process_failed=build_process_failed)
Ejemplo n.º 19
0
 def value(self, val):  # pylint: disable=W0221
     """Set this param, wrapping a truthy value in a RegistryURI."""
     uri = RegistryURI(val) if val else None
     BuildParam.value.fset(self, uri)
Ejemplo n.º 20
0
 def value(self, val):  # pylint: disable=W0221
     """Set this param to a list of RegistryURI built from *val*."""
     uris = list(map(RegistryURI, val))
     BuildParam.value.fset(self, uris)  # pylint: disable=no-member
    def workflow(self, build_process_failed=False, registries=None, registry_types=None,
                 platforms=None, platform_descriptors=None, group=True, no_amd64=False,
                 fail=False):
        """Build a mocked workflow (plugin-workspace variant) with registry
        HTTP traffic stubbed out.

        Sets up TagConf/PushConf, a ReactorConfig stored in the plugin
        workspace, and flexmock expectations on requests.Session.get so each
        configured registry answers manifest queries with canned responses.

        :param build_process_failed: forwarded onto the returned mock
        :param registries: registry config dicts; defaults to a single
            container-registry.example.com entry
        :param registry_types: expected media types used for the default
            registry when ``registries`` is not supplied
        :param platforms: platform names; defaults to the platforms listed
            in ``platform_descriptors``
        :param platform_descriptors: platform/architecture mappings;
            defaults to x86_64, ppc64le and s390x
        :param group: if True, the group-manifests result carries a digest
            entry; otherwise it is an empty dict
        :param no_amd64: make registries report only the manifest list type
        :param fail: the string "bad_results" makes registries report only
            the Docker v1 media type
        :return: flexmock with tag_conf, push_conf, builder,
            build_process_failed, plugin_workspace and plugin result attrs
        """
        tag_conf = TagConf()
        tag_conf.add_unique_image(self.TEST_UNIQUE_IMAGE)

        push_conf = PushConf()

        if platform_descriptors is None:
            platform_descriptors = [
                {'platform': 'x86_64', 'architecture': 'amd64'},
                {'platform': 'ppc64le', 'architecture': 'ppc64le'},
                {'platform': 's390x', 'architecture': 's390x'},
            ]

        if platforms is None:
            platforms = [descriptor['platform'] for descriptor in platform_descriptors]

        if registries is None and registry_types is None:
            registry_types = [MEDIA_TYPE_DOCKER_V1, MEDIA_TYPE_DOCKER_V2_SCHEMA1,
                              MEDIA_TYPE_DOCKER_V2_SCHEMA2, MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]

        if registries is None:
            registries = [{
                'url': 'https://container-registry.example.com/v2',
                'version': 'v2',
                'insecure': True,
                'expected_media_types': registry_types
            }]
        conf = {
            ReactorConfigKeys.VERSION_KEY: 1,
            'registries': registries,
        }
        if platform_descriptors:
            conf['platform_descriptors'] = platform_descriptors

        # Reactor config is delivered via the plugin workspace in this
        # variant (rather than a Configuration object).
        plugin_workspace = {
            ReactorConfigPlugin.key: {
                WORKSPACE_CONF_KEY: ReactorConfig(conf)
            }
        }

        flexmock(HTTPRegistryAuth).should_receive('__new__').and_return(None)
        mock_auth = None
        for registry in registries:
            # Echo the first Accept media type back as the Content-Type.
            def get_manifest(request):
                media_types = request.headers.get('Accept', '').split(',')
                content_type = media_types[0]

                return (200, {'Content-Type': content_type}, '{}')

            # NOTE(review): the "r'" prefix and trailing quote are embedded
            # in the pattern string itself, so the compiled regex contains
            # literal r' characters — confirm this is intended
            # (Session.get is flexmocked below, so this callback may never
            # actually match).
            url_regex = "r'" + registry['url'] + ".*/manifests/.*'"
            url = re.compile(url_regex)
            responses.add_callback(responses.GET, url, callback=get_manifest)

            expected_types = registry.get('expected_media_types', [])
            if fail == "bad_results":
                response_types = [MEDIA_TYPE_DOCKER_V1]
            elif no_amd64:
                response_types = [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
            else:
                response_types = expected_types

            reguri = RegistryURI(registry['url']).docker_uri
            if re.match('http(s)?://', reguri):
                urlbase = reguri
            else:
                urlbase = 'https://{0}'.format(reguri)

            actual_v2_url = urlbase + "/v2/foo/manifests/unique-tag"
            actual_v1_url = urlbase + "/v1/repositories/foo/tags/unique-tag"

            # Default all media types to "not available", then enable the
            # ones listed in response_types below.
            v1_response = self.config_response_none
            v2_response = self.config_response_none
            v2_list_response = self.config_response_none
            if MEDIA_TYPE_DOCKER_V2_SCHEMA1 in response_types:
                v1_response = self.config_response_config_v1
            if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in response_types:
                v2_response = self.config_response_config_v2
            if MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in response_types:
                v2_list_response = self.config_response_config_v2_list
            v2_header_v1 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA1}
            v2_header_v2 = {'Accept': MEDIA_TYPE_DOCKER_V2_SCHEMA2}
            manifest_header = {'Accept': MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST}

            # One expectation per Accept header; OCI types always report
            # "none" in this variant.
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers=v2_header_v1,
                           auth=mock_auth, verify=False)
                .and_return(v1_response))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers=v2_header_v2,
                           auth=mock_auth, verify=False)
                .and_return(v2_response))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers={'Accept': MEDIA_TYPE_OCI_V1},
                           auth=mock_auth, verify=False)
                .and_return(self.config_response_none))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers={'Accept': MEDIA_TYPE_OCI_V1_INDEX},
                           auth=mock_auth, verify=False)
                .and_return(self.config_response_none))
            (flexmock(requests.Session)
                .should_receive('get')
                .with_args(actual_v2_url, headers=manifest_header,
                           auth=mock_auth, verify=False)
                .and_return(v2_list_response))

            # Docker v1 uses a separate tags endpoint, only stubbed when
            # the v1 media type is expected.
            if MEDIA_TYPE_DOCKER_V1 in response_types:
                (flexmock(requests.Session)
                    .should_receive('get')
                    .with_args(actual_v1_url, headers={'Accept': MEDIA_TYPE_DOCKER_V1},
                               auth=mock_auth, verify=False)
                    .and_return(self.config_response_v1))

        digests = {'digest': None} if group else {}
        prebuild_results = {PLUGIN_CHECK_AND_SET_PLATFORMS_KEY: platforms}
        postbuild_results = {PLUGIN_GROUP_MANIFESTS_KEY: digests}

        mock_get_retry_session()
        builder = flexmock()
        setattr(builder, 'image_id', 'sha256:(old)')
        return flexmock(tag_conf=tag_conf,
                        push_conf=push_conf,
                        builder=builder,
                        build_process_failed=build_process_failed,
                        plugin_workspace=plugin_workspace,
                        prebuild_results=prebuild_results,
                        postbuild_results=postbuild_results)