def test_get_init_containers_with_git_without_connection(self):
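     # Inline git specs without a registered connection: the converter should
     # synthesize a GIT-kind V1ConnectionType per init and emit one git init
     # container for each, honoring the custom mount path of the second init.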
     git1 = V1GitConnection(revision="test", url="https://test.com")
     git2 = V1GitConnection(revision="test", url="https://test.com")
     containers = self.converter.get_init_containers(
         contexts=None,
         artifacts_store=None,
         init_connections=[
             V1Init(git=git1,
                    container=k8s_schemas.V1Container(name="test")),
             V1Init(git=git2, path="/test"),
         ],
         init_containers=[],
         connection_by_names={},
         polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
     )
     assert containers == [
         get_git_init_container(
             connection=V1ConnectionType(name=git1.get_name(),
                                         kind=V1ConnectionKind.GIT,
                                         schema=git1),
             polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
             env=self.converter.get_init_service_env_vars(),
             contexts=None,
         ),
         get_git_init_container(
             container=k8s_schemas.V1Container(name="test"),
            connection=V1ConnectionType(name=git2.get_name(),
                                        kind=V1ConnectionKind.GIT,
                                        schema=git2),
             mount_path="/test",
             polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
             env=self.converter.get_init_service_env_vars(),
             contexts=None,
         ),
     ]
Example no. 2
    def test_multi_connections(self):
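        # Mount-type connections alone (the claim and host-path stores) yield 2
        # mounts, five inits with explicit paths yield 5, and combining both
        # yields 7.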
        assert (len(
            get_volume_mounts(
                contexts=None,
                init=[],
                connections=[
                    self.s3_store,
                    self.gcs_store,
                    self.az_store,
                    self.claim_store,
                    self.host_path_store,
                ],
                secrets=[],
                config_maps=[],
            )) == 2)

        assert (len(
            get_volume_mounts(
                contexts=None,
                init=[
                    V1Init(connection=self.s3_store.name, path="/test-1"),
                    V1Init(connection=self.gcs_store.name, path="/test-2"),
                    V1Init(connection=self.az_store.name, path="/test-3"),
                    V1Init(connection=self.claim_store.name, path="/test-4"),
                    V1Init(connection=self.host_path_store.name,
                           path="/test-5"),
                ],
                connections=[],
                secrets=[],
                config_maps=[],
            )) == 5)

        assert (len(
            get_volume_mounts(
                contexts=None,
                init=[
                    V1Init(connection=self.s3_store.name, path="/test-1"),
                    V1Init(connection=self.gcs_store.name, path="/test-2"),
                    V1Init(connection=self.az_store.name, path="/test-3"),
                    V1Init(connection=self.claim_store.name, path="/test-4"),
                    V1Init(connection=self.host_path_store.name,
                           path="/test-5"),
                ],
                connections=[
                    self.s3_store,
                    self.gcs_store,
                    self.az_store,
                    self.claim_store,
                    self.host_path_store,
                ],
                secrets=[],
                config_maps=[],
            )) == 7)
    def test_get_init_containers_with_dockerfiles(self):
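        # Two dockerfile inits should produce two dockerfile init containers;
        # names are blanked on both sides before comparing since the generated
        # container names are not asserted here.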
        dockerfile_args1 = V1DockerfileType(image="foo/test",
                                            lang_env="LANG",
                                            env=[],
                                            run=["step1", "step2"])
        dockerfile_args2 = V1DockerfileType(
            image="foo/test",
            lang_env="LANG",
            env=[],
            run=["step1", "step2"],
            filename="dockerfile2",
            path="/test",
        )
        containers = self.converter.get_init_containers(
            contexts=None,
            artifacts_store=None,
            init_connections=[
                V1Init(dockerfile=dockerfile_args1),
                V1Init(dockerfile=dockerfile_args2, path="/test"),
            ],
            init_containers=[],
            connection_by_names={},
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        for container in containers:
            container.name = ""
        expected_containers = [
            get_dockerfile_init_container(
                dockerfile_args=dockerfile_args1,
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                env=self.converter.get_init_service_env_vars(),
                contexts=None,
                run_path=self.converter.run_path,
                run_instance=self.converter.run_instance,
            ),
            get_dockerfile_init_container(
                dockerfile_args=dockerfile_args2,
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                env=self.converter.get_init_service_env_vars(),
                mount_path="/test",
                contexts=None,
                run_path=self.converter.run_path,
                run_instance=self.converter.run_instance,
            ),
        ]
        for container in expected_containers:
            container.name = ""

        assert expected_containers == containers
 def apply_run_connections_params(
     cls,
     config: V1CompiledOperation,
     artifact_store: str = None,
     contexts: Dict = None,
 ) -> V1CompiledOperation:
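     # Resolve params and contexts, then parse connection references and init
     # sections for job/service runs; an artifacts init without an explicit
     # connection falls back to the default artifact store.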
     params = config.validate_params(is_template=False, check_runs=True)
     params = {param.name: param for param in params}
     params = cls._update_params_with_contexts(params, contexts)
     if config.run.kind in {V1RunKind.JOB, V1RunKind.SERVICE}:
         if config.run.connections:
             config.run.connections = Parser.parse_section(
                 config.run.connections, params=params, parse_params=True)
         if config.run.init:
             init = []
             for i in config.run.init:
                 if i.artifacts and not i.connection:
                     i.connection = artifact_store
                 resolved_i = V1Init.from_dict(
                     Parser.parse_section(i.to_dict(),
                                          params=params,
                                          parse_params=True))
                 init.append(resolved_i)
             config.run.init = init
     return config
Example no. 5
    def _apply_connections_params(
        cls,
        connections: List[str],
        init: List[V1Init],
        artifact_store: str = None,
        param_spec: Dict[str, ParamSpec] = None,
    ):
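        # Parse connections and init entries against the param spec; inits
        # requesting artifacts without a connection default to the artifact
        # store, and params marked to_init are prepended to the init list.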
        if connections:
            connections = Parser.parse_section(connections,
                                               param_spec=param_spec,
                                               parse_params=True)
        _init = []
        if init:
            for i in init:
                if i.artifacts and not i.connection:
                    i.connection = artifact_store
                resolved_i = V1Init.from_dict(
                    Parser.parse_section(i.to_dict(),
                                         param_spec=param_spec,
                                         parse_params=True))
                _init.append(resolved_i)

        # Prepend any param that has to_init after validation
        init_params = [
            v.to_init() for v in param_spec.values() if v.validate_to_init()
        ]
        init_params = [v for v in init_params if v]
        _init = init_params + _init
        return _init, connections
Example no. 6
 def assert_single_init_store(store, results):
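     # Helper: a single init for the given store, mounted at /test, should
     # produce exactly the expected volume mounts.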
     assert (get_volume_mounts(
         contexts=None,
         init=[V1Init(connection=store.name, path="/test")],
         connections=[],
         secrets=[],
         config_maps=[],
     ) == results)
Example no. 7
 def assert_single_init_artifacts_store(store, results):
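     # Helper: a single init connection against the given store should produce
     # exactly the expected pod volumes.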
     assert (get_pod_volumes(
         contexts=None,
         artifacts_store=None,
         init_connections=[V1Init(connection=store.name)],
         connection_by_names={store.name: store},
         secrets=[],
         config_maps=[],
         volumes=[],
     ) == results)
    def test_get_main_container(self):
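        # The main container takes the requested container_id as its name and
        # picks up env vars and volume mounts from the init/requested
        # connections and the mounted or requested secrets and config maps.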
        container = get_main_container(
            container_id="test",
            main_container=k8s_schemas.V1Container(name="main"),
            contexts=None,
            volume_mounts=None,
            log_level=None,
            artifacts_store=None,
            init=[
                V1Init(connection=self.claim_store.name),
                V1Init(connection=self.s3_store.name),
            ],
            connections=[self.host_path_store.name, self.gcs_store.name],
            connection_by_names={
                self.claim_store.name: self.claim_store,
                self.s3_store.name: self.s3_store,
                self.host_path_store.name: self.host_path_store,
                self.gcs_store.name: self.gcs_store,
            },
            secrets=[self.mount_resource1, self.request_non_mount_resource1],
            config_maps=[
                self.non_mount_resource1, self.request_mount_resource2
            ],
            kv_env_vars=None,
            env=None,
            ports=None,
            run_path="run_path",
        )

        assert container.name == "test"
        assert container.image is None
        assert container.image_pull_policy is None
        assert container.command is None
        assert container.args is None
        assert container.ports == []
        # 2 env vars from the secret mount
        # + 2 for the connection (context_path + spec)
        # + 1 for the connection spec (non mount)
        assert len(container.env) == 5
        assert container.env_from == []
        assert container.resources is None
        assert len(container.volume_mounts) == 4
    def test_get_init_containers_with_files(self):
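        # Two V1FileType specs passed through the dockerfile init field here;
        # the resulting containers are compared via the shared assert_containers
        # helper.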
        file_args1 = V1FileType(filename="test.sh", content="test", chmod="+x")
        file_args2 = V1FileType(
            filename="test.csv",
            content="csv",
            kind=V1ArtifactKind.CSV,
        )
        containers = self.converter.get_init_containers(
            contexts=None,
            artifacts_store=None,
            init_connections=[
                V1Init(dockerfile=file_args1),
                V1Init(dockerfile=file_args2, path="/test"),
            ],
            init_containers=[],
            connection_by_names={},
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        expected_containers = [
            get_dockerfile_init_container(
                dockerfile_args=file_args1,
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                env=self.converter.get_init_service_env_vars(),
                contexts=None,
                run_path=self.converter.run_path,
                run_instance=self.converter.run_instance,
            ),
            get_dockerfile_init_container(
                dockerfile_args=file_args2,
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                env=self.converter.get_init_service_env_vars(),
                mount_path="/test",
                contexts=None,
                run_path=self.converter.run_path,
                run_instance=self.converter.run_instance,
            ),
        ]

        self.assert_containers(expected_containers, containers)
Example no. 10
    def _get_meta_artifacts_presets(self) -> List:
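        # Convert META_COPY_ARTIFACTS meta info into a run-patch preset whose
        # init copies the requested dirs/files to paths relative to the run's
        # artifacts path.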
        if not self.run.meta_info or META_COPY_ARTIFACTS not in self.run.meta_info:
            return []

        artifacts = self.run.meta_info.pop(META_COPY_ARTIFACTS)
        artifacts = V1ArtifactsType.read(artifacts)

        def get_relative_to_run_artifacts(v: str):
            paths = v.split("/")[1:]
            paths = ["{{ globals.run_artifacts_path }}"] + paths
            return "/".join(paths)

        # Populate all paths
        if artifacts.dirs:
            artifacts.dirs = [[d, get_relative_to_run_artifacts(d)]
                              for d in artifacts.dirs]
        if artifacts.files:
            artifacts.files = [[d, get_relative_to_run_artifacts(d)]
                               for d in artifacts.files]
        init = V1Init(artifacts=artifacts)
        return [{"runPatch": {"init": [init.to_dict()]}}]
Example no. 11
 def _apply_connections_params(
     cls,
     connections: List[str],
     init: List[V1Init],
     artifact_store: str = None,
     param_spec: Dict[str, ParamSpec] = None,
 ):
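     # Same parsing as the variant above, but without prepending params marked
     # to_init from the param spec.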
     if connections:
         connections = Parser.parse_section(connections,
                                            param_spec=param_spec,
                                            parse_params=True)
     _init = []
     if init:
         for i in init:
             if i.artifacts and not i.connection:
                 i.connection = artifact_store
             resolved_i = V1Init.from_dict(
                 Parser.parse_section(i.to_dict(),
                                      param_spec=param_spec,
                                      parse_params=True))
             _init.append(resolved_i)
     return _init, connections
Example no. 12
    def test_wrong_init_configs(self):
        # Git without url and connection
        config_dict = {
            "git": {
                "revision": "branch1"
            },
            "container": {
                "name": "init1",
                "args": ["/subpath1", "subpath2"]
            },
        }
        with self.assertRaises(ValidationError):
            V1Init.from_dict(config_dict)

        # artifacts without connection
        config_dict = {
            "git": {
                "revision": "branch1"
            },
            "artifacts": {
                "files": ["path1", "path2"]
            },
        }
        with self.assertRaises(ValidationError):
            V1Init.from_dict(config_dict)

        # both git and dockerfile at the same time
        config_dict = {
            "git": {
                "revision": "branch1"
            },
            "dockerfile": {
                "image": "tensorflow:1.3.0"
            },
        }
        with self.assertRaises(ValidationError):
            V1Init.from_dict(config_dict)
Example no. 13
    def test_get_main_container_with_mounted_artifacts_store(self):
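        # A single claim-store init yields one volume mount; requesting the same
        # store as a connection adds a second, and an empty container_id falls
        # back to the main container's own name; with collect_artifacts/logs
        # enabled and the claim store as the artifacts store, the outputs
        # context is mounted instead.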
        container = get_main_container(
            container_id="test",
            main_container=k8s_schemas.V1Container(name="main"),
            contexts=None,
            volume_mounts=None,
            log_level=None,
            artifacts_store=None,
            init=[V1Init(connection=self.claim_store.name)],
            connections=None,
            connection_by_names={self.claim_store.name: self.claim_store},
            secrets=None,
            config_maps=None,
            kv_env_vars=None,
            env=None,
            ports=None,
            run_path="run_path",
        )

        assert container.name == "test"
        assert container.image is None
        assert container.image_pull_policy is None
        assert container.command is None
        assert container.args is None
        assert container.ports == []
        assert container.env_from == []
        assert container.resources is None
        assert len(container.volume_mounts) == 1

        container = get_main_container(
            container_id="",
            main_container=k8s_schemas.V1Container(name="main"),
            contexts=None,
            volume_mounts=None,
            log_level=None,
            artifacts_store=None,
            init=[V1Init(connection=self.claim_store.name)],
            connections=[self.claim_store.name],
            connection_by_names={self.claim_store.name: self.claim_store},
            secrets=None,
            config_maps=None,
            kv_env_vars=None,
            env=None,
            ports=None,
            run_path="run_path",
        )

        assert container.name == "main"
        assert container.image is None
        assert container.image_pull_policy is None
        assert container.command is None
        assert container.args is None
        assert container.ports == []
        assert container.env_from == []
        assert container.resources is None
        assert len(container.volume_mounts) == 2

        container = get_main_container(
            container_id="main-job",
            main_container=k8s_schemas.V1Container(name="main"),
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(collect_artifacts=True,
                          collect_logs=True,
                          collect_resources=True)),
            volume_mounts=None,
            log_level=None,
            artifacts_store=self.claim_store,
            init=None,
            connections=[],
            connection_by_names={self.claim_store.name: self.claim_store},
            secrets=None,
            config_maps=None,
            kv_env_vars=None,
            env=None,
            ports=None,
            run_path="run_path",
        )

        assert container.name == "main-job"
        assert container.image is None
        assert container.image_pull_policy is None
        assert container.command is None
        assert container.args is None
        assert container.ports == []
        assert len(container.env) == 2
        assert container.env_from == []
        assert container.resources is None
        assert len(container.volume_mounts) == 1
Example no. 14
    def test_init_config(self):
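        # Valid init configs round-trip through from_dict/to_light_dict:
        # container only, connection only, connection plus container, git,
        # artifacts, and a standalone dockerfile section.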
        config_dict = {
            "container": {
                "name": "init1",
                "args": ["/subpath1", "subpath2"]
            }
        }
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict

        config_dict = {"connection": "foo"}
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict

        config_dict = {
            "connection": "foo",
            "container": {
                "name": "init1",
                "args": ["/subpath1", "subpath2"]
            },
        }
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict

        config_dict = {
            "connection": "foo",
            "git": {
                "revision": "branch1"
            },
            "container": {
                "name": "init1",
                "args": ["/subpath1", "subpath2"]
            },
        }
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict

        config_dict = {
            "connection": "foo",
            "artifacts": {
                "files": ["path1", "path2"]
            },
            "container": {
                "name": "init1",
                "args": ["/subpath1", "subpath2"]
            },
        }
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict

        config_dict = {
            "dockerfile": {
                "image": "tensorflow:1.3.0",
                "path": ["./module"],
                "copy": ["/foo/bar"],
                "run": ["pip install tensor2tensor"],
                "env": {
                    "LC_ALL": "en_US.UTF-8"
                },
                "filename": "dockerfile",
                "workdir": "",
                "shell": "sh",
            }
        }
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict

        # artifacts without connection
        config_dict = {"artifacts": {"files": ["path1", "path2"]}}
        config = V1Init.from_dict(config_dict)
        assert config.to_light_dict() == config_dict
Example no. 15
def get_op_specification(
    config: Union[V1Component, V1Operation] = None,
    hub: str = None,
    params: Dict = None,
    presets: List[str] = None,
    queue: str = None,
    nocache: bool = None,
    cache: bool = None,
    validate_params: bool = True,
    preset_files: List[str] = None,
    git_init: V1Init = None,
) -> V1Operation:
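    # Build a V1Operation from a component, an operation, or a hub reference;
    # apply preset files and an optional git_init (as a pre-merge run patch),
    # then optionally compile the result and validate params.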
    if cache and nocache:
        raise PolyaxonfileError("Received both cache and nocache")
    job_data = {
        "version": config.version if config else pkg.SCHEMA_VERSION,
        "kind": kinds.OPERATION,
    }
    if params:
        if not isinstance(params, Mapping):
            raise PolyaxonfileError(
                "Params: `{}` must be a valid mapping".format(params))
        job_data["params"] = params
    if presets:
        job_data["presets"] = presets
    if queue:
        # Check only
        get_queue_info(queue)
        job_data["queue"] = queue
    if cache:
        job_data["cache"] = {"disable": False}
    if nocache:
        job_data["cache"] = {"disable": True}

    if config and config.kind == kinds.COMPONENT:
        job_data["component"] = config.to_dict()
        config = get_specification(data=[job_data])
    elif config and config.kind == kinds.OPERATION:
        config = get_specification(data=[config.to_dict(), job_data])
    elif hub:
        job_data["hubRef"] = hub
        config = get_specification(data=[job_data])

    if hub and config.hub_ref is None:
        config.hub_ref = hub

    # Apply any preset files
    for preset_plx_file in preset_files or []:
        preset_plx_file = OperationSpecification.read(preset_plx_file,
                                                      is_preset=True)
        config = config.patch(preset_plx_file,
                              strategy=preset_plx_file.patch_strategy)
    # Turn git_init to a pre_merge preset
    if git_init:
        git_preset = V1Operation(run_patch={"init": [git_init.to_dict()]},
                                 is_preset=True)
        config = config.patch(git_preset, strategy=V1PatchStrategy.PRE_MERGE)

    # Sanity check if params were passed and we are not dealing with a hub component
    params = copy.deepcopy(config.params)
    if validate_params:
        # Avoid in-place patch
        run_config = get_specification(config.to_dict())
        run_config = OperationSpecification.compile_operation(run_config)
        run_config.validate_params(params=params, is_template=False)
        if run_config.is_dag_run:
            CompiledOperationSpecification.apply_operation_contexts(run_config)
    return config
Example no. 16
         type=types.STR,
         value="relu",
         is_optional=True),
    V1IO(name="optimizer", type=types.STR, value="adam", is_optional=True),
    V1IO(name="learning_rate", type=types.FLOAT, value=0.01, is_optional=True),
    V1IO(name="epochs", type=types.INT),
]

outputs = [
    V1IO(name="loss", type=types.FLOAT),
    V1IO(name="accuracy", type=types.FLOAT),
]

job = V1Job(
    init=[
        V1Init(git=V1GitType(
            url="https://github.com/polyaxon/polyaxon-quick-start"))
    ],
    container=V1Container(
        image="polyaxon/polyaxon-quick-start",
        working_dir="{{ globals.artifacts_path }}",
        command=["python3", "polyaxon-quick-start/model.py"],
        args=[
            "--conv1_size={{ conv1_size }}", "--conv2_size={{ conv2_size }}",
            "--dropout={{ dropout }}", "--hidden1_size={{ hidden1_size }}",
            "--optimizer={{ optimizer }}",
            "--conv_activation={{ conv_activation }}",
            "--dense_activation={{ dense_activation }}",
            "--learning_rate={{ learning_rate }}", "--epochs={{ epochs }}"
        ]),
)
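
A minimal sketch (not part of the original examples) of how the inputs, outputs, and job defined above could be wrapped into a component; the import path and the V1Component field names (inputs, outputs, run) are assumptions, not taken from the snippet.

from polyaxon.polyflow import V1Component  # import path assumed

component = V1Component(
    name="quick-start",  # hypothetical name
    inputs=inputs,       # V1IO list defined above
    outputs=outputs,     # V1IO list defined above
    run=job,             # the V1Job with its git V1Init
)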
Example no. 17
    def test_all_volumes(self):
        assert (
            len(
                get_volume_mounts(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(collect_logs=False, collect_artifacts=True)),
                    init=[
                        V1Init(connection=self.s3_store.name, path="/test-1"),
                        V1Init(connection=self.gcs_store.name, path="/test-2"),
                        V1Init(connection=self.az_store.name, path="/test-3"),
                        V1Init(connection=self.claim_store.name,
                               path="/test-4"),
                        V1Init(connection=self.host_path_store.name,
                               path="/test-5"),
                    ],
                    connections=[
                        self.s3_store,
                        self.gcs_store,
                        self.az_store,
                        self.claim_store,
                        self.host_path_store,
                    ],
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                ))
            # 1: output store
            # 7: 5 managed contexts + 2 mounts
            # 4: 4 mount resources (secrets + configs)
            == 1 + 7 + 4)
        assert (
            len(
                get_volume_mounts(
                    contexts=None,
                    init=[
                        V1Init(connection=self.s3_store.name, path="/test-1"),
                        V1Init(connection=self.gcs_store.name, path="/test-2"),
                        V1Init(connection=self.az_store.name, path="/test-3"),
                        V1Init(connection=self.claim_store.name,
                               path="/test-4"),
                        V1Init(connection=self.host_path_store.name,
                               path="/test-5"),
                    ],
                    connections=[
                        self.s3_store,
                        self.gcs_store,
                        self.az_store,
                        self.claim_store,
                        self.host_path_store,
                    ],
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                ))
            # 7: 5 managed contexts + 2 mounts
            # 4: 4 mount resources (secrets + configs)
            == 7 + 4)

        assert (
            len(
                get_volume_mounts(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(collect_logs=True, collect_artifacts=True)),
                    init=[
                        V1Init(connection=self.s3_store.name, path="/test-1"),
                        V1Init(connection=self.gcs_store.name, path="/test-2"),
                        V1Init(connection=self.az_store.name, path="/test-3"),
                        V1Init(connection=self.claim_store.name,
                               path="/test-4"),
                        V1Init(connection=self.host_path_store.name,
                               path="/test-5"),
                    ],
                    connections=[
                        self.s3_store,
                        self.gcs_store,
                        self.az_store,
                        self.claim_store,
                        self.host_path_store,
                    ],
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                ))
            # 1: outputs context store
            # 7: 5 managed contexts + 2 mounts
            # 4: 4 mount resources (secrets + configs)
            == 1 + 7 + 4)
        assert (
            len(
                get_volume_mounts(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(collect_logs=True, collect_artifacts=False)),
                    init=[
                        V1Init(connection=self.s3_store.name, path="/test-1"),
                        V1Init(connection=self.gcs_store.name, path="/test-2"),
                        V1Init(connection=self.az_store.name, path="/test-3"),
                        V1Init(connection=self.claim_store.name,
                               path="/test-4"),
                        V1Init(connection=self.host_path_store.name,
                               path="/test-5"),
                    ],
                    connections=[
                        self.s3_store,
                        self.gcs_store,
                        self.az_store,
                        self.claim_store,
                        self.host_path_store,
                    ],
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                ))
            # 7: 5 managed contexts + 2 mounts
            # 4: 4 mount resources (secrets + configs)
            == 7 + 4)
    def test_get_init_containers_with_claim_outputs(self):
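        # With no collect_artifacts context there is no init container; enabling
        # it adds the artifacts-path container; using the store for init adds a
        # store container; and with auth enabled the auth context container
        # comes first.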
        store = V1ConnectionType(
            name="test_claim",
            kind=V1ConnectionKind.VOLUME_CLAIM,
            schema=V1ClaimConnection(mount_path="/claim/path",
                                     volume_claim="claim",
                                     read_only=True),
        )

        # No context to enable the outputs
        containers = self.converter.get_init_containers(
            contexts=None,
            artifacts_store=store.name,
            init_connections=None,
            connection_by_names={},
            init_containers=[],
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        assert containers == []

        # Enable outputs
        containers = self.converter.get_init_containers(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(collect_artifacts=True, collect_logs=False)),
            artifacts_store=store,
            connection_by_names={},
            init_connections=None,
            init_containers=[],
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        assert containers == [
            get_artifacts_path_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                artifacts_store=store,
                run_path=self.converter.run_path,
                auto_resume=True,
            ),
        ]

        # Use store for init
        containers = self.converter.get_init_containers(
            contexts=None,
            artifacts_store=None,
            connection_by_names={store.name: store},
            init_connections=[V1Init(connection=store.name)],
            init_containers=[],
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        assert containers == [
            get_store_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                connection=store,
                artifacts=None,
                env=self.converter.get_init_service_env_vars(),
            )
        ]

        # Use store for init and outputs
        containers = self.converter.get_init_containers(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(collect_artifacts=True, collect_logs=False)),
            artifacts_store=store,
            init_connections=[V1Init(connection=store.name)],
            connection_by_names={store.name: store},
            init_containers=[],
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        assert containers == [
            get_artifacts_path_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                artifacts_store=store,
                run_path=self.converter.run_path,
                auto_resume=True,
            ),
            get_store_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                connection=store,
                artifacts=None,
                env=self.converter.get_init_service_env_vars(),
            ),
        ]

        # Add Store
        store1 = V1ConnectionType(
            name="test_gcs",
            kind=V1ConnectionKind.S3,
            schema=V1BucketConnection(bucket="s3://foo"),
            secret=None,
        )

        containers = self.converter.get_init_containers(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(collect_artifacts=True,
                          collect_logs=False,
                          auth=True)),
            artifacts_store=store,
            init_connections=[
                V1Init(
                    connection=store.name,
                    artifacts=V1ArtifactsType(files=["/foo", "/bar"]),
                ),
                V1Init(
                    connection=store1.name,
                    artifacts=V1ArtifactsType(files=["/foo", "/bar"]),
                ),
            ],
            connection_by_names={
                store.name: store,
                store1.name: store1
            },
            init_containers=[],
            polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
        )
        assert containers == [
            get_auth_context_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                env=self.converter.get_auth_service_env_vars(),
            ),
            get_artifacts_path_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                artifacts_store=store,
                run_path=self.converter.run_path,
                auto_resume=True,
            ),
            get_store_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                connection=store,
                artifacts=V1ArtifactsType(files=["/foo", "/bar"]),
                env=self.converter.get_init_service_env_vars(),
            ),
            get_store_container(
                polyaxon_init=V1PolyaxonInitContainer(image="foo/foo"),
                connection=store1,
                artifacts=V1ArtifactsType(files=["/foo", "/bar"]),
                env=self.converter.get_init_service_env_vars(),
            ),
        ]
Example no. 19
    def test_all_volumes(self):
        connection_by_names = {
            self.s3_store.name: self.s3_store,
            self.gcs_store.name: self.gcs_store,
            self.az_store.name: self.az_store,
            self.claim_store.name: self.claim_store,
            self.host_path_store.name: self.host_path_store,
        }

        # Test all init are in the same context
        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=True,
                            collect_logs=True,
                        )),
                    artifacts_store=self.claim_store,
                    init_connections=[
                        V1Init(connection=self.s3_store.name),
                        V1Init(connection=self.gcs_store.name),
                        V1Init(connection=self.az_store.name),
                        V1Init(connection=self.claim_store.name),
                        V1Init(connection=self.host_path_store.name),
                    ],
                    connection_by_names=connection_by_names,
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    volumes=[self.vol1, self.vol2, self.vol3],
                ))
            # 1: logs/output contexts (same volume) / 1 managed context
            # 3: 3 context requested constant contexts
            # 3: 3 volumes
            # 2: 2 mount volumes
            # 4: 4 mount resources (secrets + configs)
            == 1 + 3 + 3 + 2 + 4)

        init_connections = [
            V1Init(connection=self.s3_store.name, path="/test-1"),
            V1Init(connection=self.gcs_store.name, path="/test-2"),
            V1Init(connection=self.az_store.name, path="/test-3"),
            V1Init(connection=self.claim_store.name, path="/test-4"),
            V1Init(connection=self.host_path_store.name, path="/test-5"),
        ]
        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=True,
                            collect_logs=True,
                        )),
                    artifacts_store=self.claim_store,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    volumes=[self.vol1, self.vol2, self.vol3],
                ))
            # 1: logs/output contexts (same volume)
            # 3: 3 context requested constant contexts
            # 3: 3 volumes
            # 7: 5 managed contexts + 2 mount volumes
            # 4: 4 mount resources (secrets + configs)
            == 1 + 3 + 3 + 7 + 4)
        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=True,
                            collect_logs=True,
                        )),
                    artifacts_store=self.s3_store,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    volumes=[self.vol1, self.vol2, self.vol3],
                ))
            # 1: logs/output contexts (same volume)
            # 3: 3 context requested constant contexts
            # 3: 3 volumes
            # 7: 5 managed contexts + 2 mount volumes
            # 4: 4 mount resources (secrets + configs)
            == 1 + 3 + 3 + 7 + 4)

        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=False,
                            collect_logs=False,
                        )),
                    artifacts_store=None,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    volumes=[self.vol1, self.vol2, self.vol3],
                ))
            # 3: 3 context requested constant contexts
            # 3: 3 volumes
            # 7: 5 managed contexts + 2 mount volumes
            # 4: 4 mount resources (secrets + configs)
            == 3 + 3 + 7 + 4)
        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=False,
                            collect_logs=False,
                        )),
                    artifacts_store=None,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    volumes=[self.vol1, self.vol2, self.vol3],
                ))
            # 3: 3 context requested constant contexts
            # 3: 3 volumes
            # 7: 5 managed contexts + 2 mount volumes
            # 4: 4 mount resources (secrets + configs)
            == 3 + 3 + 7 + 4)
        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=True,
                            collect_logs=True,
                        )),
                    artifacts_store=self.host_path_store,
                    init_connections=[
                        V1Init(connection=self.s3_store.name),
                        V1Init(connection=self.gcs_store.name),
                        V1Init(connection=self.az_store.name),
                    ],
                    connection_by_names=connection_by_names,
                    secrets=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    config_maps=[
                        self.non_mount_resource1,
                        self.non_mount_resource1,
                        self.mount_resource1,
                        self.mount_resource2,
                    ],
                    volumes=[self.vol1, self.vol2, self.vol3],
                ))
            # 4: 4 context requested constant contexts / init volumes contexts
            # 3: 3 volumes
            # 2: 2 managed volumes
            # 4: 4 mount resources (secrets + configs)
            == 4 + 2 + 3 + 4)
Example no. 20
    def test_all_volumes_and_artifacts_store(self):
        connection_by_names = {
            self.s3_store.name: self.s3_store,
            self.gcs_store.name: self.gcs_store,
            self.az_store.name: self.az_store,
            self.claim_store.name: self.claim_store,
            self.host_path_store.name: self.host_path_store,
        }

        init_connections = [
            V1Init(connection=self.s3_store.name, path="/test-1"),
            V1Init(connection=self.gcs_store.name, path="/test-2"),
            V1Init(connection=self.az_store.name, path="/test-3"),
            V1Init(connection=self.claim_store.name, path="/test-4"),
            V1Init(connection=self.host_path_store.name, path="/test-5"),
        ]

        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=False,
                    collect_logs=False,
                )
            ),
            artifacts_store=None,
            init_connections=init_connections,
            connection_by_names=connection_by_names,
            connections=[],
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 3: 3 context requested constant contexts
        # 3: 3 volumes
        # 7: 5 managed contexts + 2 mount volumes
        # 1: 1 secret
        assert len(pod_volumes) == 3 + 3 + 7 + 1

        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=False,
                    collect_logs=False,
                )
            ),
            artifacts_store=None,
            init_connections=init_connections,
            connections=list(connection_by_names.keys()),
            connection_by_names=connection_by_names,
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 3: 3 context requested constant contexts
        # 3: 3 volumes
        # 7: 5 managed contexts + 2 mount volumes
        # 1: 1 secret
        assert len(pod_volumes) == 3 + 3 + 7 + 1

        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=True,
                    collect_logs=True,
                )
            ),
            artifacts_store=self.host_path_store,
            init_connections=[
                V1Init(connection=self.s3_store.name),
                V1Init(connection=self.gcs_store.name),
                V1Init(connection=self.az_store.name),
            ],
            connections=list(connection_by_names.keys()),
            connection_by_names=connection_by_names,
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 4: 4 context requested constant contexts / init volumes contexts
        # 3: 3 volumes
        # 2: 2 managed volumes
        assert len(pod_volumes) == 4 + 2 + 3 + 1

        # Enable requesting resources
        self.mount_resource1.is_requested = True
        self.mount_resource2.is_requested = True
        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=True,
                    collect_logs=True,
                )
            ),
            artifacts_store=self.host_path_store,
            init_connections=[
                V1Init(connection=self.s3_store.name),
                V1Init(connection=self.gcs_store.name),
                V1Init(connection=self.az_store.name),
            ],
            connections=list(connection_by_names.keys()),
            connection_by_names=connection_by_names,
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 4: 4 context requested constant contexts / init volumes contexts
        # 3: 3 volumes
        # 2: 2 managed volumes
        # 4: 4 mount resources (secrets + configs)
        assert len(pod_volumes) == 4 + 2 + 3 + 4
Example no. 21
    def test_multi_connections(self):
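        # Pod volume counts grow as init connections request distinct paths and
        # as plugin contexts (docker, shm, auth, artifacts/logs) are enabled;
        # explicitly requesting the connections does not add extra volumes here.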
        connection_by_names = {
            self.s3_store.name: self.s3_store,
            self.gcs_store.name: self.gcs_store,
            self.az_store.name: self.az_store,
            self.claim_store.name: self.claim_store,
            self.host_path_store.name: self.host_path_store,
        }
        init_connections = [
            V1Init(connection=self.s3_store.name, path="/test-1"),
            V1Init(connection=self.gcs_store.name, path="/test-2"),
            V1Init(connection=self.az_store.name, path="/test-3"),
            V1Init(connection=self.claim_store.name, path="/test-4"),
            V1Init(connection=self.host_path_store.name, path="/test-5"),
        ]
        assert (
            len(
                get_pod_volumes(
                    contexts=None,
                    artifacts_store=None,
                    init_connections=[],
                    connection_by_names=connection_by_names,
                    connections=[],
                    secrets=[],
                    config_maps=[],
                    volumes=[],
                )
            )
            == 2
        )

        # test all inits are mounted to the same context and a single secret requested for all
        assert (
            len(
                get_pod_volumes(
                    contexts=None,
                    artifacts_store=None,
                    init_connections=[
                        V1Init(connection=self.s3_store.name),
                        V1Init(connection=self.gcs_store.name),
                        V1Init(connection=self.az_store.name),
                        V1Init(connection=self.claim_store.name),
                        V1Init(connection=self.host_path_store.name),
                    ],
                    connection_by_names=connection_by_names,
                    connections=[],
                    secrets=[],
                    config_maps=[],
                    volumes=[],
                )
            )
            == 4
        )

        assert (
            len(
                get_pod_volumes(
                    contexts=None,
                    artifacts_store=None,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    connections=[],
                    secrets=[],
                    config_maps=[],
                    volumes=[],
                )
            )
            == 8
        )

        assert (
            len(
                get_pod_volumes(
                    contexts=None,
                    artifacts_store=None,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    connections=[],
                    secrets=[],
                    config_maps=[],
                    volumes=[],
                )
            )
            == 8
        )

        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=True,
                            collect_logs=True,
                        )
                    ),
                    artifacts_store=self.claim_store,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    connections=[],
                    secrets=[],
                    config_maps=[],
                    volumes=[],
                )
            )
            == 12
        )

        assert (
            len(
                get_pod_volumes(
                    contexts=PluginsContextsSpec.from_config(
                        V1Plugins(
                            docker=True,
                            shm=True,
                            auth=True,
                            collect_artifacts=True,
                            collect_logs=True,
                        )
                    ),
                    artifacts_store=self.claim_store,
                    init_connections=init_connections,
                    connection_by_names=connection_by_names,
                    connections=list(connection_by_names.keys()),
                    secrets=[],
                    config_maps=[],
                    volumes=[],
                )
            )
            == 12
        )
Example no. 22
    def init_run(
        self,
        project_id: int,
        user_id: int,
        op_spec: V1Operation = None,
        compiled_operation: V1CompiledOperation = None,
        name: str = None,
        description: str = None,
        tags: str = None,
        override: Union[str, Dict] = None,
        override_post: bool = True,
        params: Dict = None,
        readme: str = None,
        original_id: int = None,
        original_uuid: int = None,
        cloning_kind: str = None,
        is_managed: bool = True,
        supported_kinds: Set[str] = None,
        **kwargs,
    ) -> Tuple[V1CompiledOperation, BaseRun]:
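        # Compile the operation spec if needed, derive params/inputs and the run
        # kind, and for copy-cloning add an artifacts init pointing at the
        # original run's uuid before building the run model instance.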
        content = None
        raw_content = None
        if op_spec:
            op_spec = self.set_spec(op_spec)
            raw_content = op_spec.to_dict(dump=True)
        if op_spec:
            if not compiled_operation or override:
                compiled_operation = OperationSpecification.compile_operation(
                    op_spec, override=override, override_post=override_post)
            params = op_spec.params

        params = params or {}
        inputs = {p: pv.value for p, pv in params.items() if pv.is_literal}
        params = {p: pv.to_dict() for p, pv in params.items()}
        kind = None
        meta_info = {}
        if compiled_operation:
            name = name or compiled_operation.name
            description = description or compiled_operation.description
            tags = tags or compiled_operation.tags
            kind, meta_kind = self.get_kind(compiled_operation)
            kind, meta_info = self.get_meta_info(compiled_operation, kind,
                                                 meta_kind)
            self.supports_kind(kind, meta_kind, supported_kinds, is_managed)
            if cloning_kind == V1CloningKind.COPY:
                if meta_kind not in {V1RunKind.JOB, V1RunKind.SERVICE}:
                    raise ValueError(
                        "Operation with kind `{}` does not support restart with copy mode.".format(
                            meta_kind
                        )
                    )
                compiled_operation.run.add_init(
                    V1Init(artifacts=V1ArtifactsType(dirs=[original_uuid])))
            content = compiled_operation.to_dict(dump=True)
        instance = get_run_model()(
            project_id=project_id,
            user_id=user_id,
            name=name,
            description=description,
            tags=tags,
            readme=readme,
            raw_content=raw_content,
            content=content,
            params=params,
            inputs=inputs,
            kind=kind,
            meta_info=meta_info,
            original_id=original_id,
            cloning_kind=cloning_kind,
            is_managed=is_managed,
            status_conditions=[
                V1StatusCondition.get_condition(
                    type=V1Statuses.CREATED,
                    status="True",
                    reason="PolyaxonRunCreated",
                    message="Run is created",
                ).to_dict()
            ],
            **self.sanitize_kwargs(**kwargs),
        )
        return compiled_operation, instance
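
Both versions of init_run split the resolved params the same way: only literal params land in inputs (as raw values), while every param is stored back as its full dict. A minimal sketch with a hypothetical stand-in for the compiled param objects (the real Polyaxon param classes carry more fields):

from dataclasses import dataclass
from typing import Any


@dataclass
class Param:
    # Hypothetical stand-in for the compiled param objects used by init_run.
    value: Any
    is_literal: bool

    def to_dict(self):
        return {"value": self.value, "is_literal": self.is_literal}


params = {"lr": Param(0.01, True), "data": Param("{{ connections.s3-store }}", False)}
inputs = {p: pv.value for p, pv in params.items() if pv.is_literal}
params = {p: pv.to_dict() for p, pv in params.items()}

assert inputs == {"lr": 0.01}
assert params["data"]["is_literal"] is False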
Example #23
    def init_run(
        self,
        project_id: int,
        user_id: int,
        op_spec: V1Operation = None,
        compiled_operation: V1CompiledOperation = None,
        name: str = None,
        description: str = None,
        tags: str = None,
        override: Union[str, Dict] = None,
        params: Dict = None,
        readme: str = None,
        original_id: int = None,
        original_uuid: int = None,
        cloning_kind: str = None,
        is_managed: bool = True,
        is_approved: bool = True,
        meta_info: Dict = None,
        supported_kinds: Set[str] = None,
        init: Optional[List[V1Init]] = None,
        **kwargs,
    ) -> Tuple[V1CompiledOperation, BaseRun]:
        if op_spec:
            op_spec, kwargs = self.set_spec(op_spec, **kwargs)
        if op_spec:
            if not compiled_operation or override:
                compiled_operation = OperationSpecification.compile_operation(
                    op_spec, override=override
                )
            params = op_spec.params

        params = params or {}
        inputs = {p: pv.value for p, pv in params.items() if pv.is_literal}
        params = {p: pv.to_dict() for p, pv in params.items()}
        kind = None
        meta_info = meta_info or {}
        if compiled_operation:
            if is_approved and compiled_operation.is_approved is not None:
                is_approved = compiled_operation.is_approved
            name = name or compiled_operation.name
            description = description or compiled_operation.description
            tags = tags or compiled_operation.tags
            kind, runtime = self.get_kind(compiled_operation)
            kind, runtime, meta_info = self.get_meta_info(
                compiled_operation, kind, runtime, meta_info, **kwargs
            )
            self.supports_kind(kind, runtime, supported_kinds, is_managed)
            if cloning_kind == V1CloningKind.COPY:
                if runtime not in {V1RunKind.JOB, V1RunKind.SERVICE}:
                    raise ValueError(
                        "Operation with kind `{}` does not support restart with copy mode.".format(
                            runtime
                        )
                    )
                compiled_operation.run.add_init(
                    V1Init(
                        artifacts=V1ArtifactsType(
                            dirs=[[original_uuid, "{{ globals.run_artifacts_path }}"]]
                        )
                    )
                )
            if init:
                if runtime not in {V1RunKind.JOB, V1RunKind.SERVICE}:
                    raise ValueError(
                        "Operation with kind `{}` does not support "
                        "additional init containers.".format(runtime)
                    )
                compiled_operation.run.add_init(init)
            kwargs["content"] = compiled_operation.to_dict(dump=True)
        instance = get_run_model()(
            project_id=project_id,
            user_id=user_id,
            name=name,
            description=description,
            tags=tags,
            readme=readme,
            params=params,
            inputs=inputs,
            kind=kind,
            runtime=runtime,
            meta_info=meta_info,
            original_id=original_id,
            cloning_kind=cloning_kind,
            is_managed=is_managed,
            is_approved=is_approved,
            status_conditions=[
                V1StatusCondition.get_condition(
                    type=V1Statuses.CREATED,
                    status="True",
                    reason=kwargs.pop("reason", "OperationServiceInit"),
                    message=kwargs.pop("message", "Run is created"),
                ).to_dict()
            ],
            **self.sanitize_kwargs(**kwargs),
        )
        return compiled_operation, instance
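
Compared with the previous example, the copy-clone init here passes each dirs entry as a [source, destination] pair and routes the destination through the {{ globals.run_artifacts_path }} context. A minimal sketch of building such an entry; the import paths and the uuid literal are assumptions and may differ across Polyaxon versions:

# Import paths are assumed from Polyaxon 1.x and may differ in other versions.
from polyaxon.polyflow import V1Init
from polyaxon.schemas.types import V1ArtifactsType

original_uuid = "8aac02e3a62a4f0aaa257c59da5eab80"  # placeholder uuid
copy_init = V1Init(
    artifacts=V1ArtifactsType(
        # [source, destination]: copy the original run's artifacts directory
        # into the new run's artifacts path resolved from the globals context.
        dirs=[[original_uuid, "{{ globals.run_artifacts_path }}"]]
    )
)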
Example #24
    def test_all_volumes_and_init_in_the_same_context(self):
        connection_by_names = {
            self.s3_store.name: self.s3_store,
            self.gcs_store.name: self.gcs_store,
            self.az_store.name: self.az_store,
            self.claim_store.name: self.claim_store,
            self.host_path_store.name: self.host_path_store,
        }

        # Test all init are in the same context
        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=True,
                    collect_logs=True,
                )
            ),
            artifacts_store=self.claim_store,
            init_connections=[
                V1Init(connection=self.s3_store.name),
                V1Init(connection=self.gcs_store.name),
                V1Init(connection=self.az_store.name),
                V1Init(connection=self.claim_store.name),
                V1Init(connection=self.host_path_store.name),
            ],
            connections=[],
            connection_by_names=connection_by_names,
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 1: logs/output contexts (same volume) / 1 managed context
        # 3: 3 requested constant contexts (docker, shm, auth)
        # 3: 3 volumes
        # 2: 2 mount volumes
        # 1: 1 mount secret
        assert len(pod_volumes) == 1 + 3 + 3 + 2 + 1

        # Test all init are in the same context
        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=True,
                    collect_logs=True,
                )
            ),
            artifacts_store=self.claim_store,
            init_connections=[
                V1Init(connection=self.s3_store.name),
                V1Init(connection=self.gcs_store.name),
                V1Init(connection=self.az_store.name),
                V1Init(connection=self.claim_store.name),
                V1Init(connection=self.host_path_store.name),
            ],
            connections=list(connection_by_names.keys()),
            connection_by_names=connection_by_names,
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 1: logs/output contexts (same volume) / 1 managed context
        # 3: 3 requested constant contexts (docker, shm, auth)
        # 3: 3 volumes
        # 2: 2 mount volumes
        # 1: 1 mount resource (secrets + configs not requested)
        assert len(pod_volumes) == 1 + 3 + 3 + 2 + 1

        # Enable requesting resources
        self.mount_resource1.is_requested = True
        self.mount_resource2.is_requested = True
        # Test all init are in the same context and requested values
        pod_volumes = get_pod_volumes(
            contexts=PluginsContextsSpec.from_config(
                V1Plugins(
                    docker=True,
                    shm=True,
                    auth=True,
                    collect_artifacts=True,
                    collect_logs=True,
                )
            ),
            artifacts_store=self.claim_store,
            init_connections=[
                V1Init(connection=self.s3_store.name),
                V1Init(connection=self.gcs_store.name),
                V1Init(connection=self.az_store.name),
                V1Init(connection=self.claim_store.name),
                V1Init(connection=self.host_path_store.name),
            ],
            connections=list(connection_by_names.keys()),
            connection_by_names=connection_by_names,
            secrets=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            config_maps=[
                self.non_mount_resource1,
                self.non_mount_resource1,
                self.mount_resource1,
                self.mount_resource2,
            ],
            volumes=[self.vol1, self.vol2, self.vol3],
        )
        # 1: logs/output contexts (same volume) / 1 managed context
        # 3: 3 requested constant contexts (docker, shm, auth)
        # 3: 3 volumes
        # 2: 2 mount volumes
        # 4: 4 mount resources (secrets + configs)
        assert len(pod_volumes) == 1 + 3 + 3 + 2 + 4
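
The only term that changes across the three calls is the mounted-resources term: once mount_resource1 and mount_resource2 are flagged as requested, they are mounted from both secrets and config_maps, so that term grows from 1 to 4. A small tally mirroring the comments above:

# Expected pod-volume counts per category, mirroring the comments in the test above.
base_counts = {
    "managed logs/outputs context": 1,
    "requested constant contexts (docker, shm, auth)": 3,
    "explicit volumes": 3,
    "mounted connections (claim + host_path)": 2,
}
assert sum(base_counts.values()) + 1 == 10  # resources not requested: 1 mount
assert sum(base_counts.values()) + 4 == 13  # requested: 2 secrets + 2 config maps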