Example 1
    def env_from_sources(self):
        '''This constructs a list of env_from sources. Along with a default base environment
        config map which we always load, the ConfigMaps and Secrets specified via
        config_map_env_froms and secret_env_froms will be pulled into the job construction here.
        '''
        base_env = client.V1EnvFromSource(
            config_map_ref=client.V1ConfigMapEnvSource(
                name='dagster-job-runner-env'))

        config_maps = [
            client.V1EnvFromSource(config_map_ref=client.V1ConfigMapEnvSource(
                name=config_map)) for config_map in self._env_config_maps
        ]

        secrets = [
            client.V1EnvFromSource(secret_ref=client.V1SecretEnvSource(
                name=secret)) for secret in self._env_secrets
        ]

        return [base_env] + config_maps + secrets
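
A minimal sketch of how a list like the one returned by env_from_sources is typically consumed: it is passed to the env_from field of a V1Container. The ConfigMap and Secret names below are placeholders standing in for self._env_config_maps and self._env_secrets.

from kubernetes import client

env_from = [
    client.V1EnvFromSource(
        config_map_ref=client.V1ConfigMapEnvSource(name='dagster-job-runner-env')),
    client.V1EnvFromSource(
        config_map_ref=client.V1ConfigMapEnvSource(name='extra-config')),  # placeholder
    client.V1EnvFromSource(
        secret_ref=client.V1SecretEnvSource(name='extra-secret')),         # placeholder
]

container = client.V1Container(
    name='job',
    image='busybox',
    env_from=env_from)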
Example 2
    def deployment_object(self, instance_uuid, cnf_yaml, service_uuid):
        """
        CNF modeling method. This build a deployment object in kubernetes
        instance_uuid: k8s deployment name
        cnf_yaml: CNF Descriptor in yaml format
        """
        LOG.debug("CNFD: {}".format(cnf_yaml))
        container_list = []
        deployment_k8s = None
        if "cloudnative_deployment_units" in cnf_yaml:
            cdu = cnf_yaml.get('cloudnative_deployment_units')
            for cdu_obj in cdu:
                env_vars = env_from = cpu = memory = huge_pages = gpu = sr_iov = resources = None
                port_list = []
                environment = []
                cdu_id = cdu_obj.get('id')
                image = cdu_obj.get('image')
                cdu_conex = cdu_obj.get('connection_points')
                container_name = cdu_id
                config_map_id = cdu_id
                if cdu_obj.get('parameters'):
                    env_vars = cdu_obj['parameters'].get('env')
                if cdu_obj.get('resource_requirements'):
                    gpu = cdu_obj['resource_requirements'].get('gpu')
                    cpu = cdu_obj['resource_requirements'].get('cpu')
                    memory = cdu_obj['resource_requirements'].get('memory')
                    sr_iov = cdu_obj['resource_requirements'].get('sr-iov')
                    huge_pages = cdu_obj['resource_requirements'].get(
                        'huge-pages')
                if cdu_conex:
                    for po in cdu_conex:
                        port = po.get('port')
                        port_name = po.get('id')
                        port_list.append(
                            client.V1ContainerPort(container_port=port,
                                                   name=port_name))

                limits = {}
                requests = {}
                if gpu:
                    LOG.debug("Features requested: {}".format(gpu))
                    # gpu_type can be amd or nvidia
                    for gpu_type, amount in gpu.items():
                        limits["{}.com/gpu".format(gpu_type)] = amount
                if cpu:
                    # TODO
                    pass
                if memory:
                    # TODO
                    pass
                if sr_iov:
                    # TODO
                    pass
                if huge_pages:
                    # TODO
                    pass

                resources = client.V1ResourceRequirements(limits=limits,
                                                          requests=requests)

                # Environment variables from descriptor
                if env_vars:
                    LOG.debug("Configmap: {}".format(config_map_id))
                    KubernetesWrapperEngine.create_configmap(
                        self,
                        config_map_id,
                        instance_uuid,
                        env_vars,
                        service_uuid,
                        namespace="default")
                else:
                    env_vars = {"sonata": "rules"}
                    LOG.debug("Configmap: {}".format(config_map_id))
                    KubernetesWrapperEngine.create_configmap(
                        self,
                        config_map_id,
                        instance_uuid,
                        env_vars,
                        service_uuid,
                        namespace="default")
                env_from = client.V1EnvFromSource(
                    config_map_ref=client.V1ConfigMapEnvSource(
                        name=config_map_id, optional=False))

                # Default static environment variables
                environment.append(
                    client.V1EnvVar(name="instance_uuid", value=instance_uuid))
                environment.append(
                    client.V1EnvVar(name="service_uuid", value=service_uuid))
                environment.append(
                    client.V1EnvVar(name="container_name",
                                    value=container_name))
                environment.append(
                    client.V1EnvVar(name="vendor",
                                    value=KubernetesWrapperEngine.normalize(
                                        self, cnf_yaml.get('vendor'))))
                environment.append(
                    client.V1EnvVar(name="name",
                                    value=KubernetesWrapperEngine.normalize(
                                        self, cnf_yaml.get('name'))))
                environment.append(
                    client.V1EnvVar(name="version",
                                    value=KubernetesWrapperEngine.normalize(
                                        self, cnf_yaml.get('version'))))

                image_pull_policy = KubernetesWrapperEngine.check_connection(
                    self)

                # Configure the Pod template container
                container = client.V1Container(
                    env=environment,
                    name=container_name,
                    resources=resources,
                    image=image,
                    image_pull_policy=image_pull_policy,
                    ports=port_list,
                    env_from=[env_from])
                container_list.append(container)
        else:
            return deployment_k8s

        # Create and configure the spec section
        deployment_label = ("{}-{}-{}-{}".format(
            cnf_yaml.get("vendor"), cnf_yaml.get("name"),
            cnf_yaml.get("version"),
            instance_uuid.split("-")[0])).replace(".", "-")
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                labels={
                    'deployment': deployment_label,
                    'instance_uuid': cnf_yaml['instance_uuid'],
                    'service_uuid': service_uuid,
                    "sp": "sonata"
                }),
            spec=client.V1PodSpec(containers=container_list))
        # Create the specification of deployment
        spec = client.ExtensionsV1beta1DeploymentSpec(replicas=1,
                                                      template=template)
        # Instantiate the deployment object
        deployment_k8s = client.ExtensionsV1beta1Deployment(
            api_version="extensions/v1beta1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=deployment_label),
            spec=spec)
        return deployment_k8s
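
Note that this snippet targets the extensions/v1beta1 Deployment API, which Kubernetes stopped serving in 1.16. A hedged sketch of the equivalent apps/v1 construction (the approach Example 10 below takes), reusing the container_list and deployment_label built above; apps/v1 additionally requires a selector that matches the template labels.

from kubernetes import client

labels = {'deployment': deployment_label}  # deployment_label as computed above

template = client.V1PodTemplateSpec(
    metadata=client.V1ObjectMeta(labels=labels),
    spec=client.V1PodSpec(containers=container_list))

spec = client.V1DeploymentSpec(
    replicas=1,
    selector=client.V1LabelSelector(match_labels=labels),
    template=template)

deployment_k8s = client.V1Deployment(
    api_version="apps/v1",
    kind="Deployment",
    metadata=client.V1ObjectMeta(name=deployment_label),
    spec=spec)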
Example 3
def update_op(op: dsl.ContainerOp,
              pipeline_name: dsl.PipelineParam,
              pipeline_root: dsl.PipelineParam,
              launcher_image: Optional[str] = None) -> None:
    """Updates the passed in Op for running in v2-compatible mode.

    Args:
      op: The Op to update.
      pipeline_name: The name of the pipeline under which `op` runs.
      pipeline_root: The root output directory for pipeline artifacts.
      launcher_image: An optional launcher image. Useful for tests.
    """
    op.is_v2 = True
    # Inject the launcher binary and overwrite the entrypoint.
    image_name = launcher_image or _DEFAULT_LAUNCHER_IMAGE
    launcher_container = dsl.UserContainer(
        name="kfp-launcher",
        image=image_name,
        command=["launcher", "--copy", "/kfp-launcher/launch"],
        mirror_volume_mounts=True)

    op.add_init_container(launcher_container)
    op.add_volume(k8s_client.V1Volume(name='kfp-launcher'))
    op.add_volume_mount(
        k8s_client.V1VolumeMount(name='kfp-launcher',
                                 mount_path='/kfp-launcher'))

    # op.command + op.args will have the following sections:
    # 1. args passed to kfp-launcher
    # 2. a separator "--"
    # 3. parameters in format "key1=value1", "key2=value2", ...
    # 4. a separator "--" as end of arguments passed to launcher
    # 5. (start of op.args) arguments of the original user program command + args
    #
    # example:
    # - command:
    # - /kfp-launcher/launch
    # - '--mlmd_server_address'
    # - $(METADATA_GRPC_SERVICE_HOST)
    # - '--mlmd_server_port'
    # - $(METADATA_GRPC_SERVICE_PORT)
    # - ... # more launcher params
    # - '--pipeline_task_id'
    # - $(KFP_POD_NAME)
    # - '--pipeline_root'
    # - ''
    # - '--' # start of parameter values
    # - first=first
    # - second=second
    # - '--' # start of user command and args
    # args:
    # - sh
    # - '-ec'
    # - |
    #     program_path=$(mktemp)
    #     printf "%s" "$0" > "$program_path"
    #     python3 -u "$program_path" "$@"
    # - >
    #     import json
    #     import xxx
    #     ...
    op.command = [
        "/kfp-launcher/launch",
        "--mlmd_server_address",
        "$(METADATA_GRPC_SERVICE_HOST)",
        "--mlmd_server_port",
        "$(METADATA_GRPC_SERVICE_PORT)",
        "--runtime_info_json",
        "$(KFP_V2_RUNTIME_INFO)",
        "--container_image",
        "$(KFP_V2_IMAGE)",
        "--task_name",
        op.name,
        "--pipeline_name",
        pipeline_name,
        "--run_id",
        "$(KFP_RUN_ID)",
        "--run_resource",
        "workflows.argoproj.io/$(WORKFLOW_ID)",
        "--namespace",
        "$(KFP_NAMESPACE)",
        "--pod_name",
        "$(KFP_POD_NAME)",
        "--pod_uid",
        "$(KFP_POD_UID)",
        "--pipeline_root",
        pipeline_root,
        "--enable_caching",
        "$(ENABLE_CACHING)",
    ]

    # Mount necessary environment variables.
    op.apply(_default_transformers.add_kfp_pod_env)
    op.container.add_env_variable(
        k8s_client.V1EnvVar(name="KFP_V2_IMAGE", value=op.container.image))

    config_map_ref = k8s_client.V1ConfigMapEnvSource(
        name='metadata-grpc-configmap', optional=True)
    op.container.add_env_from(
        k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))

    op.arguments = list(op.container_spec.command) + list(
        op.container_spec.args)

    runtime_info = {
        "inputParameters": collections.OrderedDict(),
        "inputArtifacts": collections.OrderedDict(),
        "outputParameters": collections.OrderedDict(),
        "outputArtifacts": collections.OrderedDict(),
    }

    op.command += ["--"]
    component_spec = op.component_spec
    for parameter, spec in sorted(
            component_spec.input_definitions.parameters.items()):
        parameter_info = {
            "type":
            pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),
        }
        op.command += [f"{parameter}={op._parameter_arguments[parameter]}"]
        runtime_info["inputParameters"][parameter] = parameter_info
    op.command += ["--"]

    for artifact_name, spec in sorted(
            component_spec.input_definitions.artifacts.items()):
        artifact_info = {
            "metadataPath": op.input_artifact_paths[artifact_name],
            "schemaTitle": spec.artifact_type.schema_title,
            "instanceSchema": spec.artifact_type.instance_schema,
        }
        runtime_info["inputArtifacts"][artifact_name] = artifact_info

    for parameter, spec in sorted(
            component_spec.output_definitions.parameters.items()):
        parameter_info = {
            "type":
            pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),
            "path":
            op.file_outputs[parameter],
        }
        runtime_info["outputParameters"][parameter] = parameter_info

    for artifact_name, spec in sorted(
            component_spec.output_definitions.artifacts.items()):
        # TODO: Assert instance_schema.
        artifact_info = {
            # Type used to register output artifacts.
            "schemaTitle": spec.artifact_type.schema_title,
            "instanceSchema": spec.artifact_type.instance_schema,
            # File used to write out the registered artifact ID.
            "metadataPath": op.file_outputs[artifact_name],
        }
        runtime_info["outputArtifacts"][artifact_name] = artifact_info

    op.container.add_env_variable(
        k8s_client.V1EnvVar(name="KFP_V2_RUNTIME_INFO",
                            value=json.dumps(runtime_info)))

    op.pod_annotations['pipelines.kubeflow.org/v2_component'] = "true"
    op.pod_labels['pipelines.kubeflow.org/v2_component'] = "true"
Example 4
 def mount_config_map(container_op: dsl.ContainerOp):
   config_map_ref = k8s_client.V1ConfigMapEnvSource(
       name=config_map_name, optional=True)
   container_op.container.add_env_from(
       k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))
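
The leading indentation suggests mount_config_map is a nested function, most likely returned from a small factory so it can be applied to an op with op.apply(). A hedged sketch of that pattern; use_config_map and the ConfigMap name are assumptions, not part of the original snippet.

from kfp import dsl
from kubernetes import client as k8s_client

def use_config_map(config_map_name: str):
    """Return a transformer that imports all keys of a ConfigMap as env vars."""
    def mount_config_map(container_op: dsl.ContainerOp):
        config_map_ref = k8s_client.V1ConfigMapEnvSource(
            name=config_map_name, optional=True)
        container_op.container.add_env_from(
            k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))
        return container_op
    return mount_config_map

# Inside a pipeline definition:
#   op = some_component_op(...)            # hypothetical component
#   op.apply(use_config_map('my-config'))  # hypothetical ConfigMap name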
Example 5
  def mount_secret(container_op: dsl.ContainerOp):
    secret_ref = k8s_client.V1SecretEnvSource(
        name=secret_name, optional=True)

    container_op.container.add_env_from(
        k8s_client.V1EnvFromSource(secret_ref=secret_ref))
Example 6
    def test_sanitize_k8s_container_attribute(self):
        # test cases for implicit type sanitization (conversion)
        op = dsl.ContainerOp(name='echo', image='image', command=['sh', '-c'],
                           arguments=['echo test | tee /tmp/message.txt'],
                           file_outputs={'merged': '/tmp/message.txt'})
        op.container \
                .add_volume_mount(k8s_client.V1VolumeMount(
                    mount_path='/secret/gcp-credentials',
                    name='gcp-credentials')) \
                .add_env_variable(k8s_client.V1EnvVar(
                    name=80,
                    value=80)) \
                .add_env_variable(k8s_client.V1EnvVar(
                    name=80,
                    value_from=k8s_client.V1EnvVarSource(
                        config_map_key_ref=k8s_client.V1ConfigMapKeySelector(key=80, name=8080, optional='False'),
                        field_ref=k8s_client.V1ObjectFieldSelector(api_version=80, field_path=8080),
                        resource_field_ref=k8s_client.V1ResourceFieldSelector(container_name=80, divisor=8080, resource=8888),
                        secret_key_ref=k8s_client.V1SecretKeySelector(key=80, name=8080, optional='False')
                    )
                )) \
                .add_env_from(k8s_client.V1EnvFromSource(
                    config_map_ref=k8s_client.V1ConfigMapEnvSource(name=80, optional='True'),
                    prefix=999
                )) \
                .add_env_from(k8s_client.V1EnvFromSource(
                    secret_ref=k8s_client.V1SecretEnvSource(name=80, optional='True'),
                    prefix=888
                )) \
                .add_volume_mount(k8s_client.V1VolumeMount(
                    mount_path=111,
                    mount_propagation=222,
                    name=333,
                    read_only='False',
                    sub_path=444,
                    sub_path_expr=555
                )) \
                .add_volume_devices(k8s_client.V1VolumeDevice(
                    device_path=111,
                    name=222
                )) \
                .add_port(k8s_client.V1ContainerPort(
                    container_port='8080',
                    host_ip=111,
                    host_port='8888',
                    name=222,
                    protocol=333
                )) \
                .set_security_context(k8s_client.V1SecurityContext(
                    allow_privilege_escalation='True',
                    capabilities=k8s_client.V1Capabilities(add=[11, 22], drop=[33, 44]),
                    privileged='False',
                    proc_mount=111,
                    read_only_root_filesystem='False',
                    run_as_group='222',
                    run_as_non_root='True',
                    run_as_user='******',
                    se_linux_options=k8s_client.V1SELinuxOptions(level=11, role=22, type=33, user=44),
                    windows_options=k8s_client.V1WindowsSecurityContextOptions(
                        gmsa_credential_spec=11, gmsa_credential_spec_name=22)
                )) \
                .set_stdin(stdin='False') \
                .set_stdin_once(stdin_once='False') \
                .set_termination_message_path(termination_message_path=111) \
                .set_tty(tty='False') \
                .set_readiness_probe(readiness_probe=k8s_client.V1Probe(
                    _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                    failure_threshold='111',
                    http_get=k8s_client.V1HTTPGetAction(
                        host=11,
                        http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                        path=44,
                        port='55',
                        scheme=66),
                    initial_delay_seconds='222',
                    period_seconds='333',
                    success_threshold='444',
                    tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
                    timeout_seconds='777'
                )) \
                .set_liveness_probe(liveness_probe=k8s_client.V1Probe(
                    _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                    failure_threshold='111',
                    http_get=k8s_client.V1HTTPGetAction(
                        host=11,
                        http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                        path=44,
                        port='55',
                        scheme=66),
                    initial_delay_seconds='222',
                    period_seconds='333',
                    success_threshold='444',
                    tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666'),
                    timeout_seconds='777'
                )) \
                .set_lifecycle(lifecycle=k8s_client.V1Lifecycle(
                    post_start=k8s_client.V1Handler(
                        _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                        http_get=k8s_client.V1HTTPGetAction(
                            host=11,
                            http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                            path=44,
                            port='55',
                            scheme=66),
                        tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666')
                    ),
                    pre_stop=k8s_client.V1Handler(
                        _exec=k8s_client.V1ExecAction(command=[11, 22, 33]),
                        http_get=k8s_client.V1HTTPGetAction(
                            host=11,
                            http_headers=[k8s_client.V1HTTPHeader(name=22, value=33)],
                            path=44,
                            port='55',
                            scheme=66),
                        tcp_socket=k8s_client.V1TCPSocketAction(host=555, port='666')
                    )
                ))

        sanitize_k8s_object(op.container)

        for e in op.container.env:
            self.assertIsInstance(e.name, str)
            if e.value:
                self.assertIsInstance(e.value, str)
            if e.value_from:
                if e.value_from.config_map_key_ref:
                    self.assertIsInstance(e.value_from.config_map_key_ref.key, str)
                    if e.value_from.config_map_key_ref.name:
                        self.assertIsInstance(e.value_from.config_map_key_ref.name, str)
                    if e.value_from.config_map_key_ref.optional:
                        self.assertIsInstance(e.value_from.config_map_key_ref.optional, bool)
                if e.value_from.field_ref:
                    self.assertIsInstance(e.value_from.field_ref.field_path, str)
                    if e.value_from.field_ref.api_version:
                        self.assertIsInstance(e.value_from.field_ref.api_version, str)
                if e.value_from.resource_field_ref:
                    self.assertIsInstance(e.value_from.resource_field_ref.resource, str)
                    if e.value_from.resource_field_ref.container_name:
                        self.assertIsInstance(e.value_from.resource_field_ref.container_name, str)
                    if e.value_from.resource_field_ref.divisor:
                        self.assertIsInstance(e.value_from.resource_field_ref.divisor, str)
                if e.value_from.secret_key_ref:
                    self.assertIsInstance(e.value_from.secret_key_ref.key, str)
                    if e.value_from.secret_key_ref.name:
                        self.assertIsInstance(e.value_from.secret_key_ref.name, str)
                    if e.value_from.secret_key_ref.optional:
                        self.assertIsInstance(e.value_from.secret_key_ref.optional, bool)

        for e in op.container.env_from:
            if e.prefix:
                self.assertIsInstance(e.prefix, str)
            if e.config_map_ref:
                if e.config_map_ref.name:
                    self.assertIsInstance(e.config_map_ref.name, str)
                if e.config_map_ref.optional:
                    self.assertIsInstance(e.config_map_ref.optional, bool)
            if e.secret_ref:
                if e.secret_ref.name:
                    self.assertIsInstance(e.secret_ref.name, str)
                if e.secret_ref.optional:
                    self.assertIsInstance(e.secret_ref.optional, bool)

        for e in op.container.volume_mounts:
            if e.mount_path:
                self.assertIsInstance(e.mount_path, str)
            if e.mount_propagation:
                self.assertIsInstance(e.mount_propagation, str)
            if e.name:
                self.assertIsInstance(e.name, str)
            if e.read_only:
                self.assertIsInstance(e.read_only, bool)
            if e.sub_path:
                self.assertIsInstance(e.sub_path, str)
            if e.sub_path_expr:
                self.assertIsInstance(e.sub_path_expr, str)

        for e in op.container.volume_devices:
            if e.device_path:
                self.assertIsInstance(e.device_path, str)
            if e.name:
                self.assertIsInstance(e.name, str)

        for e in op.container.ports:
            if e.container_port:
                self.assertIsInstance(e.container_port, int)
            if e.host_ip:
                self.assertIsInstance(e.host_ip, str)
            if e.host_port:
                self.assertIsInstance(e.host_port, int)
            if e.name:
                self.assertIsInstance(e.name, str)
            if e.protocol:
                self.assertIsInstance(e.protocol, str)

        if op.container.security_context:
            e = op.container.security_context
            if e.allow_privilege_escalation:
                self.assertIsInstance(e.allow_privilege_escalation, bool)
            if e.capabilities:
                for a in e.capabilities.add:
                    self.assertIsInstance(a, str)
                for d in e.capabilities.drop:
                    self.assertIsInstance(d, str)
            if e.privileged:
                self.assertIsInstance(e.privileged, bool)
            if e.proc_mount:
                self.assertIsInstance(e.proc_mount, str)
            if e.read_only_root_filesystem:
                self.assertIsInstance(e.read_only_root_filesystem, bool)
            if e.run_as_group:
                self.assertIsInstance(e.run_as_group, int)
            if e.run_as_non_root:
                self.assertIsInstance(e.run_as_non_root, bool)
            if e.run_as_user:
                self.assertIsInstance(e.run_as_user, int)
            if e.se_linux_options:
                if e.se_linux_options.level:
                    self.assertIsInstance(e.se_linux_options.level, str)
                if e.se_linux_options.role:
                    self.assertIsInstance(e.se_linux_options.role, str)
                if e.se_linux_options.type:
                    self.assertIsInstance(e.se_linux_options.type, str)
                if e.se_linux_options.user:
                    self.assertIsInstance(e.se_linux_options.user, str)
            if e.windows_options:
                if e.windows_options.gmsa_credential_spec:
                    self.assertIsInstance(e.windows_options.gmsa_credential_spec, str)
                if e.windows_options.gmsa_credential_spec_name:
                    self.assertIsInstance(e.windows_options.gmsa_credential_spec_name, str)
            
        if op.container.stdin:
            self.assertIsInstance(op.container.stdin, bool)

        if op.container.stdin_once:
            self.assertIsInstance(op.container.stdin_once, bool)
        
        if op.container.termination_message_path:
            self.assertIsInstance(op.container.termination_message_path, str)

        if op.container.tty:
            self.assertIsInstance(op.container.tty, bool)

        for e in [op.container.readiness_probe, op.container.liveness_probe]:
            if e:
                if e._exec:
                    for c in e._exec.command:
                        self.assertIsInstance(c, str)
                if e.failure_threshold:
                    self.assertIsInstance(e.failure_threshold, int)
                if e.http_get:
                    if e.http_get.host:
                        self.assertIsInstance(e.http_get.host, str)
                    if e.http_get.http_headers:
                        for h in e.http_get.http_headers:
                            if h.name:
                                self.assertIsInstance(h.name, str)
                            if h.value:
                                self.assertIsInstance(h.value, str)
                    if e.http_get.path:
                        self.assertIsInstance(e.http_get.path, str)
                    if e.http_get.port:
                        self.assertIsInstance(e.http_get.port, (str, int))
                    if e.http_get.scheme:
                        self.assertIsInstance(e.http_get.scheme, str)
                if e.initial_delay_seconds:
                    self.assertIsInstance(e.initial_delay_seconds, int)
                if e.period_seconds:
                    self.assertIsInstance(e.period_seconds, int)
                if e.success_threshold:
                    self.assertIsInstance(e.success_threshold, int)
                if e.tcp_socket:
                    if e.tcp_socket.host:
                        self.assertIsInstance(e.tcp_socket.host, str)
                    if e.tcp_socket.port:
                        self.assertIsInstance(e.tcp_socket.port, (str, int))
                if e.timeout_seconds:
                    self.assertIsInstance(e.timeout_seconds, int)
        if op.container.lifecycle:
            for e in [op.container.lifecycle.post_start, op.container.lifecycle.pre_stop]:
                if e:
                    if e._exec:
                        for c in e._exec.command:
                            self.assertIsInstance(c, str)
                    if e.http_get:
                        if e.http_get.host:
                            self.assertIsInstance(e.http_get.host, str)
                        if e.http_get.http_headers:
                            for h in e.http_get.http_headers:
                                if h.name:
                                    self.assertIsInstance(h.name, str)
                                if h.value:
                                    self.assertIsInstance(h.value, str)
                        if e.http_get.path:
                            self.assertIsInstance(e.http_get.path, str)
                        if e.http_get.port:
                            self.assertIsInstance(e.http_get.port, (str, int))
                        if e.http_get.scheme:
                            self.assertIsInstance(e.http_get.scheme, str)
                    if e.tcp_socket:
                        if e.tcp_socket.host:
                            self.assertIsInstance(e.tcp_socket.host, str)
                        if e.tcp_socket.port:
                            self.assertIsInstance(e.tcp_socket.port, (str, int))

        # test cases for checking value after sanitization
        check_value_op = dsl.ContainerOp(name='echo', image='image', command=['sh', '-c'],
                           arguments=['echo test | tee /tmp/message.txt'],
                           file_outputs={'merged': '/tmp/message.txt'})
        check_value_op.container \
                .add_env_variable(k8s_client.V1EnvVar(
                    name=80,
                    value=8080)) \
                .set_security_context(k8s_client.V1SecurityContext(
                    allow_privilege_escalation='true',
                    capabilities=k8s_client.V1Capabilities(add=[11, 22], drop=[33, 44]),
                    privileged='false',
                    proc_mount=111,
                    read_only_root_filesystem='False',
                    run_as_group='222',
                    run_as_non_root='True',
                    run_as_user='******',
                    se_linux_options=k8s_client.V1SELinuxOptions(level=11, role=22, type=33, user=44),
                    windows_options=k8s_client.V1WindowsSecurityContextOptions(
                        gmsa_credential_spec=11, gmsa_credential_spec_name=22)
                ))
        
        sanitize_k8s_object(check_value_op.container)

        self.assertEqual(check_value_op.container.env[0].name, '80')
        self.assertEqual(check_value_op.container.env[0].value, '8080')
        self.assertEqual(check_value_op.container.security_context.allow_privilege_escalation, True)
        self.assertEqual(check_value_op.container.security_context.capabilities.add[0], '11')
        self.assertEqual(check_value_op.container.security_context.capabilities.add[1], '22')
        self.assertEqual(check_value_op.container.security_context.capabilities.drop[0], '33')
        self.assertEqual(check_value_op.container.security_context.capabilities.drop[1], '44')
        self.assertEqual(check_value_op.container.security_context.privileged, False)
        self.assertEqual(check_value_op.container.security_context.proc_mount, '111')
        self.assertEqual(check_value_op.container.security_context.read_only_root_filesystem, False)
        self.assertEqual(check_value_op.container.security_context.run_as_group, 222)
        self.assertEqual(check_value_op.container.security_context.run_as_non_root, True)
        self.assertEqual(check_value_op.container.security_context.run_as_user, 333)
        self.assertEqual(check_value_op.container.security_context.se_linux_options.level, '11')
        self.assertEqual(check_value_op.container.security_context.se_linux_options.role, '22')
        self.assertEqual(check_value_op.container.security_context.se_linux_options.type, '33')
        self.assertEqual(check_value_op.container.security_context.se_linux_options.user, '44')
        self.assertEqual(check_value_op.container.security_context.windows_options.gmsa_credential_spec, '11')
        self.assertEqual(check_value_op.container.security_context.windows_options.gmsa_credential_spec_name, '22')

        # test cases for exception
        with self.assertRaises(ValueError, msg='Invalid boolean string 2. Should be boolean.'):
            exception_op = dsl.ContainerOp(name='echo', image='image')
            exception_op.container \
                    .set_security_context(k8s_client.V1SecurityContext(
                        allow_privilege_escalation=1
                    ))
            sanitize_k8s_object(exception_op.container)

        with self.assertRaises(ValueError, msg='Invalid boolean string Test. Should be "true" or "false".'):
            exception_op = dsl.ContainerOp(name='echo', image='image')
            exception_op.container \
                    .set_security_context(k8s_client.V1SecurityContext(
                        allow_privilege_escalation='Test'
                    ))
            sanitize_k8s_object(exception_op.container)

        with self.assertRaises(ValueError, msg='Invalid test. Should be integer.'):
            exception_op = dsl.ContainerOp(name='echo', image='image')
            exception_op.container \
                    .set_security_context(k8s_client.V1SecurityContext(
                        run_as_group='test',
                    ))
            sanitize_k8s_object(exception_op.container)
Example 7
def create_job_object(data):
    meta = client.V1ObjectMeta(name=data["name"], namespace=data["namespace"])

    labels = None
    if "labels" in data:
        labels_array = data["labels"].split(',')
        labels = dict(s.split('=') for s in labels_array)
        meta.labels = labels

    annotations = None
    if "annotations" in data:
        annotations_array = data["annotations"].split(',')
        annotations = dict(s.split('=') for s in annotations_array)
        meta.annotations = annotations

    envs = []
    if "environments" in data:
        envs_array = data["environments"].splitlines()
        tmp_envs = dict(s.split('=', 1) for s in envs_array)
        for key in tmp_envs:
            envs.append(client.V1EnvVar(name=key, value=tmp_envs[key]))

    if "environments_secrets" in data:
        envs_array = data["environments_secrets"].splitlines()
        tmp_envs = dict(s.split('=', 1) for s in envs_array)

        for key in tmp_envs:

            if (":" in tmp_envs[key]):
                # passing secret env
                value = tmp_envs[key]
                secrets = value.split(':')
                secrect_key = secrets[1]
                secrect_name = secrets[0]

                envs.append(
                    client.V1EnvVar(
                        name=key,
                        value="",
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                key=secrect_key, name=secrect_name))))

    container = client.V1Container(name=data["container_name"],
                                   image=data["container_image"],
                                   image_pull_policy=data["image_pull_policy"])

    if "container_command" in data:
        container.command = data["container_command"].split(' ')

    if "container_args" in data:
        args_array = data["container_args"].splitlines()
        container.args = args_array

    if "resources_requests" in data:
        resources_array = data["resources_requests"].split(",")
        tmp = dict(s.split('=', 1) for s in resources_array)
        container.resources = client.V1ResourceRequirements(requests=tmp)

    if "volume_mounts" in data:
        mounts = common.create_volume_mount_yaml(data)
        container.volume_mounts = mounts

    container.env = envs

    if "env_from" in data:
        env_froms_data = yaml.full_load(data["env_from"])
        env_from = []
        for env_from_data in env_froms_data:
            if 'configMapRef' in env_from_data:
                env_from.append(
                    client.V1EnvFromSource(
                        config_map_ref=client.V1ConfigMapEnvSource(
                            env_from_data['configMapRef']['name'])))
            elif 'secretRef' in env_from_data:
                env_from.append(
                    client.V1EnvFromSource(secret_ref=client.V1SecretEnvSource(
                        env_from_data['secretRef']['name'])))

        container.env_from = env_from

    template_spec = client.V1PodSpec(containers=[container],
                                     restart_policy=data["job_restart_policy"])

    if "volumes" in data:
        volumes_data = yaml.safe_load(data["volumes"])
        volumes = []

        if isinstance(volumes_data, list):
            for volume_data in volumes_data:
                volume = common.create_volume(volume_data)

                if volume:
                    volumes.append(volume)
        else:
            volume = common.create_volume(volumes_data)

            if volume:
                volumes.append(volume)

        template_spec.volumes = volumes

    if "image_pull_secrets" in data:
        images_array = data["image_pull_secrets"].split(",")
        images = []
        for image in images_array:
            images.append(client.V1LocalObjectReference(name=image))

        template_spec.image_pull_secrets = images

    if "tolerations" in data:
        tolerations_data = yaml.safe_load(data["tolerations"])
        tolerations = []
        for toleration_data in tolerations_data:
            toleration = common.create_toleration(toleration_data)

            if toleration:
                tolerations.append(toleration)

        template_spec.tolerations = tolerations

    template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(
        name=data["name"],
        labels=labels,
        annotations=annotations,
    ),
                                        spec=template_spec)

    spec = client.V1JobSpec(template=template)

    if "completions" in data:
        spec.completions = int(data["completions"])
    if "selectors" in data:
        selectors_array = data["selectors"].split(',')
        selectors = dict(s.split('=') for s in selectors_array)
        spec.selector = selectors
    if "node_selector" in data:
        node_selectors_array = data["node_selector"].split(',')
        node_selectors = dict(s.split('=') for s in node_selectors_array)
        spec.nodeSelector = node_selectors
    if "parallelism" in data:
        spec.parallelism = int(data["parallelism"])
    if "active_deadline_seconds" in data:
        spec.active_deadline_seconds = int(data["active_deadline_seconds"])
    if "backoff_limit" in data:
        spec.backoff_limit = int(data["backoff_limit"])

    job = client.V1Job(api_version=data["api_version"],
                       kind='Job',
                       metadata=meta,
                       spec=spec)

    return job
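
A hedged usage sketch for create_job_object: a minimal data dict covering the required keys the function reads, submitted through the batch API. All values are placeholders.

from kubernetes import client, config

data = {
    "name": "example-job",
    "namespace": "default",
    "api_version": "batch/v1",
    "container_name": "main",
    "container_image": "busybox",
    "image_pull_policy": "IfNotPresent",
    "container_command": "echo hello",
    "job_restart_policy": "Never",
    "environments": "FOO=bar\nBAZ=qux",
    "backoff_limit": "2",
}

config.load_kube_config()  # or config.load_incluster_config()
job = create_job_object(data)
client.BatchV1Api().create_namespaced_job(namespace=data["namespace"], body=job)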
Example 8
def update_op(op: dsl.ContainerOp,
              pipeline_name: dsl.PipelineParam,
              pipeline_root: dsl.PipelineParam,
              launcher_image: Optional[str] = None) -> None:
  """Updates the passed in Op for running in v2-compatible mode.

    Args:
      op: The Op to update.
      pipeline_name: The name of the pipeline under which `op` runs.
      pipeline_root: The root output directory for pipeline artifacts.
      launcher_image: An optional launcher image. Useful for tests.
    """
  # Inject the launcher binary and overwrite the entrypoint.
  image_name = launcher_image or _DEFAULT_LAUNCHER_IMAGE
  launcher_container = dsl.UserContainer(name="kfp-launcher",
                                         image=image_name,
                                         command="/bin/mount_launcher.sh",
                                         mirror_volume_mounts=True)

  op.add_init_container(launcher_container)
  op.add_volume(k8s_client.V1Volume(name='kfp-launcher'))
  op.add_volume_mount(
      k8s_client.V1VolumeMount(name='kfp-launcher', mount_path='/kfp-launcher'))

  op.command = [
      "/kfp-launcher/launch",
      "--mlmd_server_address",
      "$(METADATA_GRPC_SERVICE_HOST)",
      "--mlmd_server_port",
      "$(METADATA_GRPC_SERVICE_PORT)",
      "--runtime_info_json",
      "$(KFP_V2_RUNTIME_INFO)",
      "--container_image",
      "$(KFP_V2_IMAGE)",
      "--task_name",
      op.name,
      "--pipeline_name",
      pipeline_name,
      "--pipeline_run_id",
      "$(WORKFLOW_ID)",
      "--pipeline_task_id",
      "$(KFP_POD_NAME)",
      "--pipeline_root",
      pipeline_root,
  ]

  # Mount necessary environment variables.
  op.apply(_default_transformers.add_kfp_pod_env)
  op.container.add_env_variable(
      k8s_client.V1EnvVar(name="KFP_V2_IMAGE", value=op.container.image))

  config_map_ref = k8s_client.V1ConfigMapEnvSource(
      name='metadata-grpc-configmap', optional=True)
  op.container.add_env_from(
      k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))

  op.arguments = list(op.container_spec.command) + list(op.container_spec.args)

  runtime_info = {
      "inputParameters": collections.OrderedDict(),
      "inputArtifacts": collections.OrderedDict(),
      "outputParameters": collections.OrderedDict(),
      "outputArtifacts": collections.OrderedDict(),
  }

  component_spec = op.component_spec
  for parameter, spec in sorted(
      component_spec.input_definitions.parameters.items()):
    parameter_info = {
        "parameterType":
            pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),
        "parameterValue":
            op._parameter_arguments[parameter],
    }
    runtime_info["inputParameters"][parameter] = parameter_info

  for artifact_name, spec in sorted(
      component_spec.input_definitions.artifacts.items()):
    artifact_info = {"fileInputPath": op.input_artifact_paths[artifact_name]}
    runtime_info["inputArtifacts"][artifact_name] = artifact_info

  for parameter, spec in sorted(
      component_spec.output_definitions.parameters.items()):
    parameter_info = {
        "parameterType":
            pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),
        "fileOutputPath":
            op.file_outputs[parameter],
    }
    runtime_info["outputParameters"][parameter] = parameter_info

  for artifact_name, spec in sorted(
      component_spec.output_definitions.artifacts.items()):
    # TODO: Assert instance_schema.
    artifact_info = {
        # Type used to register output artifacts.
        "artifactSchema": spec.artifact_type.instance_schema,
        # File used to write out the registered artifact ID.
        "fileOutputPath": op.file_outputs[artifact_name],
    }
    runtime_info["outputArtifacts"][artifact_name] = artifact_info

  op.container.add_env_variable(
      k8s_client.V1EnvVar(name="KFP_V2_RUNTIME_INFO",
                          value=json.dumps(runtime_info)))

  op.pod_annotations['pipelines.kubeflow.org/v2_component'] = "true"
Example 9
 def env_froms(self):
     return [
         client.V1EnvFromSource(config_map_ref=client.V1ConfigMapEnvSource(name=env_from))
         for env_from in self._env_froms
     ]
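
Example 12 below consumes a list like this by concatenating it onto a container's env_from. A minimal sketch of that consumption, assuming env_froms is exposed as a property of the same launcher class (its call-free use in Example 12 suggests it is); the container name and image attribute are placeholders.

    def job_container(self):
        # Hypothetical helper on the same class; env_froms is the list built above.
        return client.V1Container(
            name='dagster-job',     # placeholder
            image=self._job_image,  # hypothetical attribute
            env_from=self.env_froms)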
Example 10
    def deployment_object(self, instance_uuid, cnf_yaml, service_uuid, vim_uuid):
        """
        CNF modeling method. This build a deployment object in kubernetes
        instance_uuid: k8s deployment name
        cnf_yaml: CNF Descriptor in yaml format
        """
        t0 = time.time()
        LOG.debug("CNFD: {}".format(cnf_yaml))
        container_list = []
        pod_volume_list = []
        deployment_k8s = None
        privileged = False
        node_selector = {}
        host_network = False
        if "cloudnative_deployment_units" in cnf_yaml:
            cdu = cnf_yaml.get('cloudnative_deployment_units')
            for cdu_obj in cdu:
                env_vars = env_from = cpu = memory = huge_pages = gpu = sr_iov = resources = volume_mounts = None
                port_list = []
                environment = []
                capabilities_list = []
                cdu_id = cdu_obj.get('id')
                image = cdu_obj.get('image')
                cdu_conex = cdu_obj.get('connection_points')
                container_name = cdu_id
                config_map_id = cdu_id
                if cdu_obj.get('parameters'):
                    env_vars = cdu_obj['parameters'].get('env')
                    volume_mounts = cdu_obj['parameters'].get('volume_mounts')
                    capabilities_list = cdu_obj['parameters'].get('capabilities')
                    if cdu_obj['parameters'].get('privileged'):
                        privileged = cdu_obj['parameters'].get('privileged')
                    if cdu_obj['parameters'].get('host_network'):
                        host_network = cdu_obj['parameters']['host_network']
                    if not isinstance(capabilities_list, list):
                        capabilities_list = []
                    if cdu_obj['parameters'].get('node_selector'):
                        node_selector = cdu_obj['parameters']['node_selector']
                if cdu_obj.get('resource_requirements'):
                    gpu = cdu_obj['resource_requirements'].get('gpu')
                    cpu = cdu_obj['resource_requirements'].get('cpu')
                    memory = cdu_obj['resource_requirements'].get('memory')
                    sr_iov = cdu_obj['resource_requirements'].get('sr-iov')
                    huge_pages = cdu_obj['resource_requirements'].get('huge-pages')
                if cdu_conex:
                    for po in cdu_conex:
                        port = po.get('port')
                        port_name = po.get('id')
                        protocol = "TCP"
                        if po.get("protocol"):
                            protocol = po["protocol"]
                        port_list.append(client.V1ContainerPort(container_port = port, name = port_name, protocol=protocol))

                limits = {}
                requests = {}
                if gpu:
                    LOG.debug("Features requested: {}".format(gpu))
                    # gpu_type can be amd or nvidia
                    for gpu_type, amount in gpu.items():
                        limits["{}.com/gpu".format(gpu_type)] = amount
                if cpu:
                    # TODO
                    pass
                if memory:
                    # TODO
                    pass               
                if sr_iov:
                    # TODO
                    pass                  
                if huge_pages:
                    # TODO
                    pass  
                
                resources = client.V1ResourceRequirements(limits=limits, requests=requests)             

                # Environment variables from descriptor
                if env_vars:
                    LOG.debug("Configmap: {}".format(config_map_id))
                    KubernetesWrapperEngine.create_configmap(self, config_map_id, instance_uuid, env_vars, service_uuid,
                                                             vim_uuid, namespace = "default")
                else:
                    env_vars = {"sonata": "rules"}
                    LOG.debug("Configmap: {}".format(config_map_id))
                    KubernetesWrapperEngine.create_configmap(self, config_map_id, instance_uuid, env_vars, service_uuid, 
                                                             vim_uuid, namespace = "default")
                env_from = client.V1EnvFromSource(config_map_ref = client.V1ConfigMapEnvSource(name = config_map_id, 
                                                  optional = False))

                # Default static environment variables
                environment.append(client.V1EnvVar(name="instance_uuid", value=instance_uuid))
                environment.append(client.V1EnvVar(name="service_uuid", value=service_uuid))
                environment.append(client.V1EnvVar(name="container_name", value=container_name))
                environment.append(client.V1EnvVar(name="vendor", value=KubernetesWrapperEngine.normalize(self, cnf_yaml.get('vendor'))))
                environment.append(client.V1EnvVar(name="name", value=KubernetesWrapperEngine.normalize(self, cnf_yaml.get('name'))))
                environment.append(client.V1EnvVar(name="version", value=KubernetesWrapperEngine.normalize(self, cnf_yaml.get('version'))))

                image_pull_policy = KubernetesWrapperEngine.check_connection(self)
                
                # Volume mounts
                container_volume_mount_list = []
                if volume_mounts:
                    LOG.debug("volume mounts: {}".format(volume_mounts))
                    # Create the specification of volumes
                    for volume_mounts_item in volume_mounts:
                        if volume_mounts_item.get('id') and volume_mounts_item.get('location'):
                            if volume_mounts_item.get('persistent'):
                                volumes = client.V1Volume(name=volume_mounts_item['id'], 
                                                          host_path=client.V1HostPathVolumeSource(path='/mnt/data', type='DirectoryOrCreate' ))
                            else:
                                volumes = client.V1Volume(name=volume_mounts_item['id'], 
                                                          empty_dir=client.V1EmptyDirVolumeSource(medium='' ))
                            if volumes not in pod_volume_list:
                                pod_volume_list.append(volumes)
                            container_volume_mount = client.V1VolumeMount(name=volume_mounts_item['id'], mount_path=volume_mounts_item['location'] )
                            container_volume_mount_list.append(container_volume_mount)



                LOG.debug("Security capabilities: {}, privileged: {} applied to {}".format(capabilities_list, privileged, container_name))
                sec_context = client.V1SecurityContext(privileged=privileged, capabilities=client.V1Capabilities(add=capabilities_list))

                # Configure the Pod template container
                container = client.V1Container(
                    env = environment,
                    name = container_name,
                    resources = resources,
                    image = image,
                    image_pull_policy = image_pull_policy,
                    ports = port_list,
                    env_from = [env_from],
                    volume_mounts = container_volume_mount_list,
                    security_context=sec_context)
                container_list.append(container)
        else:
            return deployment_k8s

        # Create and configure the spec section
        deployment_label =  ("{}-{}-{}-{}".format(cnf_yaml.get("vendor"), cnf_yaml.get("name"), cnf_yaml.get("version"),
                             instance_uuid.split("-")[0])).replace(".", "-")
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={'deployment': deployment_label,
                                                 'instance_uuid': cnf_yaml['instance_uuid'],
                                                 'service_uuid': service_uuid,
                                                 'sp': "sonata",
                                                 'descriptor_uuid': cnf_yaml['uuid']} 
                                                 ),
            spec=client.V1PodSpec(containers=container_list, volumes=pod_volume_list, node_selector=node_selector, 
                                  host_network=host_network))

        selector=client.V1LabelSelector(match_labels={'deployment': deployment_label,
                                                 'instance_uuid': cnf_yaml['instance_uuid'],
                                                 'service_uuid': service_uuid,
                                                 'sp': "sonata",
                                                 'descriptor_uuid': cnf_yaml['uuid']} 
                                                 )

        # Create the specification of deployment
        spec = client.V1DeploymentSpec(
            replicas=1,
            template=template,
            selector=selector)
        # Instantiate the deployment object
        deployment_k8s = client.V1Deployment(
            api_version="apps/v1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=deployment_label,
                                         labels={'deployment': deployment_label,
                                                 'instance_uuid': cnf_yaml['instance_uuid'],
                                                 'service_uuid': service_uuid,
                                                 'sp': "sonata",
                                                 'descriptor_uuid': cnf_yaml['uuid']} 
                                                 ),
                                         spec=spec)
        LOG.info("Deployment object: {}".format(deployment_k8s))
        LOG.info("CreatingDeploymentObject-time: {} ms".format(int((time.time() - t0)* 1000)))
        return deployment_k8s
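
A hedged sketch of how a Deployment object built by deployment_object would typically be submitted through the apps/v1 API; the engine and descriptor variables are hypothetical, and the namespace is a placeholder.

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config()

# engine, instance_uuid, cnf_yaml, service_uuid and vim_uuid are hypothetical here
deployment = engine.deployment_object(instance_uuid, cnf_yaml, service_uuid, vim_uuid)
client.AppsV1Api().create_namespaced_deployment(namespace='default', body=deployment)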
Example 11
def create_deployment(deployment_name, containers, **kwargs):
    appv1 = client.AppsV1Api()

    # TODO: Validation
    deployment_namespace = kwargs.get('namespace', 'default')
    deployment_labels = kwargs.get('labels', {})
    deployment_annotations = kwargs.get('annotations', {})
    deployment_replicas = kwargs.get('replicas', 0)
    podspec_annotations = kwargs.get('pod_annotations', {})
    service_account_name = kwargs.get('service_account', 'workbench')

    # TODO: Abstract to parameter array
    #configmap_name = "%s-config" % deployment_name
    #container_name = 'workbench-container'
    #container_image = 'k8s.gcr.io/busybox'
    #container_command = ["/bin/sh", "-c", "env"]
    #container_poststart_command = ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message/postStart"]
    #container_prestop_command = ["/bin/sh", "-c", "echo Hello from the preStop handler > /usr/share/message/preStop"]
    #container_ports = [80, 443]

    #client.V1Lifecycle(
    #    post_start=client.V1Handler(
    #        _exec=client.V1ExecAction(
    #            command=container_poststart_command
    #        )
    #    ),
    #    pre_stop=client.V1Handler(
    #        _exec=client.V1ExecAction(
    #            command=container_prestop_command
    #        )
    #    ),
    #)

    podspec = client.V1PodSpec(
        service_account_name=service_account_name,
        containers=[
            client.V1Container(
                name=container['name'],
                command=container['command'],
                env_from=[
                    client.V1EnvFromSource(
                        config_map_ref=client.V1ConfigMapEnvSource(
                            name=container['configmap']))
                ],
                # TODO: container.lifecycle?
                lifecycle=container['lifecycle']
                if 'lifecycle' in container else None,
                image=container['image'],
                ports=[
                    client.V1ContainerPort(
                        name=port, container_port=container['ports'][port])
                    for port in container['ports']
                ]) for container in containers
        ])

    try:
        deployment = appv1.create_namespaced_deployment(
            namespace=deployment_namespace,
            body=client.V1Deployment(
                api_version='apps/v1',
                kind='Deployment',
                metadata=client.V1ObjectMeta(
                    name=deployment_name,
                    namespace=deployment_namespace,
                    labels=deployment_labels,
                    annotations=deployment_annotations),
                spec=client.V1DeploymentSpec(
                    replicas=deployment_replicas,
                    selector=client.V1LabelSelector(
                        match_labels=deployment_labels),
                    template=client.V1PodTemplateSpec(
                        metadata=client.V1ObjectMeta(
                            name=deployment_name,
                            namespace=deployment_namespace,
                            annotations=podspec_annotations,
                            labels=deployment_labels),
                        spec=podspec))))
        logger.debug("Created deployment resource: " + str(deployment))
        return deployment
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 409:
            return None
        else:
            logger.error("Error creating service resource: %s" % str(exc))
            raise exc
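
A hedged usage sketch for create_deployment, showing the container dict shape the function expects (name, command, configmap, image, ports, and optionally lifecycle); all names and values are placeholders.

from kubernetes import config

config.load_kube_config()

containers = [{
    'name': 'workbench',
    'image': 'k8s.gcr.io/busybox',
    'command': ['/bin/sh', '-c', 'env && sleep 3600'],
    'configmap': 'workbench-config',   # ConfigMap assumed to exist in the namespace
    'ports': {'http': 80, 'https': 443},
}]

create_deployment(
    'example-workbench',
    containers,
    namespace='default',
    labels={'app': 'example-workbench'},
    replicas=1)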
Example 12
    def construct_job(self, run):
        check.inst_param(run, 'run', PipelineRun)

        dagster_labels = {
            'app.kubernetes.io/name': 'dagster',
            'app.kubernetes.io/instance': 'dagster',
            'app.kubernetes.io/version': dagster_version,
        }

        execution_params = {
            'executionParams': {
                'selector': run.selector.to_graphql_input(),
                "environmentConfigData": run.environment_dict,
                'executionMetadata': {
                    "runId": run.run_id
                },
                "mode": run.mode,
            },
        }

        job_container = client.V1Container(
            name='dagster-job-%s' % run.run_id,
            image=self.job_image,
            command=['dagster-graphql'],
            args=[
                "-p", "startPipelineExecution", "-v",
                json.dumps(execution_params)
            ],
            image_pull_policy=self.image_pull_policy,
            env=[
                client.V1EnvVar(name='DAGSTER_HOME',
                                value='/opt/dagster/dagster_home')
            ],
            env_from=[
                client.V1EnvFromSource(
                    config_map_ref=client.V1ConfigMapEnvSource(
                        name='dagster-job-env'))
            ] + self.env_froms,
            volume_mounts=[
                client.V1VolumeMount(
                    name='dagster-instance',
                    mount_path='/opt/dagster/dagster_home/dagster.yaml',
                    sub_path='dagster.yaml',
                )
            ],
        )

        config_map_volume = client.V1Volume(
            name='dagster-instance',
            config_map=client.V1ConfigMapVolumeSource(
                name=self.instance_config_map),
        )

        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                name='dagster-job-pod-%s' % run.run_id,
                labels=dagster_labels,
            ),
            spec=client.V1PodSpec(
                image_pull_secrets=self.image_pull_secrets,
                service_account_name=self.service_account_name,
                restart_policy='Never',
                containers=[job_container],
                volumes=[config_map_volume],
            ),
        )

        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name='dagster-job-%s' % run.run_id,
                                         labels=dagster_labels),
            spec=client.V1JobSpec(
                template=template,
                backoff_limit=BACKOFF_LIMIT,
                ttl_seconds_after_finished=TTL_SECONDS_AFTER_FINISHED,
            ),
        )
        return job
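
For completeness, a hedged sketch of how a Job built by construct_job would typically be submitted; run_launcher and run are hypothetical instances of this launcher class and PipelineRun, and the namespace is a placeholder.

from kubernetes import client, config

config.load_incluster_config()  # or config.load_kube_config() outside the cluster

job = run_launcher.construct_job(run)  # hypothetical instances
client.BatchV1Api().create_namespaced_job(namespace='default', body=job)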