Code Example #1
File: utils.py Project: rpatil524/fairing
    def _volume_mounts(kube_manager, pod_spec, namespace):  #pylint:disable=unused-argument
        volume_mount = client.V1VolumeMount(name=mount_name,
                                            mount_path=mount_path,
                                            sub_path=sub_path)
        if pod_spec.containers[0].volume_mounts:
            pod_spec.containers[0].volume_mounts.append(volume_mount)
        else:
            pod_spec.containers[0].volume_mounts = [volume_mount]

        if volume_type == 'pvc':
            volume = client.V1Volume(
                name=mount_name,
                persistent_volume_claim=client.
                V1PersistentVolumeClaimVolumeSource(claim_name=volume_name))
        elif volume_type == 'secret':
            volume = client.V1Volume(
                name=mount_name,
                secret=client.V1SecretVolumeSource(secret_name=volume_name))
        elif volume_type == 'config_map':
            volume = client.V1Volume(
                name=mount_name,
                config_map=client.V1ConfigMapVolumeSource(name=volume_name))
        else:
            raise RuntimeError("Unsupported type %s" % volume_type)

        if pod_spec.volumes:
            pod_spec.volumes.append(volume)
        else:
            pod_spec.volumes = [volume]
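
Note that mount_name, mount_path, sub_path, volume_type, and volume_name are free variables here: _volume_mounts is a closure returned by an enclosing factory. A minimal sketch of such a wrapper, with hypothetical parameter names:

    def volume_mounts(volume_type, volume_name, mount_path,
                      sub_path=None, mount_name='fairing-volume'):
        # Return a pod_spec mutator that closes over the parameters above.
        def _volume_mounts(kube_manager, pod_spec, namespace):  # pylint:disable=unused-argument
            ...  # body as in the example above
        return _volume_mounts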
Code Example #2
    def _mount_v3iod(task):
        from kubernetes import client as k8s_client

        def add_vol(name, mount_path, host_path):
            vol = k8s_client.V1Volume(
                name=name,
                host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=''),
            )
            task.add_volume(vol).add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=mount_path, name=name)
            )

        add_vol(name='shm', mount_path='/dev/shm', host_path='/dev/shm/' + namespace)
        add_vol(
            name='v3iod-comm',
            mount_path='/var/run/iguazio/dayman',
            host_path='/var/run/iguazio/dayman/' + namespace,
        )

        vol = k8s_client.V1Volume(
            name='daemon-health', empty_dir=k8s_client.V1EmptyDirVolumeSource()
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(
                mount_path='/var/run/iguazio/daemon_health', name='daemon-health'
            )
        )

        vol = k8s_client.V1Volume(
            name='v3io-config',
            config_map=k8s_client.V1ConfigMapVolumeSource(
                name=v3io_config_configmap, default_mode=420
            ),
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/etc/config/v3io', name='v3io-config')
        )

        # vol = k8s_client.V1Volume(name='v3io-auth',
        #                           secret=k8s_client.V1SecretVolumeSource(secret_name=v3io_auth_secret,
        #                                                                  default_mode=420))
        # task.add_volume(vol).add_volume_mount(k8s_client.V1VolumeMount(mount_path='/igz/.igz', name='v3io-auth'))

        task.add_env_variable(
            k8s_client.V1EnvVar(
                name='CURRENT_NODE_IP',
                value_from=k8s_client.V1EnvVarSource(
                    field_ref=k8s_client.V1ObjectFieldSelector(
                        api_version='v1', field_path='status.hostIP'
                    )
                ),
            )
        )
        task.add_env_variable(
            k8s_client.V1EnvVar(
                name='IGZ_DATA_CONFIG_FILE', value='/igz/java/conf/v3io.conf'
            )
        )

        return task
Code Example #3
    def define_config_map_volume(self,
                                 name,
                                 config_name,
                                 config_items,
                                 default_mode=0o644,
                                 optional=False):
        """
        The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the
        Data field as the file names, unless the items element is populated with specific mappings of keys to paths.
        ConfigMap volumes support ownership management and SELinux relabeling.

        :param name : name of volume that is created.
        :type name: str
        :param config_name : name of the config map being used
        :type config_name: str
        :param config_items : key value to project and The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
        :type config_items: dict
        :param default_mode : = mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
        :type default_mode: Integer
        :param optional : Specify whether the ConfigMap or it's keys must be defined
        :type optional: bool
        :return: volume
        """
        config_map_vol = client.V1ConfigMapVolumeSource(
            default_mode=default_mode,
            optional=optional,
            name=config_name,
            items=config_items)
        return client.V1Volume(name=name, config_map=config_map_vol)
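
A hedged usage sketch: in the kubernetes client, the items of a V1ConfigMapVolumeSource are a list of V1KeyToPath objects mapping ConfigMap keys to relative file paths (the builder instance and ConfigMap name below are hypothetical):

    items = [client.V1KeyToPath(key="settings.yaml", path="settings.yaml")]
    volume = builder.define_config_map_volume(name="config-volume",
                                              config_name="app-config",
                                              config_items=items)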
Code Example #4
def create_nginx_deployment(blogpost_name):
    container = client.V1Container(
        name="nginx",
        image="nginx:alpine",
        ports=[client.V1ContainerPort(container_port=80)],
        volume_mounts=[
            client.V1VolumeMount(name="config", mount_path="/etc/nginx/conf.d")
        ],
    )
    volume = client.V1Volume(
        name="config",
        config_map=client.V1ConfigMapVolumeSource(name=blogpost_name),
    )
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": f"nginx-{blogpost_name}"}),
        spec=client.V1PodSpec(containers=[container], volumes=[volume]),
    )
    spec = client.V1DeploymentSpec(
        replicas=1,
        template=template,
        selector={"matchLabels": {"app": f"nginx-{blogpost_name}"}},
    )

    return client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=f"nginx-{blogpost_name}"),
        spec=spec,
    )
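
The returned Deployment can be submitted through the apps/v1 API; a minimal sketch, assuming kube config is already loaded and a ConfigMap named after the blog post exists:

    from kubernetes import client, config

    config.load_kube_config()
    deployment = create_nginx_deployment("my-post")
    client.AppsV1Api().create_namespaced_deployment(namespace="default",
                                                    body=deployment)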
Code Example #5
File: k8s_utils.py Project: pacodiaz2020/mlrun
 def mount_cfgmap(self, name, path="/config"):
     self.add_volume(
         client.V1Volume(
             name=name, config_map=client.V1ConfigMapVolumeSource(name=name)
         ),
         mount_path=path,
     )
Code Example #6
 def k8s_object(self):
     depl = client.AppsV1beta1Deployment(
         metadata=client.V1ObjectMeta(
             name=self.name,
             labels=self.labels
         ),
         spec=client.AppsV1beta1DeploymentSpec(
             strategy=client.AppsV1beta1DeploymentStrategy(
                 type='RollingUpdate',
                 rolling_update=client.AppsV1beta1RollingUpdateDeployment(
                     max_surge=0
                 )
             ),
             template=client.V1PodTemplateSpec(
                 metadata=client.V1ObjectMeta(
                     labels=self.template_labels),
                 spec=client.V1PodSpec(
                     affinity=client.V1Affinity(
                         pod_anti_affinity=client.V1PodAntiAffinity(
                             required_during_scheduling_ignored_during_execution=[
                                 {"topologyKey": e2e_globals.ANTI_AFFINITY_KEY},
                             ]
                         ),
                     ),
                     volumes=[client.V1Volume(
                         name='data',
                         config_map=client.V1ConfigMapVolumeSource(
                             name=self.cfgmap_name)
                     )],
                     containers=[client.V1Container(
                         image=e2e_globals.TEST_DEPLOYMENT_IMAGE,
                         name="testapp",
                         volume_mounts=[client.V1VolumeMount(
                             name='data',
                             mount_path='/usr/share/nginx/html')
                         ],
                         ports=[client.V1ContainerPort(
                             container_port=e2e_globals.TEST_CONTAINER_PORT)],
                         resources=client.V1ResourceRequirements(
                             requests={
                                 'cpu': '1m',
                                 'memory': '1Mi',
                             },
                         ),
                     )])),
             replicas=self.replicas)
     )
     if self.vol_claim_name is not None:
         volume = client.V1Volume(name='test-volume',
                                  persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                                      claim_name=self.vol_claim_name))
         mount = client.V1VolumeMount(
             name='test-volume',
             mount_path='/usr/blank'
         )
         depl.spec.template.spec.containers[0].volume_mounts.append(mount)
         depl.spec.template.spec.volumes.append(volume)
     return depl
Code Example #7
File: origin.py Project: pawanrana/kubeflow-workshop
 def _use_config_map(task):
     config_map = k8s.V1ConfigMapVolumeSource(
         name=name,
         items=[k8s.V1KeyToPath(key=key, path=key) \
             for key in key_path_mapper]
     )
     return task \
         .add_volume(k8s.V1Volume(config_map=config_map, name=name)) \
         .add_volume_mount(k8s.V1VolumeMount(mount_path=mount_path, name=name))
Code Example #8
 def get_obj(self):
     """
     :description: Generate volume spec.
     """
     if self.config.get("configmap"):
         return client.V1Volume(name=self.slug,
                                config_map=client.V1ConfigMapVolumeSource(
                                    name=self.config.get("configmap")))
     return client.V1Volume(name=self.slug,
                            empty_dir=client.V1EmptyDirVolumeSource())
Code Example #9
File: other.py Project: AlonMaor14/mlrun
    def _mount_configmap(task):
        from kubernetes import client as k8s_client

        vol = k8s_client.V1ConfigMapVolumeSource(name=configmap_name,
                                                 items=items)
        return task.add_volume(
            k8s_client.V1Volume(
                name=volume_name, config_map=vol)).add_volume_mount(
                    k8s_client.V1VolumeMount(mount_path=mount_path,
                                             name=volume_name))
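
As in Code Example #1, this is a closure: configmap_name, items, mount_path, and volume_name come from an enclosing factory. A hedged sketch of such a wrapper (parameter names assumed):

    def mount_configmap(configmap_name, mount_path,
                        volume_name='configmap', items=None):
        def _mount_configmap(task):
            ...  # body as in the example above
        return _mount_configmap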
Code Example #10
    def _mount_v3iod(task):
        from kubernetes import client as k8s_client

        def add_vol(name, mount_path, host_path):
            vol = k8s_client.V1Volume(
                name=name,
                host_path=k8s_client.V1HostPathVolumeSource(path=host_path,
                                                            type=""),
            )
            task.add_volume(vol).add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=mount_path, name=name))

        add_vol(name="shm",
                mount_path="/dev/shm",
                host_path="/dev/shm/" + namespace)
        add_vol(
            name="v3iod-comm",
            mount_path="/var/run/iguazio/dayman",
            host_path="/var/run/iguazio/dayman/" + namespace,
        )

        vol = k8s_client.V1Volume(
            name="daemon-health",
            empty_dir=k8s_client.V1EmptyDirVolumeSource())
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(
                mount_path="/var/run/iguazio/daemon_health",
                name="daemon-health"))

        vol = k8s_client.V1Volume(
            name="v3io-config",
            config_map=k8s_client.V1ConfigMapVolumeSource(
                name=v3io_config_configmap, default_mode=420),
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path="/etc/config/v3io",
                                     name="v3io-config"))

        task.add_env_variable(
            k8s_client.V1EnvVar(
                name="CURRENT_NODE_IP",
                value_from=k8s_client.V1EnvVarSource(
                    field_ref=k8s_client.V1ObjectFieldSelector(
                        api_version="v1", field_path="status.hostIP")),
            ))
        task.add_env_variable(
            k8s_client.V1EnvVar(name="IGZ_DATA_CONFIG_FILE",
                                value="/igz/java/conf/v3io.conf"))

        return task
Code Example #11
def create_volume(volume_data):
    if "name" in volume_data:
        volume = client.V1Volume(
            name=volume_data["name"]
        )

        # persistent claim
        if "persistentVolumeClaim" in volume_data:
            volume_pvc = volume_data["persistentVolumeClaim"]
            if "claimName" in volume_pvc:
                pvc = client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_pvc["claimName"]
                )
                volume.persistent_volume_claim = pvc

        # hostpath
        if "hostPath" in volume_data and "path" in volume_data["hostPath"]:
            host_path = client.V1HostPathVolumeSource(
                path=volume_data["hostPath"]["path"]
            )
            if "type" in volume_data["hostPath"]:
                host_path.type = volume_data["hostPath"]["type"]
            # attach the hostPath source even when no type is given
            volume.host_path = host_path
        # nfs
        if ("nfs" in volume_data and
                "path" in volume_data["nfs"] and
                "server" in volume_data["nfs"]):
            volume.nfs = client.V1NFSVolumeSource(
                path=volume_data["nfs"]["path"],
                server=volume_data["nfs"]["server"]
            )

        # secret
        if "secret" in volume_data:
            volume.secret = client.V1SecretVolumeSource(
                secret_name=volume_data["secret"]["secretName"]
            )

        # configMap 
        if "configMap" in volume_data:
            volume.config_map = client.V1ConfigMapVolumeSource(
                name=volume_data["configMap"]["name"]
            )

        return volume

    return None
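
A hedged usage sketch of the dict shape create_volume consumes (values hypothetical); here only the configMap branch matches, so the result is a V1Volume with config_map set:

    volume = create_volume({
        "name": "app-config",
        "configMap": {"name": "my-configmap"},
    })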
Code Example #12
    def _get_job_object(self, algorithm):
        job = client.V1Job()

        # Define job metadata
        job.metadata = client.V1ObjectMeta(namespace=NAMESPACE,
                                           name=self.resources_identifier)

        # Define job spec
        template = client.V1PodTemplate()
        template.template = client.V1PodTemplateSpec()

        env_list = []
        env_list.append(
            client.V1EnvVar(name=ENV_VAR_JOB_NAME,
                            value=self.resources_identifier))

        docker_repo = os.environ.get(ENV_VAR_DOCKER_REPOSITORY, "")
        if docker_repo != "":
            image_name = f"{docker_repo}/{algorithm}:latest"
        else:
            image_name = f"{algorithm}:latest"

        volume_mounts = [
            client.V1VolumeMount(name=VOLUME_NAME_ALGORITHM_INPUT,
                                 mount_path="/etc/config")
        ]
        container = client.V1Container(name="algorithm",
                                       image=image_name,
                                       volume_mounts=volume_mounts,
                                       env=env_list,
                                       image_pull_policy="Always")
        # command=["sleep", "5"])

        cm_mount = client.V1ConfigMapVolumeSource(
            name=self.resources_identifier)
        volumes = [
            client.V1Volume(config_map=cm_mount,
                            name=VOLUME_NAME_ALGORITHM_INPUT)
        ]

        template.template.spec = client.V1PodSpec(containers=[container],
                                                  restart_policy='Never',
                                                  volumes=volumes)
        job.spec = client.V1JobSpec(ttl_seconds_after_finished=1200,
                                    template=template.template)

        return job
Code Example #13
 def _make_config_map_volumes(self):
     cmvls = []
     for cname in CONFIGMAPS:
         if cname in PWFILES:
             # If no_sudo is *not* set, provisionator will construct
             #  the user and group entries at startup.
             if not self.parent.config.lab_no_sudo:
                 continue
         vol = client.V1Volume(
             name=cname,
             config_map=client.V1ConfigMapVolumeSource(name=cname))
         # default_mode belongs to the ConfigMap volume source, not V1Volume
         if cname.endswith('shadow'):
             vol.config_map.default_mode = 0o600
         else:
             vol.config_map.default_mode = 0o444
         cmvls.append(vol)
     return cmvls
Code Example #14
def _add_elk_logging_sidecar(containers, volumes, volume_mounts,
                             component_name, log_info, filebeat):
    if not log_info or not filebeat:
        return
    log_dir = log_info.get("log_directory")
    if not log_dir:
        return
    sidecar_volume_mounts = []

    # Create the volume for component log files and volume mounts for the component and sidecar containers
    volumes.append(
        client.V1Volume(name="component-log",
                        empty_dir=client.V1EmptyDirVolumeSource()))
    volume_mounts.append(
        client.V1VolumeMount(name="component-log", mount_path=log_dir))
    sc_path = log_info.get("alternate_fb_path") or "{0}/{1}".format(
        filebeat["log_path"], component_name)
    sidecar_volume_mounts.append(
        client.V1VolumeMount(name="component-log", mount_path=sc_path))

    # Create the volume for sidecar data and the volume mount for it
    volumes.append(
        client.V1Volume(name="filebeat-data",
                        empty_dir=client.V1EmptyDirVolumeSource()))
    sidecar_volume_mounts.append(
        client.V1VolumeMount(name="filebeat-data",
                             mount_path=filebeat["data_path"]))

    # Create the volume for the sidecar configuration data and the volume mount for it
    # The configuration data is in a k8s ConfigMap that should be created when DCAE is installed.
    volumes.append(
        client.V1Volume(name="filebeat-conf",
                        config_map=client.V1ConfigMapVolumeSource(
                            name=filebeat["config_map"])))
    sidecar_volume_mounts.append(
        client.V1VolumeMount(name="filebeat-conf",
                             mount_path=filebeat["config_path"],
                             sub_path=filebeat["config_subpath"]))

    # Finally create the container for the sidecar
    containers.append(
        _create_container_object("filebeat",
                                 filebeat["image"],
                                 False,
                                 volume_mounts=sidecar_volume_mounts))
Code Example #15
File: iguazio.py Project: rajacsp/mlrun
    def _mount_v3iod(task):
        from kubernetes import client as k8s_client

        def add_vol(name, mount_path, host_path):
            vol = k8s_client.V1Volume(
                name=name,
                host_path=k8s_client.V1HostPathVolumeSource(path=host_path,
                                                            type=''))
            task.add_volume(vol).add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=mount_path, name=name))

        add_vol(name='shm',
                mount_path='/dev/shm',
                host_path='/dev/shm/' + namespace)
        add_vol(name='v3iod-comm',
                mount_path='/var/run/iguazio/dayman',
                host_path='/var/run/iguazio/dayman/' + namespace)

        vol = k8s_client.V1Volume(
            name='daemon-health',
            empty_dir=k8s_client.V1EmptyDirVolumeSource())
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(
                mount_path='/var/run/iguazio/daemon_health',
                name='daemon-health'))

        vol = k8s_client.V1Volume(
            name='v3io-config',
            config_map=k8s_client.V1ConfigMapVolumeSource(
                name=v3io_config_configmap, default_mode=420))
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/etc/config/v3io',
                                     name='v3io-config'))

        vol = k8s_client.V1Volume(name='v3io-auth',
                                  secret=k8s_client.V1SecretVolumeSource(
                                      secret_name=v3io_auth_secret,
                                      default_mode=420))
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/igz/.igz', name='v3io-auth'))

        return task
Code Example #16
 def set_configmap_volumes(self, original):
     name = "{}-swap".format(self.deployment_name)
     volume = client.V1Volume(
         config_map=client.V1ConfigMapVolumeSource(name=name,
                                                   default_mode=420),
         name=name,
     )
     volume_mount = client.V1VolumeMount(mount_path="/etc/nginx/conf.d",
                                         name=name)
     if not original.spec.template.spec.volumes:
         original.spec.template.spec.volumes = [volume]
     else:
         original.spec.template.spec.volumes.append(volume)
     if not original.spec.template.spec.containers[0].volume_mounts:
         original.spec.template.spec.containers[0].volume_mounts = [
             volume_mount
         ]
     else:
         original.spec.template.spec.containers[0].volume_mounts.append(
             volume_mount)
Code Example #17
File: routes.py Project: ns500/flask-k8s
def deploy_with_cm(form, image, radio):
    app = form.app.data
    env = form.env.data
    command = form.command.data
    container_name = app
    targetport = form.targetport.data

    port = form.port.data

    volumes = client.V1Volume(
        name=app, config_map=client.V1ConfigMapVolumeSource(name=radio))
    if command == '':
        container = client.V1Container(
            name=container_name,
            image=image,
            volume_mounts=[
                client.V1VolumeMount(mount_path='/etc/config', name=app)
            ],
            ports=[client.V1ContainerPort(container_port=int(port))])
    else:
        container = client.V1Container(
            command=["/bin/sh"],
            args=["-c", command],
            env=[env],
            name=container_name,
            image=image,
            volume_mounts=[
                client.V1VolumeMount(mount_path='/etc/config', name=app)
            ],
            ports=[client.V1ContainerPort(container_port=int(port))])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": app}),
        spec=client.V1PodSpec(containers=[container], volumes=[volumes]))
    api_response_1 = deploy_part(app, template)
    logger.info('create deployment')
    logger.info(str(api_response_1))

    api_response = create_svc(port, targetport, app)
    return str(api_response.status)
Code Example #18
    def generate_pod_spec(self, image_name, push):  # pylint: disable=arguments-differ
        """
        :param image_name: name of image to be built
        :param push: whether to push image to given registry or not
        """
        args = [
            "--dockerfile=Dockerfile", "--destination=" + image_name,
            "--context=" + self.uploaded_context_url
        ]
        if not push:
            args.append("--no-push")

        return client.V1PodSpec(
            containers=[
                client.V1Container(
                    name='kaniko',
                    image=constants.KANIKO_IMAGE,
                    args=args,
                    env=[
                        client.V1EnvVar(name='AWS_REGION', value=self.region),
                        client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
                                        value=self.aws_access_key_id),
                        client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
                                        value=self.aws_secret_access_key),
                        client.V1EnvVar(name='S3_ENDPOINT',
                                        value=self.cos_endpoint_url),
                    ],
                    volume_mounts=[
                        client.V1VolumeMount(name="docker-config",
                                             mount_path="/kaniko/.docker/")
                    ])
            ],
            restart_policy='Never',
            volumes=[
                client.V1Volume(name="docker-config",
                                config_map=client.V1ConfigMapVolumeSource(
                                    name="docker-config"))
            ])
Code Example #19
def create_job(name,
               configmap_name,
               container_name,
               container_image,
               container_command,
               namespace="default",
               env_vars={}):
    """
    Create a k8 Job Object
    Args:
        name:
        configmap_name:
        container_name:
        container_image:
        container_command:list类型,执行程序的命令,例如:['python','/home/test.py']
        namespace:
        env_vars: 环境变量

    Returns:

    """
    try:
        # The body is the Job object
        body = client.V1Job(api_version="batch/v1", kind="Job")

        # 对象需要 Metadata,每个JOB必须有一个不同的名称!
        body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
        # Add Status
        body.status = client.V1JobStatus()

        # Start with the Template...
        template = client.V1PodTemplateSpec()

        # Pass arguments via Env:
        env_list = []
        for env_name, env_value in env_vars.items():
            env_list.append(client.V1EnvVar(name=env_name, value=env_value))

        container = client.V1Container(command=container_command,
                                       env=env_list,
                                       image=container_image,
                                       image_pull_policy="IfNotPresent",
                                       name=container_name)

        # mount_path (and batch_v1_api below) are assumed to be defined at
        # module level; neither is defined within this snippet
        volume_mount = client.V1VolumeMount(name="config-volume",
                                            mount_path=mount_path)
        container.volume_mounts = [volume_mount]

        config_map = client.V1ConfigMapVolumeSource(name=configmap_name)

        volumes = client.V1Volume(name="config-volume", config_map=config_map)

        template.spec = client.V1PodSpec(containers=[container],
                                         restart_policy='OnFailure',
                                         volumes=[volumes],
                                         node_selector={'gpu': 'true'})

        # Finally, create the V1JobSpec
        body.spec = client.V1JobSpec(ttl_seconds_after_finished=600,
                                     template=template)

        response = batch_v1_api.create_namespaced_job(namespace,
                                                      body,
                                                      pretty=True)

        return True, response
    except Exception as ex:
        print(ex)
        return False, "k8s Job Object creates Failed!"
Code Example #20
File: base_handler.py Project: mieel/stackl
def create_job_object(name: str,
                      container_image: str,
                      env_list: dict,
                      command: List[str],
                      command_args: List[str],
                      volumes: List[Dict],
                      init_containers: List[Dict],
                      output: Output,
                      namespace: str = "stackl",
                      container_name: str = "jobcontainer",
                      api_version: str = "batch/v1",
                      image_pull_policy: str = "Always",
                      ttl_seconds_after_finished: int = 3600,
                      restart_policy: str = "Never",
                      backoff_limit: int = 0,
                      active_deadline_seconds: int = 3600,
                      service_account: str = "stackl-agent-stackl-agent",
                      image_pull_secrets: List[str] = [],
                      labels=None) -> Tuple[client.V1Job, list]:
    # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
    """Creates a Job object using the Kubernetes client

    :param name: Job name affix
    :type name: str
    :param container_image: automation container image
    :type container_image: str
    :param env_list: Dict with key/values for the environment inside the automation container
    :type env_list: dict
    :param command: entrypoint command
    :type command: List[str]
    :param command_args: command arguments
    :type command_args: List[str]
    :param volumes: volumes and volumemounts
    :type volumes: List[Dict]
    :param image_pull_secrets: secrets to pull images
    :type image_pull_secrets: List[str]
    :param init_containers: list with init_containers
    :type init_containers: List[Dict]
    :param output: output Object
    :type output: Output
    :param namespace: Kubernetes namespace, defaults to "stackl"
    :type namespace: str, optional
    :param container_name: name of automation container, defaults to "jobcontainer"
    :type container_name: str, optional
    :param api_version: Job api version, defaults to "batch/v1"
    :type api_version: str, optional
    :param image_pull_policy: always pull latest images, defaults to "Always"
    :type image_pull_policy: str, optional
    :param ttl_seconds_after_finished: Remove jobs after execution with ttl, defaults to 3600
    :type ttl_seconds_after_finished: int, optional
    :param restart_policy: Restart the pod on the same node after failure, defaults to "Never"
    :type restart_policy: str, optional
    :param backoff_limit: Retries after failure, defaults to 0
    :type backoff_limit: int, optional
    :param active_deadline_seconds: Timeout on a job, defaults to 3600 seconds
    :type active_deadline_seconds: int, optional
    :param service_account: Kubernetes service account, defaults to "stackl-agent-stackl-agent"
    :type service_account: str, optional
    :param labels: metadata labels, defaults to None
    :type labels: dict, optional
    :return: automation Job object and the list of ConfigMaps created for its volumes
    :rtype: Tuple[client.V1Job, list]
    """
    id_job = id_generator()
    name = name + "-" + id_job
    body = client.V1Job(api_version=api_version, kind="Job")
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    body.status = client.V1JobStatus()
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    k8s_volumes = []

    cms = []

    logging.debug(f"volumes: {volumes}")
    # create a k8s volume for each element in volumes
    for vol in volumes:
        vol_name = name + "-" + vol["name"]
        k8s_volume = client.V1Volume(name=vol_name)
        if vol["type"] == "config_map":
            config_map = client.V1ConfigMapVolumeSource()
            config_map.name = vol_name
            k8s_volume.config_map = config_map
            cms.append(create_cm(vol_name, namespace, vol['data']))
            vol['name'] = vol_name
        if vol["type"] == "empty_dir":
            k8s_volume.empty_dir = client.V1EmptyDirVolumeSource(
                medium="Memory")
            vol['name'] = vol_name
        k8s_volumes.append(k8s_volume)

    logging.debug(f"Volumes created for job {name}: {k8s_volumes}")

    # create a volume mount for each element in volumes
    k8s_volume_mounts = []
    for vol in volumes:
        if vol["mount_path"]:
            volume_mount = client.V1VolumeMount(name=vol["name"],
                                                mount_path=vol["mount_path"])
            if "sub_path" in vol:
                volume_mount.sub_path = vol["sub_path"]
            k8s_volume_mounts.append(volume_mount)

    logging.debug(f"Volume mounts created for job {name}: {k8s_volume_mounts}")

    # create an environment list
    k8s_env_list = []

    if env_list:
        for key, value in env_list.items():
            if isinstance(value, dict):
                if 'config_map_key_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            config_map_key_ref=client.V1ConfigMapKeySelector(
                                name=value['config_map_key_ref']["name"],
                                key=value['config_map_key_ref']["key"])))
                    k8s_env_list.append(k8s_env_from)
                elif 'field_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path=value['field_ref'])))
                    k8s_env_list.append(k8s_env_from)
            else:
                k8s_env = client.V1EnvVar(name=key, value=value)
                k8s_env_list.append(k8s_env)

    k8s_env_from_list = []

    # if env_from:
    #     for env in env_from:
    #         if 'config_map_ref' in env:
    #             k8s_env_from = client.V1EnvFromSource(
    #                 config_map_ref=env['config_map_ref'])
    #             k8s_env_from_list.append(k8s_env_from)
    #         elif 'secret_ref' in env:
    #             k8s_env_from = client.V1EnvFromSource(
    #                 secret_ref=env['secret_ref'])
    #             k8s_env_from_list.append(k8s_env_from)

    logging.debug(f"Environment list created for job {name}: {k8s_env_list}")
    print(f"Environment list created for job {name}: {k8s_env_list}")

    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=k8s_env_list,
                                   volume_mounts=k8s_volume_mounts,
                                   image_pull_policy=image_pull_policy,
                                   command=command,
                                   args=command_args,
                                   env_from=k8s_env_from_list)

    k8s_init_containers = []

    logging.debug(f"Init containers for job {name}: {init_containers}")
    for c in init_containers:
        k8s_c = client.V1Container(name=c['name'],
                                   image=c['image'],
                                   volume_mounts=k8s_volume_mounts,
                                   env=k8s_env_list)

        if 'args' in c:
            k8s_c.args = c['args']

        k8s_init_containers.append(k8s_c)

    k8s_secrets = []
    for secret in image_pull_secrets:
        k8s_secrets.append(client.V1LocalObjectReference(name=secret))

    logging.debug(f"Secret list created for job {name}: {k8s_secrets}")

    containers = [container]
    if output:
        output.volume_mounts = k8s_volume_mounts
        output.env = k8s_env_list
        output_containers = output.containers
        containers = containers + output_containers

    template.template.metadata = client.V1ObjectMeta(labels=labels)
    template.template.spec = client.V1PodSpec(
        containers=containers,
        restart_policy=restart_policy,
        image_pull_secrets=k8s_secrets,
        volumes=k8s_volumes,
        init_containers=k8s_init_containers,
        service_account_name=service_account)
    template.template = client.V1PodTemplateSpec(
        metadata=template.template.metadata, spec=template.template.spec)
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=ttl_seconds_after_finished,
        template=template.template,
        backoff_limit=backoff_limit,
        active_deadline_seconds=active_deadline_seconds)

    return body, cms
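
A hedged call sketch based on the volume handling above (all values hypothetical): each volume dict carries name, type, and mount_path, plus the data passed to create_cm for config maps; the function returns the Job body together with the ConfigMaps it prepared:

    job, config_maps = create_job_object(
        name="deploy",
        container_image="stackl/ansible-automation:latest",
        env_list={"STACKL_INSTANCE": "demo"},
        command=["ansible-playbook"],
        command_args=["site.yml"],
        volumes=[{"name": "inventory", "type": "config_map",
                  "mount_path": "/opt/inventory",
                  "data": {"hosts": "localhost"}}],
        init_containers=[],
        output=None)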
Code Example #21
def create_kb8s_job(workflow_id, minion_cmd, cluster):
    configuration = client.Configuration()
    configuration.host = cluster['address']
    configuration.verify_ssl = False
    configuration.debug = False
    if 'general_parameters' not in cluster:
        raise ValueError('Incorrect cluster config.')

    cluster_params = {}
    for parameter in cluster['general_parameters'].split(','):
        key, value = parameter.split('=')
        if key.startswith('kubernetes'):
            cluster_params[key] = value
    env_vars = {
        'HADOOP_CONF_DIR': '/usr/local/juicer/conf',
    }

    token = cluster['auth_token']
    configuration.api_key = {"authorization": "Bearer " + token}
    # noinspection PyUnresolvedReferences
    client.Configuration.set_default(configuration)

    job = client.V1Job(api_version="batch/v1", kind="Job")
    name = 'job-{}'.format(workflow_id)
    container_name = 'juicer-job'
    container_image = cluster_params['kubernetes.container']
    namespace = cluster_params['kubernetes.namespace']
    pull_policy = cluster_params.get('kubernetes.pull_policy', 'Always')

    gpus = int(cluster_params.get('kubernetes.resources.gpus', 0))

    print('-' * 30)
    print('GPU KB8s specification: ' + str(gpus))
    print('-' * 30)
    log.info('GPU specification: %s', gpus)

    job.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    job.status = client.V1JobStatus()

    # Now we start with the Template...
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()

    # Passing Arguments in Env:
    env_list = []
    for env_name, env_value in env_vars.items():
        env_list.append(client.V1EnvVar(name=env_name, value=env_value))

    # Subpath implies that the file is stored as a ConfigMap in k8s
    volume_mounts = [
        client.V1VolumeMount(
            name='juicer-config',
            sub_path='juicer-config.yaml',
            mount_path='/usr/local/juicer/conf/juicer-config.yaml'),
        client.V1VolumeMount(
            name='hdfs-site',
            sub_path='hdfs-site.xml',
            mount_path='/usr/local/juicer/conf/hdfs-site.xml'),
        client.V1VolumeMount(name='hdfs-pvc', mount_path='/srv/storage/'),
    ]
    pvc_claim = client.V1PersistentVolumeClaimVolumeSource(
        claim_name='hdfs-pvc')

    if gpus:
        resources = {'limits': {'nvidia.com/gpu': gpus}}
    else:
        resources = {}

    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=env_list,
                                   command=minion_cmd,
                                   image_pull_policy=pull_policy,
                                   volume_mounts=volume_mounts,
                                   resources=resources)

    volumes = [
        client.V1Volume(
            name='juicer-config',
            config_map=client.V1ConfigMapVolumeSource(name='juicer-config')),
        client.V1Volume(
            name='hdfs-site',
            config_map=client.V1ConfigMapVolumeSource(name='hdfs-site')),
        client.V1Volume(name='hdfs-pvc', persistent_volume_claim=pvc_claim),
    ]
    template.template.spec = client.V1PodSpec(containers=[container],
                                              restart_policy='Never',
                                              volumes=volumes)

    # And finally we can create our V1JobSpec!
    job.spec = client.V1JobSpec(ttl_seconds_after_finished=10,
                                template=template.template)
    api = client.ApiClient(configuration)
    batch_api = client.BatchV1Api(api)

    try:
        batch_api.create_namespaced_job(namespace, job, pretty=True)
    except ApiException as e:
        body = json.loads(e.body)
        if body['reason'] == 'AlreadyExists':
            pass
        else:
            print("Exception when calling BatchV1Api->: {}\n".format(e))
Code Example #22
    def create_builder_job(self, job_name, container_image, ns, bentoservice):

        print("at=starting-job-creation job=%s" % job_name)
        bento_env = client.V1EnvVar(name='BENTOML__YATAI_SERVICE__URL',
                                    value=self.yatai_service)

        volume = client.V1Volume(name="bento-storage",
                                 empty_dir=client.V1EmptyDirVolumeSource())

        volume_mount = client.V1VolumeMount(name="bento-storage",
                                            mount_path=bento_mount_dir)

        docker_config = client.V1Volume(
            name="docker-config",
            config_map=client.V1ConfigMapVolumeSource(name="docker-config"))

        docker_config_mount = client.V1VolumeMount(name="docker-config",
                                                   mount_path=config_mount_dir)

        # use bentoml retrieve to obtain the correct build context
        bento_args = [
            "bentoml", "retrieve", bentoservice,
            "--target_dir=%s" % target_dir, "--debug"
        ]

        # tl;dr build context into image and push without docker daemon
        # (for headless use in CI pipelines, etc)
        bento_container = client.V1Container(
            name="%s-bento" % job_name,
            image=yatai_image,
            args=bento_args,
            env=[bento_env],
            image_pull_policy="Always",
            volume_mounts=[volume_mount],
        )

        kaniko_args = [
            "--dockerfile=%s/Dockerfile" % target_dir,
            "--context=dir://%s" % target_dir,
            "--destination=%s" % container_image, "--verbosity=debug",
            "--digest-file=%s/digest.txt" % target_dir, "--single-snapshot"
        ]

        kaniko_container = client.V1Container(
            name="%s-kaniko" % job_name,
            image="gcr.io/kaniko-project/executor:latest",
            args=kaniko_args,
            image_pull_policy="Always",
            volume_mounts=[volume_mount, docker_config_mount],
            env=[
                client.V1EnvVar(name="DOCKER_CONFIG", value="/kaniko/.docker/")
            ])

        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": "bentobuild"}),
            spec=client.V1PodSpec(restart_policy="Never",
                                  init_containers=[
                                      bento_container,
                                  ],
                                  containers=[
                                      kaniko_container,
                                  ],
                                  volumes=[volume, docker_config]),
        )

        spec = client.V1JobSpec(template=template,
                                backoff_limit=3,
                                ttl_seconds_after_finished=60)

        job = client.V1Job(api_version="batch/v1",
                           kind="Job",
                           metadata=client.V1ObjectMeta(name=job_name,
                                                        namespace=ns),
                           spec=spec)

        print("at=initialized-job job=%s ns=%s" %
              (job.metadata.name, job.metadata.namespace))

        return job
Code Example #23
    def start_job(self, nuts_id):
        input_path = self.input_path
        output_path = self.pers_path + '/' + self.experiment_id
        pers_path = self.pers_path
        resolution = self.resolution
        experiment_id = self.experiment_id
        nuts_id = nuts_id.lower()

        container_name = 'br-process-raster'
        container_image = 'harbor.tilyes.eu/eugis/br-process-raster'
        body = client.V1Job(api_version="batch/v1", kind="Job")
        body.metadata = client.V1ObjectMeta(
            namespace=self.namespace,
            name=f'{container_name}-{experiment_id}-{nuts_id}')
        body.status = client.V1JobStatus()
        template = client.V1PodTemplate()
        template.template = client.V1PodTemplateSpec()
        container = client.V1Container(
            name=container_name,
            image=container_image,
            resources=client.V1ResourceRequirements(requests={
                'cpu': '1',
                'memory': '4Gi',
            },
                                                    limits={
                                                        'cpu': '2',
                                                        'memory': '6Gi',
                                                    }),
            volume_mounts=[
                client.V1VolumeMount(mount_path=self.input_path,
                                     name="source"),
                client.V1VolumeMount(mount_path=self.pers_path,
                                     name="persistence"),
                client.V1VolumeMount(mount_path="/app/config/",
                                     name="hu-raster-config")
            ],
            command=[
                'sh', '-c',
                f'python3 worker.py --input-path {input_path} --resolution {resolution} --output-path {output_path} --pers-path {pers_path} --nuts-id {nuts_id} && curl -X POST http://localhost:15020/quitquitquit'
            ])
        template.template.spec = client.V1PodSpec(
            containers=[container],
            volumes=[
                client.V1Volume(name='source',
                                persistent_volume_claim=client.
                                V1PersistentVolumeClaimVolumeSource(
                                    claim_name=self.source_pvc)),
                client.V1Volume(name='persistence',
                                persistent_volume_claim=client.
                                V1PersistentVolumeClaimVolumeSource(
                                    claim_name=self.persistence_pvc)),
                client.V1Volume(
                    name="hu-raster-config",
                    config_map=client.V1ConfigMapVolumeSource(
                        name=f"kf-pipeline-hu-raster-config-{experiment_id}"))
            ],
            restart_policy='Never')
        body.spec = client.V1JobSpec(template=template.template,
                                     ttl_seconds_after_finished=10)
        self.delete_job(f'{container_name}-{experiment_id}-{nuts_id}')
        self.v1.create_namespaced_job(self.namespace, body, pretty='true')

        return f'{container_name}-{experiment_id}-{nuts_id}'
Code Example #24
def get_flink_session_cluster_boilerplate(
        job: KubernetesFlinkJob) -> client.V1Job:
    from ai_flow.application_master.master import GLOBAL_MASTER_CONFIG
    job_master_args_default = [
        "session-cluster", "--job-classname", job.job_config.main_class,
        "-Djobmanager.rpc.address=flink-job-cluster-{}-svc".format(job.uuid),
        "-Dparallelism.default=1", "-Dblob.server.port=6124",
        "-Dqueryable-state.server.ports=6125"
    ]
    rpc_container_port = client.V1ContainerPort(name='rpc',
                                                container_port=6123)
    blob_container_port = client.V1ContainerPort(name='blob',
                                                 container_port=6124)
    query_container_port = client.V1ContainerPort(name='query',
                                                  container_port=6125)
    ui_container_port = client.V1ContainerPort(name='ui', container_port=8081)
    mount_path = '/opt/ai-flow/project'
    volume_mount = client.V1VolumeMount(name='download-volume',
                                        mount_path=mount_path)
    flink_config_volume_mount = client.V1VolumeMount(
        name="flink-config-volume", mount_path="/opt/flink/conf")
    workflow_id_env = client.V1EnvVar(
        name='WORKFLOW_ID', value=str(job.job_context.workflow_execution_id))
    execution_config_env = client.V1EnvVar(name='CONFIG_FILE_NAME',
                                           value=job.config_file)
    if job.job_config.language_type == LanguageType.PYTHON:
        language_type_env = client.V1EnvVar(name='LANGUAGE_TYPE',
                                            value='python')
    else:
        language_type_env = client.V1EnvVar(name='LANGUAGE_TYPE', value='java')

    entry_module_path_env = client.V1EnvVar(
        name='ENTRY_MODULE_PATH',
        value=job.job_config.properties['entry_module_path'])
    flink_job_master_rpc_address_env = client.V1EnvVar(
        name='FLINK_JOB_MASTER_RPC_ADDRESS',
        value="flink-job-cluster-{}-svc".format(job.uuid))

    job_master_container_image = None
    if 'flink_ai_flow_base_image' in GLOBAL_MASTER_CONFIG:
        job_master_container_image = GLOBAL_MASTER_CONFIG[
            'flink_ai_flow_base_image']
    if job.job_config.image is not None:
        job_master_container_image = job.job_config.image

    if job_master_container_image is None:
        raise Exception("flink_ai_flow_base_image not set")

    job_master_container = client.V1Container(
        name='flink-job-master-{}'.format(job.uuid),
        image=job_master_container_image,
        image_pull_policy='Always',
        ports=[
            rpc_container_port, blob_container_port, query_container_port,
            ui_container_port
        ],
        command=['/docker-entrypoint.sh'],
        args=job_master_args_default,
        volume_mounts=[volume_mount, flink_config_volume_mount],
        env=[
            workflow_id_env, execution_config_env,
            flink_job_master_rpc_address_env, entry_module_path_env,
            language_type_env
        ])

    try:
        jm_resources = job.job_config.resources['jobmanager']
        job_master_container.resources = client.V1ResourceRequirements(
            requests=jm_resources)
    except KeyError:
        pass

    init_args_default = [
        str(job.job_config.properties),
        str(job.job_context.workflow_execution_id),
        job.job_config.project_path, mount_path
    ]
    init_container = client.V1Container(
        name='init-container',
        image=GLOBAL_MASTER_CONFIG['ai_flow_base_init_image'],
        image_pull_policy='Always',
        command=["python", "/app/download.py"],
        args=init_args_default,
        volume_mounts=[volume_mount])
    volume = client.V1Volume(name='download-volume')

    # flink_conf.yaml config map volume
    config_name = "flink-config-{}".format(job.uuid)
    key_to_path_list = []
    key_to_path_list.append(
        client.V1KeyToPath(key="flink-conf.yaml", path="flink-conf.yaml"))
    key_to_path_list.append(
        client.V1KeyToPath(key="log4j.properties", path="log4j.properties"))
    key_to_path_list.append(
        client.V1KeyToPath(key="log4j-cli.properties",
                           path="log4j-cli.properties"))
    flink_config_volume = client.V1Volume(
        name="flink-config-volume",
        config_map=client.V1ConfigMapVolumeSource(name=config_name,
                                                  items=key_to_path_list))
    pod_spec = client.V1PodSpec(restart_policy='Never',
                                containers=[job_master_container],
                                init_containers=[init_container],
                                volumes=[volume, flink_config_volume])
    labels = {'app': 'flink', 'component': 'job-cluster-' + str(job.uuid)}
    object_meta = client.V1ObjectMeta(
        labels=labels,
        annotations={
            ANNOTATION_WATCHED: 'True',
            ANNOTATION_JOB_ID: str(job.instance_id),
            ANNOTATION_WORKFLOW_ID: str(job.job_context.workflow_execution_id),
            ANNOTATION_JOB_UUID: str(job.uuid)
        })
    template_spec = client.V1PodTemplateSpec(metadata=object_meta,
                                             spec=pod_spec)
    job_spec = client.V1JobSpec(template=template_spec, backoff_limit=0)
    object_meta = client.V1ObjectMeta(labels=labels,
                                      name=generate_job_name(job))
    job = client.V1Job(metadata=object_meta,
                       spec=job_spec,
                       api_version='batch/v1',
                       kind='Job')
    return job
Code Example #25
def get_config_map_volume_object(config_map_name, volume_name):
    config_map = client.V1ConfigMapVolumeSource(name=config_map_name)
    volume = client.V1Volume(config_map=config_map, name=volume_name)
    return volume
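
A minimal pairing sketch: the helper only builds the V1Volume, so the container still needs a matching V1VolumeMount (mount path hypothetical):

    volume = get_config_map_volume_object("my-configmap", "config-volume")
    mount = client.V1VolumeMount(name="config-volume",
                                 mount_path="/etc/config")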
Code Example #26
def get_task_manager_boilerplate(
        job: KubernetesFlinkJob) -> client.V1Deployment:
    from ai_flow.application_master.master import GLOBAL_MASTER_CONFIG
    dep_resource_metadata = client.V1ObjectMeta(name='flink-task-manager-' +
                                                str(job.uuid))

    mount_path = '/opt/ai-flow/project'
    volume_mount = client.V1VolumeMount(name='download-volume',
                                        mount_path=mount_path)
    flink_config_volume_mount = client.V1VolumeMount(
        name="flink-config-volume", mount_path="/opt/flink/conf")
    init_args_default = [
        str(job.job_config.properties),
        str(job.job_context.workflow_execution_id),
        job.job_config.project_path, mount_path
    ]

    init_container = client.V1Container(
        name='init-container',
        image=GLOBAL_MASTER_CONFIG['ai_flow_base_init_image'],
        image_pull_policy='Always',
        command=["python", "/app/download.py"],
        args=init_args_default,
        volume_mounts=[volume_mount, flink_config_volume_mount])
    volume = client.V1Volume(name='download-volume')

    task_manager_args = [
        "task-manager", "-Djobmanager.rpc.address=" +
        'flink-job-cluster-{}-svc'.format(job.uuid)
    ]

    try:
        flink_conf = job.job_config.flink_conf
        for key, value in flink_conf.items():
            task_manager_args.extend(["-D{}={}".format(key, value)])
    except KeyError:
        pass

    workflow_id_env = client.V1EnvVar(
        name='WORKFLOW_ID', value=str(job.job_context.workflow_execution_id))
    execution_config_env = client.V1EnvVar(name='CONFIG_FILE_NAME',
                                           value=job.config_file)

    # flink_conf.yaml config map volume
    config_name = "flink-config-{}".format(job.uuid)
    key_to_path_list = []
    key_to_path_list.append(
        client.V1KeyToPath(key="flink-conf.yaml", path="flink-conf.yaml"))
    key_to_path_list.append(
        client.V1KeyToPath(key="log4j.properties", path="log4j.properties"))
    key_to_path_list.append(
        client.V1KeyToPath(key="log4j-cli.properties",
                           path="log4j-cli.properties"))
    flink_config_volume = client.V1Volume(
        name="flink-config-volume",
        config_map=client.V1ConfigMapVolumeSource(name=config_name,
                                                  items=key_to_path_list))

    task_manager_container_image = None
    if 'flink_ai_flow_base_image' in GLOBAL_MASTER_CONFIG:
        task_manager_container_image = GLOBAL_MASTER_CONFIG[
            'flink_ai_flow_base_image']
    try:
        if job.job_config.image is not None:
            task_manager_container_image = job.job_config.image
    except KeyError:
        pass
    if task_manager_container_image is None:
        raise Exception("flink_ai_flow_base_image not set")

    tm_container = client.V1Container(
        name='flink-task-manager-' + str(job.uuid),
        image=task_manager_container_image,
        command=['/docker-entrypoint.sh'],
        args=task_manager_args,
        env=[workflow_id_env, execution_config_env],
        volume_mounts=[volume_mount])

    try:
        tm_resource = job.job_config.resources['taskmanager']
        tm_container.resources = client.V1ResourceRequirements(
            requests=tm_resource)
    except KeyError:
        pass

    containers = [tm_container]
    labels = {'app': 'flink', 'component': 'task-manager-' + str(job.uuid)}
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(containers=containers,
                              init_containers=[init_container],
                              volumes=[volume, flink_config_volume]))

    labels = {'app': 'flink', 'component': 'task-manager-' + str(job.uuid)}
    deployment_spec = client.V1DeploymentSpec(
        replicas=job.job_config.parallelism,
        template=pod_template,
        selector={'matchLabels': labels})
    dep_resource = client.V1Deployment(api_version='extensions/v1beta1',
                                       kind='Deployment',
                                       spec=deployment_spec,
                                       metadata=dep_resource_metadata)
    return dep_resource
Code Example #27
File: deployment.py Project: toolskit/templatekit
def template(context):
    """
    handle yml env
    """
    name = context.get("name")
    version = context.get("version")
    labels = {
        "app": name,
        "version": "v1"
    } if not version else {
        # remove a trailing "-v2" suffix (str.strip strips characters, not a suffix)
        "app": name[:-len("-v2")] if name.endswith("-v2") else name,
        "version": version
    }
    image_tag = context["name"].split('-')[1]
    image = context["image_namespace"] + image_tag + ":" + context[
        "image_branch"]
    args = list(context["args"]) if context.get("args") else None
    limits, requests = context["resources"]["limits"], context["resources"][
        "requests"]
    replicas = context.get("replicas", 1)
    workingDir = context["workingDir"]
    if name == "backend-logproxy":
        annotations = {"sidecar.istio.io/inject": "false"}
    else:
        annotations = {"traffic.sidecar.istio.io/excludeOutboundPorts": "6379"}
    """
    handle cmdb env
    """
    filename = "env_" + name.split("-")[1] + ".yml"
    env = handle_env("/tmp/{}".format(filename))
    """
    k8s yaml 组件模块
    """

    # Get the configmap directory structure from the svn branch
    parentDir, subdir = handle_configmap("configmap")
    volumemounts = [
        client.V1VolumeMount(mount_path="/{}".format(parentDir),
                             name="mainfiles")
    ]
    volumes = [
        client.V1Volume(
            name="mainfiles",
            config_map=client.V1ConfigMapVolumeSource(name="mainfiles"))
    ]

    for sub_dir in subdir:  # avoid shadowing the builtin name `dir`
        volumemounts.append(
            client.V1VolumeMount(mount_path="/{}/{}".format(parentDir,
                                                            sub_dir),
                                 name=sub_dir))
        volumes.append(
            client.V1Volume(
                name=sub_dir,
                config_map=client.V1ConfigMapVolumeSource(name=sub_dir)))

    # The two branches differ only in the preStop lifecycle hook, so build
    # the container once and add the hook for frontend-dispatch services.
    lifecycle = None
    if name.startswith("frontend-dispatch"):
        lifecycle = client.V1Lifecycle(pre_stop=client.V1Handler(
            _exec=client.V1ExecAction(command=["nginx", "-s", "quit"])))
    containers = [
        client.V1Container(
            name=name,
            image=image,
            env=env,
            args=args,
            volume_mounts=volumemounts,
            image_pull_policy="Always",
            lifecycle=lifecycle,
            readiness_probe=client.V1Probe(_exec=client.V1ExecAction(
                command=['cat', '/tmp/container_ready']),
                                           initial_delay_seconds=10,
                                           period_seconds=5),
            resources=client.V1ResourceRequirements(limits=limits,
                                                    requests=requests),
            security_context=client.V1SecurityContext(privileged=True),
            working_dir=workingDir,
        )
    ]

    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels, annotations=annotations),
        spec=client.V1PodSpec(
            containers=containers,
            dns_policy="ClusterFirst",
            image_pull_secrets=[
                client.V1LocalObjectReference(name="image-pull-secret")
            ],
            restart_policy="Always",
            volumes=volumes))

    spec = client.V1DeploymentSpec(
        replicas=replicas,
        selector=client.V1LabelSelector(match_labels=labels),
        template=template,
        # apps/v1 Deployments use V1DeploymentStrategy, not the removed
        # extensions/v1beta1 strategy models.
        strategy=client.V1DeploymentStrategy(
            rolling_update=client.V1RollingUpdateDeployment(
                max_surge=1, max_unavailable='25%'),
            type="RollingUpdate",
        ),
    )

    return client.V1Deployment(api_version="apps/v1",
                               kind="Deployment",
                               metadata=client.V1ObjectMeta(name=name,
                                                            labels=labels),
                               spec=spec)
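
A minimal usage sketch for the template helper above, assuming the rendered
Deployment is submitted with AppsV1Api and that handle_env / handle_configmap
find their inputs; every context value below is a hypothetical placeholder.

context = {
    "name": "backend-api",
    "version": None,
    "image_namespace": "registry.example.com/apps/",
    "image_branch": "master",
    "args": ["--port", "8080"],
    "resources": {
        "limits": {"cpu": "1", "memory": "1Gi"},
        "requests": {"cpu": "500m", "memory": "512Mi"},
    },
    "replicas": 2,
    "workingDir": "/app",
}
deployment = template(context)
client.AppsV1Api().create_namespaced_deployment(namespace="default",
                                                body=deployment)
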
コード例 #28
    def construct_job(self, run):
        check.inst_param(run, 'run', PipelineRun)

        dagster_labels = {
            'app.kubernetes.io/name': 'dagster',
            'app.kubernetes.io/instance': 'dagster',
            'app.kubernetes.io/version': dagster_version,
        }

        execution_params = {
            'executionParams': {
                'selector': run.selector.to_graphql_input(),
                'environmentConfigData': run.environment_dict,
                'executionMetadata': {
                    'runId': run.run_id
                },
                'mode': run.mode,
            },
        }

        job_container = client.V1Container(
            name='dagster-job-%s' % run.run_id,
            image=self.job_image,
            command=['dagster-graphql'],
            args=["-p", "executePlan", "-v",
                  json.dumps(execution_params)],
            image_pull_policy=self.image_pull_policy,
            env=[
                client.V1EnvVar(name='DAGSTER_HOME',
                                value='/opt/dagster/dagster_home')
            ],
            volume_mounts=[
                client.V1VolumeMount(
                    name='dagster-instance',
                    mount_path='/opt/dagster/dagster_home/dagster.yaml',
                    sub_path='dagster.yaml',
                )
            ],
        )

        config_map_volume = client.V1Volume(
            name='dagster-instance',
            config_map=client.V1ConfigMapVolumeSource(
                name=self.instance_config_map),
        )

        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(name='dagster-job-pod-%s' %
                                         run.run_id,
                                         labels=dagster_labels),
            spec=client.V1PodSpec(
                image_pull_secrets=self.image_pull_secrets,
                service_account_name=self.service_account_name,
                restart_policy='Never',
                containers=[job_container],
                volumes=[config_map_volume],
            ),
        )

        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name='dagster-job-%s' % run.run_id,
                                         labels=dagster_labels),
            spec=client.V1JobSpec(
                template=template,
                backoff_limit=BACKOFF_LIMIT,
                ttl_seconds_after_finished=TTL_SECONDS_AFTER_FINISHED,
            ),
        )
        return job
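
A minimal sketch of how the constructed Job might be submitted, assuming the
kubernetes BatchV1Api; `launcher` stands in for the object this method lives
on and `run` for a PipelineRun, both hypothetical here.

from kubernetes import client, config

config.load_kube_config()
job = launcher.construct_job(run)
client.BatchV1Api().create_namespaced_job(namespace='default', body=job)
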
コード例 #29
    def construct_job(self, run):
        check.inst_param(run, 'run', PipelineRun)

        dagster_labels = {
            'app.kubernetes.io/name': 'dagster',
            'app.kubernetes.io/instance': 'dagster',
            'app.kubernetes.io/version': dagster_version,
        }

        execution_params = execution_params_from_pipeline_run(run)

        job_container = client.V1Container(
            name='dagster-job-%s' % run.run_id,
            image=self.job_image,
            command=['dagster-graphql'],
            args=[
                "-p",
                "startPipelineExecution",
                "-v",
                json.dumps(
                    {'executionParams': execution_params.to_graphql_input()}),
            ],
            image_pull_policy=self.image_pull_policy,
            env=[
                client.V1EnvVar(
                    name='DAGSTER_PG_PASSWORD',
                    value_from=client.V1EnvVarSource(
                        secret_key_ref=client.V1SecretKeySelector(
                            name='dagster-postgresql',
                            key='postgresql-password')),
                ),
            ],
            env_from=self.env_from_sources,
            volume_mounts=[
                client.V1VolumeMount(
                    name='dagster-instance',
                    mount_path='{dagster_home}/dagster.yaml'.format(
                        dagster_home=self.dagster_home),
                    sub_path='dagster.yaml',
                )
            ],
        )

        config_map_volume = client.V1Volume(
            name='dagster-instance',
            config_map=client.V1ConfigMapVolumeSource(
                name=self.instance_config_map),
        )

        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(
                name='dagster-job-pod-%s' % run.run_id,
                labels=dagster_labels,
            ),
            spec=client.V1PodSpec(
                image_pull_secrets=self.image_pull_secrets,
                service_account_name=self.service_account_name,
                restart_policy='Never',
                containers=[job_container],
                volumes=[config_map_volume],
            ),
        )

        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=client.V1ObjectMeta(name='dagster-job-%s' % run.run_id,
                                         labels=dagster_labels),
            spec=client.V1JobSpec(
                template=template,
                backoff_limit=BACKOFF_LIMIT,
                ttl_seconds_after_finished=TTL_SECONDS_AFTER_FINISHED,
            ),
        )
        return job
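
`env_from_sources` is not defined in this excerpt; under the assumption that
it injects whole ConfigMaps and Secrets as environment variables, a plausible
shape would be a list of V1EnvFromSource objects (the names below are
placeholders, not from the source):

# Hypothetical construction of env_from_sources.
env_from_sources = [
    client.V1EnvFromSource(
        config_map_ref=client.V1ConfigMapEnvSource(name='dagster-job-env')),
    client.V1EnvFromSource(
        secret_ref=client.V1SecretEnvSource(name='dagster-job-secrets')),
]
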
コード例 #30
def create_cron_job(name,
                    configmap_name,
                    init_container_name,
                    init_container_image,
                    init_container_command,
                    container_name,
                    container_image,
                    container_command,
                    schedule,
                    namespace="default",
                    env_vars=None):  # avoid a mutable default argument
    try:
        # The body is the CronJob object itself.
        body = client.V1beta1CronJob(api_version="batch/v1beta1",
                                     kind="CronJob")
        # The object needs metadata, and every job must have a unique name!
        body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
        # Add a status object.
        body.status = client.V1beta1CronJobStatus()

        template = client.V1PodTemplateSpec()

        # Pass arguments through environment variables:
        env_list = []
        for env_name, env_value in (env_vars or {}).items():
            env_list.append(client.V1EnvVar(name=env_name, value=env_value))

        container = client.V1Container(command=container_command,
                                       env=env_list,
                                       image=container_image,
                                       image_pull_policy="IfNotPresent",
                                       name=container_name)

        volume_mount = client.V1VolumeMount(name="share-volume",
                                            mount_path=mount_path)
        container.volume_mounts = [volume_mount]
        container.args = [mount_path]

        init_container = client.V1Container(command=init_container_command,
                                            image=init_container_image,
                                            image_pull_policy="IfNotPresent",
                                            name=init_container_name)

        init_volume_mount = client.V1VolumeMount(name="config-volume",
                                                 mount_path=init_mount_path)
        init_container.volume_mounts = [volume_mount, init_volume_mount]

        share_volume = client.V1Volume(name="share-volume", empty_dir={})

        config_map = client.V1ConfigMapVolumeSource(name=configmap_name)
        config_map_volume = client.V1Volume(name="config-volume",
                                            config_map=config_map)

        # These models also live in the client package.
        vlor = client.V1LocalObjectReference(name='ceph-secret')
        cephfs = client.V1CephFSVolumeSource(
            monitors=[
                '192.168.4.21:6789', '192.168.4.22:6789', '192.168.4.29:6789'
            ],
            user='******',
            secret_ref=vlor,
            path='/k8svolume/ai-algo')
        cephfs_volume = client.V1Volume(name='demo-path', cephfs=cephfs)
        # The pod spec belongs on the V1PodTemplateSpec created above, not on
        # the config map volume, and the cephfs volume is not a config map.
        template.spec = client.V1PodSpec(
            active_deadline_seconds=600,
            containers=[container],
            restart_policy='OnFailure',
            volumes=[config_map_volume, share_volume, cephfs_volume],
            init_containers=[init_container])

        job_template = client.V1beta1JobTemplateSpec()
        job_template.spec = client.V1JobSpec(template=template)

        body.spec = client.V1beta1CronJobSpec(starting_deadline_seconds=600,
                                              job_template=job_template,
                                              schedule=schedule)

        # To make an asynchronous HTTP request
        thread = batch_v1_beta1_api.create_namespaced_cron_job(namespace,
                                                               body,
                                                               async_req=True,
                                                               pretty=True)
        result = thread.get()

        return True, result

    except Exception as ex:
        print(ex)
        return False, ""
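
A minimal, hypothetical invocation of create_cron_job; every value below is a
placeholder, and mount_path, init_mount_path and batch_v1_beta1_api are
assumed to be defined at module level, as the snippet implies.

ok, result = create_cron_job(
    name="ai-algo-nightly",
    configmap_name="ai-algo-config",
    init_container_name="fetch-config",
    init_container_image="busybox:1.36",
    init_container_command=["sh", "-c", "cp /config/* /share/"],
    container_name="ai-algo",
    container_image="registry.example.com/ai-algo:latest",
    container_command=["python", "run.py"],
    schedule="0 2 * * *",
    namespace="default",
    env_vars={"ENV": "prod"})
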