Example #1
def func_to_pod(image, runtime, extra_env, command, args, workdir):
    container = client.V1Container(
        name="base",
        image=image,
        env=extra_env + runtime.spec.env,
        command=[command],
        args=args,
        working_dir=workdir,
        image_pull_policy=runtime.spec.image_pull_policy,
        volume_mounts=runtime.spec.volume_mounts,
        resources=runtime.spec.resources,
    )

    pod_spec = client.V1PodSpec(
        containers=[container],
        restart_policy="Never",
        volumes=runtime.spec.volumes,
        service_account=runtime.spec.service_account,
    )

    if runtime.spec.image_pull_secret:
        pod_spec.image_pull_secrets = [
            client.V1LocalObjectReference(name=runtime.spec.image_pull_secret)
        ]

    return pod_spec
Example #2
def create_deployment_object(deployment_name, image_name):
    # Configure the Pod template container
    container = client.V1Container(
        name=image_name,
        image="lizhengjie/hcp-re:" + image_name,
        image_pull_policy="Always",
        ports=[client.V1ContainerPort(container_port=8888)])
    image_pull_secret = client.V1LocalObjectReference(name='regcred')
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"k8s-app": deployment_name}),
        spec=client.V1PodSpec(image_pull_secrets=[image_pull_secret],
                              containers=[container]))
    # Create the specification of deployment
    spec = client.V1DeploymentSpec(
        replicas=1,
        template=template,
        selector={'matchLabels': {
            'k8s-app': deployment_name
        }})
    # Instantiate the deployment object
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=deployment_name),
        spec=spec)
    return deployment
def create_job_object(container_image, image_pull_secret=None, service_account_name=None):

    pull_secrets = None
    if image_pull_secret:
        pull_secrets = [client.V1LocalObjectReference(name=image_pull_secret)]
    # Configure the Pod template container
    container = client.V1Container(
        name="installer",
        image=container_image,
        command=list(install_command()))
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "mcm-installer"}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container],
                                image_pull_secrets=[pull_secret],service_account_name=service_account_name))
    # Create the specification of the job
    spec = client.V1JobSpec(
        template=template,
        backoff_limit=1)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=JOB_NAME),
        spec=spec)

    return job
Example #4
def add_docker_credentials(kube_manager, pod_spec, namespace):
    secret_name = constants.DOCKER_CREDS_SECRET_NAME
    if not kube_manager.secret_exists(secret_name, namespace):
        raise ValueError(
            "Not able to find docker credentials secret: {}".format(
                secret_name))
    pod_spec.image_pull_secrets = [client.V1LocalObjectReference(secret_name)]
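The secret-existence check above goes through the surrounding kube_manager helper. A minimal sketch of such a check written directly against the Kubernetes Python client (the helper name and flow here are assumptions, not the original manager's API):

from kubernetes import client
from kubernetes.client.rest import ApiException

def secret_exists(secret_name, namespace, core_api=None):
    """Return True if the named secret exists in the namespace (sketch)."""
    core_api = core_api or client.CoreV1Api()
    try:
        core_api.read_namespaced_secret(secret_name, namespace)
        return True
    except ApiException as exc:
        if exc.status == 404:
            return False
        raise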
Example #5
    def _get_pod_template(self, image_name, name, container_port, envs, volume_mounts, volumes, service_account_name,
                          prior_device_name):
        env_list = self._get_envs(envs)
        volume_mount_list = self._get_volume_mounts(volume_mounts)
        volume_list = self._get_volumes(volumes)
        image_pull_secret_name = prior_device_name + "-registry"

        if container_port != None and container_port != "":
            return client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"run": name}),
                spec=client.V1PodSpec(
                    service_account_name=service_account_name,
                    image_pull_secrets=[client.V1LocalObjectReference(
                        name=image_pull_secret_name
                    )],
                    containers=[client.V1Container(
                        name=name,
                        image=image_name,
                        image_pull_policy="Always",
                        ports=[client.V1ContainerPort(container_port=int(container_port))],
                        env=env_list,
                        volume_mounts=volume_mount_list
                    )],
                    volumes=volume_list
                )
            )

        return client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"run": name}),
            spec=client.V1PodSpec(
                service_account_name=service_account_name,
                image_pull_secrets=[client.V1LocalObjectReference(
                    name=image_pull_secret_name
                )],
                containers=[client.V1Container(
                    name=name,
                    image=image_name,
                    image_pull_policy="Always",
                    env=env_list,
                    volume_mounts=volume_mount_list
                )],
                volumes=volume_list
            )
        )
Example #6
def create_deployment_object(name=None, namespace=None, image=None, port=None, image_pull_policy=None,
                             imagePullSecret=None, labels=None, replicas=None, cpu=None, memory=None,
                             liveness_probe=None, readiness_probe=None):
    # Configure the Pod template container
    resources = None
    volumeMounts = []
    volumes = []
    if cpu or memory:
        resources = client.V1ResourceRequirements(
            requests={
                "cpu": str(int(cpu / 2)) + "m",
                "memory": str(int(memory / 2)) + "Mi"
            },
            limits={
                "cpu": str(cpu) + "m",
                "memory": str(memory) + "Mi"
            })
    vm1 = client.V1VolumeMount(name='log',
                               mount_path="/opt/microservices/logs")
    volumeMounts.append(vm1)
    v1 = client.V1Volume(name="log", empty_dir=client.V1EmptyDirVolumeSource())
    volumes.append(v1)
    image_pull_secret = client.V1LocalObjectReference(name=imagePullSecret)
    container = client.V1Container(name=name,
                                   image=image,
                                   image_pull_policy=image_pull_policy,
                                   ports=[
                                       client.V1ContainerPort(
                                           container_port=port,
                                           name="web",
                                           protocol="TCP")
                                   ],
                                   resources=resources,
                                   readiness_probe=readiness_probe,
                                   liveness_probe=liveness_probe,
                                   volume_mounts=volumeMounts)
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(containers=[container],
                              image_pull_secrets=[image_pull_secret],
                              volumes=volumes))
    spec = client.V1DeploymentSpec(replicas=replicas,
                                   template=template,
                                   selector={'matchLabels': labels}
                                   #strategy
                                   )
    deployment = client.V1Deployment(api_version="apps/v1",
                                     kind="Deployment",
                                     metadata=client.V1ObjectMeta(
                                         name=name, namespace=namespace),
                                     spec=spec)
    return deployment
Example #7
 def _get_pod_template(self):
     return client.V1PodTemplateSpec(
         metadata=client.V1ObjectMeta(labels={"run": self.name}),
         spec=client.V1PodSpec(
             service_account_name=self.service_account_name,
             image_pull_secrets=[
                 client.V1LocalObjectReference(name=self.prior_device_name +
                                               "-registry")
             ],
             containers=[self._get_container()],
             volumes=self._get_volumes(),
         ))
Example #8
 def export_deployment(self):
      # Configure the Pod template container
     container = client.V1Container(
         name=self.dm_name,
         image=self.image,
         ports=[
             client.V1ContainerPort(container_port=int(port))
             for port in self.container_port
         ],
         image_pull_policy='Always',
         env=[
             client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
             client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8')
         ],
         resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                 requests=self.re_requests),
         volume_mounts=[
             client.V1VolumeMount(mount_path='/opt/logs', name='logs')
         ],
         liveness_probe=client.V1Probe(
             initial_delay_seconds=5,
             tcp_socket=client.V1TCPSocketAction(
                 port=int(self.container_port[0]))))
      # Create and configure a spec section
     secrets = client.V1LocalObjectReference('registrysecret')
     volume = client.V1Volume(
         name='logs',
         host_path=client.V1HostPathVolumeSource(path='/opt/logs'))
     template = client.V1PodTemplateSpec(
         metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
         spec=client.V1PodSpec(containers=[container],
                               image_pull_secrets=[secrets],
                               volumes=[volume]))
     selector = client.V1LabelSelector(
         match_labels={"project": self.dm_name})
     # Create the specification of deployment
     spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
         self.replicas),
                                                   template=template,
                                                   selector=selector,
                                                   min_ready_seconds=3)
     # Instantiate the deployment object
     deployment = client.ExtensionsV1beta1Deployment(
         api_version="extensions/v1beta1",
         kind="Deployment",
         metadata=client.V1ObjectMeta(name=self.dm_name),
         spec=spec)
     return deployment
 def create_serviceaccount(self,
                           api_instance,
                           namespace,
                           name,
                           image_pull_secret_name=None):
     body = client.V1ServiceAccount(
         api_version='v1',
         kind='ServiceAccount',
         metadata=client.V1ObjectMeta(name=name, namespace=namespace),
         automount_service_account_token=True,
          image_pull_secrets=None if image_pull_secret_name is None else
          [client.V1LocalObjectReference(name=image_pull_secret_name)])
     result = api_instance.create_namespaced_service_account(
         namespace, body)
     return result
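The create_serviceaccount method above attaches the pull secret when the ServiceAccount is created; pods using that account then inherit it without listing image_pull_secrets themselves. A hedged sketch of adding a pull secret to an already existing ServiceAccount (all names are placeholders):

from kubernetes import client, config

def add_pull_secret_to_service_account(namespace, sa_name, secret_name):
    """Patch an existing ServiceAccount so its pods pull with the given secret (sketch)."""
    config.load_kube_config()
    api = client.CoreV1Api()
    # Strategic-merge patch body: only the imagePullSecrets field is touched.
    body = {"imagePullSecrets": [{"name": secret_name}]}
    return api.patch_namespaced_service_account(sa_name, namespace, body)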
Example #10
def func_to_pod(image, runtime, extra_env, command, args, workdir):
    container = client.V1Container(
        name="base",
        image=image,
        env=extra_env + runtime.spec.env,
        command=[command],
        args=args,
        working_dir=workdir,
        image_pull_policy=runtime.spec.image_pull_policy,
        volume_mounts=runtime.spec.volume_mounts,
        resources=runtime.spec.resources,
    )

    pod_spec = kube_resource_spec_to_pod_spec(runtime.spec, container)

    if runtime.spec.image_pull_secret:
        pod_spec.image_pull_secrets = [
            client.V1LocalObjectReference(name=runtime.spec.image_pull_secret)
        ]

    return pod_spec
Example #11
def _create_deployment_object(component_name,
                              containers,
                              init_containers,
                              replicas,
                              volumes,
                              labels=None,
                              pull_secrets=None):

    deployment_name = _create_deployment_name(component_name)

    # Label the pod with the deployment name, so we can find it easily
    labels = dict(labels) if labels else {}
    labels.update({"k8sdeployment": deployment_name})

    # pull_secrets is a list of the names of the k8s secrets containing docker registry credentials
    # See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    ips = []
    for secret in pull_secrets or []:
        ips.append(client.V1LocalObjectReference(name=secret))

    # Define pod template
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(hostname=component_name,
                              containers=containers,
                              init_containers=init_containers,
                              volumes=volumes,
                              image_pull_secrets=ips))

    # Define deployment spec
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
                                                  template=template)

    # Create deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=deployment_name),
        spec=spec)

    return deployment
Example #12
    def create_deployment_object(self, name, image_tag):
        # Configure the Pod template container
        container = client.V1Container(
            name=name,
            image=image_tag,
            ports=[
                client.V1ContainerPort(
                    container_port=config.MLFLOW_MODEL_DEFAULT_TARGET_PORT,
                    name=name)
            ],
            resources=client.V1ResourceRequirements(requests={
                "cpu": "100m",
                "memory": "200Mi"
            }, ))
        # Create and configure a spec section
        # Note: this needs an existing docker-registry type secret named 'regcred'
        # holding the private registry credentials (see the sketch after this example)
        secret = client.V1LocalObjectReference(name='regcred')
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"name": name}),
            spec=client.V1PodSpec(
                containers=[container],
                image_pull_secrets=[secret],
            ),
        )
        # Create the specification of deployment
        spec = client.V1DeploymentSpec(
            replicas=1,
            template=template,
            selector={'matchLabels': {
                'name': name
            }})
        # Instantiate the deployment object
        deployment = client.V1Deployment(
            api_version="apps/v1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=name),
            spec=spec)

        return deployment
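The 'regcred' secret referenced above has to exist in the target namespace as a kubernetes.io/dockerconfigjson secret before the deployment can pull the image. A minimal sketch of creating one with the same client, following the pattern used in Example #20 below (registry address and credentials are placeholders):

import base64
import json

from kubernetes import client, config

def create_registry_secret(namespace, server, username, password, email,
                           secret_name="regcred"):
    """Create a docker-registry secret usable as an imagePullSecret (sketch)."""
    auth = base64.b64encode(f"{username}:{password}".encode()).decode()
    dockerconfig = {"auths": {server: {"username": username,
                                       "password": password,
                                       "email": email,
                                       "auth": auth}}}
    data = base64.b64encode(json.dumps(dockerconfig).encode()).decode()
    secret = client.V1Secret(
        metadata=client.V1ObjectMeta(name=secret_name, namespace=namespace),
        type="kubernetes.io/dockerconfigjson",
        data={".dockerconfigjson": data})
    config.load_kube_config()
    return client.CoreV1Api().create_namespaced_secret(namespace, secret)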
Example #13
 def _create_kube_podspec(self,
                          container,
                          shm_size=None,
                          scheduler_name=KubernetesConfig.DEFAULT_SCHEDULER,
                          namespace=KubernetesConfig.K8S_NAMESPACE):
     logger.info("Creating pod with scheduler_name = %s, namespace = %s" %
                 (scheduler_name, namespace))
     volumes = []
     if shm_size and isinstance(shm_size, int):
         dshm_vol = client.V1EmptyDirVolumeSource(medium="Memory",
                                                  size_limit=shm_size)
         volumes.append(client.V1Volume(name='dshm', empty_dir=dshm_vol))
     if not volumes:
         volumes = None
     spec = client.V1PodSpec(containers=[container],
                             scheduler_name=scheduler_name,
                             restart_policy="OnFailure",
                             volumes=volumes,
                             image_pull_secrets=[
                                 client.V1LocalObjectReference(
                                     KubernetesConfig.IMAGE_PULL_SECRET)
                             ])
     return spec
Example #14
 def export_deployment(self):
      # Configure the Pod template container
     volume_mounts = []
     containers = []
     volumes = []
     volume_mounts.append(
         client.V1VolumeMount(mount_path='/docker/logs', name='logs'))
     volumes.append(
         client.V1Volume(name='logs',
                         host_path=client.V1HostPathVolumeSource(
                             path='/opt/logs', type='DirectoryOrCreate')))
     if self.mounts:
         for path in self.mounts:
             volume_mounts.append(
                 client.V1VolumeMount(mount_path=path,
                                      name=self.mounts[path]))
             volumes.append(
                 client.V1Volume(name=self.mounts[path],
                                 host_path=client.V1HostPathVolumeSource(
                                     path=path, type='DirectoryOrCreate')))
     liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                     tcp_socket=client.V1TCPSocketAction(
                                         port=int(self.container_port[0])))
     readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                      tcp_socket=client.V1TCPSocketAction(
                                          port=int(self.container_port[0])))
     if self.healthcheck:
         liveness_probe = client.V1Probe(initial_delay_seconds=15,
                                         http_get=client.V1HTTPGetAction(
                                             path=self.healthcheck,
                                             port=int(
                                                 self.container_port[0])))
         readiness_probe = client.V1Probe(initial_delay_seconds=15,
                                          http_get=client.V1HTTPGetAction(
                                              path=self.healthcheck,
                                              port=int(
                                                  self.container_port[0])))
     Env = [
         client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
         client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
         client.V1EnvVar(name='POD_NAME',
                         value_from=client.V1EnvVarSource(
                             field_ref=client.V1ObjectFieldSelector(
                                 field_path='metadata.name'))),
         client.V1EnvVar(name='POD_IP',
                         value_from=client.V1EnvVarSource(
                             field_ref=client.V1ObjectFieldSelector(
                                 field_path='status.podIP'))),
     ]
     container = client.V1Container(
         name=self.dm_name,
         image=self.image,
         ports=[
             client.V1ContainerPort(container_port=int(port))
             for port in self.container_port
         ],
         image_pull_policy='Always',
         env=Env,
         resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                 requests=self.re_requests),
         volume_mounts=volume_mounts,
         liveness_probe=liveness_probe,
         readiness_probe=readiness_probe)
     containers.append(container)
     if self.sidecar:
         sidecar_container = client.V1Container(
             name='sidecar-%s' % self.dm_name,
             image=self.sidecar,
             image_pull_policy='Always',
             env=Env,
             resources=client.V1ResourceRequirements(
                 limits=self.re_limits, requests=self.re_requests),
             volume_mounts=volume_mounts)
         containers.append(sidecar_container)
      # Create and configure a spec section
     secrets = client.V1LocalObjectReference('registrysecret')
     template = client.V1PodTemplateSpec(
         metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
         spec=client.V1PodSpec(
             containers=containers,
             image_pull_secrets=[secrets],
             volumes=volumes,
             affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                 preferred_during_scheduling_ignored_during_execution=[
                     client.V1PreferredSchedulingTerm(
                         preference=client.V1NodeSelectorTerm(
                             match_expressions=[
                                 client.V1NodeSelectorRequirement(
                                     key='project',
                                     operator='In',
                                     values=['moji'])
                             ]),
                         weight=30),
                     client.V1PreferredSchedulingTerm(
                         preference=client.V1NodeSelectorTerm(
                             match_expressions=[
                                 client.V1NodeSelectorRequirement(
                                     key='deploy',
                                     operator='In',
                                     values=[self.dm_name])
                             ]),
                         weight=70)
                 ]))))
     selector = client.V1LabelSelector(
         match_labels={"project": self.dm_name})
     # Create the specification of deployment
     spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
         self.replicas),
                                                   template=template,
                                                   selector=selector,
                                                   min_ready_seconds=3)
     # Instantiate the deployment object
     deployment = client.ExtensionsV1beta1Deployment(
         api_version="extensions/v1beta1",
         kind="Deployment",
         metadata=client.V1ObjectMeta(name=self.dm_name),
         spec=spec)
     return deployment
Example #15
def create_job_object(kJob, kImage, kVname, kVvalue, kimagepullpolicy,
                      kimagepullsecret, krestartpolicy, kbackofflimit,
                      khostpath, kvolname, kvolpath, kvolsubpath, kpvolclaim,
                      kcommands, kargs):
    # This creates a job object dynamically but supports only limited parameters.
    # If you need any characteristics not supported here, use a yaml manifest.

    # Configure environment variables
    env_list = []
    for key in kVname:
        value = kVvalue[kVname.index(key)]
        v1_envvar = client.V1EnvVar(name=key, value=value)
        env_list.append(v1_envvar)

    # Configure Volume Devices and Mounts
    volnames_list = []
    # if kvolname != 'none':
    #    volname = client.V1VolumeMount(
    #       name=kvolname,
    #       mount_path=kvolpath)
    #    volnames_list.append(volname)

    for key in kvolname:
        volpath = kvolpath[kvolname.index(key)]
        volsubpath = kvolsubpath[kvolname.index(key)]
        volname = client.V1VolumeMount(name=key,
                                       mount_path=volpath,
                                       sub_path=volsubpath)
        volnames_list.append(volname)

    # Configure Volumes list
    vol_list = []
    if kvolname != 'none':
        if kpvolclaim != 'none':
            vol = client.V1Volume(
                name=kvolname,
                persistent_volume_claim=client.
                V1PersistentVolumeClaimVolumeSource(claim_name=kpvolclaim))
        else:
            vol = client.V1Volume(name=kvolname,
                                  host_path=client.V1HostPathVolumeSource(
                                      path=khostpath, type='Directory'))
        vol_list.append(vol)

    # Configure Pod template container
    container = client.V1Container(
        name="ctmjob",
        image=kImage,
        image_pull_policy=kimagepullpolicy,
        env=env_list,
        command=kcommands if len(kcommands) > 0 else None,
        args=kargs if len(kargs) > 0 else None,
        volume_mounts=volnames_list)

    # Configure Image Pull Secret(s)
    imagesecrets = []
    isecret = client.V1LocalObjectReference(name=kimagepullsecret)
    imagesecrets.append(isecret)

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=kJob),
        spec=client.V1PodSpec(containers=[container],
                              image_pull_secrets=imagesecrets,
                              restart_policy=krestartpolicy,
                              volumes=vol_list))

    # Create the specification of the job
    spec = client.V1JobSpec(template=template, backoff_limit=kbackofflimit)

    # Instantiate the job object
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=kJob),
                       spec=spec)

    return job
Example #16
def task_to_pod_spec(  # pylint: disable=too-many-locals
    task: Task,
    container_name: Optional[str] = None,
    secrets: Optional[List[Secret]] = None,
    configmaps: Optional[List[ConfigMap]] = None,
) -> client.V1PodSpec:
    """Convert this job into a POD spec that a k8s scheduler can run

    :param task: name for this task
    :type task: Task
    :param container_name: name for the container if None, it will be the job name
    :type container_name: Optional[str]
    :param secrets: A list of secrets to inject into the container.
    :type secrets: Optional[List[Secret]]
    :param configmaps: A list of configmaps to inject.
    :type configmaps: Optional[List[ConfigMap]]
    :returns: A PodSpec in the k8s client API
    :rtype: client.V1PodSpec
    """
    limits = {}
    if task.num_gpus is not None:
        limits['nvidia.com/gpu'] = task.num_gpus
    resources = client.V1ResourceRequirements(limits=limits)
    volume_mounts = []
    if task.mounts is not None:
        volume_mounts.extend(
            client.V1VolumeMount(mount_path=m.path, name=m.name)
            for m in task.mounts)

    if secrets is not None:
        volume_mounts.extend(
            client.V1VolumeMount(
                mount_path=s.path, name=s.name, sub_path=s.sub_path)
            for s in secrets)
    if configmaps is not None:
        volume_mounts.extend(
            client.V1VolumeMount(
                mount_path=c.path, name=c.name, sub_path=c.sub_path)
            for c in configmaps)

    container = client.V1Container(
        args=task.args,
        command=listify(task.command),
        name=container_name if container_name else task.name,
        image=task.image,
        volume_mounts=volume_mounts if volume_mounts else None,
        image_pull_policy=task.pull_policy,
        resources=resources,
        env=[
            client.V1EnvVar(ODIN_TASK_ENV, task.name),
            client.V1EnvVar(ODIN_CRED_ENV,
                            os.path.join(SECRET_LOC, ODIN_CRED_FILE)),
        ],
    )

    volumes = []
    if task.mounts is not None:
        for mount in task.mounts:
            pvc = client.V1PersistentVolumeClaimVolumeSource(
                claim_name=mount.claim)
            volumes.append(
                client.V1Volume(name=mount.name, persistent_volume_claim=pvc))
    if secrets is not None:
        secrets = {s.name: s for s in secrets}
        volumes.extend(
            client.V1Volume(name=secret.name,
                            secret=client.V1SecretVolumeSource(
                                secret_name=secret.name,
                                default_mode=secret.mode))
            for secret in secrets.values())
    if configmaps is not None:
        volumes.extend(
            client.V1Volume(name=c,
                            config_map=client.V1ConfigMapVolumeSource(name=c))
            for c in set(c.name for c in configmaps))

    selector = task.node_selector if task.node_selector else None

    regcred = client.V1LocalObjectReference(name='registry')
    pod_spec = client.V1PodSpec(
        containers=[container],
        image_pull_secrets=[regcred],
        volumes=volumes if volumes else None,
        node_selector=selector,
        restart_policy=ResourceHandler.RESTART_NEVER,
    )
    return pod_spec
Example #17
    def _create_deployment_object(
            self,
            job_name,
            job_image,
            deployment_name,
            port=80,
            replicas=1,
            cmd_string=None,
            engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
            engine_dir='.'):
        """ Create a kubernetes deployment for the job.
        Args:
              - job_name (string) : Name of the job and deployment
              - job_image (string) : Docker image to launch
        KWargs:
             - port (integer) : Container port
             - replicas : Number of replica containers to maintain
        Returns:
              - deployment: The deployment object to launch
        """

        # Quick hack: these settings are not passed through, so this path is untested.
        # TODO: it also sets every field here, not only the ones that are configured.
        security_context = None
        if self.user_id and self.group_id:
            security_context = client.V1SecurityContext(
                run_as_group=self.group_id,
                run_as_user=self.user_id,
                run_as_non_root=self.run_as_non_root)

        # Create the environment variables and command to initiate IPP
        environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")

        launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
        print(launch_args)

        # Configure the Pod template container
        container = None
        if security_context:
            container = client.V1Container(
                name=job_name,
                image=job_image,
                ports=[client.V1ContainerPort(container_port=port)],
                command=['/bin/bash'],
                args=launch_args,
                env=[environment_vars],
                security_context=security_context)
        else:
            container = client.V1Container(
                name=job_name,
                image=job_image,
                ports=[client.V1ContainerPort(container_port=port)],
                command=['/bin/bash'],
                args=launch_args,
                env=[environment_vars])
        # Reference an existing secret to enable pulling images from private registries
        secret = None
        if self.secret:
            secret = client.V1LocalObjectReference(name=self.secret)

        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": job_name}),
            spec=client.V1PodSpec(containers=[container],
                                  image_pull_secrets=[secret] if secret else None))

        # Create the specification of deployment
        spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
                                                      template=template)

        # Instantiate the deployment object
        deployment = client.ExtensionsV1beta1Deployment(
            api_version="extensions/v1beta1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=deployment_name),
            spec=spec)

        return deployment
Example #18
def create_job_object(name: str,
                      container_image: str,
                      env_list: dict,
                      command: List[str],
                      command_args: List[str],
                      volumes: List[Dict],
                      init_containers: List[Dict],
                      output: Output,
                      namespace: str = "stackl",
                      container_name: str = "jobcontainer",
                      api_version: str = "batch/v1",
                      image_pull_policy: str = "Always",
                      ttl_seconds_after_finished: int = 3600,
                      restart_policy: str = "Never",
                      backoff_limit: int = 0,
                      active_deadline_seconds: int = 3600,
                      service_account: str = "stackl-agent-stackl-agent",
                      image_pull_secrets: List[str] = [],
                      labels=None) -> client.V1Job:
    # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
    """Creates a Job object using the Kubernetes client

    :param name: Job name affix
    :type name: str
    :param container_image: automation container image
    :type container_image: str
    :param env_list: Dict with key/values for the environment inside the automation container
    :type env_list: dict
    :param command: entrypoint command
    :type command: List[str]
    :param command_args: command arguments
    :type command_args: List[str]
    :param volumes: volumes and volumemounts
    :type volumes: List[Dict]
    :param image_pull_secrets: secrets to pull images
    :type image_pull_secrets: List[str]
    :param init_containers: list with init_containers
    :type init_containers: List[Dict]
    :param output: output Object
    :type output: Output
    :param namespace: Kubernetes namespace, defaults to "stackl"
    :type namespace: str, optional
    :param container_name: name of automation container, defaults to "jobcontainer"
    :type container_name: str, optional
    :param api_version: Job api version, defaults to "batch/v1"
    :type api_version: str, optional
    :param image_pull_policy: always pull latest images, defaults to "Always"
    :type image_pull_policy: str, optional
    :param ttl_seconds_after_finished: Remove jobs after execution with ttl, defaults to 3600
    :type ttl_seconds_after_finished: int, optional
    :param restart_policy: Restart the pod on the same node after failure, defaults to "Never"
    :type restart_policy: str, optional
    :param backoff_limit: Retries after failure, defaults to 0
    :type backoff_limit: int, optional
    :param active_deadline_seconds: Timeout on a job, defaults to 3600 seconds
    :type active_deadline_seconds: int, optional
    :param service_account: Kubernetes service account, defaults to "stackl-agent-stackl-agent"
    :type service_account: str, optional
    :param labels: metadata labels, defaults to None
    :type labels: dict, optional
    :return: automation Job object, plus the ConfigMaps created for its config_map volumes
    :rtype: Tuple[client.V1Job, list]
    """
    id_job = id_generator()
    name = name + "-" + id_job
    body = client.V1Job(api_version=api_version, kind="Job")
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    body.status = client.V1JobStatus()
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    k8s_volumes = []

    cms = []

    logging.debug(f"volumes: {volumes}")
    # create a k8s volume for each element in volumes
    for vol in volumes:
        vol_name = name + "-" + vol["name"]
        k8s_volume = client.V1Volume(name=vol_name)
        if vol["type"] == "config_map":
            config_map = client.V1ConfigMapVolumeSource()
            config_map.name = vol_name
            k8s_volume.config_map = config_map
            cms.append(create_cm(vol_name, namespace, vol['data']))
            vol['name'] = vol_name
        if vol["type"] == "empty_dir":
            k8s_volume.empty_dir = client.V1EmptyDirVolumeSource(
                medium="Memory")
            vol['name'] = vol_name
        k8s_volumes.append(k8s_volume)

    logging.debug(f"Volumes created for job {name}: {k8s_volumes}")

    # create a volume mount for each element in volumes
    k8s_volume_mounts = []
    for vol in volumes:
        if vol["mount_path"]:
            volume_mount = client.V1VolumeMount(name=vol["name"],
                                                mount_path=vol["mount_path"])
            if "sub_path" in vol:
                volume_mount.sub_path = vol["sub_path"]
            k8s_volume_mounts.append(volume_mount)

    logging.debug(f"Volume mounts created for job {name}: {k8s_volume_mounts}")

    # create an environment list
    k8s_env_list = []

    if env_list:
        for key, value in env_list.items():
            if isinstance(value, dict):
                if 'config_map_key_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            config_map_key_ref=client.V1ConfigMapKeySelector(
                                name=value['config_map_key_ref']["name"],
                                key=value['config_map_key_ref']["key"])))
                    k8s_env_list.append(k8s_env_from)
                elif 'field_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path=value['field_ref'])))
                    k8s_env_list.append(k8s_env_from)
            else:
                k8s_env = client.V1EnvVar(name=key, value=value)
                k8s_env_list.append(k8s_env)

    k8s_env_from_list = []

    # if env_from:
    #     for env in env_from:
    #         if 'config_map_ref' in env:
    #             k8s_env_from = client.V1EnvFromSource(
    #                 config_map_ref=env['config_map_ref'])
    #             k8s_env_from_list.append(k8s_env_from)
    #         elif 'secret_ref' in env:
    #             k8s_env_from = client.V1EnvFromSource(
    #                 secret_ref=env['secret_ref'])
    #             k8s_env_from_list.append(k8s_env_from)

    logging.debug(f"Environment list created for job {name}: {k8s_env_list}")
    print(f"Environment list created for job {name}: {k8s_env_list}")

    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=k8s_env_list,
                                   volume_mounts=k8s_volume_mounts,
                                   image_pull_policy=image_pull_policy,
                                   command=command,
                                   args=command_args,
                                   env_from=k8s_env_from_list)

    k8s_init_containers = []

    logging.debug(f"Init containers for job {name}: {init_containers}")
    for c in init_containers:
        k8s_c = client.V1Container(name=c['name'],
                                   image=c['image'],
                                   volume_mounts=k8s_volume_mounts,
                                   env=k8s_env_list)

        if 'args' in c:
            k8s_c.args = c['args']

        k8s_init_containers.append(k8s_c)

    k8s_secrets = []
    for secret in image_pull_secrets:
        k8s_secrets.append(client.V1LocalObjectReference(name=secret))

    logging.debug(f"Secret list created for job {name}: {k8s_secrets}")

    containers = [container]
    if output:
        output.volume_mounts = k8s_volume_mounts
        output.env = k8s_env_list
        output_containers = output.containers
        containers = containers + output_containers

    template.template.metadata = client.V1ObjectMeta(labels=labels)
    template.template.spec = client.V1PodSpec(
        containers=containers,
        restart_policy=restart_policy,
        image_pull_secrets=k8s_secrets,
        volumes=k8s_volumes,
        init_containers=k8s_init_containers,
        service_account_name=service_account)
    template.template = client.V1PodTemplateSpec(
        metadata=template.template.metadata, spec=template.template.spec)
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=ttl_seconds_after_finished,
        template=template.template,
        backoff_limit=backoff_limit,
        active_deadline_seconds=active_deadline_seconds)

    return body, cms
Example #19
def template(context):
    """
    handle yml env
    """
    name = context.get("name")
    labels = {"name": name}
    image_tag=context["name"].split('-')[1]
    image = context["image_namespace"]+image_tag+":"+context["image_branch"]    
    args = [arg for arg in context["args"]] if context.get("args") else None
    limits,requests = context["resources"]["limits"],context["resources"]["requests"]
    replicas = context["replicas"][name]

    """
    handle cmdb env
    """
    filename = "env_" + name.split("-")[1] + ".yml"
    env = handle_env("/tmp/{}".format(filename))
    workingDir = context["workingDir"]

    """
    k8s yaml 组件模块
    """

    containers = [
                client.V1Container(
                    name = name, 
                    image = image,
                    env = env,
                    args = args,
                    image_pull_policy = "Always",
                    # readiness_probe=client.V1Probe(_exec=client.V1ExecAction(command=['cat','/tmp/container_ready']),initial_delay_seconds=10, period_seconds=5),
                    resources = client.V1ResourceRequirements(limits = limits,requests = requests),
                    security_context = client.V1SecurityContext(privileged=True),
                    working_dir = working_dir,
                )
    ]
    
    template = client.V1PodTemplateSpec(
                    metadata = client.V1ObjectMeta(labels=labels), 
                    spec = client.V1PodSpec(
                                containers = containers,
                                dns_policy = "ClusterFirst",
                                image_pull_secrets = [client.V1LocalObjectReference(name="image-pull-secret")],
                                restart_policy = "Always"
                            )
                    
               )
    
    spec = client.V1StatefulSetSpec(
                service_name = name,
                replicas = replicas,
                selector = client.V1LabelSelector(match_labels=labels),
                template = template
           )

    return client.V1StatefulSet(
        api_version = "apps/v1",
        kind = "StatefulSet",
        metadata = client.V1ObjectMeta(name = name,labels = labels),
        spec = spec 
    )
Example #20
    def post(self, project_id):
        """
        """

        resource_registry = {
            'db_deployment': False,
            'db_service': False,
            'image_pull_secret': False,
            'app_deployment': False,
            'app_service': False,
            'ingress_entry': False
        }

        current_user_id = get_jwt_identity()
        current_user_roles = get_jwt_claims()['roles']

        app_schema = AppSchema()

        app_data = request.get_json()

        validated_app_data, errors = app_schema.load(app_data,
                                                     partial=("project_id", ))

        if errors:
            return dict(status='fail', message=errors), 400

        existing_app = App.find_first(name=validated_app_data['name'],
                                      project_id=project_id)

        if existing_app:
            return dict(
                status='fail',
                message=
                f'App with name {validated_app_data["name"]} already exists'
            ), 409

        validated_app_data['port'] = validated_app_data.get('port', 80)

        app_name = validated_app_data['name']
        app_alias = create_alias(validated_app_data['name'])
        app_image = validated_app_data['image']
        command = validated_app_data.get('command', None)
        # env_vars = validated_app_data['env_vars']
        env_vars = validated_app_data.get('env_vars', None)
        private_repo = validated_app_data.get('private_image', False)
        docker_server = validated_app_data.get('docker_server', None)
        docker_username = validated_app_data.get('docker_username', None)
        docker_password = validated_app_data.get('docker_password', None)
        docker_email = validated_app_data.get('docker_email', None)
        replicas = validated_app_data.get('replicas', 1)
        app_port = validated_app_data.get('port', None)
        image_pull_secret = None

        command = command.split() if command else None

        project = Project.get_by_id(project_id)

        if not project:
            return dict(status='fail',
                        message=f'project {project_id} not found'), 404

        if not is_owner_or_admin(project, current_user_id, current_user_roles):
            return dict(status='fail', message='Unauthorised'), 403

        cluster = project.cluster
        namespace = project.alias

        if not cluster:
            return dict(status='fail', message="Invalid Cluster"), 500

        # check if app already exists
        app = App.find_first(**{'name': app_name})

        if app:
            return dict(status='fail',
                        message=f'App {app_name} already exists'), 409

        kube_host = cluster.host
        kube_token = cluster.token
        service_host = urlsplit(kube_host).hostname

        kube_client = create_kube_clients(kube_host, kube_token)

        try:

            # create the app
            new_app = App(name=app_name,
                          image=app_image,
                          project_id=project_id,
                          alias=app_alias,
                          port=app_port)

            if private_repo:

                # handle gcr credentials
                if 'gcr' in docker_server and docker_username == '_json_key':
                    docker_password = json.dumps(
                        json.loads(base64.b64decode(docker_password)))

                # create image pull secrets
                authstring = base64.b64encode(
                    f'{docker_username}:{docker_password}'.encode("utf-8"))

                secret_dict = dict(
                    auths={
                        docker_server: {
                            "username": docker_username,
                            "password": docker_password,
                            "email": docker_email,
                            "auth": str(authstring, "utf-8")
                        }
                    })

                secret_b64 = base64.b64encode(
                    json.dumps(secret_dict).encode("utf-8"))

                secret_body = client.V1Secret(
                    metadata=client.V1ObjectMeta(name=app_alias),
                    type='kubernetes.io/dockerconfigjson',
                    data={'.dockerconfigjson': str(secret_b64, "utf-8")})

                kube_client.kube.create_namespaced_secret(
                    namespace=namespace,
                    body=secret_body,
                    _preload_content=False)

                # update registry
                resource_registry['image_pull_secret'] = True

                image_pull_secret = client.V1LocalObjectReference(
                    name=app_alias)

            # create app deployment's pvc meta and spec
            # pvc_name = f'{app_alias}-pvc'
            # pvc_meta = client.V1ObjectMeta(name=pvc_name)

            # access_modes = ['ReadWriteOnce']
            # storage_class = 'openebs-standard'
            # resources = client.V1ResourceRequirements(
            #     requests=dict(storage='1Gi'))

            # pvc_spec = client.V1PersistentVolumeClaimSpec(
            #     access_modes=access_modes, resources=resources, storage_class_name=storage_class)

            # Create a PVC
            # pvc = client.V1PersistentVolumeClaim(
            #     api_version="v1",
            #     kind="PersistentVolumeClaim",
            #     metadata=pvc_meta,
            #     spec=pvc_spec
            # )

            # kube_client.kube.create_namespaced_persistent_volume_claim(
            #     namespace=namespace,
            #     body=pvc
            # )

            # create deployment
            dep_name = f'{app_alias}-deployment'

            # # EnvVar
            env = []
            if env_vars:
                for key, value in env_vars.items():
                    env.append(client.V1EnvVar(name=str(key),
                                               value=str(value)))

            # pod template
            container = client.V1Container(
                name=app_alias,
                image=app_image,
                ports=[client.V1ContainerPort(container_port=app_port)],
                env=env,
                command=command
                # volume_mounts=[client.V1VolumeMount(mount_path="/data", name=dep_name)]
            )

            #pod volumes
            # volumes = client.V1Volume(
            #     name=dep_name
            #     # persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
            # )

            # spec
            template = client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={'app': app_alias}),
                spec=client.V1PodSpec(
                    containers=[container],
                    image_pull_secrets=[image_pull_secret] if image_pull_secret else None
                    # volumes=[volumes]
                ))

            # spec of deployment
            spec = client.V1DeploymentSpec(
                replicas=replicas,
                template=template,
                selector={'matchLabels': {
                    'app': app_alias
                }})

            # Instantiate the deployment
            deployment = client.V1Deployment(
                api_version="apps/v1",
                kind="Deployment",
                metadata=client.V1ObjectMeta(name=dep_name),
                spec=spec)

            # create deployment in  cluster

            kube_client.appsv1_api.create_namespaced_deployment(
                body=deployment, namespace=namespace, _preload_content=False)

            # update registry
            resource_registry['app_deployment'] = True

            # create service in the cluster
            service_name = f'{app_alias}-service'

            service_meta = client.V1ObjectMeta(name=service_name,
                                               labels={'app': app_alias})

            service_spec = client.V1ServiceSpec(
                type='ClusterIP',
                ports=[client.V1ServicePort(port=3000, target_port=app_port)],
                selector={'app': app_alias})

            service = client.V1Service(metadata=service_meta,
                                       spec=service_spec)

            kube_client.kube.create_namespaced_service(namespace=namespace,
                                                       body=service,
                                                       _preload_content=False)

            # update resource registry
            resource_registry['app_service'] = True

            # subdomain for the app
            # sub_domain = f'{app_alias}.cranecloud.io'
            sub_domain = get_app_subdomain(app_alias)

            # create new ingres rule for the application
            new_ingress_backend = client.ExtensionsV1beta1IngressBackend(
                service_name=service_name, service_port=3000)

            new_ingress_rule = client.ExtensionsV1beta1IngressRule(
                host=sub_domain,
                http=client.ExtensionsV1beta1HTTPIngressRuleValue(paths=[
                    client.ExtensionsV1beta1HTTPIngressPath(
                        path="", backend=new_ingress_backend)
                ]))

            ingress_name = f'{project.alias}-ingress'

            # Check if there is an ingress resource in the namespace, create if not

            ingress_list = kube_client.extension_api.list_namespaced_ingress(
                namespace=namespace).items

            if not ingress_list:

                ingress_meta = client.V1ObjectMeta(name=ingress_name)

                ingress_spec = client.ExtensionsV1beta1IngressSpec(
                    # backend=ingress_backend,
                    rules=[new_ingress_rule])

                ingress_body = client.ExtensionsV1beta1Ingress(
                    metadata=ingress_meta, spec=ingress_spec)

                kube_client.extension_api.create_namespaced_ingress(
                    namespace=namespace, body=ingress_body)

                # update registry
                resource_registry['ingress_entry'] = True
            else:
                # Update ingress with new entry
                ingress = ingress_list[0]

                ingress.spec.rules.append(new_ingress_rule)

                kube_client.extension_api.patch_namespaced_ingress(
                    name=ingress_name, namespace=namespace, body=ingress)

            service_url = f'https://{sub_domain}'

            new_app.url = service_url

            saved = new_app.save()

            if not saved:
                return dict(status='fail',
                            message='Internal Server Error'), 500

            new_app_data, _ = app_schema.dump(new_app)

            return dict(status='success', data=dict(app=new_app_data)), 201

        except client.rest.ApiException as e:
            resource_clean_up(resource_registry, app_alias, namespace,
                              kube_client)
            return dict(status='fail', message=json.loads(e.body)), 500

        except Exception as e:
            resource_clean_up(resource_registry, app_alias, namespace,
                              kube_client)
            return dict(status='fail', message=str(e)), 500
Example #21
def create_job_object(data):
    meta = client.V1ObjectMeta(name=data["name"], namespace=data["namespace"])

    labels = None
    if "labels" in data:
        labels_array = data["labels"].split(',')
        labels = dict(s.split('=') for s in labels_array)
        meta.labels = labels

    annotations = None
    if "annotations" in data:
        annotations_array = data["annotations"].split(',')
        annotations = dict(s.split('=') for s in annotations_array)
        meta.annotations = annotations

    envs = []
    if "environments" in data:
        envs_array = data["environments"].splitlines()
        tmp_envs = dict(s.split('=', 1) for s in envs_array)
        for key in tmp_envs:
            envs.append(client.V1EnvVar(name=key, value=tmp_envs[key]))

    if "environments_secrets" in data:
        envs_array = data["environments_secrets"].splitlines()
        tmp_envs = dict(s.split('=', 1) for s in envs_array)

        for key in tmp_envs:

            if (":" in tmp_envs[key]):
                # passing secret env
                value = tmp_envs[key]
                secrets = value.split(':')
                secrect_key = secrets[1]
                secrect_name = secrets[0]

                envs.append(
                    client.V1EnvVar(
                        name=key,
                        value="",
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                key=secrect_key, name=secrect_name))))

    container = client.V1Container(name=data["container_name"],
                                   image=data["container_image"],
                                   image_pull_policy=data["image_pull_policy"])

    if "container_command" in data:
        container.command = data["container_command"].split(' ')

    if "container_args" in data:
        args_array = data["container_args"].splitlines()
        container.args = args_array

    if "resources_requests" in data:
        resources_array = data["resources_requests"].split(",")
        tmp = dict(s.split('=', 1) for s in resources_array)
        container.resources = client.V1ResourceRequirements(requests=tmp)

    if "volume_mounts" in data:
        mounts = common.create_volume_mount_yaml(data)
        container.volume_mounts = mounts

    container.env = envs

    if "env_from" in data:
        env_froms_data = yaml.full_load(data["env_from"])
        env_from = []
        for env_from_data in env_froms_data:
            if 'configMapRef' in env_from_data:
                env_from.append(
                    client.V1EnvFromSource(
                        config_map_ref=client.V1ConfigMapEnvSource(
                            env_from_data['configMapRef']['name'])))
            elif 'secretRef' in env_from_data:
                env_from.append(
                    client.V1EnvFromSource(secret_ref=client.V1SecretEnvSource(
                        env_from_data['secretRef']['name'])))

        container.env_from = env_from

    template_spec = client.V1PodSpec(containers=[container],
                                     restart_policy=data["job_restart_policy"])

    if "volumes" in data:
        volumes_data = yaml.safe_load(data["volumes"])
        volumes = []

        if (isinstance(volumes_data, list)):
            for volume_data in volumes_data:
                volume = common.create_volume(volume_data)

                if volume:
                    volumes.append(volume)
        else:
            volume = common.create_volume(volumes_data)

            if volume:
                volumes.append(volume)

        template_spec.volumes = volumes

    if "image_pull_secrets" in data:
        images_array = data["image_pull_secrets"].split(",")
        images = []
        for image in images_array:
            images.append(client.V1LocalObjectReference(name=image))

        template_spec.image_pull_secrets = images

    if "tolerations" in data:
        tolerations_data = yaml.safe_load(data["tolerations"])
        tolerations = []
        for toleration_data in tolerations_data:
            toleration = common.create_toleration(toleration_data)

            if toleration:
                tolerations.append(toleration)

        template_spec.tolerations = tolerations

    template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(
        name=data["name"],
        labels=labels,
        annotations=annotations,
    ),
                                        spec=template_spec)

    spec = client.V1JobSpec(template=template)

    if "completions" in data:
        spec.completions = int(data["completions"])
    if "selectors" in data:
        selectors_array = data["selectors"].split(',')
        selectors = dict(s.split('=') for s in selectors_array)
        spec.selector = selectors
    if "node_selector" in data:
        node_selectors_array = data["node_selector"].split(',')
        node_selectors = dict(s.split('=') for s in node_selectors_array)
        spec.nodeSelector = node_selectors
    if "parallelism" in data:
        spec.parallelism = int(data["parallelism"])
    if "active_deadline_seconds" in data:
        spec.active_deadline_seconds = int(data["active_deadline_seconds"])
    if "backoff_limit" in data:
        spec.backoff_limit = int(data["backoff_limit"])

    job = client.V1Job(api_version=data["api_version"],
                       kind='Job',
                       metadata=meta,
                       spec=spec)

    return job
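
A minimal usage sketch for the helper above; the `data` keys, the kubeconfig loading and the target namespace are illustrative assumptions, not part of the original example:

from kubernetes import client, config

# Hypothetical input covering the keys create_job_object() requires.
data = {
    "name": "demo-job",
    "namespace": "default",
    "container_name": "demo",
    "container_image": "busybox:1.36",
    "image_pull_policy": "IfNotPresent",
    "job_restart_policy": "Never",
    "api_version": "batch/v1",
    "container_command": "echo hello",
    "backoff_limit": "2",
}

config.load_kube_config()  # or config.load_incluster_config() inside a cluster
job = create_job_object(data)
client.BatchV1Api().create_namespaced_job(namespace=data["namespace"], body=job)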
Пример #22
0
def create_deployment(apps_v1_api, username, token, gpu):
    name = 'jlab-{}'.format(username)
    try:
        init_container = client.V1Container(
            name='{}-init'.format(name),
            image="ubuntu:18.04",
            image_pull_policy="IfNotPresent",
            command=["/bin/sh"],
            args=["-c", "chown 1001:1001 /persistent_volume"],
            volume_mounts=[
                client.V1VolumeMount(
                    name='persistent-volume',
                    mount_path="/persistent_volume",
                    sub_path='{}/jupyter'.format(username)
                )
            ]
        )
        if gpu == True:
            limits = {
                'nvidia.com/gpu': 1
            }
        else:
            limits = None
        container = client.V1Container(
            name=name,
            image=envvars.DOCKER_IMAGE_JLAB_SERVER,
            resources=client.V1ResourceRequirements(
                limits=limits
            ),
            image_pull_policy="Always",
            ports=[client.V1ContainerPort(container_port=8888)],
            env=[
                client.V1EnvVar(
                    name='DES_USER',
                    value=username
                ),
                client.V1EnvVar(
                    name='PIP_TARGET',
                    value='/home/jovyan/work/.pip'
                ),
                client.V1EnvVar(
                    name='PYTHONPATH',
                    value='/home/jovyan/work/.pip'
                )
            ],
            volume_mounts=[
                client.V1VolumeMount(
                    name='jupyter-config',
                    mount_path="/home/jovyan/.jupyter/"
                ),
                client.V1VolumeMount(
                    name='persistent-volume',
                    mount_path="/home/jovyan/jobs/cutout",
                    sub_path='{}/cutout'.format(username)
                ),
                client.V1VolumeMount(
                    name='persistent-volume',
                    mount_path="/home/jovyan/jobs/query",
                    sub_path='{}/query'.format(username)
                ),
                client.V1VolumeMount(
                    name='persistent-volume',
                    mount_path="/home/jovyan/work",
                    sub_path='{}/jupyter'.format(username)
                )
            ]
        )
        volume_config = client.V1Volume(
            name='jupyter-config',
            config_map=client.V1ConfigMapVolumeSource(
                name=name,
                items=[client.V1KeyToPath(
                    key=name,
                    path="jupyter_notebook_config.py"
                )]
            )
        )
        volume_persistent = client.V1Volume(
            name='persistent-volume',
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                claim_name=envvars.PVC_NAME_BASE
            )
        )
        # Template
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": name}),
            spec=client.V1PodSpec(
                image_pull_secrets=[
                    client.V1LocalObjectReference(
                        name='registry-auth'
                    )
                ],
                init_containers=[
                    init_container
                ],
                containers=[
                    container
                ],
                volumes=[
                    volume_config,
                    volume_persistent
                ],
                node_selector={'gpu': '{}'.format(gpu).lower()}
            )
        )
        # Spec
        spec = client.V1DeploymentSpec(
            replicas=1,
            template=template,
            selector=client.V1LabelSelector(
                match_labels=dict({'app': name})
            )
        )
        # Deployment
        deployment = client.V1Deployment(
            api_version="apps/v1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=name),
            spec=spec)
        # Creation of the Deployment in specified namespace
        api_response = apps_v1_api.create_namespaced_deployment(
            namespace=namespace, body=deployment
        )
        # logger.info('Deployment created:\n{}'.format(api_response))
    except ApiException as e:
        error_msg = str(e).strip()
        logger.error(error_msg)
Пример #23
0
    def get_pod_manifest(self):
        """Include volume with the git repository."""
        repository = yield self.git_repository()
        options = self.user_options

        # make sure the pod name is less than 64 characters - if longer, keep
        # the last 16 untouched since it is the server hash
        if len(self.pod_name) > 63:
            self.pod_name = self.pod_name[:47] + self.pod_name[-16:]

        # Process the requested server options
        server_options = options.get("server_options", {})
        self.default_url = server_options.get("defaultUrl")
        self.cpu_guarantee = float(server_options.get("cpu_request", 0.1))

        # Make the user pods be in Guaranteed QoS class if the user
        # had specified a memory request. Otherwise use a sensible default.
        self.mem_guarantee = server_options.get("mem_request", "500M")
        self.mem_limit = server_options.get("mem_request", "1G")

        gpu = server_options.get("gpu_request", {})
        if gpu:
            self.extra_resource_limits = {"nvidia.com/gpu": str(gpu)}

        # Configure the git repository volume
        git_volume_name = self.pod_name[:54] + "-git-repo"

        # 1. Define a new empty volume.
        self.volumes = [
            volume for volume in self.volumes
            if volume["name"] != git_volume_name
        ]
        volume = {"name": git_volume_name, "emptyDir": {}}
        self.volumes.append(volume)

        # 2. Define a volume mount for both init and notebook containers.
        mount_path = f'/work/{options["project"]}'
        volume_mount = {"mountPath": mount_path, "name": git_volume_name}

        # 3. Configure the init container
        init_container_name = "git-clone"
        self.init_containers = [
            container for container in self.init_containers
            if not container.name.startswith(init_container_name)
        ]
        lfs_auto_fetch = server_options.get("lfs_auto_fetch")
        init_container = client.V1Container(
            name=init_container_name,
            env=[
                client.V1EnvVar(name="MOUNT_PATH", value=mount_path),
                client.V1EnvVar(name="REPOSITORY", value=repository),
                client.V1EnvVar(
                    name="LFS_AUTO_FETCH",
                    value="1" if lfs_auto_fetch else "0",
                ),
                client.V1EnvVar(name="COMMIT_SHA",
                                value=str(options.get("commit_sha"))),
                client.V1EnvVar(name="BRANCH",
                                value=options.get("branch", "master")),
                client.V1EnvVar(name="JUPYTERHUB_USER", value=self.user.name),
            ],
            image=options.get("git_clone_image"),
            volume_mounts=[volume_mount],
            working_dir=mount_path,
            security_context=client.V1SecurityContext(run_as_user=0),
        )
        self.init_containers.append(init_container)

        # 4. Configure notebook container git repo volume mount
        self.volume_mounts = [
            volume_mount for volume_mount in self.volume_mounts
            if volume_mount["mountPath"] != mount_path
        ]
        self.volume_mounts.append(volume_mount)

        # 5. Configure autosaving script execution hook
        self.lifecycle_hooks = {
            "preStop": {
                "exec": {
                    # single shell string so that the "|| true" fallback actually applies
                    "command": ["/bin/sh", "-c", "/usr/local/bin/pre-stop.sh || true"]
                }
            }
        }

        # Finalize the pod configuration

        # Set the repository path to the working directory
        self.working_dir = mount_path
        self.notebook_dir = mount_path

        # add git project-specific annotations
        self.extra_annotations = {
            RENKU_ANNOTATION_PREFIX + "namespace":
            options.get("namespace"),
            RENKU_ANNOTATION_PREFIX + "projectName":
            options.get("project"),
            RENKU_ANNOTATION_PREFIX + "projectId":
            "{}".format(options.get("project_id")),
            RENKU_ANNOTATION_PREFIX + "branch":
            options.get("branch"),
            RENKU_ANNOTATION_PREFIX + "commit-sha":
            options.get("commit_sha"),
        }

        # add username to labels
        safe_username = escapism.escape(self.user.name,
                                        escape_char="-").lower()
        self.extra_labels = {
            RENKU_ANNOTATION_PREFIX + "username": safe_username
        }

        self.delete_grace_period = 30

        pod = yield super().get_pod_manifest()

        # Because repository comes from a coroutine, we can't put it simply in `get_env()`
        pod.spec.containers[0].env.append(
            client.V1EnvVar("CI_REPOSITORY_URL", repository))

        # Add image pull secrets
        if options.get("image_pull_secrets"):
            secrets = [
                client.V1LocalObjectReference(name=name)
                for name in options.get("image_pull_secrets")
            ]
            pod.spec.image_pull_secrets = secrets

        return pod
Пример #24
0
 def get_pull_secrets(self):
     secrets = self.args.get('pull_secrets', ['docker'])
     return [client.V1LocalObjectReference(name=s) for s in secrets]
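
The returned list plugs straight into V1PodSpec.image_pull_secrets. A small sketch under that assumption (the container definition is purely illustrative):

from kubernetes import client

pull_secrets = [client.V1LocalObjectReference(name=s) for s in ['docker']]
pod_spec = client.V1PodSpec(
    containers=[client.V1Container(name="main", image="registry.example.com/app:latest")],
    image_pull_secrets=pull_secrets,
)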
Пример #25
0
def submit_job(args, command=None):
    container_image = args.container
    container_name = args.name

    body = client.V1Job(api_version="batch/v1", kind="Job", metadata=client.V1ObjectMeta(name=container_name))
    body.status = client.V1JobStatus()
    template = client.V1PodTemplate()

    labels = {
        'hugin-job': "1",
        'hugin-job-name': f'{container_name}'
    }
    template.template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels)
    )

    tolerations = []
    env = []
    if args.environment:
        for env_spec in args.environment:
            env_name, env_value = env_spec.split("=", 1)
            env.append(client.V1EnvVar(name=env_name, value=env_value))

    container_args = dict(
        name=f"container-{container_name}",
        image=container_image,
        env=env,
    )

    if args.gpu:
        tolerations.append(client.V1Toleration(
            key='nvidia.com/gpu', operator='Exists', effect='NoSchedule'))
        container_args['resources'] = client.V1ResourceRequirements(limits={"nvidia.com/gpu": 1})
    if command or args.command:
        container_args['command'] = command if command else args.command

    container = client.V1Container(**container_args)
    pull_secrets = []
    if args.pull_secret is not None:
        pull_secrets.append(client.V1LocalObjectReference(name=args.pull_secret))
    pod_args = dict(containers=[container],
                    restart_policy='Never',
                    image_pull_secrets=pull_secrets)


    if tolerations:
        pod_args['tolerations'] = tolerations

    if args.node_selector is not None:
        parts = args.node_selector.split("=", 1)
        if len(parts) == 2:
            affinity = client.V1Affinity(
                node_affinity=client.V1NodeAffinity(
                    required_during_scheduling_ignored_during_execution=client.V1NodeSelector(
                        node_selector_terms=[client.V1NodeSelectorTerm(
                            match_expressions=[client.V1NodeSelectorRequirement(
                                key=parts[0], operator='In', values=[parts[1]])]
                        )]
                    )
                )
            )
            pod_args['affinity'] = affinity

    template.template.spec = client.V1PodSpec(**pod_args)
    body.spec = client.V1JobSpec(ttl_seconds_after_finished=1800, template=template.template)
    try:
        api_response = batch_v1.create_namespaced_job("default", body, pretty=True)
        #print (api_response)
    except client.exceptions.ApiException as e:
        logging.critical(f"Failed to start job: {e.reason}")
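
A hedged usage sketch for submit_job(); the module-level batch_v1 client and the argument object are assumptions that mirror the attribute names the function reads:

import logging
from types import SimpleNamespace
from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()  # submit_job() expects this module-level client

# Illustrative arguments only.
args = SimpleNamespace(
    container="busybox:1.36",
    name="hugin-demo",
    environment=["MODE=train"],
    gpu=False,
    command=["echo", "hello"],
    pull_secret=None,
    node_selector=None,
)
submit_job(args)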
Пример #26
0
def template(context):
    """
    handle yml env
    """
    name = context.get("name")
    version = context.get("version")
    labels = {
        "app": name,
        "version": "v1"
    } if not version else {
        # drop a trailing "-v2" suffix (str.strip removes characters, not a suffix)
        "app": name[:-len("-v2")] if name.endswith("-v2") else name,
        "version": version
    }
    image_tag = context["name"].split('-')[1]
    image = context["image_namespace"] + image_tag + ":" + context[
        "image_branch"]
    args = [arg for arg in context["args"]] if context.get("args") else None
    limits, requests = context["resources"]["limits"], context["resources"][
        "requests"]
    replicas = context.get("replicas", 1)
    workingDir = context["workingDir"]
    if name == "backend-logproxy":
        annotations = {"sidecar.istio.io/inject": "false"}
    else:
        annotations = {"traffic.sidecar.istio.io/excludeOutboundPorts": "6379"}
    """
    handle cmdb env
    """
    filename = "env_" + name.split("-")[1] + ".yml"
    env = handle_env("/tmp/{}".format(filename))
    """
    k8s yaml 组件模块
    """

    # Fetch the directory structure from the "configmap" directory of the svn branch
    parentDir, subdir = handle_configmap("configmap")
    volumemounts = [
        client.V1VolumeMount(mount_path="/{}".format(parentDir),
                             name="mainfiles")
    ]
    volumes = [
        client.V1Volume(
            name="mainfiles",
            config_map=client.V1ConfigMapVolumeSource(name="mainfiles"))
    ]

    for dir in subdir:
        volumemounts.append(
            client.V1VolumeMount(mount_path="/{}/{}".format(parentDir, dir),
                                 name=dir))
        volumes.append(
            client.V1Volume(
                name=dir, config_map=client.V1ConfigMapVolumeSource(name=dir)))

    if name.startswith("frontend-dispatch"):
        containers = [
            client.V1Container(
                name=name,
                image=image,
                env=env,
                args=args,
                volume_mounts=volumemounts,
                image_pull_policy="Always",
                lifecycle=client.V1Lifecycle(pre_stop=client.V1Handler(
                    _exec=client.V1ExecAction(
                        command=["nginx", "-s", "quit"]))),
                readiness_probe=client.V1Probe(_exec=client.V1ExecAction(
                    command=['cat', '/tmp/container_ready']),
                                               initial_delay_seconds=10,
                                               period_seconds=5),
                resources=client.V1ResourceRequirements(limits=limits,
                                                        requests=requests),
                security_context=client.V1SecurityContext(privileged=True),
                working_dir=workingDir,
            )
        ]
    else:
        containers = [
            client.V1Container(
                name=name,
                image=image,
                env=env,
                args=args,
                volume_mounts=volumemounts,
                image_pull_policy="Always",
                readiness_probe=client.V1Probe(_exec=client.V1ExecAction(
                    command=['cat', '/tmp/container_ready']),
                                               initial_delay_seconds=10,
                                               period_seconds=5),
                resources=client.V1ResourceRequirements(limits=limits,
                                                        requests=requests),
                security_context=client.V1SecurityContext(privileged=True),
                working_dir=workingDir,
            )
        ]

    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels, annotations=annotations),
        spec=client.V1PodSpec(
            containers=containers,
            dns_policy="ClusterFirst",
            image_pull_secrets=[
                client.V1LocalObjectReference(name="image-pull-secret")
            ],
            restart_policy="Always",
            volumes=volumes))

    spec = client.V1DeploymentSpec(
        replicas=replicas,
        selector=client.V1LabelSelector(match_labels=labels),
        template=template,
        strategy=client.V1DeploymentStrategy(
            rolling_update=client.V1RollingUpdateDeployment(
                max_surge=1, max_unavailable='25%'),
            type="RollingUpdate",
        ),
    )

    return client.V1Deployment(api_version="apps/v1",
                               kind="Deployment",
                               metadata=client.V1ObjectMeta(name=name,
                                                            labels=labels),
                               spec=spec)
Пример #27
0
    def _create_pod(self,
                    image,
                    pod_name,
                    job_name,
                    port=80,
                    cmd_string=None,
                    volumes=[]):
        """ Create a kubernetes pod for the job.
        Args:
              - image (string) : Docker image to launch
              - pod_name (string) : Name of the pod
              - job_name (string) : App label
        KWargs:
             - port (integer) : Container port
        Returns:
              - None
        """

        security_context = None
        if self.user_id and self.group_id:
            security_context = client.V1SecurityContext(
                run_as_group=self.group_id,
                run_as_user=self.user_id,
                run_as_non_root=self.run_as_non_root)

        # Create the environment variables and command to initiate IPP
        environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")

        launch_args = ["-c", "{0};".format(cmd_string)]

        volume_mounts = []
        # Create mount paths for the volumes
        for volume in volumes:
            volume_mounts.append(
                client.V1VolumeMount(mount_path=volume[1], name=volume[0]))
        resources = client.V1ResourceRequirements(limits={
            'cpu': str(self.max_cpu),
            'memory': self.max_mem
        },
                                                  requests={
                                                      'cpu':
                                                      str(self.init_cpu),
                                                      'memory': self.init_mem
                                                  })
        # Configure Pod template container
        container = client.V1Container(
            name=pod_name,
            image=image,
            resources=resources,
            ports=[client.V1ContainerPort(container_port=port)],
            volume_mounts=volume_mounts,
            command=['/bin/bash'],
            args=launch_args,
            env=[environment_vars],
            security_context=security_context)

        # Create a secret to enable pulling images from secure repositories
        secret = None
        if self.secret:
            secret = client.V1LocalObjectReference(name=self.secret)

        # Create list of volumes from (pvc, mount) tuples
        volume_defs = []
        for volume in volumes:
            volume_defs.append(
                client.V1Volume(
                    name=volume[0],
                    persistent_volume_claim=client.
                    V1PersistentVolumeClaimVolumeSource(claim_name=volume[0])))

        metadata = client.V1ObjectMeta(name=pod_name, labels={"app": job_name})
        spec = client.V1PodSpec(containers=[container],
                                image_pull_secrets=[secret] if secret else None,
                                volumes=volume_defs)

        pod = client.V1Pod(spec=spec, metadata=metadata)
        api_response = self.kube_client.create_namespaced_pod(
            namespace=self.namespace, body=pod)
        logger.debug("Pod created. status='{0}'".format(
            str(api_response.status)))
Пример #28
0
 def export_deployment(self):
     # Configure the Pod template container
     volume_mounts = []
     volume_mounts.append(
         client.V1VolumeMount(mount_path='/opt/logs', name='logs'))
     if self.dm_name == 'launch':
         volume_mounts.append(
             client.V1VolumeMount(mount_path='/opt/%s/conf' % self.dm_name,
                                  name=self.dm_name))
     container = client.V1Container(
         name=self.dm_name,
         image=self.image,
         ports=[
             client.V1ContainerPort(container_port=int(port))
             for port in self.container_port
         ],
         image_pull_policy='Always',
         env=[
             client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
             client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8')
         ],
         resources=client.V1ResourceRequirements(limits=self.re_limits,
                                                 requests=self.re_requests),
         volume_mounts=volume_mounts,
         liveness_probe=client.V1Probe(
             initial_delay_seconds=30,
             tcp_socket=client.V1TCPSocketAction(
                 port=int(self.container_port[0]))),
         readiness_probe=client.V1Probe(
             initial_delay_seconds=30,
             tcp_socket=client.V1TCPSocketAction(
                 port=int(self.container_port[0]))))
     # Create and configure a spec section
     secrets = client.V1LocalObjectReference('registrysecret')
     volumes = []
     volume = client.V1Volume(
         name='logs',
         host_path=client.V1HostPathVolumeSource(path='/opt/logs'))
     volumes.append(volume)
     template = client.V1PodTemplateSpec(
         metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
         spec=client.V1PodSpec(
             containers=[container],
             image_pull_secrets=[secrets],
             volumes=volumes,
             affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                 preferred_during_scheduling_ignored_during_execution=[
                     client.V1PreferredSchedulingTerm(
                         preference=client.V1NodeSelectorTerm(
                             match_expressions=[
                                 client.V1NodeSelectorRequirement(
                                     key='project',
                                     operator='In',
                                     values=['moji'])
                             ]),
                         weight=30),
                     client.V1PreferredSchedulingTerm(
                         preference=client.V1NodeSelectorTerm(
                             match_expressions=[
                                 client.V1NodeSelectorRequirement(
                                     key='deploy',
                                     operator='In',
                                     values=[self.dm_name])
                             ]),
                         weight=70)
                 ]))))
     selector = client.V1LabelSelector(
         match_labels={"project": self.dm_name})
     # Create the specification of deployment
     spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
         self.replicas),
                                                   template=template,
                                                   selector=selector,
                                                   min_ready_seconds=3)
     # Instantiate the deployment object
     deployment = client.ExtensionsV1beta1Deployment(
         api_version="extensions/v1beta1",
         kind="Deployment",
         metadata=client.V1ObjectMeta(name=self.dm_name),
         spec=spec)
     return deployment
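
A sketch of submitting the rendered object; ExtensionsV1beta1Api is only available in older kubernetes clients that still ship the extensions/v1beta1 group, and `exporter` stands in for an instance of the class above:

from kubernetes import client, config

config.load_kube_config()
deployment = exporter.export_deployment()  # instance of the class above (assumption)
client.ExtensionsV1beta1Api().create_namespaced_deployment(
    namespace="default", body=deployment)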
Пример #29
0
def create_pod_template_spec(data):
    ports = []

    for port in data["ports"].split(','):
        portDefinition = client.V1ContainerPort(container_port=int(port))
        ports.append(portDefinition)

    envs = []
    if "environments" in data:
        envs_array = data["environments"].splitlines()

        tmp_envs = dict(s.split('=', 1) for s in envs_array)

        for key in tmp_envs:
            envs.append(client.V1EnvVar(name=key, value=tmp_envs[key]))

    if "environments_secrets" in data:
        envs_array = data["environments_secrets"].splitlines()
        tmp_envs = dict(s.split('=', 1) for s in envs_array)

        for key in tmp_envs:

            if (":" in tmp_envs[key]):
                # passing secret env
                value = tmp_envs[key]
                secrets = value.split(':')
                secrect_key = secrets[1]
                secrect_name = secrets[0]

                envs.append(
                    client.V1EnvVar(
                        name=key,
                        value="",
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                key=secrect_key, name=secrect_name))))

    container = client.V1Container(name=data["container_name"],
                                   image=data["image"],
                                   ports=ports,
                                   env=envs)

    if "volume_mounts" in data:
        volume_mounts_data = yaml.full_load(data["volume_mounts"])
        volume_mounts = []

        if (isinstance(volume_mounts_data, list)):
            for volume_mount_data in volume_mounts_data:
                volume_mount = create_volume_mount(volume_mount_data)

                if volume_mount:
                    volume_mounts.append(volume_mount)
        else:
            volume_mount = create_volume_mount(volume_mounts_data)

            if volume_mount:
                volume_mounts.append(volume_mount)

        container.volume_mounts = volume_mounts

    if "liveness_probe" in data:
        container.liveness_probe = load_liveness_readiness_probe(
            data["liveness_probe"])

    if "readiness_probe" in data:
        container.readiness_probe = load_liveness_readiness_probe(
            data["readiness_probe"])

    if "container_command" in data:
        container.command = data["container_command"].split(' ')

    if "container_args" in data:
        args_array = data["container_args"].splitlines()
        container.args = args_array

    if "resources_requests" in data:
        resources_array = data["resources_requests"].split(",")
        tmp_resources = dict(s.split('=', 1) for s in resources_array)
        container.resources = client.V1ResourceRequirements(
            requests=tmp_resources)

    template_spec = client.V1PodSpec(containers=[container])

    if "image_pull_secrets" in data:
        images_array = data["image_pull_secrets"].split(",")
        images = []
        for image in images_array:
            images.append(client.V1LocalObjectReference(name=image))

        template_spec.image_pull_secrets = images

    if "volumes" in data:
        volumes_data = yaml.full_load(data["volumes"])
        volumes = []

        if (isinstance(volumes_data, list)):
            for volume_data in volumes_data:
                volume = create_volume(volume_data)

                if volume:
                    volumes.append(volume)
        else:
            volume = create_volume(volumes_data)

            if volume:
                volumes.append(volume)

        template_spec.volumes = volumes

    return template_spec
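
Despite its name, the helper returns a V1PodSpec rather than a full template. A sketch of wrapping the result, with illustrative data keys only:

from kubernetes import client

data = {
    "container_name": "web",
    "image": "nginx:1.25",
    "ports": "80",
}
pod_spec = create_pod_template_spec(data)
template = client.V1PodTemplateSpec(
    metadata=client.V1ObjectMeta(labels={"app": "web"}),
    spec=pod_spec,
)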
Пример #30
0
 def export_deployment(self):
     # Configure the Pod template container
     volume_mounts = []
     containers = []
     volumes = []
     ports = []
     liveness_probe = None
     readiness_probe = None
     volume_mounts.append(
         client.V1VolumeMount(mount_path='/docker/logs', name='logs'))
     volumes.append(
         client.V1Volume(name='logs',
                         host_path=client.V1HostPathVolumeSource(
                             path='/opt/logs', type='DirectoryOrCreate')))
     if self.mounts:
         for path in self.mounts:
             volume_mounts.append(
                 client.V1VolumeMount(mount_path=path,
                                      name=self.mounts[path]))
             volumes.append(
                 client.V1Volume(name=self.mounts[path],
                                 host_path=client.V1HostPathVolumeSource(
                                     path=path, type='DirectoryOrCreate')))
     if self.container_port:
         ports = [
             client.V1ContainerPort(container_port=int(port))
             for port in self.container_port
         ]
         liveness_probe = client.V1Probe(
             initial_delay_seconds=15,
             tcp_socket=client.V1TCPSocketAction(
                 port=int(self.container_port[0])))
         readiness_probe = client.V1Probe(
             initial_delay_seconds=15,
             tcp_socket=client.V1TCPSocketAction(
                 port=int(self.container_port[0])))
         if self.healthcheck:
             liveness_probe = client.V1Probe(
                 initial_delay_seconds=15,
                 http_get=client.V1HTTPGetAction(
                     path=self.healthcheck,
                     port=int(self.container_port[0])))
             readiness_probe = client.V1Probe(
                 initial_delay_seconds=15,
                 http_get=client.V1HTTPGetAction(
                     path=self.healthcheck,
                     port=int(self.container_port[0])))
     Env = [
         client.V1EnvVar(name='LANG', value='en_US.UTF-8'),
         client.V1EnvVar(name='LC_ALL', value='en_US.UTF-8'),
         client.V1EnvVar(name='POD_NAME',
                         value_from=client.V1EnvVarSource(
                             field_ref=client.V1ObjectFieldSelector(
                                 field_path='metadata.name'))),
         client.V1EnvVar(name='POD_IP',
                         value_from=client.V1EnvVarSource(
                             field_ref=client.V1ObjectFieldSelector(
                                 field_path='status.podIP'))),
     ]
     container = client.V1Container(name=self.dm_name,
                                    image=self.image,
                                    ports=ports,
                                    image_pull_policy='Always',
                                    env=Env,
                                    resources=client.V1ResourceRequirements(
                                        limits=self.re_limits,
                                        requests=self.re_requests),
                                    volume_mounts=volume_mounts)
     if liveness_probe and readiness_probe:
         container = client.V1Container(
             name=self.dm_name,
             image=self.image,
             ports=ports,
             image_pull_policy='Always',
             env=Env,
             resources=client.V1ResourceRequirements(
                 limits=self.re_limits, requests=self.re_requests),
             volume_mounts=volume_mounts,
             liveness_probe=liveness_probe,
             readiness_probe=readiness_probe)
     containers.append(container)
     if self.sidecar:
         sidecar_container = client.V1Container(
             name='sidecar-%s' % self.dm_name,
             image=self.sidecar,
             image_pull_policy='Always',
             env=Env,
             resources=client.V1ResourceRequirements(
                 limits=self.re_limits, requests=self.re_requests),
             volume_mounts=volume_mounts)
         containers.append(sidecar_container)
     # Create and configure a spec section
     secrets = client.V1LocalObjectReference('registrysecret')
     preference_key = self.dm_name
     project_values = ['xxxx']
     host_aliases = []
     db_docker_hosts = db_op.docker_hosts
     values = db_docker_hosts.query.with_entities(
         db_docker_hosts.ip, db_docker_hosts.hostname).filter(
             and_(db_docker_hosts.deployment == self.dm_name,
                  db_docker_hosts.context == self.context)).all()
     db_op.DB.session.remove()
     if values:
         ips = []
         for value in values:
             try:
                 ip, hostname = value
                 key = "op_docker_hosts_%s" % ip
                 Redis.lpush(key, hostname)
                 ips.append(ip)
             except Exception as e:
                 logging.error(e)
         for ip in set(ips):
             try:
                 key = "op_docker_hosts_%s" % ip
                 if Redis.exists(key):
                     hostnames = Redis.lrange(key, 0, -1)
                     if hostnames:
                         host_aliases.append(
                             client.V1HostAlias(hostnames=hostnames, ip=ip))
                 Redis.delete(key)
             except Exception as e:
                 logging.error(e)
     if self.labels:
         if 'deploy' in self.labels:
             preference_key = self.labels['deploy']
         if 'project' in self.labels:
             project_values = [self.labels['project']]
     template = client.V1PodTemplateSpec(
         metadata=client.V1ObjectMeta(labels={"project": self.dm_name}),
         spec=client.V1PodSpec(
             containers=containers,
             image_pull_secrets=[secrets],
             volumes=volumes,
             host_aliases=host_aliases,
             affinity=client.V1Affinity(node_affinity=client.V1NodeAffinity(
                 preferred_during_scheduling_ignored_during_execution=[
                     client.V1PreferredSchedulingTerm(
                         preference=client.V1NodeSelectorTerm(
                             match_expressions=[
                                 client.V1NodeSelectorRequirement(
                                     key=preference_key,
                                     operator='In',
                                     values=['mark'])
                             ]),
                         weight=100)
                 ],
                 required_during_scheduling_ignored_during_execution=client.
                 V1NodeSelector(node_selector_terms=[
                     client.V1NodeSelectorTerm(match_expressions=[
                         client.V1NodeSelectorRequirement(
                             key='project',
                             operator='In',
                             values=project_values)
                     ])
                 ])))))
     selector = client.V1LabelSelector(
         match_labels={"project": self.dm_name})
     # Create the specification of deployment
     spec = client.ExtensionsV1beta1DeploymentSpec(replicas=int(
         self.replicas),
                                                   template=template,
                                                   selector=selector,
                                                   min_ready_seconds=3)
     # Instantiate the deployment object
     deployment = client.ExtensionsV1beta1Deployment(
         api_version="extensions/v1beta1",
         kind="Deployment",
         metadata=client.V1ObjectMeta(name=self.dm_name),
         spec=spec)
     return deployment